Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h           |   23
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc              |  329
-rw-r--r--  deps/v8/src/x64/assembler-x64.h               |  761
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc               |  340
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc             |  867
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h              |   22
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                |   36
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                  |   32
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc            |   86
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                 |   60
-rw-r--r--  deps/v8/src/x64/frames-x64.h                  |    2
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc           |  751
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                     |  112
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc        | 1097
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h         |    7
-rw-r--r--  deps/v8/src/x64/lithium-gap-resolver-x64.cc   |   12
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc                |  435
-rw-r--r--  deps/v8/src/x64/lithium-x64.h                 |  259
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc        |  790
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h         |   43
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc |  222
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc             |  248
22 files changed, 3466 insertions, 3068 deletions
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 073fcbe8e..a559b6275 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -205,12 +205,15 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
}
-Address Assembler::target_address_at(Address pc) {
+Address Assembler::target_address_at(Address pc,
+ ConstantPoolArray* constant_pool) {
return Memory::int32_at(pc) + pc + 4;
}
-void Assembler::set_target_address_at(Address pc, Address target) {
+void Assembler::set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
CPU::FlushICache(pc, sizeof(int32_t));
}
@@ -255,7 +258,7 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- return Assembler::target_address_at(pc_);
+ return Assembler::target_address_at(pc_, host_);
}
@@ -267,6 +270,12 @@ Address RelocInfo::target_address_address() {
}
+Address RelocInfo::constant_pool_entry_address() {
+ UNREACHABLE();
+ return NULL;
+}
+
+
int RelocInfo::target_address_size() {
if (IsCodedSpecially()) {
return Assembler::kSpecialTargetSize;
@@ -278,7 +287,7 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, host_, target);
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -369,7 +378,7 @@ void RelocInfo::WipeOut() {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
- Assembler::set_target_address_at(pc_, pc_ + sizeof(int32_t));
+ Assembler::set_target_address_at(pc_, host_, pc_ + sizeof(int32_t));
} else {
UNREACHABLE();
}
@@ -408,14 +417,14 @@ Code* RelocInfo::code_age_stub() {
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
ASSERT(*pc_ == kCallOpcode);
return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
+ Assembler::target_address_at(pc_ + 1, host_));
}
void RelocInfo::set_code_age_stub(Code* stub) {
ASSERT(*pc_ == kCallOpcode);
ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
+ Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start());
}
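
Editor's note on the accessors above: x64 call and jump targets are encoded as a signed 32-bit displacement relative to the end of the 4-byte operand, which is why target_address_at returns Memory::int32_at(pc) + pc + 4 and the setter stores target - pc - 4. A minimal standalone sketch of that rel32 arithmetic (the helper names and raw-pointer types here are illustrative, not V8's):

#include <cstdint>
#include <cstring>

typedef uint8_t* Address;  // stand-in for V8's Address

// target_address_at: target = disp32(pc) + pc + 4
static Address target_at(Address pc) {
  int32_t disp;
  std::memcpy(&disp, pc, sizeof(disp));  // disp32 stored at pc
  return pc + 4 + disp;
}

// set_target_address_at: disp32(pc) = target - pc - 4
static void set_target_at(Address pc, Address target) {
  int32_t disp = static_cast<int32_t>(target - pc - 4);
  std::memcpy(pc, &disp, sizeof(disp));
  // The real code then calls CPU::FlushICache(pc, sizeof(int32_t)).
}
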
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index e7c20bb15..60383da01 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -110,7 +110,8 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movp(kScratchRegister, target, Assembler::RelocInfoNone());
+ patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
+ Assembler::RelocInfoNone());
patcher.masm()->call(kScratchRegister);
// Check that the size of the code generated is as expected.
@@ -750,6 +751,15 @@ void Assembler::bts(const Operand& dst, Register src) {
}
+void Assembler::bsrl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -934,33 +944,17 @@ void Assembler::cqo() {
}
-void Assembler::decq(Register dst) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_modrm(0x1, dst);
-}
-
-
-void Assembler::decq(const Operand& dst) {
+void Assembler::emit_dec(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(1, dst);
-}
-
-
-void Assembler::decl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x1, dst);
}
-void Assembler::decl(const Operand& dst) {
+void Assembler::emit_dec(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(1, dst);
}
@@ -999,84 +993,43 @@ void Assembler::hlt() {
}
-void Assembler::idivq(Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(src);
- emit(0xF7);
- emit_modrm(0x7, src);
-}
-
-
-void Assembler::idivl(Register src) {
+void Assembler::emit_idiv(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x7, src);
}
-void Assembler::imul(Register src) {
+void Assembler::emit_imul(Register src, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(src);
+ emit_rex(src, size);
emit(0xF7);
emit_modrm(0x5, src);
}
-void Assembler::imul(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_modrm(dst, src);
-}
-
-
-void Assembler::imul(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x0F);
- emit(0xAF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::imul(Register dst, Register src, Immediate imm) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- if (is_int8(imm.value_)) {
- emit(0x6B);
- emit_modrm(dst, src);
- emit(imm.value_);
- } else {
- emit(0x69);
- emit_modrm(dst, src);
- emitl(imm.value_);
- }
-}
-
-
-void Assembler::imull(Register dst, Register src) {
+void Assembler::emit_imul(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_modrm(dst, src);
}
-void Assembler::imull(Register dst, const Operand& src) {
+void Assembler::emit_imul(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x0F);
emit(0xAF);
emit_operand(dst, src);
}
-void Assembler::imull(Register dst, Register src, Immediate imm) {
+void Assembler::emit_imul(Register dst, Register src, Immediate imm, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
if (is_int8(imm.value_)) {
emit(0x6B);
emit_modrm(dst, src);
@@ -1089,38 +1042,22 @@ void Assembler::imull(Register dst, Register src, Immediate imm) {
}
-void Assembler::incq(Register dst) {
+void Assembler::emit_inc(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_modrm(0x0, dst);
}
-void Assembler::incq(const Operand& dst) {
+void Assembler::emit_inc(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xFF);
- emit_operand(0, dst);
-}
-
-
-void Assembler::incl(const Operand& dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xFF);
emit_operand(0, dst);
}
-void Assembler::incl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xFF);
- emit_modrm(0, dst);
-}
-
-
void Assembler::int3() {
EnsureSpace ensure_space(this);
emit(0xCC);
@@ -1287,17 +1224,9 @@ void Assembler::jmp(const Operand& src) {
}
-void Assembler::lea(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_rex_64(dst, src);
- emit(0x8D);
- emit_operand(dst, src);
-}
-
-
-void Assembler::leal(Register dst, const Operand& src) {
+void Assembler::emit_lea(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x8D);
emit_operand(dst, src);
}
@@ -1536,7 +1465,7 @@ void Assembler::movsxlq(Register dst, const Operand& src) {
}
-void Assembler::movzxbq(Register dst, const Operand& src) {
+void Assembler::emit_movzxb(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
// 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
// there is no need to make this a 64 bit operation.
@@ -1547,26 +1476,10 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
}
-void Assembler::movzxbl(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwq(Register dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxwl(Register dst, const Operand& src) {
+void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1574,8 +1487,10 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
-void Assembler::movzxwl(Register dst, Register src) {
+void Assembler::emit_movzxw(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
+ // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
+ // there is no need to make this a 64 bit operation.
emit_optional_rex_32(dst, src);
emit(0x0F);
emit(0xB7);
@@ -1598,17 +1513,10 @@ void Assembler::repmovsw() {
}
-void Assembler::repmovsl() {
+void Assembler::emit_repmovs(int size) {
EnsureSpace ensure_space(this);
emit(0xF3);
- emit(0xA5);
-}
-
-
-void Assembler::repmovsq() {
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_rex_64();
+ emit_rex(size);
emit(0xA5);
}
@@ -1621,23 +1529,15 @@ void Assembler::mul(Register src) {
}
-void Assembler::neg(Register dst) {
+void Assembler::emit_neg(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0x3, dst);
-}
-
-
-void Assembler::negl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x3, dst);
}
-void Assembler::neg(const Operand& dst) {
+void Assembler::emit_neg(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
emit_rex_64(dst);
emit(0xF7);
@@ -1651,30 +1551,22 @@ void Assembler::nop() {
}
-void Assembler::not_(Register dst) {
+void Assembler::emit_not(Register dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_modrm(0x2, dst);
}
-void Assembler::not_(const Operand& dst) {
+void Assembler::emit_not(const Operand& dst, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(dst);
+ emit_rex(dst, size);
emit(0xF7);
emit_operand(2, dst);
}
-void Assembler::notl(Register dst) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst);
- emit(0xF7);
- emit_modrm(0x2, dst);
-}
-
-
void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
@@ -1752,14 +1644,14 @@ void Assembler::Nop(int n) {
}
-void Assembler::pop(Register dst) {
+void Assembler::popq(Register dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x58 | dst.low_bits());
}
-void Assembler::pop(const Operand& dst) {
+void Assembler::popq(const Operand& dst) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(dst);
emit(0x8F);
@@ -1773,14 +1665,14 @@ void Assembler::popfq() {
}
-void Assembler::push(Register src) {
+void Assembler::pushq(Register src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0x50 | src.low_bits());
}
-void Assembler::push(const Operand& src) {
+void Assembler::pushq(const Operand& src) {
EnsureSpace ensure_space(this);
emit_optional_rex_32(src);
emit(0xFF);
@@ -1788,7 +1680,7 @@ void Assembler::push(const Operand& src) {
}
-void Assembler::push(Immediate value) {
+void Assembler::pushq(Immediate value) {
EnsureSpace ensure_space(this);
if (is_int8(value.value_)) {
emit(0x6A);
@@ -1800,7 +1692,7 @@ void Assembler::push(Immediate value) {
}
-void Assembler::push_imm32(int32_t imm32) {
+void Assembler::pushq_imm32(int32_t imm32) {
EnsureSpace ensure_space(this);
emit(0x68);
emitl(imm32);
@@ -1860,36 +1752,18 @@ void Assembler::shrd(Register dst, Register src) {
}
-void Assembler::xchgq(Register dst, Register src) {
+void Assembler::emit_xchg(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
Register other = src.is(rax) ? dst : src;
- emit_rex_64(other);
+ emit_rex(other, size);
emit(0x90 | other.low_bits());
} else if (dst.low_bits() == 4) {
- emit_rex_64(dst, src);
- emit(0x87);
- emit_modrm(dst, src);
- } else {
- emit_rex_64(src, dst);
- emit(0x87);
- emit_modrm(src, dst);
- }
-}
-
-
-void Assembler::xchgl(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
- Register other = src.is(rax) ? dst : src;
- emit_optional_rex_32(other);
- emit(0x90 | other.low_bits());
- } else if (dst.low_bits() == 4) {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x87);
emit_modrm(dst, src);
} else {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x87);
emit_modrm(src, dst);
}
@@ -1977,21 +1851,21 @@ void Assembler::testb(const Operand& op, Register reg) {
}
-void Assembler::testl(Register dst, Register src) {
+void Assembler::emit_test(Register dst, Register src, int size) {
EnsureSpace ensure_space(this);
if (src.low_bits() == 4) {
- emit_optional_rex_32(src, dst);
+ emit_rex(src, dst, size);
emit(0x85);
emit_modrm(src, dst);
} else {
- emit_optional_rex_32(dst, src);
+ emit_rex(dst, src, size);
emit(0x85);
emit_modrm(dst, src);
}
}
-void Assembler::testl(Register reg, Immediate mask) {
+void Assembler::emit_test(Register reg, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(reg, mask);
@@ -1999,10 +1873,11 @@ void Assembler::testl(Register reg, Immediate mask) {
}
EnsureSpace ensure_space(this);
if (reg.is(rax)) {
+ emit_rex(rax, size);
emit(0xA9);
emit(mask);
} else {
- emit_optional_rex_32(rax, reg);
+ emit_rex(reg, size);
emit(0xF7);
emit_modrm(0x0, reg);
emit(mask);
@@ -2010,69 +1885,28 @@ void Assembler::testl(Register reg, Immediate mask) {
}
-void Assembler::testl(const Operand& op, Immediate mask) {
+void Assembler::emit_test(const Operand& op, Immediate mask, int size) {
// testl with a mask that fits in the low byte is exactly testb.
if (is_uint8(mask.value_)) {
testb(op, mask);
return;
}
EnsureSpace ensure_space(this);
- emit_optional_rex_32(rax, op);
+ emit_rex(rax, op, size);
emit(0xF7);
emit_operand(rax, op); // Operation code 0
emit(mask);
}
-void Assembler::testl(const Operand& op, Register reg) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(reg, op);
- emit(0x85);
- emit_operand(reg, op);
-}
-
-
-void Assembler::testq(const Operand& op, Register reg) {
+void Assembler::emit_test(const Operand& op, Register reg, int size) {
EnsureSpace ensure_space(this);
- emit_rex_64(reg, op);
+ emit_rex(reg, op, size);
emit(0x85);
emit_operand(reg, op);
}
-void Assembler::testq(Register dst, Register src) {
- EnsureSpace ensure_space(this);
- if (src.low_bits() == 4) {
- emit_rex_64(src, dst);
- emit(0x85);
- emit_modrm(src, dst);
- } else {
- emit_rex_64(dst, src);
- emit(0x85);
- emit_modrm(dst, src);
- }
-}
-
-
-void Assembler::testq(Register dst, Immediate mask) {
- if (is_uint8(mask.value_)) {
- testb(dst, mask);
- return;
- }
- EnsureSpace ensure_space(this);
- if (dst.is(rax)) {
- emit_rex_64();
- emit(0xA9);
- emit(mask);
- } else {
- emit_rex_64(dst);
- emit(0xF7);
- emit_modrm(0, dst);
- emit(mask);
- }
-}
-
-
// FPU instructions.
@@ -2789,6 +2623,16 @@ void Assembler::movss(const Operand& src, XMMRegister dst) {
}
+void Assembler::psllq(XMMRegister reg, byte imm8) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit(0x0F);
+ emit(0x73);
+ emit_sse_operand(rsi, reg); // rsi == 6
+ emit(imm8);
+}
+
+
void Assembler::cvttss2si(Register dst, const Operand& src) {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -3172,6 +3016,19 @@ void Assembler::RecordComment(const char* msg, bool force) {
}
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE |
@@ -3185,6 +3042,12 @@ bool RelocInfo::IsCodedSpecially() {
return (1 << rmode_) & kApplyMask;
}
+
+bool RelocInfo::IsInConstantPool() {
+ return false;
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
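
Aside on the psllq encoding added above (66 0F 73 /6 ib): for this instruction group the reg field of the ModR/M byte is an opcode extension, not a register, which is why the code passes rsi (register code 6) to emit_sse_operand. A sketch of the byte layout, with invented helper names:

#include <cstdint>
#include <cstdio>

// ModR/M = mod(2 bits) | reg(3) | rm(3); here reg carries the /6 extension.
static uint8_t modrm(int mod, int reg, int rm) {
  return static_cast<uint8_t>((mod << 6) | ((reg & 7) << 3) | (rm & 7));
}

int main() {
  // psllq xmm2, 32  ->  66 0F 73 F2 20
  uint8_t buf[5] = {0x66, 0x0F, 0x73, modrm(3, 6, 2), 32};
  for (uint8_t b : buf) std::printf("%02X ", b);
  std::printf("\n");
  return 0;
}
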
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index ef513d1e5..d47ca32e0 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -44,27 +44,6 @@ namespace internal {
// Utility functions
-// Test whether a 64-bit value is in a specific range.
-inline bool is_uint32(int64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return static_cast<uint64_t>(x) <= kMaxUInt32;
-}
-
-inline bool is_int32(int64_t x) {
- static const int64_t kMinInt32 = -V8_INT64_C(0x80000000);
- return is_uint32(x - kMinInt32);
-}
-
-inline bool uint_is_int32(uint64_t x) {
- static const uint64_t kMaxInt32 = V8_UINT64_C(0x7fffffff);
- return x <= kMaxInt32;
-}
-
-inline bool is_uint32(uint64_t x) {
- static const uint64_t kMaxUInt32 = V8_UINT64_C(0xffffffff);
- return x <= kMaxUInt32;
-}
-
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -530,8 +509,27 @@ class CpuFeatures : public AllStatic {
};
-#define ASSEMBLER_INSTRUCTION_LIST(V) \
- V(mov)
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+ V(add) \
+ V(and) \
+ V(cmp) \
+ V(dec) \
+ V(idiv) \
+ V(imul) \
+ V(inc) \
+ V(lea) \
+ V(mov) \
+ V(movzxb) \
+ V(movzxw) \
+ V(neg) \
+ V(not) \
+ V(or) \
+ V(repmovs) \
+ V(sbb) \
+ V(sub) \
+ V(test) \
+ V(xchg) \
+ V(xor)
class Assembler : public AssemblerBase {
@@ -576,8 +574,21 @@ class Assembler : public AssemblerBase {
// the absolute address of the target.
// These functions convert between absolute Addresses of Code objects and
// the relative displacements stored in the code.
- static inline Address target_address_at(Address pc);
- static inline void set_target_address_at(Address pc, Address target);
+ static inline Address target_address_at(Address pc,
+ ConstantPoolArray* constant_pool);
+ static inline void set_target_address_at(Address pc,
+ ConstantPoolArray* constant_pool,
+ Address target);
+ static inline Address target_address_at(Address pc, Code* code) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ return target_address_at(pc, constant_pool);
+ }
+ static inline void set_target_address_at(Address pc,
+ Code* code,
+ Address target) {
+ ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ set_target_address_at(pc, constant_pool, target);
+ }
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@@ -586,8 +597,8 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the instruction on x64).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
- Address instruction_payload, Address target) {
- set_target_address_at(instruction_payload, target);
+ Address instruction_payload, Code* code, Address target) {
+ set_target_address_at(instruction_payload, code, target);
}
static inline RelocInfo::Mode RelocInfoNone() {
@@ -667,11 +678,24 @@ class Assembler : public AssemblerBase {
// - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
// - Instructions on 32-bit (doubleword) operands/registers use 'l'.
// - Instructions on 64-bit (quadword) operands/registers use 'q'.
- //
- // Some mnemonics, such as "and", are the same as C++ keywords.
- // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
+ // - Instructions on operands/registers with pointer size use 'p'.
#define DECLARE_INSTRUCTION(instruction) \
+ template<class P1> \
+ void instruction##p(P1 p1) { \
+ emit_##instruction(p1, kPointerSize); \
+ } \
+ \
+ template<class P1> \
+ void instruction##l(P1 p1) { \
+ emit_##instruction(p1, kInt32Size); \
+ } \
+ \
+ template<class P1> \
+ void instruction##q(P1 p1) { \
+ emit_##instruction(p1, kInt64Size); \
+ } \
+ \
template<class P1, class P2> \
void instruction##p(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kPointerSize); \
@@ -685,6 +709,21 @@ class Assembler : public AssemblerBase {
template<class P1, class P2> \
void instruction##q(P1 p1, P2 p2) { \
emit_##instruction(p1, p2, kInt64Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##p(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kPointerSize); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##l(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt32Size); \
+ } \
+ \
+ template<class P1, class P2, class P3> \
+ void instruction##q(P1 p1, P2 p2, P3 p3) { \
+ emit_##instruction(p1, p2, p3, kInt64Size); \
}
ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
#undef DECLARE_INSTRUCTION
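
To make the macro above concrete: for every mnemonic in ASSEMBLER_INSTRUCTION_LIST, DECLARE_INSTRUCTION stamps out p/l/q-suffixed entry points that forward to a single emit_ routine with an operand-size argument (kPointerSize is 8 on x64, so the 'p' forms alias the 'q' forms here). A simplified, self-contained sketch of the same idiom, with a stubbed emitter:

#include <cstdio>

static const int kInt32Size = 4;
static const int kInt64Size = 8;
static const int kPointerSize = 8;  // pointers are 64-bit on x64

struct Asm {
  // One emitter per operation; 'size' picks the REX.W / 32-bit path.
  void emit_add(int dst, int src, int size) {
    std::printf("add r%d, r%d  (%d-bit operands)\n", dst, src, size * 8);
  }
  // What DECLARE_INSTRUCTION(add) expands to, in spirit:
  void addp(int dst, int src) { emit_add(dst, src, kPointerSize); }
  void addl(int dst, int src) { emit_add(dst, src, kInt32Size); }
  void addq(int dst, int src) { emit_add(dst, src, kInt64Size); }
};

int main() {
  Asm a;
  a.addl(0, 1);  // 32-bit
  a.addq(0, 1);  // 64-bit
  a.addp(0, 1);  // pointer width, same as 64-bit on this target
  return 0;
}
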
@@ -701,15 +740,15 @@ class Assembler : public AssemblerBase {
void pushfq();
void popfq();
- void push(Immediate value);
+ void pushq(Immediate value);
// Push a 32 bit integer, and guarantee that it is actually pushed as a
// 32 bit value; the normal push will optimize the 8 bit case.
- void push_imm32(int32_t imm32);
- void push(Register src);
- void push(const Operand& src);
+ void pushq_imm32(int32_t imm32);
+ void pushq(Register src);
+ void pushq(const Operand& src);
- void pop(Register dst);
- void pop(const Operand& dst);
+ void popq(Register dst);
+ void popq(const Operand& dst);
void enter(Immediate size);
void leave();
@@ -741,18 +780,14 @@ class Assembler : public AssemblerBase {
void movsxwq(Register dst, const Operand& src);
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
- void movzxbq(Register dst, const Operand& src);
- void movzxbl(Register dst, const Operand& src);
- void movzxwq(Register dst, const Operand& src);
- void movzxwl(Register dst, const Operand& src);
- void movzxwl(Register dst, Register src);
// Repeated moves.
void repmovsb();
void repmovsw();
- void repmovsl();
- void repmovsq();
+ void repmovsp() { emit_repmovs(kPointerSize); }
+ void repmovsl() { emit_repmovs(kInt32Size); }
+ void repmovsq() { emit_repmovs(kInt64Size); }
// Instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
@@ -764,59 +799,6 @@ class Assembler : public AssemblerBase {
void cmovl(Condition cc, Register dst, Register src);
void cmovl(Condition cc, Register dst, const Operand& src);
- // Exchange two registers
- void xchgq(Register dst, Register src);
- void xchgl(Register dst, Register src);
-
- // Arithmetics
- void addl(Register dst, Register src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(Register dst, const Operand& src) {
- arithmetic_op_32(0x03, dst, src);
- }
-
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
- void addl(const Operand& dst, Register src) {
- arithmetic_op_32(0x01, src, dst);
- }
-
- void addq(Register dst, Register src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(Register dst, const Operand& src) {
- arithmetic_op(0x03, dst, src);
- }
-
- void addq(const Operand& dst, Register src) {
- arithmetic_op(0x01, src, dst);
- }
-
- void addq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void addq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x0, dst, src);
- }
-
- void sbbl(Register dst, Register src) {
- arithmetic_op_32(0x1b, dst, src);
- }
-
- void sbbq(Register dst, Register src) {
- arithmetic_op(0x1b, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -859,86 +841,10 @@ class Assembler : public AssemblerBase {
arithmetic_op_16(0x39, src, dst);
}
- void cmpl(Register dst, Register src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(Register dst, const Operand& src) {
- arithmetic_op_32(0x3B, dst, src);
- }
-
- void cmpl(const Operand& dst, Register src) {
- arithmetic_op_32(0x39, src, dst);
- }
-
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
- void cmpq(Register dst, Register src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(Register dst, const Operand& src) {
- arithmetic_op(0x3B, dst, src);
- }
-
- void cmpq(const Operand& dst, Register src) {
- arithmetic_op(0x39, src, dst);
- }
-
- void cmpq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void cmpq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x7, dst, src);
- }
-
- void and_(Register dst, Register src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(Register dst, const Operand& src) {
- arithmetic_op(0x23, dst, src);
- }
-
- void and_(const Operand& dst, Register src) {
- arithmetic_op(0x21, src, dst);
- }
-
- void and_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void and_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x4, dst, src);
- }
-
- void andl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x4, dst, src);
- }
-
- void andl(Register dst, Register src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
- void andl(Register dst, const Operand& src) {
- arithmetic_op_32(0x23, dst, src);
- }
-
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
- void decq(Register dst);
- void decq(const Operand& dst);
- void decl(Register dst);
- void decl(const Operand& dst);
void decb(Register dst);
void decb(const Operand& dst);
@@ -947,80 +853,9 @@ class Assembler : public AssemblerBase {
// Sign-extends eax into edx:eax.
void cdq();
- // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idivq(Register src);
- // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
- void idivl(Register src);
-
- // Signed multiply instructions.
- void imul(Register src); // rdx:rax = rax * src.
- void imul(Register dst, Register src); // dst = dst * src.
- void imul(Register dst, const Operand& src); // dst = dst * src.
- void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
- // Signed 32-bit multiply instructions.
- void imull(Register dst, Register src); // dst = dst * src.
- void imull(Register dst, const Operand& src); // dst = dst * src.
- void imull(Register dst, Register src, Immediate imm); // dst = src * imm.
-
- void incq(Register dst);
- void incq(const Operand& dst);
- void incl(Register dst);
- void incl(const Operand& dst);
-
- void lea(Register dst, const Operand& src);
- void leal(Register dst, const Operand& src);
-
// Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
- void neg(Register dst);
- void neg(const Operand& dst);
- void negl(Register dst);
-
- void not_(Register dst);
- void not_(const Operand& dst);
- void notl(Register dst);
-
- void or_(Register dst, Register src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, Register src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(Register dst, const Operand& src) {
- arithmetic_op(0x0B, dst, src);
- }
-
- void orl(Register dst, const Operand& src) {
- arithmetic_op_32(0x0B, dst, src);
- }
-
- void or_(const Operand& dst, Register src) {
- arithmetic_op(0x09, src, dst);
- }
-
- void orl(const Operand& dst, Register src) {
- arithmetic_op_32(0x09, src, dst);
- }
-
- void or_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
- void or_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x1, dst, src);
- }
-
- void orl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x1, dst, src);
- }
-
void rcl(Register dst, Immediate imm8) {
shift(dst, imm8, 0x2);
}
@@ -1112,46 +947,6 @@ class Assembler : public AssemblerBase {
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
- void subq(Register dst, Register src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(Register dst, const Operand& src) {
- arithmetic_op(0x2B, dst, src);
- }
-
- void subq(const Operand& dst, Register src) {
- arithmetic_op(0x29, src, dst);
- }
-
- void subq(Register dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subq(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x5, dst, src);
- }
-
- void subl(Register dst, Register src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(Register dst, const Operand& src) {
- arithmetic_op_32(0x2B, dst, src);
- }
-
- void subl(const Operand& dst, Register src) {
- arithmetic_op_32(0x29, src, dst);
- }
-
- void subl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
- void subl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x5, dst, src);
- }
-
void subb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x5, dst, src);
}
@@ -1160,61 +955,11 @@ class Assembler : public AssemblerBase {
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
void testb(const Operand& op, Register reg);
- void testl(Register dst, Register src);
- void testl(Register reg, Immediate mask);
- void testl(const Operand& op, Register reg);
- void testl(const Operand& op, Immediate mask);
- void testq(const Operand& op, Register reg);
- void testq(Register dst, Register src);
- void testq(Register dst, Immediate mask);
-
- void xor_(Register dst, Register src) {
- if (dst.code() == src.code()) {
- arithmetic_op_32(0x33, dst, src);
- } else {
- arithmetic_op(0x33, dst, src);
- }
- }
-
- void xorl(Register dst, Register src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, const Operand& src) {
- arithmetic_op_32(0x33, dst, src);
- }
-
- void xorl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xorl(const Operand& dst, Register src) {
- arithmetic_op_32(0x31, src, dst);
- }
-
- void xorl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x6, dst, src);
- }
-
- void xor_(Register dst, const Operand& src) {
- arithmetic_op(0x33, dst, src);
- }
-
- void xor_(const Operand& dst, Register src) {
- arithmetic_op(0x31, src, dst);
- }
-
- void xor_(Register dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
-
- void xor_(const Operand& dst, Immediate src) {
- immediate_arithmetic_op(0x6, dst, src);
- }
// Bit operations.
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
+ void bsrl(Register dst, Register src);
// Miscellaneous
void clc();
@@ -1260,9 +1005,6 @@ class Assembler : public AssemblerBase {
// Call near absolute indirect, address in register
void call(Register adr);
- // Call near indirect
- void call(const Operand& operand);
-
// Jumps
// Jump short or near relative.
// Use a 32-bit signed displacement.
@@ -1274,9 +1016,6 @@ class Assembler : public AssemblerBase {
// Jump near absolute indirect (r64)
void jmp(Register adr);
- // Jump near absolute indirect (m64)
- void jmp(const Operand& src);
-
// Conditional jumps
void j(Condition cc,
Label* L,
@@ -1407,6 +1146,8 @@ class Assembler : public AssemblerBase {
void movapd(XMMRegister dst, XMMRegister src);
+ void psllq(XMMRegister reg, byte imm8);
+
void cvttsd2si(Register dst, const Operand& src);
void cvttsd2si(Register dst, XMMRegister src);
void cvttsd2siq(Register dst, XMMRegister src);
@@ -1472,6 +1213,12 @@ class Assembler : public AssemblerBase {
// Use --code-comments to enable.
void RecordComment(const char* msg, bool force = false);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);
@@ -1499,6 +1246,13 @@ class Assembler : public AssemblerBase {
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ protected:
+ // Call near indirect
+ void call(const Operand& operand);
+
+ // Jump near absolute indirect (m64)
+ void jmp(const Operand& src);
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1605,6 +1359,14 @@ class Assembler : public AssemblerBase {
// numbers have a high bit set.
inline void emit_optional_rex_32(const Operand& op);
+ void emit_rex(int size) {
+ if (size == kInt64Size) {
+ emit_rex_64();
+ } else {
+ ASSERT(size == kInt32Size);
+ }
+ }
+
template<class P1>
void emit_rex(P1 p1, int size) {
if (size == kInt64Size) {
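
For context on the size dispatch above: requesting a 64-bit operand means emitting a REX prefix with the W bit set (0x48 plus any register-extension bits), while 32-bit operations need the prefix only to reach r8-r15. A rough sketch of the prefix byte, with assumed names:

#include <cstdint>
#include <cstdio>

// REX is a single byte 0100WRXB: W selects 64-bit operand size,
// R/X/B extend the ModR/M and SIB register fields to r8-r15.
static uint8_t rex(bool w, bool r, bool x, bool b) {
  return static_cast<uint8_t>(0x40 | (w ? 8 : 0) | (r ? 4 : 0) |
                              (x ? 2 : 0) | (b ? 1 : 0));
}

int main() {
  std::printf("%#x %#x\n",
              rex(true, false, false, false),   // 0x48: emit_rex_64 path
              rex(false, false, false, true));  // 0x41: 32-bit op on r8-r15
  return 0;
}
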
@@ -1709,12 +1471,331 @@ class Assembler : public AssemblerBase {
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Arithmetics
+ void emit_add(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x03, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x03, dst, src);
+ }
+ }
+
+ void emit_add(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x0, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+ }
+
+ void emit_add(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x03, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x03, dst, src);
+ }
+ }
+
+ void emit_add(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x1, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x1, src, dst);
+ }
+ }
+
+ void emit_add(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x0, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+ }
+
+ void emit_and(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x23, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x23, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x21, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x21, src, dst);
+ }
+ }
+
+ void emit_and(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
+
+ void emit_and(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x4, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x4, dst, src);
+ }
+ }
+
+ void emit_cmp(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x3B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x3B, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x39, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x39, src, dst);
+ }
+ }
+
+ void emit_cmp(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
+
+ void emit_cmp(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x7, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+ }
+
+ void emit_dec(Register dst, int size);
+ void emit_dec(const Operand& dst, int size);
+
+ // Divide rdx:rax by src. Quotient in rax, remainder in rdx when size is 64.
+ // Divide edx:eax by lower 32 bits of src. Quotient in eax, remainder in edx
+ // when size is 32.
+ void emit_idiv(Register src, int size);
+
+ // Signed multiply instructions.
+ // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
+ void emit_imul(Register src, int size);
+ void emit_imul(Register dst, Register src, int size);
+ void emit_imul(Register dst, const Operand& src, int size);
+ void emit_imul(Register dst, Register src, Immediate imm, int size);
+
+ void emit_inc(Register dst, int size);
+ void emit_inc(const Operand& dst, int size);
+
+ void emit_lea(Register dst, const Operand& src, int size);
+
void emit_mov(Register dst, const Operand& src, int size);
void emit_mov(Register dst, Register src, int size);
void emit_mov(const Operand& dst, Register src, int size);
void emit_mov(Register dst, Immediate value, int size);
void emit_mov(const Operand& dst, Immediate value, int size);
+ void emit_movzxb(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, const Operand& src, int size);
+ void emit_movzxw(Register dst, Register src, int size);
+
+ void emit_neg(Register dst, int size);
+ void emit_neg(const Operand& dst, int size);
+
+ void emit_not(Register dst, int size);
+ void emit_not(const Operand& dst, int size);
+
+ void emit_or(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x0B, dst, src);
+ } else {
+ arithmetic_op_32(0x0B, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x9, src, dst);
+ } else {
+ arithmetic_op_32(0x9, src, dst);
+ }
+ }
+
+ void emit_or(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
+
+ void emit_or(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x1, dst, src);
+ } else {
+ immediate_arithmetic_op_32(0x1, dst, src);
+ }
+ }
+
+ void emit_repmovs(int size);
+
+ void emit_sbb(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x1b, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x1b, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x2B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x2B, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x5, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+ }
+
+ void emit_sub(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x2B, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x2B, dst, src);
+ }
+ }
+
+ void emit_sub(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x29, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x29, src, dst);
+ }
+ }
+
+ void emit_sub(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x5, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+ }
+
+ void emit_test(Register dst, Register src, int size);
+ void emit_test(Register reg, Immediate mask, int size);
+ void emit_test(const Operand& op, Register reg, int size);
+ void emit_test(const Operand& op, Immediate mask, int size);
+
+ // Exchange two registers
+ void emit_xchg(Register dst, Register src, int size);
+
+ void emit_xor(Register dst, Register src, int size) {
+ if (size == kInt64Size) {
+ if (dst.code() == src.code()) {
+ arithmetic_op_32(0x33, dst, src);
+ } else {
+ arithmetic_op(0x33, dst, src);
+ }
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, const Operand& src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x33, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x33, dst, src);
+ }
+ }
+
+ void emit_xor(Register dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Immediate src, int size) {
+ if (size == kInt64Size) {
+ immediate_arithmetic_op(0x6, dst, src);
+ } else {
+ ASSERT(size == kInt32Size);
+ immediate_arithmetic_op_32(0x6, dst, src);
+ }
+ }
+
+ void emit_xor(const Operand& dst, Register src, int size) {
+ if (size == kInt64Size) {
+ arithmetic_op(0x31, src, dst);
+ } else {
+ ASSERT(size == kInt32Size);
+ arithmetic_op_32(0x31, src, dst);
+ }
+ }
+
friend class CodePatcher;
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
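
One subtlety in emit_xor above: xoring a 64-bit register with itself deliberately uses the 32-bit encoding, since any 32-bit result is zero-extended into the full register and the shorter form saves the REX.W byte (31 C0 vs. 48 31 C0 for rax). A small runnable illustration of that zero-extension semantics (plain C++, not assembler output):

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t rax = 0xDEADBEEFCAFEBABEULL;
  // "xorl eax, eax": compute a 32-bit result, which the hardware
  // zero-extends into the full 64-bit register on every 32-bit op.
  uint32_t eax = static_cast<uint32_t>(rax);
  eax ^= eax;
  rax = eax;  // models the implicit zero-extension
  std::printf("rax = 0x%016llx\n", (unsigned long long)rax);  // all zeros
  return 0;
}
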
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 6717dd5d6..d5b1a7386 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -61,7 +61,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
if (extra_args == NEEDS_CALLED_FUNCTION) {
num_extra_args = 1;
__ PopReturnAddressTo(kScratchRegister);
- __ push(rdi);
+ __ Push(rdi);
__ PushReturnAddressFrom(kScratchRegister);
} else {
ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
@@ -69,7 +69,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects rax to contain the number of arguments
// including the receiver and the extra arguments.
- __ addq(rax, Immediate(num_extra_args + 1));
+ __ addp(rax, Immediate(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()), 1);
}
@@ -78,13 +78,13 @@ static void CallRuntimePassFunction(
MacroAssembler* masm, Runtime::FunctionId function_id) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
__ CallRuntime(function_id, 1);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
@@ -93,13 +93,13 @@ static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(kScratchRegister,
FieldOperand(kScratchRegister, SharedFunctionInfo::kCodeOffset));
- __ lea(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(kScratchRegister, Code::kHeaderSize));
__ jmp(kScratchRegister);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
- __ lea(rax, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(rax, FieldOperand(rax, Code::kHeaderSize));
__ jmp(rax);
}
@@ -114,7 +114,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok);
- CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+ CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
GenerateTailCallToReturnedCode(masm);
__ bind(&ok);
@@ -124,25 +124,38 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
bool is_api_function,
- bool count_constructions) {
+ bool count_constructions,
+ bool create_memento) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
+ // -- rbx: allocation site or undefined
// -----------------------------------
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
+
+ // Should never create mementos for api functions.
+ ASSERT(!is_api_function || !create_memento);
+
+ // Should never create mementos before slack tracking is finished.
+ ASSERT(!count_constructions || !create_memento);
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
+ if (create_memento) {
+ __ AssertUndefinedOrAllocationSite(rbx);
+ __ Push(rbx);
+ }
+
// Store a smi-tagged arguments count on the stack.
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
// Push the function to invoke on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Try to allocate the object without transitioning into C code. If any of
// the preconditions is not met, the code bails out to the runtime call.
@@ -154,7 +167,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(masm->isolate());
__ Move(kScratchRegister, debug_step_in_fp);
- __ cmpq(Operand(kScratchRegister, 0), Immediate(0));
+ __ cmpp(Operand(kScratchRegister, 0), Immediate(0));
__ j(not_equal, &rt_call);
#endif
@@ -186,22 +199,25 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
SharedFunctionInfo::kConstructionCountOffset));
__ j(not_zero, &allocate);
- __ push(rax);
- __ push(rdi);
+ __ Push(rax);
+ __ Push(rdi);
- __ push(rdi); // constructor
+ __ Push(rdi); // constructor
// The call will replace the stub, so the countdown is only done once.
- __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+ __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
- __ pop(rdi);
- __ pop(rax);
+ __ Pop(rdi);
+ __ Pop(rax);
__ bind(&allocate);
}
// Now allocate the JSObject on the heap.
- __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shl(rdi, Immediate(kPointerSizeLog2));
+ if (create_memento) {
+ __ addp(rdi, Immediate(AllocationMemento::kSize));
+ }
// rdi: size of new object
__ Allocate(rdi,
rbx,
@@ -209,10 +225,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
no_reg,
&rt_call,
NO_ALLOCATION_FLAGS);
+ Factory* factory = masm->isolate()->factory();
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object
+ // rdi: start of next object (including memento if create_memento)
__ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
@@ -220,24 +237,39 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
- // rdi: start of next object
- __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ // rdi: start of next object (including memento if create_memento)
+ __ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
if (count_constructions) {
- __ movzxbq(rsi,
+ __ movzxbp(rsi,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ lea(rsi,
+ __ leap(rsi,
Operand(rbx, rsi, times_pointer_size, JSObject::kHeaderSize));
// rsi: offset of first field after pre-allocated fields
if (FLAG_debug_code) {
- __ cmpq(rsi, rdi);
+ __ cmpp(rsi, rdi);
__ Assert(less_equal,
kUnexpectedNumberOfPreAllocatedPropertyFields);
}
__ InitializeFieldsWithFiller(rcx, rsi, rdx);
__ LoadRoot(rdx, Heap::kOnePointerFillerMapRootIndex);
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
+ } else if (create_memento) {
+ __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
+ __ InitializeFieldsWithFiller(rcx, rsi, rdx);
+
+ // Fill in memento fields if necessary.
+ // rsi: points to the allocated but uninitialized memento.
+ Handle<Map> allocation_memento_map = factory->allocation_memento_map();
+ __ Move(Operand(rsi, AllocationMemento::kMapOffset),
+ allocation_memento_map);
+ // Get the cell or undefined.
+ __ movp(rdx, Operand(rsp, kPointerSize*2));
+ __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset),
+ rdx);
+ } else {
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
}
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on. Any
@@ -246,7 +278,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rax: initial map
// rbx: JSObject
// rdi: start of next object
- __ or_(rbx, Immediate(kHeapObjectTag));
+ __ orp(rbx, Immediate(kHeapObjectTag));
// Check if a non-empty properties array is needed.
// Allocate and initialize a FixedArray if it is.
@@ -254,13 +286,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rbx: JSObject
// rdi: start of next object
// Calculate total properties described map.
- __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
- __ movzxbq(rcx,
+ __ movzxbp(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbp(rcx,
FieldOperand(rax, Map::kPreAllocatedPropertyFieldsOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
// Calculate unused properties past the end of the in-object properties.
- __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
- __ subq(rdx, rcx);
+ __ movzxbp(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ __ subp(rdx, rcx);
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
__ Assert(positive, kPropertyAllocationCountFailed);
@@ -296,13 +328,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
{ Label loop, entry;
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rdi, FixedArray::kHeaderSize));
__ jmp(&entry);
__ bind(&loop);
__ movp(Operand(rcx, 0), rdx);
- __ addq(rcx, Immediate(kPointerSize));
+ __ addp(rcx, Immediate(kPointerSize));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(below, &loop);
}
@@ -310,7 +342,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// the JSObject
// rbx: JSObject
// rdi: FixedArray
- __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ orp(rdi, Immediate(kHeapObjectTag)); // add the heap tag
__ movp(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
@@ -329,17 +361,50 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
+ int offset = 0;
+ if (create_memento) {
+ // Get the cell or allocation site.
+ __ movp(rdi, Operand(rsp, kPointerSize*2));
+ __ Push(rdi);
+ offset = kPointerSize;
+ }
+
// Must restore rdi (constructor) before calling runtime.
- __ movp(rdi, Operand(rsp, 0));
- __ push(rdi);
- __ CallRuntime(Runtime::kNewObject, 1);
+ __ movp(rdi, Operand(rsp, offset));
+ __ Push(rdi);
+ if (create_memento) {
+ __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+ } else {
+ __ CallRuntime(Runtime::kHiddenNewObject, 1);
+ }
__ movp(rbx, rax); // store result in rbx
+ // If we ended up using the runtime, and we want a memento, then the
+ // runtime call made it for us, and we should not increment the
+ // memento create count here.
+ Label count_incremented;
+ if (create_memento) {
+ __ jmp(&count_incremented);
+ }
+
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
+
+ if (create_memento) {
+ __ movp(rcx, Operand(rsp, kPointerSize*2));
+ __ Cmp(rcx, masm->isolate()->factory()->undefined_value());
+ __ j(equal, &count_incremented);
+ // rcx is an AllocationSite. We are creating a memento from it, so we
+ // need to increment the memento create count.
+ __ SmiAddConstant(
+ FieldOperand(rcx, AllocationSite::kPretenureCreateCountOffset),
+ Smi::FromInt(1));
+ __ bind(&count_incremented);
+ }
+
// Retrieve the function from the stack.
- __ pop(rdi);
+ __ Pop(rdi);
// Retrieve smi-tagged arguments count from the stack.
__ movp(rax, Operand(rsp, 0));
@@ -348,20 +413,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
// conventions dictate that the called function pops the receiver.
- __ push(rbx);
- __ push(rbx);
+ __ Push(rbx);
+ __ Push(rbx);
// Set up pointer to last argument.
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ movp(rcx, rax);
__ jmp(&entry);
__ bind(&loop);
- __ push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
__ bind(&entry);
- __ decq(rcx);
+ __ decp(rcx);
__ j(greater_equal, &loop);
// Call the function.
@@ -411,7 +476,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
@@ -420,17 +485,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true);
+ Generate_JSConstructStubHelper(masm, false, true, false);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false);
+ Generate_JSConstructStubHelper(masm, false, false, FLAG_pretenuring_call_new);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true, false, false);
}
@@ -470,8 +535,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
// Push the function and the receiver onto the stack.
- __ push(rdx);
- __ push(r8);
+ __ Push(rdx);
+ __ Push(r8);
// Load the number of arguments and setup pointer to the arguments.
__ movp(rax, r9);
@@ -497,8 +562,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and receiver and setup the context.
- __ push(rdi);
- __ push(rdx);
+ __ Push(rdi);
+ __ Push(rdx);
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
@@ -524,18 +589,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
- __ push(Operand(kScratchRegister, 0)); // dereference handle
- __ addq(rcx, Immediate(1));
+ __ Push(Operand(kScratchRegister, 0)); // dereference handle
+ __ addp(rcx, Immediate(1));
__ bind(&entry);
- __ cmpq(rcx, rax);
+ __ cmpp(rcx, rax);
__ j(not_equal, &loop);
// Invoke the code.
if (is_construct) {
// No type feedback cell is available
- Handle<Object> undefined_sentinel(
- masm->isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
// Expects rdi to hold function pointer.
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
@@ -565,7 +628,7 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
- CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized);
+ CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
GenerateTailCallToReturnedCode(masm);
}
@@ -574,15 +637,15 @@ static void CallCompileOptimized(MacroAssembler* masm,
bool concurrent) {
FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
- __ push(rdi);
+ __ Push(rdi);
// Function is also the parameter to the runtime call.
- __ push(rdi);
+ __ Push(rdi);
// Whether to compile in a background thread.
__ Push(masm->isolate()->factory()->ToBoolean(concurrent));
- __ CallRuntime(Runtime::kCompileOptimized, 2);
+ __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
// Restore receiver.
- __ pop(rdi);
+ __ Pop(rdi);
}
@@ -607,7 +670,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
// Re-execute the code that was patched back to the young age when
// the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
+ __ subp(Operand(rsp, 0), Immediate(5));
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
@@ -643,7 +706,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ Pushad();
__ Move(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
__ movp(arg_reg_1, Operand(rsp, kNumSafepointRegisters * kPointerSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
{ // NOLINT
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(2);
@@ -655,10 +718,10 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
// Perform prologue operations usually performed by the young code stub.
__ PopReturnAddressTo(kScratchRegister);
- __ push(rbp); // Caller's frame pointer.
+ __ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
__ PushReturnAddressFrom(kScratchRegister);
// Jump to point after the code-age stub.
@@ -681,12 +744,12 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
// stubs that tail call the runtime on deopts passing their parameters in
// registers.
__ Pushad();
- __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+ __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
__ Popad();
// Tear down internal frame.
}
- __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ Pop(MemOperand(rsp, 0)); // Ignore state offset
__ ret(0); // Return to IC Miss stub, continuation still on stack.
}
@@ -710,7 +773,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Pass the deoptimization type to the runtime system.
__ Push(Smi::FromInt(static_cast<int>(type)));
- __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
// Tear down internal frame.
}
@@ -719,13 +782,13 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movp(rax, Operand(rsp, kPCOnStackSize + kPointerSize));
- __ cmpq(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpp(kScratchRegister, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -762,12 +825,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
//
// 1. Make sure we have at least one argument.
{ Label done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &done);
__ PopReturnAddressTo(rbx);
__ Push(masm->isolate()->factory()->undefined_value());
__ PushReturnAddressFrom(rbx);
- __ incq(rax);
+ __ incp(rax);
__ bind(&done);
}
@@ -799,7 +862,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_zero, &shift_arguments);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ movp(rbx, args.GetArgumentOperand(1));
__ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
@@ -817,14 +880,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Enter an internal frame in order to preserve argument count.
FrameScope scope(masm, StackFrame::INTERNAL);
__ Integer32ToSmi(rax, rax);
- __ push(rax);
+ __ Push(rax);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movp(rbx, rax);
__ Set(rdx, 0); // indicate regular JS_FUNCTION
- __ pop(rax);
+ __ Pop(rax);
__ SmiToInteger32(rax, rax);
}
@@ -866,25 +929,25 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rsp, rcx, times_pointer_size, 0));
__ movp(Operand(rsp, rcx, times_pointer_size, 1 * kPointerSize), rbx);
- __ decq(rcx);
+ __ decp(rcx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(rbx); // Discard copy of return address.
- __ decq(rax); // One fewer argument (first argument is new receiver).
+ __ popq(rbx); // Discard copy of return address.
+ __ decp(rax); // One fewer argument (first argument is new receiver).
}
// 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
// or a function proxy via CALL_FUNCTION_PROXY.
{ Label function, non_proxy;
- __ testq(rdx, rdx);
+ __ testp(rdx, rdx);
__ j(zero, &function);
__ Set(rbx, 0);
- __ cmpq(rdx, Immediate(1));
+ __ cmpp(rdx, Immediate(1));
__ j(not_equal, &non_proxy);
__ PopReturnAddressTo(rdx);
- __ push(rdi); // re-add proxy object as additional argument
+ __ Push(rdi); // re-add proxy object as additional argument
__ PushReturnAddressFrom(rdx);
- __ incq(rax);
+ __ incp(rax);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -904,7 +967,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldOperand(rdx,
SharedFunctionInfo::kFormalParameterCountOffset));
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(not_equal,
masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
@@ -932,8 +995,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
static const int kFunctionOffset = kReceiverOffset + kPointerSize;
- __ push(Operand(rbp, kFunctionOffset));
- __ push(Operand(rbp, kArgumentsOffset));
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(Operand(rbp, kArgumentsOffset));
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
@@ -944,17 +1007,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(rcx, rsp);
// Make rcx the space we have left. The stack might already be overflowed
// here, which will cause rcx to become negative.
- __ subq(rcx, kScratchRegister);
+ __ subp(rcx, kScratchRegister);
// Make rdx the space we need for the array when it is unrolled onto the
// stack.
__ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmpq(rcx, rdx);
+ __ cmpp(rcx, rdx);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(rbp, kFunctionOffset));
- __ push(rax);
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(rax);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
__ bind(&okay);
// End of stack check.
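
// --- Illustrative sketch (C++ model, not V8 source): the stack check
// above computes the headroom rsp - real_stack_limit and compares it,
// signed, against the unrolled argument size, since the stack may already
// be overflowed and the difference negative.
#include <cstdint>

bool ArgumentsFitOnStack(uintptr_t rsp, uintptr_t real_stack_limit,
                         uintptr_t argc) {
  uintptr_t needed = argc * sizeof(void*);            // rdx in the stub
  intptr_t headroom =
      static_cast<intptr_t>(rsp - real_stack_limit);  // rcx in the stub
  return headroom > static_cast<intptr_t>(needed);    // j(greater, &okay)
}
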
@@ -963,8 +1026,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
const int kLimitOffset =
StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(rax); // limit
- __ push(Immediate(0)); // index
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
// Get the receiver.
__ movp(rbx, Operand(rbp, kReceiverOffset));
@@ -990,7 +1053,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &push_receiver);
- // Compute the receiver in non-strict mode.
+ // Compute the receiver in sloppy mode.
__ JumpIfSmi(rbx, &call_to_object, Label::kNear);
__ CompareRoot(rbx, Heap::kNullValueRootIndex);
__ j(equal, &use_global_receiver);
@@ -1005,7 +1068,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Convert the receiver to an object.
__ bind(&call_to_object);
- __ push(rbx);
+ __ Push(rbx);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ movp(rbx, rax);
__ jmp(&push_receiver, Label::kNear);
@@ -1017,7 +1080,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push the receiver.
__ bind(&push_receiver);
- __ push(rbx);
+ __ Push(rbx);
// Copy all arguments from the array to the stack.
Label entry, loop;
@@ -1036,7 +1099,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// case, we know that we are not generating a test instruction next.
// Push the nth argument.
- __ push(rax);
+ __ Push(rax);
// Update the index on the stack and in register rax.
__ movp(rax, Operand(rbp, kIndexOffset));
@@ -1044,7 +1107,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ movp(Operand(rbp, kIndexOffset), rax);
__ bind(&entry);
- __ cmpq(rax, Operand(rbp, kLimitOffset));
+ __ cmpp(rax, Operand(rbp, kLimitOffset));
__ j(not_equal, &loop);
// Call the function.
@@ -1061,8 +1124,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Call the function proxy.
__ bind(&call_proxy);
- __ push(rdi); // add function proxy as last argument
- __ incq(rax);
+ __ Push(rdi); // add function proxy as last argument
+ __ incp(rax);
__ Set(rbx, 0);
__ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
__ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -1128,10 +1191,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Run the native code for the Array function called as a normal function.
// tail call a stub
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
- __ Move(rbx, undefined_sentinel);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
@@ -1150,7 +1210,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpq(rdi, rcx);
+ __ cmpp(rdi, rcx);
__ Assert(equal, kUnexpectedStringFunction);
}
@@ -1158,11 +1218,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// (including the receiver).
StackArgumentsAccessor args(rsp, rax);
Label no_arguments;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &no_arguments);
__ movp(rbx, args.GetArgumentOperand(1));
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ movp(rax, rbx);
@@ -1233,10 +1293,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_conversions(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdi); // Preserve the function.
- __ push(rax);
+ __ Push(rdi); // Preserve the function.
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
__ movp(rbx, rax);
__ jmp(&argument_is_string);
@@ -1246,7 +1306,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&no_arguments);
__ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ PopReturnAddressTo(rcx);
- __ lea(rsp, Operand(rsp, kPointerSize));
+ __ leap(rsp, Operand(rsp, kPointerSize));
__ PushReturnAddressFrom(rcx);
__ jmp(&argument_is_string);
@@ -1256,7 +1316,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ IncrementCounter(counters->string_ctor_gc_required(), 1);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rbx);
+ __ Push(rbx);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
__ ret(0);
@@ -1264,20 +1324,20 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Store the arguments adaptor context sentinel.
__ Push(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
// Push the function on the stack.
- __ push(rdi);
+ __ Push(rdi);
// Preserve the number of arguments on the stack. Must preserve rax,
// rbx and rcx because these registers are used when copying the
// arguments and the receiver.
__ Integer32ToSmi(r8, rax);
- __ push(r8);
+ __ Push(r8);
}
@@ -1287,12 +1347,12 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Leave the frame.
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
// Remove caller arguments from the stack.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
- __ lea(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
}
@@ -1310,9 +1370,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label enough, too_few;
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpq(rax, rbx);
+ __ cmpp(rax, rbx);
__ j(less, &too_few);
- __ cmpq(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ cmpp(rbx, Immediate(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ j(equal, &dont_adapt_arguments);
{ // Enough parameters: Actual >= expected.
@@ -1321,15 +1381,15 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rax, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rax, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rax, 0));
- __ subq(rax, Immediate(kPointerSize));
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(Operand(rax, 0));
+ __ subp(rax, Immediate(kPointerSize));
+ __ cmpp(r8, rbx);
__ j(less, &copy);
__ jmp(&invoke);
}
@@ -1340,24 +1400,24 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(rdi, Operand(rbp, rax, times_pointer_size, offset));
+ __ leap(rdi, Operand(rbp, rax, times_pointer_size, offset));
__ Set(r8, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ incq(r8);
- __ push(Operand(rdi, 0));
- __ subq(rdi, Immediate(kPointerSize));
- __ cmpq(r8, rax);
+ __ incp(r8);
+ __ Push(Operand(rdi, 0));
+ __ subp(rdi, Immediate(kPointerSize));
+ __ cmpp(r8, rax);
__ j(less, &copy);
// Fill remaining expected arguments with undefined values.
Label fill;
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ bind(&fill);
- __ incq(r8);
- __ push(kScratchRegister);
- __ cmpq(r8, rbx);
+ __ incp(r8);
+ __ Push(kScratchRegister);
+ __ cmpp(r8, rbx);
__ j(less, &fill);
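
// --- Illustrative sketch (C++ model, not V8 source): the two loops above
// implement the "too few arguments" path: copy the receiver and all
// actual arguments, then pad with undefined up to the expected count.
#include <cstdint>
#include <vector>

std::vector<uint64_t> AdaptArguments(const std::vector<uint64_t>& actual,
                                     size_t expected, uint64_t undefined) {
  // 'actual' holds the receiver followed by the actual arguments.
  std::vector<uint64_t> frame(actual);
  while (frame.size() < expected + 1)  // +1 accounts for the receiver
    frame.push_back(undefined);
  return frame;
}
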
// Restore function pointer.
@@ -1389,13 +1449,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
}
Label skip;
// If the code object is null, just return to the unoptimized code.
- __ cmpq(rax, Immediate(0));
+ __ cmpp(rax, Immediate(0));
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
@@ -1409,7 +1469,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
// Compute the target address = code_obj + header_size + osr_offset
- __ lea(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
+ __ leap(rax, Operand(rax, rbx, times_1, Code::kHeaderSize - kHeapObjectTag));
// Overwrite the return address on the stack.
__ movq(StackOperandForReturnAddress(0), rax);
@@ -1426,7 +1486,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ j(above_equal, &ok);
{
FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kStackGuard, 0);
+ __ CallRuntime(Runtime::kHiddenStackGuard, 0);
}
__ jmp(masm->isolate()->builtins()->OnStackReplacement(),
RelocInfo::CODE_TARGET);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 075964bce..c949a423a 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -46,7 +46,7 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
}
@@ -77,7 +77,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 1;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
}
@@ -88,7 +88,8 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
+ Runtime::FunctionForId(
+ Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
}
@@ -99,15 +100,15 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
}
void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
- static Register registers[] = { rbx };
- descriptor->register_param_count_ = 1;
+ static Register registers[] = { rbx, rdx };
+ descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ = NULL;
}
@@ -142,7 +143,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
}
@@ -166,6 +167,26 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void StringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rcx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
+void KeyedStringLengthStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = NULL;
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -213,7 +234,7 @@ static void InitializeArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
}
@@ -241,7 +262,7 @@ static void InitializeInternalArrayConstructorDescriptor(
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
}
@@ -365,7 +386,7 @@ void StringAddStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 2;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+ Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
}
@@ -470,7 +491,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
rax.is(descriptor->register_params_[param_count - 1]));
// Push arguments
for (int i = 0; i < param_count; ++i) {
- __ push(descriptor->register_params_[i]);
+ __ Push(descriptor->register_params_[i]);
}
ExternalReference miss = descriptor->miss_handler();
__ CallExternalReference(miss, descriptor->register_param_count_);
@@ -521,7 +542,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
int double_offset = offset();
// Account for return address and saved regs if input is rsp.
- if (input_reg.is(rsp)) double_offset += 3 * kPointerSize;
+ if (input_reg.is(rsp)) double_offset += 3 * kRegisterSize;
MemOperand mantissa_operand(MemOperand(input_reg, double_offset));
MemOperand exponent_operand(MemOperand(input_reg,
@@ -541,14 +562,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// is the return register, then save the temp register we use in its stead
// for the result.
Register save_reg = final_result_reg.is(rcx) ? rax : rcx;
- __ push(scratch1);
- __ push(save_reg);
+ __ pushq(scratch1);
+ __ pushq(save_reg);
bool stash_exponent_copy = !input_reg.is(rsp);
__ movl(scratch1, mantissa_operand);
__ movsd(xmm0, mantissa_operand);
__ movl(rcx, exponent_operand);
- if (stash_exponent_copy) __ push(rcx);
+ if (stash_exponent_copy) __ pushq(rcx);
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ shrl(rcx, Immediate(HeapNumber::kExponentShift));
@@ -583,14 +604,14 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Restore registers
__ bind(&done);
if (stash_exponent_copy) {
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
}
if (!final_result_reg.is(result_reg)) {
ASSERT(final_result_reg.is(rcx));
__ movl(final_result_reg, result_reg);
}
- __ pop(save_reg);
- __ pop(scratch1);
+ __ popq(save_reg);
+ __ popq(scratch1);
__ ret(0);
}
@@ -601,14 +622,14 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
// Load operand in rdx into xmm0, or branch to not_numbers.
__ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
__ JumpIfSmi(rdx, &load_smi_rdx);
- __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers); // Argument in rdx is not a number.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
// Load operand in rax into xmm1, or branch to not_numbers.
__ JumpIfSmi(rax, &load_smi_rax);
__ bind(&load_nonsmi_rax);
- __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
__ j(not_equal, not_numbers);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
__ jmp(&done);
@@ -689,8 +710,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&try_arithmetic_simplification);
__ cvttsd2si(exponent, double_exponent);
// Skip to runtime if possibly NaN (indicated by the indefinite integer).
- __ cmpl(exponent, Immediate(0x80000000u));
- __ j(equal, &call_runtime);
+ __ cmpl(exponent, Immediate(0x1));
+ __ j(overflow, &call_runtime);
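
// --- Illustrative sketch (C++ model, not V8 source): cvttsd2si yields
// the "indefinite integer" 0x80000000 (INT32_MIN) for NaN or
// out-of-range inputs. cmp(reg, 1) subtracts 1, and that subtraction
// overflows the signed 32-bit range only for INT32_MIN, so j(overflow)
// replaces the old equality test against 0x80000000u.
#include <cstdint>
#include <cassert>

bool IsIndefiniteInteger(int32_t v) {
  return v == INT32_MIN;  // exactly when OF is set after cmp v, 1
}

int main() {
  assert(IsIndefiniteInteger(INT32_MIN));
  assert(!IsIndefiniteInteger(INT32_MAX));
  assert(!IsIndefiniteInteger(0));
  return 0;
}
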
if (exponent_type_ == ON_STACK) {
// Detect square root case. Crankshaft detects constant +/-0.5 at
@@ -767,7 +788,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&fast_power);
__ fnclex(); // Clear flags to catch exceptions later.
// Transfer (B)ase and (E)xponent onto the FPU register stack.
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), double_exponent);
__ fld_d(Operand(rsp, 0)); // E
__ movsd(Operand(rsp, 0), double_base);
@@ -794,12 +815,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &fast_power_failed, Label::kNear);
__ fstp_d(Operand(rsp, 0));
__ movsd(double_result, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&done);
__ bind(&fast_power_failed);
__ fninit();
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ jmp(&call_runtime);
}
@@ -913,99 +934,6 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
}
-void StringLengthStub::Generate(MacroAssembler* masm) {
- Label miss;
- Register receiver;
- if (kind() == Code::KEYED_LOAD_IC) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- __ Cmp(rax, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- receiver = rdx;
- } else {
- ASSERT(kind() == Code::LOAD_IC);
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- receiver = rax;
- }
-
- StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
- __ bind(&miss);
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
-void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
- if (kind() == Code::KEYED_STORE_IC) {
- __ Cmp(rcx, masm->isolate()->factory()->length_string());
- __ j(not_equal, &miss);
- }
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movp(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movp(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ PopReturnAddressTo(scratch);
- __ push(receiver);
- __ push(value);
- __ PushReturnAddressFrom(scratch);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- StubCompiler::TailCallBuiltin(
- masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
-}
-
-
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -1026,7 +954,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// Check index against formal parameters count limit passed in
// through register rax. Use unsigned comparison to get negative
// check for free.
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
@@ -1041,7 +969,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// comparison to get negative check for free.
__ bind(&adaptor);
__ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(above_equal, &slow);
// Read the argument from the stack and return it.
@@ -1056,13 +984,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// by calling the runtime system.
__ bind(&slow);
__ PopReturnAddressTo(rbx);
- __ push(rdx);
+ __ Push(rdx);
__ PushReturnAddressFrom(rbx);
__ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Stack layout:
// rsp[0] : return address
// rsp[8] : number of parameters (tagged)
@@ -1095,14 +1023,14 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ SmiToInteger64(rcx,
Operand(rdx,
ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
// rbx = parameter count (untagged)
// rcx = argument count (untagged)
// Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpq(rbx, rcx);
+ __ cmpp(rbx, rcx);
__ j(less_equal, &try_allocate, Label::kNear);
__ movp(rbx, rcx);
@@ -1113,17 +1041,17 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Label no_parameter_map;
- __ xor_(r8, r8);
- __ testq(rbx, rbx);
+ __ xorp(r8, r8);
+ __ testp(rbx, rbx);
__ j(zero, &no_parameter_map, Label::kNear);
- __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
__ bind(&no_parameter_map);
// 2. Backing store.
- __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
// 3. Arguments object.
- __ addq(r8, Immediate(Heap::kArgumentsObjectSize));
+ __ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
__ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
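
// --- Illustrative sketch (C++ model, not V8 source): the size folded
// into r8 above is the optional parameter map, then the backing store,
// then the arguments object itself. Constants are assumed x64 values;
// the object size is a parameter since it is heap-configuration specific.
constexpr int kPointerSize = 8;
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
constexpr int kParameterMapHeaderSize =
    kFixedArrayHeaderSize + 2 * kPointerSize;

int SloppyArgumentsAllocationSize(int mapped_count, int argc,
                                  int sloppy_arguments_object_size) {
  int size = (mapped_count == 0)
                 ? 0  // no parameter map needed
                 : mapped_count * kPointerSize + kParameterMapHeaderSize;
  size += argc * kPointerSize + kFixedArrayHeaderSize;  // backing store
  return size + sloppy_arguments_object_size;           // JSObject part
}
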
@@ -1134,10 +1062,10 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
Label has_mapped_parameters, copy;
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
- const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
+ const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
__ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
__ jmp(&copy, Label::kNear);
@@ -1174,7 +1102,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, edi will point there, otherwise to the
// backing store.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
+ __ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
@@ -1184,16 +1112,16 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ testq(rbx, rbx);
+ __ testp(rbx, rbx);
__ j(zero, &skip_parameter_map);
- __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ LoadRoot(kScratchRegister, Heap::kSloppyArgumentsElementsMapRootIndex);
// rbx contains the untagged argument count. Add 2 and tag to write.
__ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
__ Integer64PlusConstantToSmi(r9, rbx, 2);
__ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
- __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);
// Copy the parameter slots and the holes in the arguments.
@@ -1209,11 +1137,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addq(r8, args.GetArgumentOperand(2));
- __ subq(r8, r9);
+ __ addp(r8, args.GetArgumentOperand(2));
+ __ subp(r8, r9);
__ Move(r11, factory->the_hole_value());
__ movp(rdx, rdi);
- __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
+ __ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
// r9 = loop variable (tagged)
// r8 = mapping index (tagged)
// r11 = the hole value
@@ -1251,21 +1179,21 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ movp(rdx, args.GetArgumentOperand(1));
// Untag rcx for the loop below.
__ SmiToInteger64(rcx, rcx);
- __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
- __ subq(rdx, kScratchRegister);
+ __ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
+ __ subp(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
__ bind(&arguments_loop);
- __ subq(rdx, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
__ movp(r9, Operand(rdx, 0));
__ movp(FieldOperand(rdi, r8,
times_pointer_size,
FixedArray::kHeaderSize),
r9);
- __ addq(r8, Immediate(1));
+ __ addp(r8, Immediate(1));
__ bind(&arguments_test);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(less, &arguments_loop, Label::kNear);
// Return and remove the on-stack parameters.
@@ -1276,11 +1204,11 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
__ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
-void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// rsp[0] : return address
// rsp[8] : number of parameters
// rsp[16] : receiver displacement
@@ -1298,12 +1226,12 @@ void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewArgumentsFast, 3, 1);
}
@@ -1331,7 +1259,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
- __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
+ __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
__ movp(args.GetArgumentOperand(1), rdx);
@@ -1339,11 +1267,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &add_arguments_object, Label::kNear);
- __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));
+ __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
__ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
@@ -1352,7 +1280,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
const int offset =
- Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
+ Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
__ movp(rdi, Operand(rdi, offset));
// Copy the JS object part.
@@ -1370,7 +1298,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &done);
// Get the parameters pointer from the stack.
@@ -1378,7 +1306,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
+ __ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
@@ -1393,9 +1321,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&loop);
__ movp(rbx, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
__ movp(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
- __ addq(rdi, Immediate(kPointerSize));
- __ subq(rdx, Immediate(kPointerSize));
- __ decq(rcx);
+ __ addp(rdi, Immediate(kPointerSize));
+ __ subp(rdx, Immediate(kPointerSize));
+ __ decp(rcx);
__ j(not_zero, &loop);
// Return and remove the on-stack parameters.
@@ -1404,7 +1332,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenNewStrictArgumentsFast, 3, 1);
}
@@ -1413,7 +1341,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1441,7 +1369,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ Load(kScratchRegister, address_of_regexp_stack_memory_size);
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
__ j(zero, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -1533,7 +1461,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ cmpq(rbx, Immediate(kExternalStringTag));
+ __ cmpp(rbx, Immediate(kExternalStringTag));
__ j(greater_equal, &not_seq_nor_cons); // Go to (7).
// (4) Cons string. Check that it's flat.
@@ -1614,7 +1542,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Move(kScratchRegister, address_of_regexp_stack_memory_address);
__ movp(r9, Operand(kScratchRegister, 0));
__ Move(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addq(r9, Operand(kScratchRegister, 0));
+ __ addp(r9, Operand(kScratchRegister, 0));
__ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r9);
// Argument 6: Set the number of capture registers to zero to force global
@@ -1650,24 +1578,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
// Prepare start and end index of the input.
// Load the length from the original sliced string if that is the case.
- __ addq(rbx, r14);
+ __ addp(rbx, r14);
__ SmiToInteger32(arg_reg_3, FieldOperand(r15, String::kLengthOffset));
- __ addq(r14, arg_reg_3); // Using arg3 as scratch.
+ __ addp(r14, arg_reg_3); // Using arg3 as scratch.
// rbx: start index of the input
// r14: end index of the input
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
- __ lea(arg_reg_4,
+ __ leap(arg_reg_4,
FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
- __ lea(arg_reg_3,
+ __ leap(arg_reg_3,
FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
__ bind(&setup_rest);
@@ -1679,7 +1607,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movp(arg_reg_1, r15);
// Locate the code entry and call it.
- __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(r11);
__ LeaveApiExitFrame(true);
@@ -1764,7 +1692,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Capture register counter starts from number of capture registers and
// counts down until wrapping after zero.
__ bind(&next_capture);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(negative, &done, Label::kNear);
// Read the value from the static offsets vector buffer and make it a smi.
__ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
@@ -1793,7 +1721,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
masm->ExternalOperand(pending_exception_address, rbx);
__ movp(rax, pending_exception_operand);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(equal, &runtime);
__ movp(pending_exception_operand, rdx);
@@ -1807,7 +1735,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -1828,7 +1756,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
// (8a) Is the external string one byte? If yes, go to (6).
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -1890,7 +1818,7 @@ static void BranchIfNotInternalizedString(MacroAssembler* masm,
Register scratch) {
__ JumpIfSmi(object, label);
__ movp(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
+ __ movzxbp(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ testb(scratch, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -1910,9 +1838,9 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Compare two smis.
Label non_smi, smi_done;
__ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ notp(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
__ bind(&smi_done);
__ movp(rax, rdx);
__ ret(0);
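
// --- Illustrative sketch (C++ model, not V8 source): the subp/notp pair
// above compares two tagged Smis by subtraction. The difference already
// has the right sign unless the subtraction overflows; in that case the
// sign bit is inverted and bitwise NOT corrects it (the result cannot be
// zero when overflow occurred). Uses the GCC/Clang overflow builtin.
#include <cstdint>

int64_t SmiCompare(int64_t lhs_tagged, int64_t rhs_tagged) {
  int64_t diff;
  if (__builtin_sub_overflow(lhs_tagged, rhs_tagged, &diff))
    return ~diff;  // flip the wrong sign; value is known nonzero
  return diff;     // <0, 0 or >0, like the stub's result in rax
}
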
@@ -1926,7 +1854,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Two identical objects are equal unless they are both NaN or undefined.
{
Label not_identical;
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
if (cc != equal) {
@@ -1966,7 +1894,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ setcc(parity_even, rax);
// rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
if (cc == greater_equal || cc == greater) {
- __ neg(rax);
+ __ negp(rax);
}
__ ret(0);
@@ -2044,7 +1972,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Return a result of -1, 0, or 1, based on EFLAGS.
__ setcc(above, rax);
__ setcc(below, rcx);
- __ subq(rax, rcx);
+ __ subp(rax, rcx);
__ ret(0);
// If one of the numbers was NaN, then the result is always false.
@@ -2112,7 +2040,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// a heap object has the low bit clear.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagMask == 1);
- __ lea(rcx, Operand(rax, rdx, times_1, 0));
+ __ leap(rcx, Operand(rax, rdx, times_1, 0));
__ testb(rcx, Immediate(kSmiTagMask));
__ j(not_zero, &not_both_objects, Label::kNear);
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
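
// --- Illustrative sketch (C++ model, not V8 source): the leap/testb pair
// above exploits kSmiTag == 0 and heap-object tag 1: the sum of the two
// words has its low bit clear only when both tags agree, and the smi/smi
// case was handled earlier, so a clear bit means two heap objects.
#include <cstdint>

bool BothHeapObjects(uintptr_t a, uintptr_t b) {
  constexpr uintptr_t kSmiTagMask = 1;
  return ((a + b) & kSmiTagMask) == 0;  // assumes not both are Smis
}
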
@@ -2137,8 +2065,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Push arguments below the return address to prepare jump to builtin.
__ PopReturnAddressTo(rcx);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
@@ -2161,92 +2089,118 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
static void GenerateRecordCallTarget(MacroAssembler* masm) {
- // Cache the called function in a global property cell. Cache states
+ // Cache the called function in a feedback vector slot. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rax : number of arguments to the construct function
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : slot in feedback vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
- Label initialize, done, miss, megamorphic, not_array_function;
+ Label initialize, done, miss, megamorphic, not_array_function,
+ done_no_smi_convert;
// Load the cache state into rcx.
- __ movp(rcx, FieldOperand(rbx, Cell::kValueOffset));
+ __ SmiToInteger32(rdx, rdx);
+ __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
- __ cmpq(rcx, rdi);
+ __ cmpp(rcx, rdi);
__ j(equal, &done);
- __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ j(equal, &done);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the cell either some other function or an
- // AllocationSite. Do a map check on the object in rcx.
- Handle<Map> allocation_site_map =
- masm->isolate()->factory()->allocation_site_map();
- __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
- __ j(not_equal, &miss);
-
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
+ if (!FLAG_pretenuring_call_new) {
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the
+ // megamorphic sentinel, then we have in the slot either some other
+ // function or an AllocationSite. Do a map check on the object in rcx.
+ Handle<Map> allocation_site_map =
+ masm->isolate()->factory()->allocation_site_map();
+ __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
+ __ j(not_equal, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+ }
__ bind(&miss);
// A monomorphic miss (i.e, here the cache is not uninitialized) goes
// megamorphic.
- __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
__ j(equal, &initialize);
// MegamorphicSentinel is an immortal immovable object (undefined) so no
// write-barrier is needed.
__ bind(&megamorphic);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
__ jmp(&done);
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- // Make sure the function is the Array() function
- __ LoadArrayFunction(rcx);
- __ cmpq(rdi, rcx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the cell
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments register must be smi-tagged to call out.
- __ Integer32ToSmi(rax, rax);
- __ push(rax);
- __ push(rdi);
- __ push(rbx);
+ if (!FLAG_pretenuring_call_new) {
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+ __ cmpp(rdi, rcx);
+ __ j(not_equal, &not_array_function);
- CreateAllocationSiteStub create_stub;
- __ CallStub(&create_stub);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Arguments register must be smi-tagged to call out.
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ Integer32ToSmi(rdx, rdx);
+ __ Push(rdx);
+ __ Push(rbx);
+
+ CreateAllocationSiteStub create_stub;
+ __ CallStub(&create_stub);
+
+ __ Pop(rbx);
+ __ Pop(rdx);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ jmp(&done_no_smi_convert);
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rax);
- __ SmiToInteger32(rax, rax);
+ __ bind(&not_array_function);
}
- __ jmp(&done);
- __ bind(&not_array_function);
- __ movp(FieldOperand(rbx, Cell::kValueOffset), rdi);
- // No need for a write barrier here - cells are rescanned.
+ __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+ rdi);
+
+ // We won't need rdx or rbx anymore; just save rdi.
+ __ Push(rdi);
+ __ Push(rbx);
+ __ Push(rdx);
+ __ RecordWriteArray(rbx, rdi, rdx, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ __ Pop(rdx);
+ __ Pop(rbx);
+ __ Pop(rdi);
__ bind(&done);
+ __ Integer32ToSmi(rdx, rdx);
+
+ __ bind(&done_no_smi_convert);
}
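
// --- Illustrative sketch (C++ model, not V8 source): the call target
// cache moved from a single Cell to a slot in a FixedArray-backed
// feedback vector. The FieldOperand addressing used above boils down to:
// untag the pointer, skip the array header, index by slot. The header
// size is assumed (map word + length word on x64).
#include <cstdint>

constexpr int kHeapObjectTag = 1;
constexpr int kFixedArrayHeaderSize = 16;  // assumed: map + length

uintptr_t FeedbackSlotAddress(uintptr_t tagged_vector, int slot) {
  // FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize)
  return tagged_vector - kHeapObjectTag + kFixedArrayHeaderSize +
         static_cast<uintptr_t>(slot) * sizeof(void*);
}
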
void CallFunctionStub::Generate(MacroAssembler* masm) {
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : the function to call
Isolate* isolate = masm->isolate();
Label slow, non_function, wrap, cont;
@@ -2262,6 +2216,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+ // Type information was updated. Because we may call Array, which
+ // expects either undefined or an AllocationSite in rbx, we need
+ // to set rbx to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
}
}
@@ -2283,6 +2241,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &cont);
}
+
// Load the receiver from the stack.
__ movp(rax, args.GetReceiverOperand());
@@ -2305,15 +2264,18 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
// If there is a call target cache, mark it megamorphic in the
// non-function case. MegamorphicSentinel is an immortal immovable
- // object (undefined) so no write barrier is needed.
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- TypeFeedbackCells::MegamorphicSentinel(isolate));
+ // object (megamorphic symbol) so no write barrier is needed.
+ __ SmiToInteger32(rdx, rdx);
+ __ Move(FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize),
+ TypeFeedbackInfo::MegamorphicSentinel(isolate));
+ __ Integer32ToSmi(rdx, rdx);
}
// Check for function proxy.
__ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
__ j(not_equal, &non_function);
__ PopReturnAddressTo(rcx);
- __ push(rdi); // put proxy as additional argument under return address
+ __ Push(rdi); // put proxy as additional argument under return address
__ PushReturnAddressFrom(rcx);
__ Set(rax, argc_ + 1);
__ Set(rbx, 0);
@@ -2340,10 +2302,10 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&wrap);
// Wrap the receiver and patch it back onto the stack.
{ FrameScope frame_scope(masm, StackFrame::INTERNAL);
- __ push(rdi);
- __ push(rax);
+ __ Push(rdi);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
- __ pop(rdi);
+ __ Pop(rdi);
}
__ movp(args.GetReceiverOperand(), rax);
__ jmp(&cont);
@@ -2353,7 +2315,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CallConstructStub::Generate(MacroAssembler* masm) {
// rax : number of arguments
- // rbx : cache cell for call target
+ // rbx : feedback vector
+ // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+ // vector (Smi)
// rdi : constructor function
Label slow, non_function_call;
@@ -2365,6 +2329,26 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm);
+
+ __ SmiToInteger32(rdx, rdx);
+ if (FLAG_pretenuring_call_new) {
+ // Put the AllocationSite from the feedback vector into rbx.
+ // By adding kPointerSize we encode that we know the AllocationSite
+ // entry is at the feedback vector slot given by rdx + 1.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ } else {
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
+ }
+
+ __ AssertUndefinedOrAllocationSite(rbx);
}
// Jump to the function-specific construct stub.
@@ -2372,7 +2356,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(jmp_reg, FieldOperand(jmp_reg,
SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
__ jmp(jmp_reg);
// rdi: called object
@@ -2424,23 +2408,9 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
}
-static void JumpIfOOM(MacroAssembler* masm,
- Register value,
- Register scratch,
- Label* oom_label) {
- __ movp(scratch, value);
- STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
- STATIC_ASSERT(kFailureTag == 3);
- __ and_(scratch, Immediate(0xf));
- __ cmpq(scratch, Immediate(0xf));
- __ j(equal, oom_label);
-}
-
-
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
bool do_gc,
bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
@@ -2494,7 +2464,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
} else {
ASSERT_EQ(2, result_size_);
// Pass a pointer to the result location as the first argument.
- __ lea(rcx, StackSpaceOperand(2));
+ __ leap(rcx, StackSpaceOperand(2));
// Pass a pointer to the Arguments object as the second argument.
__ movp(rdx, r14); // argc.
__ movp(r8, r15); // argv.
@@ -2529,7 +2499,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, 7 * kRegisterSize));
}
#endif
- __ lea(rcx, Operand(rax, 1));
+ __ leap(rcx, Operand(rax, 1));
// Lower 2 bits of rcx are 0 iff rax has failure tag.
__ testl(rcx, Immediate(kFailureTagMask));
__ j(zero, &failure_returned);
@@ -2547,9 +2517,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
__ j(zero, &retry, Label::kNear);
- // Special handling of out of memory exceptions.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
// Retrieve the pending exception.
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, masm->isolate());
@@ -2557,9 +2524,6 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
masm->ExternalOperand(pending_exception_address);
__ movp(rax, pending_exception_operand);
- // See if we just retrieved an OOM exception.
- JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
-
// Clear the pending exception.
pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
@@ -2615,13 +2579,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Label throw_normal_exception;
Label throw_termination_exception;
- Label throw_out_of_memory_exception;
// Call into the runtime system.
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
false,
false);
@@ -2629,7 +2591,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
false);
@@ -2639,27 +2600,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
- &throw_out_of_memory_exception,
true,
true);
- __ bind(&throw_out_of_memory_exception);
- // Set external caught exception to false.
- Isolate* isolate = masm->isolate();
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate);
- __ Set(rax, static_cast<int64_t>(false));
- __ Store(external_caught, rax);
-
- // Set pending exception and rax to out of memory exception.
- ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
- isolate);
- Label already_have_failure;
- JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
- __ Move(rax, Failure::OutOfMemoryException(0x1), Assembler::RelocInfoNone());
- __ bind(&already_have_failure);
- __ Store(pending_exception, rax);
- // Fall through to the next label.
+ { FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(0);
+ __ CallCFunction(
+ ExternalReference::out_of_memory_function(masm->isolate()), 0);
+ }
__ bind(&throw_termination_exception);
__ ThrowUncatchable(rax);
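
The out-of-memory machinery above collapses into one C call that never
returns. Calls from a stub into C always follow the same shape; a sketch with
hypothetical arguments (some_c_function is not a real ExternalReference):

  { FrameScope scope(masm, StackFrame::MANUAL);  // no JS frame to describe
    __ PrepareCallCFunction(2);       // reserve arg slots, align the stack
    __ movp(arg_reg_1, rax);          // hypothetical argument setup
    __ Set(arg_reg_2, 0);
    __ CallCFunction(ExternalReference::some_c_function(isolate), 2);
  }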
@@ -2678,7 +2626,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Push the stack frame type marker twice.
@@ -2687,22 +2635,22 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// platform. It's free to use at this point.
// Cannot use smi-register for loading yet.
__ Move(kScratchRegister, Smi::FromInt(marker), Assembler::RelocInfoNone());
- __ push(kScratchRegister); // context slot
- __ push(kScratchRegister); // function slot
- // Save callee-saved registers (X64/Win64 calling conventions).
- __ push(r12);
- __ push(r13);
- __ push(r14);
- __ push(r15);
+ __ Push(kScratchRegister); // context slot
+ __ Push(kScratchRegister); // function slot
+ // Save callee-saved registers (X64/X32/Win64 calling conventions).
+ __ pushq(r12);
+ __ pushq(r13);
+ __ pushq(r14);
+ __ pushq(r15);
#ifdef _WIN64
- __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
- __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
+ __ pushq(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
- __ push(rbx);
+ __ pushq(rbx);
#ifdef _WIN64
// On Win64 XMM6-XMM15 are callee-save
- __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ subp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7);
__ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8);
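
The renamings in this file look mechanical but encode the convention this
change introduces for x32 support: a "q" suffix always means an 8-byte
operation, while capitalized Push/Pop and the "p" suffix mean kPointerSize
operations, which shrink to 4 bytes on x32. In sketch form:

  __ pushq(rbp);                              // raw 64-bit: registers, ABI state
  __ Push(rax);                               // kPointerSize: tagged values
  __ addp(rsp, Immediate(2 * kPointerSize));  // pointer-size arithmetic
  __ addq(rsp, Immediate(16));                // always 64-bit arithmetic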
@@ -2727,13 +2675,13 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
{
Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ push(c_entry_fp_operand);
+ __ Push(c_entry_fp_operand);
}
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ Load(rax, js_entry_sp);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_outermost_js);
__ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ movp(rax, rbp);
@@ -2767,7 +2715,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ Store(pending_exception, rax);
// Fake a receiver (NULL).
- __ push(Immediate(0)); // receiver
+ __ Push(Immediate(0)); // receiver
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return. We load the address from an
@@ -2782,7 +2730,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
__ Load(rax, entry);
}
- __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
+ __ leap(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
@@ -2790,7 +2738,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
- __ pop(rbx);
+ __ Pop(rbx);
__ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ j(not_equal, &not_outermost_js_2);
__ Move(kScratchRegister, js_entry_sp);
@@ -2799,7 +2747,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore the top frame descriptor from the stack.
{ Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
- __ pop(c_entry_fp_operand);
+ __ Pop(c_entry_fp_operand);
}
// Restore callee-saved registers (X64 conventions).
@@ -2815,23 +2763,23 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7));
__ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8));
__ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9));
- __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
+ __ addp(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize));
#endif
- __ pop(rbx);
+ __ popq(rbx);
#ifdef _WIN64
// Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI.
- __ pop(rsi);
- __ pop(rdi);
+ __ popq(rsi);
+ __ popq(rdi);
#endif
- __ pop(r15);
- __ pop(r14);
- __ pop(r13);
- __ pop(r12);
- __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
+ __ popq(r15);
+ __ popq(r14);
+ __ popq(r13);
+ __ popq(r12);
+ __ addp(rsp, Immediate(2 * kPointerSize)); // remove markers
// Restore frame pointer and return.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
}
@@ -2917,7 +2865,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
} else {
// Get return address and delta to inlined map check.
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
if (FLAG_debug_code) {
__ movl(rdi, Immediate(kWordBeforeMapCheckValue));
__ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
@@ -2934,9 +2882,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
Label loop, is_instance, is_not_instance;
__ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmpq(rcx, rbx);
+ __ cmpp(rcx, rbx);
__ j(equal, &is_instance, Label::kNear);
- __ cmpq(rcx, kScratchRegister);
+ __ cmpp(rcx, kScratchRegister);
// The code at is_not_instance assumes that kScratchRegister contains a
// non-zero GCable value (the null object in this case).
__ j(equal, &is_not_instance, Label::kNear);
@@ -2958,7 +2906,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(true_offset >= 0 && true_offset < 0x100);
__ movl(rax, Immediate(true_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -2981,7 +2929,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
ASSERT(false_offset >= 0 && false_offset < 0x100);
__ movl(rax, Immediate(false_offset));
__ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subq(kScratchRegister, args.GetArgumentOperand(2));
+ __ subp(kScratchRegister, args.GetArgumentOperand(2));
__ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
if (FLAG_debug_code) {
__ movl(rax, Immediate(kWordBeforeResultValue));
@@ -2996,7 +2944,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
if (HasCallSiteInlineCheck()) {
// Remove extra value from the stack.
__ PopReturnAddressTo(rcx);
- __ pop(rax);
+ __ Pop(rax);
__ PushReturnAddressFrom(rcx);
}
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
@@ -3061,21 +3009,21 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
+ __ Push(object_);
+ __ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
// NumberToSmi discards numbers that are not exact integers.
- __ CallRuntime(Runtime::kNumberToSmi, 1);
+ __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
}
if (!index_.is(rax)) {
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ movp(index_, rax);
}
- __ pop(object_);
+ __ Pop(object_);
// Reload the instance type.
__ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
@@ -3090,10 +3038,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
- __ push(object_);
+ __ Push(object_);
__ Integer32ToSmi(index_, index_);
- __ push(index_);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ __ Push(index_);
+ __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
if (!result_.is(rax)) {
__ movp(result_, rax);
}
@@ -3130,7 +3078,7 @@ void StringCharFromCodeGenerator::GenerateSlow(
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
- __ push(code_);
+ __ Push(code_);
__ CallRuntime(Runtime::kCharFromCode, 1);
if (!result_.is(rax)) {
__ movp(result_, rax);
@@ -3174,11 +3122,11 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Copy from rsi to rdi using the rep movs instruction.
__ movl(kScratchRegister, count);
__ shr(count, Immediate(kPointerSizeLog2)); // Number of pointer-size words to copy.
- __ repmovsq();
+ __ repmovsp();
// Find number of bytes left.
__ movl(count, kScratchRegister);
- __ and_(count, Immediate(kPointerSize - 1));
+ __ andp(count, Immediate(kPointerSize - 1));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
@@ -3190,8 +3138,8 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ incq(src);
- __ incq(dest);
+ __ incp(src);
+ __ incp(dest);
__ decl(count);
__ j(not_zero, &loop);
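
The routine above copies whole pointer-size words with rep movs, then
finishes the tail one byte at a time. Its C++ equivalent, for reference (a
sketch, not V8 code):

  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  void CopyCharsRep(uint8_t* dest, const uint8_t* src, size_t count) {
    size_t whole = count & ~(sizeof(uintptr_t) - 1);  // the rep movs part
    std::memcpy(dest, src, whole);
    for (size_t i = whole; i < count; ++i) {          // trailing byte loop
      dest[i] = src[i];
    }
  }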
@@ -3293,7 +3241,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
__ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
- __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
+ __ cmpp(rcx, FieldOperand(rax, String::kLengthOffset));
Label not_original_string;
// Shorter than original string's length: an actual substring.
__ j(below, &not_original_string, Label::kNear);
@@ -3339,7 +3287,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
+ __ addp(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
__ movp(rdi, FieldOperand(rax, SlicedString::kParentOffset));
// Update instance type.
__ movp(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
@@ -3360,7 +3308,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// rcx: length
// If coming from the make_two_character_string path, the string
// is too short to be sliced anyway.
- __ cmpq(rcx, Immediate(SlicedString::kMinLength));
+ __ cmpp(rcx, Immediate(SlicedString::kMinLength));
// Short slice. Copy instead of slicing.
__ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
@@ -3410,7 +3358,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ subp(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
@@ -3425,11 +3373,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(r14, rsi); // rsi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -3450,11 +3398,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ movp(r14, rsi); // rsi used by following code.
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
- __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
+ __ leap(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
+ __ leap(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -3468,7 +3416,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
__ bind(&single_char);
// rax: string
@@ -3610,11 +3558,11 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// start. This means that the loop ends when index reaches zero, which
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
- __ lea(left,
+ __ leap(left,
FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
- __ lea(right,
+ __ leap(right,
FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
- __ neg(length);
+ __ negq(length);
Register index = length; // index = -length;
// Compare loop.
@@ -3642,7 +3590,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Check for identity.
Label not_same;
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
Counters* counters = masm->isolate()->counters();
@@ -3658,14 +3606,14 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->string_compare_native(), 1);
// Drop arguments from the stack
__ PopReturnAddressTo(rcx);
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ addp(rsp, Immediate(2 * kPointerSize));
__ PushReturnAddressFrom(rcx);
GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
@@ -3763,7 +3711,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
// Verify that the object can be transitioned in place.
const int origin_offset = header_size + elements_kind() * kPointerSize;
__ movp(rdi, FieldOperand(rbx, origin_offset));
- __ cmpq(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ cmpp(rdi, FieldOperand(rdx, HeapObject::kMapOffset));
__ j(not_equal, &call_builtin);
const int target_offset = header_size + target_kind * kPointerSize;
@@ -3777,7 +3725,7 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
__ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
// Store the value.
- __ lea(rdx, FieldOperand(rdi,
+ __ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
__ movp(Operand(rdx, 0), rcx);
@@ -3816,14 +3764,14 @@ void ArrayPushStub::Generate(MacroAssembler* masm) {
__ Load(rcx, new_space_allocation_top);
// Check if it's the end of elements.
- __ lea(rdx, FieldOperand(rdi,
+ __ leap(rdx, FieldOperand(rdi,
rax, times_pointer_size,
FixedArray::kHeaderSize - argc * kPointerSize));
- __ cmpq(rdx, rcx);
+ __ cmpp(rdx, rcx);
__ j(not_equal, &call_builtin);
- __ addq(rcx, Immediate(kAllocationDelta * kPointerSize));
+ __ addp(rcx, Immediate(kAllocationDelta * kPointerSize));
Operand limit_operand = masm->ExternalOperand(new_space_allocation_limit);
- __ cmpq(rcx, limit_operand);
+ __ cmpp(rcx, limit_operand);
__ j(above, &call_builtin);
// We fit and could grow elements.
@@ -3901,13 +3849,13 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
if (GetCondition() == equal) {
// For equality we do not care about the sign of the result.
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
} else {
Label done;
- __ subq(rdx, rax);
+ __ subp(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ not_(rdx);
+ __ notp(rdx);
__ bind(&done);
__ movp(rax, rdx);
}
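
The notp above works because a signed subtraction that overflows yields a
result with the wrong sign that is never zero, and bitwise NOT flips the sign
bit without producing zero. In plain C++ (a sketch; __builtin_sub_overflow is
the GCC/Clang intrinsic):

  int64_t CompareSmis(int64_t x, int64_t y) {
    int64_t d;
    if (__builtin_sub_overflow(x, y, &d)) d = ~d;  // fix sign, stay nonzero
    return d;  // negative, zero, or positive, matching rax after the stub
  }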
@@ -3965,7 +3913,7 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
__ movl(rax, Immediate(0));
__ movl(rcx, Immediate(0));
__ setcc(above, rax); // Add one to zero if carry clear and not equal.
- __ sbbq(rax, rcx); // Subtract one if below (aka. carry set).
+ __ sbbp(rax, rcx); // Subtract one if below (aka. carry set).
__ ret(0);
__ bind(&unordered);
@@ -4013,16 +3961,16 @@ void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
// Check that both operands are internalized strings.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
__ j(not_zero, &miss, Label::kNear);
// Internalized strings are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4057,15 +4005,15 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
// types loaded in tmp1 and tmp2.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
__ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
// Unique names are compared by identity.
Label done;
- __ cmpq(left, right);
+ __ cmpp(left, right);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(rax));
@@ -4102,17 +4050,17 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// types loaded in tmp1 and tmp2.
__ movp(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movp(tmp2, FieldOperand(right, HeapObject::kMapOffset));
- __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
- __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
__ movp(tmp3, tmp1);
STATIC_ASSERT(kNotStringTag != 0);
- __ or_(tmp3, tmp2);
+ __ orp(tmp3, tmp2);
__ testb(tmp3, Immediate(kIsNotStringMask));
__ j(not_zero, &miss);
// Fast check for identical strings.
Label not_same;
- __ cmpq(left, right);
+ __ cmpp(left, right);
__ j(not_equal, &not_same, Label::kNear);
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
@@ -4128,7 +4076,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
Label do_compare;
STATIC_ASSERT(kInternalizedTag == 0);
- __ or_(tmp1, tmp2);
+ __ orp(tmp1, tmp2);
__ testb(tmp1, Immediate(kIsNotInternalizedMask));
__ j(not_zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
@@ -4154,13 +4102,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle more complex cases in runtime.
__ bind(&runtime);
__ PopReturnAddressTo(tmp1);
- __ push(left);
- __ push(right);
+ __ Push(left);
+ __ Push(right);
__ PushReturnAddressFrom(tmp1);
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
}
__ bind(&miss);
@@ -4180,7 +4128,7 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
__ j(not_equal, &miss, Label::kNear);
ASSERT(GetCondition() == equal);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4200,7 +4148,7 @@ void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Cmp(rbx, known_map_);
__ j(not_equal, &miss, Label::kNear);
- __ subq(rax, rdx);
+ __ subp(rax, rdx);
__ ret(0);
__ bind(&miss);
@@ -4215,17 +4163,17 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(rdx);
- __ push(rax);
- __ push(rdx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rax);
+ __ Push(rdx);
+ __ Push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
// Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
- __ pop(rax);
- __ pop(rdx);
+ __ leap(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ Pop(rax);
+ __ Pop(rdx);
}
// Do a tail call to the rewritten stub.
@@ -4252,12 +4200,12 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Capacity is smi 2^n.
__ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
__ decl(index);
- __ and_(index,
+ __ andp(index,
Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
+ __ leap(index, Operand(index, index, times_2, 0)); // index *= 3.
Register entity_name = r0;
// Having undefined at this place means the name is not contained.
@@ -4287,9 +4235,9 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP);
__ Push(Handle<Object>(name));
- __ push(Immediate(name->Hash()));
+ __ Push(Immediate(name->Hash()));
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(not_zero, miss);
__ jmp(done);
}
@@ -4323,26 +4271,26 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
if (i > 0) {
__ addl(r1, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r0);
+ __ andp(r1, r0);
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ leap(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(elements, r1, times_pointer_size,
+ __ cmpp(name, Operand(elements, r1, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
__ j(equal, done);
}
NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP);
- __ push(name);
+ __ Push(name);
__ movl(r0, FieldOperand(name, Name::kHashFieldOffset));
__ shrl(r0, Immediate(Name::kHashShift));
- __ push(r0);
+ __ Push(r0);
__ CallStub(&stub);
- __ testq(r0, r0);
+ __ testp(r0, r0);
__ j(zero, miss);
__ jmp(done);
}
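
Both lookup paths compute the same probe sequence; the lea with times_2 is
the usual multiply-by-three for NameDictionary's three-word entries. In C++
terms (sketch):

  int ProbeIndex(uint32_t hash, uint32_t capacity, int probe) {
    uint32_t index = (hash + NameDictionary::GetProbeOffset(probe)) &
                     (capacity - 1);            // capacity is a power of two
    return index * NameDictionary::kEntrySize;  // kEntrySize == 3
  }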
@@ -4369,7 +4317,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
__ decl(scratch);
- __ push(scratch);
+ __ Push(scratch);
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
@@ -4384,11 +4332,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i > 0) {
__ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i)));
}
- __ and_(scratch, Operand(rsp, 0));
+ __ andp(scratch, Operand(rsp, 0));
// Scale the index by multiplying by the entry size.
ASSERT(NameDictionary::kEntrySize == 3);
- __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
+ __ leap(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
// Having undefined at this place means the name is not contained.
__ movp(scratch, Operand(dictionary_,
@@ -4400,7 +4348,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &not_in_dictionary);
// Stop if found the property.
- __ cmpq(scratch, args.GetArgumentOperand(0));
+ __ cmpp(scratch, args.GetArgumentOperand(0));
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
@@ -4511,7 +4459,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object_,
address_,
@@ -4524,13 +4472,13 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
- InformIncrementalMarker(masm, mode);
+ InformIncrementalMarker(masm);
regs_.Restore(masm);
__ ret(0);
}
-void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
Register address =
arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
@@ -4546,18 +4494,10 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count);
- if (mode == INCREMENTAL_COMPACTION) {
- __ CallCFunction(
- ExternalReference::incremental_evacuation_record_write_function(
- masm->isolate()),
- argument_count);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ CallCFunction(
- ExternalReference::incremental_marking_record_write_function(
- masm->isolate()),
- argument_count);
- }
+ __ CallCFunction(
+ ExternalReference::incremental_marking_record_write_function(
+ masm->isolate()),
+ argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}
@@ -4571,11 +4511,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental_pop_object;
__ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
- __ and_(regs_.scratch0(), regs_.object());
+ __ andp(regs_.scratch0(), regs_.object());
__ movp(regs_.scratch1(),
Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset));
- __ subq(regs_.scratch1(), Immediate(1));
+ __ subp(regs_.scratch1(), Immediate(1));
__ movp(Operand(regs_.scratch0(),
MemoryChunk::kWriteBarrierCounterOffset),
regs_.scratch1());
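
The masking above is how generated code finds a page header: pages are
aligned, so clearing the low bits of any interior address yields the
MemoryChunk, whose write-barrier counter sits at a fixed offset. Sketch:

  MemoryChunk* ChunkFromAnyAddress(uintptr_t addr) {
    return reinterpret_cast<MemoryChunk*>(addr & ~Page::kPageAlignmentMask);
  }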
@@ -4626,13 +4566,13 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// We need an extra register for this, so we push the object register
// temporarily.
- __ push(regs_.object());
+ __ Push(regs_.object());
__ EnsureNotWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
&need_incremental_pop_object,
Label::kNear);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
@@ -4646,7 +4586,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&need_incremental_pop_object);
- __ pop(regs_.object());
+ __ Pop(regs_.object());
__ bind(&need_incremental);
@@ -4687,12 +4627,12 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&slow_elements);
__ PopReturnAddressTo(rdi);
- __ push(rbx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rbx);
+ __ Push(rcx);
+ __ Push(rax);
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ push(rdx);
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(rdx);
__ PushReturnAddressFrom(rdi);
__ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
@@ -4700,7 +4640,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ bind(&fast_elements);
__ SmiToInteger32(kScratchRegister, rcx);
__ movp(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
- __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
+ __ leap(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
FixedArrayBase::kHeaderSize));
__ movp(Operand(rcx, 0), rax);
// Update the write barrier for the array store.
@@ -4744,7 +4684,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
? kPointerSize
: 0;
- __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
+ __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
__ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
}
@@ -4761,16 +4701,16 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// This stub can be called from essentially anywhere, so it needs to save
// all volatile and callee-save registers.
const size_t kNumSavedRegisters = 2;
- __ push(arg_reg_1);
- __ push(arg_reg_2);
+ __ pushq(arg_reg_1);
+ __ pushq(arg_reg_2);
// Calculate the original stack pointer and store it in the second arg.
- __ lea(arg_reg_2,
+ __ leap(arg_reg_2,
Operand(rsp, kNumSavedRegisters * kRegisterSize + kPCOnStackSize));
// Calculate the function address to the first arg.
__ movp(arg_reg_1, Operand(rsp, kNumSavedRegisters * kRegisterSize));
- __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
+ __ subp(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength));
// Save the remainder of the volatile registers.
masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
@@ -4787,8 +4727,8 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Restore volatile regs.
masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2);
- __ pop(arg_reg_2);
- __ pop(arg_reg_1);
+ __ popq(arg_reg_2);
+ __ popq(arg_reg_1);
__ Ret();
}
@@ -4850,7 +4790,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
if (mode == DISABLE_ALLOCATION_SITES) {
@@ -4867,7 +4807,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// We are going to create a holey array, but our kind is non-holey.
- // Fix kind and retry (only if we have an allocation site in the cell).
+ // Fix kind and retry (only if we have an allocation site in the slot).
__ incl(rdx);
if (FLAG_debug_code) {
@@ -4951,7 +4891,7 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
AllocationSiteOverrideMode mode) {
if (argument_count_ == ANY) {
Label not_zero_case, not_one_case;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
@@ -4977,15 +4917,11 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub(
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
+ // -- rbx : AllocationSite or undefined
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Handle<Object> undefined_sentinel(
- masm->isolate()->heap()->undefined_value(),
- masm->isolate());
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
@@ -4999,31 +4935,21 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, kUnexpectedInitialMapForArrayFunction);
- // We should either have undefined in rbx or a valid cell
- Label okay_here;
- Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
- __ Cmp(rbx, undefined_sentinel);
- __ j(equal, &okay_here);
- __ Cmp(FieldOperand(rbx, 0), cell_map);
- __ Assert(equal, kExpectedPropertyCellInRegisterRbx);
- __ bind(&okay_here);
+ // We should either have undefined in rbx or a valid AllocationSite
+ __ AssertUndefinedOrAllocationSite(rbx);
}
Label no_info;
- // If the type cell is undefined, or contains anything other than an
- // AllocationSite, call an array constructor that doesn't use AllocationSites.
- __ Cmp(rbx, undefined_sentinel);
+ // If the feedback vector is the undefined value, call an array constructor
+ // that doesn't use AllocationSites.
+ __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(equal, &no_info);
- __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
- __ Cmp(FieldOperand(rbx, 0),
- masm->isolate()->factory()->allocation_site_map());
- __ j(not_equal, &no_info);
// Only look at the lower 16 bits of the transition info.
__ movp(rdx, FieldOperand(rbx, AllocationSite::kTransitionInfoOffset));
__ SmiToInteger32(rdx, rdx);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ and_(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
+ __ andp(rdx, Immediate(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
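
With the type-info Cell gone, rbx now carries the AllocationSite itself (or
undefined), and the elements kind comes straight out of its transition info.
Equivalent C++ (sketch):

  ElementsKind SiteElementsKind(AllocationSite* site) {
    int info = Smi::cast(site->transition_info())->value();
    // ElementsKindBits::kShift == 0, so masking alone decodes the kind.
    return static_cast<ElementsKind>(
        info & AllocationSite::ElementsKindBits::kMask);
  }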
@@ -5036,7 +4962,7 @@ void InternalArrayConstructorStub::GenerateCase(
Label not_zero_case, not_one_case;
Label normal_sequence;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_zero, &not_zero_case);
InternalArrayNoArgumentConstructorStub stub0(kind);
__ TailCallStub(&stub0);
@@ -5050,7 +4976,7 @@ void InternalArrayConstructorStub::GenerateCase(
// look at the first argument
StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(rcx, args.GetArgumentOperand(0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &normal_sequence);
InternalArraySingleArgumentConstructorStub
@@ -5071,7 +4997,6 @@ void InternalArrayConstructorStub::GenerateCase(
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : argc
- // -- rbx : type info cell
// -- rdi : constructor
// -- rsp[0] : return address
// -- rsp[8] : last argument
@@ -5096,9 +5021,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following masking takes care of that anyway.
- __ movzxbq(rcx, FieldOperand(rcx, Map::kBitField2Offset));
+ __ movzxbp(rcx, FieldOperand(rcx, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
+ __ andp(rcx, Immediate(Map::kElementsKindMask));
__ shr(rcx, Immediate(Map::kElementsKindShift));
if (FLAG_debug_code) {
@@ -5144,7 +5069,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
Register context = rsi;
int argc = ArgumentBits::decode(bit_field_);
- bool restore_context = RestoreContextBits::decode(bit_field_);
+ bool is_store = IsStoreBits::decode(bit_field_);
bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
typedef FunctionCallbackArguments FCA;
@@ -5161,29 +5086,29 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(return_address);
// context save
- __ push(context);
+ __ Push(context);
// load context from callee
__ movp(context, FieldOperand(callee, JSFunction::kContextOffset));
// callee
- __ push(callee);
+ __ Push(callee);
// call data
- __ push(call_data);
+ __ Push(call_data);
Register scratch = call_data;
if (!call_data_undefined) {
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
}
// return value
- __ push(scratch);
+ __ Push(scratch);
// return value default
- __ push(scratch);
+ __ Push(scratch);
// isolate
__ Move(scratch,
ExternalReference::isolate_address(masm->isolate()));
- __ push(scratch);
+ __ Push(scratch);
// holder
- __ push(holder);
+ __ Push(holder);
__ movp(scratch, rsp);
// Push return address back on stack.
@@ -5197,7 +5122,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
// FunctionCallbackInfo::implicit_args_.
__ movp(StackSpaceOperand(0), scratch);
- __ addq(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
+ __ addp(scratch, Immediate((argc + FCA::kArgsLength - 1) * kPointerSize));
__ movp(StackSpaceOperand(1), scratch); // FunctionCallbackInfo::values_.
__ Set(StackSpaceOperand(2), argc); // FunctionCallbackInfo::length_.
// FunctionCallbackInfo::is_construct_call_.
@@ -5216,23 +5141,25 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) {
ASSERT(!api_function_address.is(arguments_arg));
// v8::InvocationCallback's argument.
- __ lea(arguments_arg, StackSpaceOperand(0));
+ __ leap(arguments_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
- StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength,
+ // Accessor for FunctionCallbackInfo and the first JS arg.
+ StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
Operand context_restore_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - 1 - FCA::kContextSaveIndex);
+ FCA::kArgsLength - FCA::kContextSaveIndex);
+ // Stores return the first JS argument.
Operand return_value_operand = args_from_rbp.GetArgumentOperand(
- FCA::kArgsLength - 1 - FCA::kReturnValueOffset);
+ is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
__ CallApiFunctionAndReturn(
api_function_address,
thunk_address,
callback_arg,
argc + FCA::kArgsLength + 1,
return_value_operand,
- restore_context ? &context_restore_operand : NULL);
+ &context_restore_operand);
}
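
For reference, the stack the pushes above build, seen from the stub once the
return address is pushed back (derived from the push order; offsets assume
8-byte slots):

  //   rsp[0x00] : return address
  //   rsp[0x08] : holder
  //   rsp[0x10] : isolate
  //   rsp[0x18] : ReturnValue default value
  //   rsp[0x20] : ReturnValue
  //   rsp[0x28] : call data
  //   rsp[0x30] : callee (JSFunction)
  //   rsp[0x38] : context save
  //   rsp[0x40] : first JS argument, then the rest of the arguments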
@@ -5263,17 +5190,17 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Allocate v8::AccessorInfo in non-GCed stack space.
const int kArgStackSpace = 1;
- __ lea(name_arg, Operand(rsp, kPCOnStackSize));
+ __ leap(name_arg, Operand(rsp, kPCOnStackSize));
__ PrepareCallApiFunction(kArgStackSpace);
- __ lea(scratch, Operand(name_arg, 1 * kPointerSize));
+ __ leap(scratch, Operand(name_arg, 1 * kPointerSize));
// v8::PropertyAccessorInfo::args_.
__ movp(StackSpaceOperand(0), scratch);
// The context register (rsi) has been saved in PrepareCallApiFunction and
// could be used to pass arguments.
- __ lea(accessor_info_arg, StackSpaceOperand(0));
+ __ leap(accessor_info_arg, StackSpaceOperand(0));
Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index c65307a74..8c8ab691a 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -305,19 +305,19 @@ class RecordWriteStub: public PlatformCodeStub {
// We don't have to save scratch0_orig_ because it was given to us as
// a scratch register. But if we had to switch to a different reg then
// we should save the new scratch0_.
- if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Push(scratch0_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->push(rcx);
+ masm->Push(rcx);
}
- masm->push(scratch1_);
+ masm->Push(scratch1_);
if (!address_.is(address_orig_)) {
- masm->push(address_);
+ masm->Push(address_);
masm->movp(address_, address_orig_);
}
if (!object_.is(object_orig_)) {
- masm->push(object_);
+ masm->Push(object_);
masm->movp(object_, object_orig_);
}
}
@@ -328,19 +328,19 @@ class RecordWriteStub: public PlatformCodeStub {
// one, since only one of them can alias with rcx.
if (!object_.is(object_orig_)) {
masm->movp(object_orig_, object_);
- masm->pop(object_);
+ masm->Pop(object_);
}
if (!address_.is(address_orig_)) {
masm->movp(address_orig_, address_);
- masm->pop(address_);
+ masm->Pop(address_);
}
- masm->pop(scratch1_);
+ masm->Pop(scratch1_);
if (!rcx.is(scratch0_orig_) &&
!rcx.is(object_orig_) &&
!rcx.is(address_orig_)) {
- masm->pop(rcx);
+ masm->Pop(rcx);
}
- if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+ if (!scratch0_.is(scratch0_orig_)) masm->Pop(scratch0_);
}
// If we have to call into C then we need to save and restore all caller-
@@ -401,7 +401,7 @@ class RecordWriteStub: public PlatformCodeStub {
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
- void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+ void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() { return RecordWrite; }
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index f292f7d25..9b92dc867 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -66,13 +66,13 @@ UnaryMathFunction CreateExpFunction() {
// xmm0: raw double input.
XMMRegister input = xmm0;
XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
+ __ pushq(rax);
+ __ pushq(rbx);
MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
- __ pop(rbx);
- __ pop(rax);
+ __ popq(rbx);
+ __ popq(rax);
__ movsd(xmm0, result);
__ Ret();
@@ -300,7 +300,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new backing store.
__ bind(&new_backing_store);
- __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
__ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
// Set backing store's map
__ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
@@ -353,7 +353,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
__ bind(&done);
@@ -381,13 +381,13 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
__ j(equal, &only_change_map);
- __ push(rax);
+ __ Push(rax);
__ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset));
__ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
// r8 : source FixedDoubleArray
// r9 : number of elements
- __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
__ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
// r11: destination FixedArray
__ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
@@ -404,7 +404,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Call into runtime if GC is required.
__ bind(&gc_required);
- __ pop(rax);
+ __ Pop(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ jmp(fail);
@@ -446,7 +446,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
rdi);
__ bind(&entry);
- __ decq(r9);
+ __ decp(r9);
__ j(not_sign, &loop);
// Replace receiver's backing store with newly created and filled FixedArray.
@@ -458,7 +458,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ pop(rax);
+ __ Pop(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&only_change_map);
@@ -496,7 +496,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
- __ addq(index, result);
+ __ addp(index, result);
__ movp(string, FieldOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded, Label::kNear);
@@ -606,13 +606,13 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ movq(temp2, double_scratch);
__ subsd(double_scratch, result);
__ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
+ __ leaq(temp1, Operand(temp2, 0x1ff800));
+ __ andq(temp2, Immediate(0x7ff));
__ shr(temp1, Immediate(11));
__ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
__ Move(kScratchRegister, ExternalReference::math_exp_log_table());
__ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ orq(temp1, Operand(kScratchRegister, temp2, times_8, 0));
__ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
__ subsd(double_scratch, input);
__ movsd(input, double_scratch);
@@ -640,10 +640,10 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
// following boilerplate stack-building prologue that is found both in
// FUNCTION and OPTIMIZED_FUNCTION code:
CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
+ patcher.masm()->pushq(rbp);
patcher.masm()->movp(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
+ patcher.masm()->Push(rsi);
+ patcher.masm()->Push(rdi);
initialized = true;
}
return sequence;
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 8ae03deae..36d5df678 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -121,7 +121,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
Register reg = { r };
ASSERT(!reg.is(kScratchRegister));
if ((object_regs & (1 << r)) != 0) {
- __ push(reg);
+ __ Push(reg);
}
if ((non_object_regs & (1 << r)) != 0) {
__ PushInt64AsTwoSmis(reg);
@@ -145,7 +145,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ Set(reg, kDebugZapValue);
}
if ((object_regs & (1 << r)) != 0) {
- __ pop(reg);
+ __ Pop(reg);
}
// Reconstruct the 64-bit value from two smis.
if ((non_object_regs & (1 << r)) != 0) {
@@ -154,9 +154,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
}
// Read current padding counter and skip corresponding number of words.
- __ pop(kScratchRegister);
+ __ Pop(kScratchRegister);
__ SmiToInteger32(kScratchRegister, kScratchRegister);
- __ lea(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
+ __ leap(rsp, Operand(rsp, kScratchRegister, times_pointer_size, 0));
// Get rid of the internal frame.
}
@@ -164,7 +164,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// If this call did not replace a call but patched other code then there will
// be an unwanted return address left on the stack. Here we get rid of that.
if (convert_call_to_jmp) {
- __ addq(rsp, Immediate(kPCOnStackSize));
+ __ addp(rsp, Immediate(kPCOnStackSize));
}
// Now that the break point has been handled, resume normal execution by
@@ -173,7 +173,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ Move(kScratchRegister, after_break_target);
- __ jmp(Operand(kScratchRegister, 0));
+ __ Jump(Operand(kScratchRegister, 0));
}
@@ -261,9 +261,11 @@ void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
// Register state for CallFunctionStub (from code-stubs-x64.cc).
// ----------- S t a t e -------------
// -- rdi : function
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: slot in feedback array
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), 0, false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ 0, false);
}
@@ -285,10 +287,12 @@ void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
// above IC call.
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rbx: cache cell for call target
+ // -- rbx: feedback array
+ // -- rdx: feedback slot (smi)
// -----------------------------------
// The number of arguments in rax is not smi encoded.
- Generate_DebugBreakCallHelper(masm, rbx.bit() | rdi.bit(), rax.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rbx.bit() | rdx.bit() | rdi.bit(),
+ rax.bit(), false);
}
@@ -323,10 +327,10 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ movp(Operand(rax, 0), Immediate(0));
// We do not know our frame height, but set rsp based on rbp.
- __ lea(rsp, Operand(rbp, -1 * kPointerSize));
+ __ leap(rsp, Operand(rbp, -1 * kPointerSize));
- __ pop(rdi); // Function.
- __ pop(rbp);
+ __ Pop(rdi); // Function.
+ __ popq(rbp);
// Load context from the function.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
@@ -334,7 +338,7 @@ void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
// Get function code.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movp(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
- __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
+ __ leap(rdx, FieldOperand(rdx, Code::kHeaderSize));
// Re-run JSFunction, rdi is function, rsi is context.
__ jmp(rdx);
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index aee8be6e1..4bc644def 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -51,6 +51,26 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
+ if (FLAG_zap_code_space) {
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+ osr_patcher.masm()->int3();
+ }
+ }
+
// For each LLazyBailout instruction insert an absolute call to the
// corresponding deoptimization entry, or a short call to an absolute
// jump if space is short. The absolute jumps are put in a table just
@@ -63,6 +83,12 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
#endif
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
+ SharedFunctionInfo* shared =
+ SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+ shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
+ deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
+ // For each LLazyBailout instruction insert a call to the corresponding
+ // deoptimization entry.
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
// Position where Call will be patched in.
@@ -141,7 +167,7 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::NumAllocatableRegisters();
- __ subq(rsp, Immediate(kDoubleRegsSize));
+ __ subp(rsp, Immediate(kDoubleRegsSize));
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
@@ -153,7 +179,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// to restore all later.
for (int i = 0; i < kNumberOfRegisters; i++) {
Register r = Register::from_code(i);
- __ push(r);
+ __ pushq(r);
}
const int kSavedRegistersAreaSize = kNumberOfRegisters * kRegisterSize +
@@ -170,11 +196,11 @@ void Deoptimizer::EntryGenerator::Generate() {
// Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
__ movp(arg_reg_4, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
+ __ leap(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kRegisterSize +
kPCOnStackSize));
- __ subq(arg5, rbp);
- __ neg(arg5);
+ __ subp(arg5, rbp);
+ __ negp(arg5);
// Allocate a new deoptimizer object.
__ PrepareCallCFunction(6);
@@ -204,40 +230,40 @@ void Deoptimizer::EntryGenerator::Generate() {
// Fill in the input registers.
for (int i = kNumberOfRegisters -1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ pop(Operand(rbx, offset));
+ __ Pop(Operand(rbx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
- __ pop(Operand(rbx, dst_offset));
+ __ popq(Operand(rbx, dst_offset));
}
// Remove the bailout id and return address from the stack.
- __ addq(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
+ __ addp(rsp, Immediate(1 * kRegisterSize + kPCOnStackSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
- __ addq(rcx, rsp);
+ __ addp(rcx, rsp);
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
- __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ __ leap(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
Label pop_loop_header;
__ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
- __ pop(Operand(rdx, 0));
- __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ Pop(Operand(rdx, 0));
+ __ addp(rdx, Immediate(sizeof(intptr_t)));
__ bind(&pop_loop_header);
- __ cmpq(rcx, rsp);
+ __ cmpp(rcx, rsp);
__ j(not_equal, &pop_loop);
// Compute the output frame in the deoptimizer.
- __ push(rax);
+ __ pushq(rax);
__ PrepareCallCFunction(2);
__ movp(arg_reg_1, rax);
__ LoadAddress(arg_reg_2, ExternalReference::isolate_address(isolate()));
@@ -246,7 +272,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 2);
}
- __ pop(rax);
+ __ popq(rax);
// Replace the current frame with the output frames.
Label outer_push_loop, inner_push_loop,
@@ -255,7 +281,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movp(rax, Operand(rax, Deoptimizer::output_offset()));
- __ lea(rdx, Operand(rax, rdx, times_pointer_size, 0));
+ __ leap(rdx, Operand(rax, rdx, times_pointer_size, 0));
__ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
@@ -263,14 +289,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ movp(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
__ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
- __ subq(rcx, Immediate(sizeof(intptr_t)));
- __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ subp(rcx, Immediate(sizeof(intptr_t)));
+ __ Push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
__ bind(&inner_loop_header);
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(not_zero, &inner_push_loop);
- __ addq(rax, Immediate(kPointerSize));
+ __ addp(rax, Immediate(kPointerSize));
__ bind(&outer_loop_header);
- __ cmpq(rax, rdx);
+ __ cmpp(rax, rdx);
__ j(below, &outer_push_loop);
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
@@ -280,14 +306,14 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Push state, pc, and continuation from the last output frame.
- __ push(Operand(rbx, FrameDescription::state_offset()));
- __ push(Operand(rbx, FrameDescription::pc_offset()));
- __ push(Operand(rbx, FrameDescription::continuation_offset()));
+ __ Push(Operand(rbx, FrameDescription::state_offset()));
+ __ Push(Operand(rbx, FrameDescription::pc_offset()));
+ __ Push(Operand(rbx, FrameDescription::continuation_offset()));
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
- __ push(Operand(rbx, offset));
+ __ Push(Operand(rbx, offset));
}
// Restore the registers from the stack.
@@ -299,7 +325,7 @@ void Deoptimizer::EntryGenerator::Generate() {
ASSERT(i > 0);
r = Register::from_code(i - 1);
}
- __ pop(r);
+ __ popq(r);
}
// Set up the roots register.
@@ -317,7 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- __ push_imm32(i);
+ __ pushq_imm32(i);
__ jmp(&done);
ASSERT(masm()->pc_offset() - start == table_entry_size_);
}
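
Note on the ASSERT above: each table entry pairs a push imm32 (0x68 plus a 4-byte id) with a jmp rel32 (0xE9 plus a 4-byte offset), the standard x86-64 encodings, so entries sit a fixed 10 bytes apart and a bailout id can be recovered from an entry's offset. A minimal stand-alone sketch of that arithmetic (the constant names here are ours, not V8's):

    #include <cstdio>

    int main() {
      // "push imm32" = 5 bytes, "jmp rel32" = 5 bytes: the fixed spacing the
      // pc_offset() ASSERT in the generator loop relies on.
      const int kPushImm32Size = 5;
      const int kJmpRel32Size = 5;
      std::printf("table_entry_size = %d bytes\n",
                  kPushImm32Size + kJmpRel32Size);
      return 0;
    }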
@@ -335,6 +361,12 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
}
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
+
+
#undef __
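
The renames through this file follow the convention this patch series introduces: a trailing 'p' marks pointer-size operations (64-bit on plain x64, 32-bit under a prospective x32 ABI), a trailing 'q' always means quadword, and capitalized Push/Pop are macro-assembler helpers for pointer-size values. A small stand-alone sketch of the width distinction (the constants are stand-ins for V8's kPointerSize/kRegisterSize):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Pointer-size ("p"-suffixed) ops track the ABI's pointer width, while
      // "q"-suffixed ops such as pushq/popq always move a full quadword.
      const size_t kPointerSize = sizeof(void*);  // 8 on x64, 4 under x32
      const size_t kRegisterSize = 8;             // raw register width
      std::printf("addp/subp/cmpp width: %zu bytes, pushq/popq: %zu bytes\n",
                  kPointerSize, kRegisterSize);
      return 0;
    }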
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 2d659cf0e..b870eae85 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -485,9 +485,11 @@ int DisassemblerX64::PrintRightOperandHelper(
} else if (base == 5) {
// base == rbp means no base register (when mod == 0).
int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
- AppendToBuffer("[%s*%d+0x%x]",
+ AppendToBuffer("[%s*%d%s0x%x]",
NameOfCPURegister(index),
- 1 << scale, disp);
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return 6;
} else if (index != 4 && base != 5) {
// [base+index*scale]
@@ -512,38 +514,29 @@ int DisassemblerX64::PrintRightOperandHelper(
int scale, index, base;
get_sib(sib, &scale, &index, &base);
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
- : *reinterpret_cast<char*>(modrmp + 2);
+ : *reinterpret_cast<int8_t*>(modrmp + 2);
if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(base), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(base), disp);
- }
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(base),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
} else {
- if (-disp > 0) {
- AppendToBuffer("[%s+%s*%d-0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- -disp);
- } else {
- AppendToBuffer("[%s+%s*%d+0x%x]",
- NameOfCPURegister(base),
- NameOfCPURegister(index),
- 1 << scale,
- disp);
- }
+ AppendToBuffer("[%s+%s*%d%s0x%x]",
+ NameOfCPURegister(base),
+ NameOfCPURegister(index),
+ 1 << scale,
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
} else {
// No sib.
int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
- : *reinterpret_cast<char*>(modrmp + 1);
- if (-disp > 0) {
- AppendToBuffer("[%s-0x%x]", NameOfCPURegister(rm), -disp);
- } else {
- AppendToBuffer("[%s+0x%x]", NameOfCPURegister(rm), disp);
- }
+ : *reinterpret_cast<int8_t*>(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]",
+ NameOfCPURegister(rm),
+ disp < 0 ? "-" : "+",
+ disp < 0 ? -disp : disp);
return (mod == 2) ? 5 : 2;
}
break;
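
The char to int8_t change above is a correctness fix, not a cleanup: plain char has implementation-defined signedness, so on a toolchain where char is unsigned an 8-bit displacement such as 0xF8 would decode as +248 rather than -8. A runnable sketch of the sign-aware formatting the hunk adopts (the rbp operand is hypothetical):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // A hypothetical ModRM displacement byte of 0xF8, i.e. -8.
      unsigned char raw = 0xF8;
      // Reading through int8_t guarantees sign extension; through plain char
      // it would be implementation-defined.
      int disp = *reinterpret_cast<int8_t*>(&raw);
      std::printf("[rbp%s0x%x]\n",
                  disp < 0 ? "-" : "+",
                  disp < 0 ? -disp : disp);  // prints: [rbp-0x8]
      return 0;
    }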
@@ -1096,6 +1089,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else if (opcode == 0x50) {
AppendToBuffer("movmskpd %s,", NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x73) {
+ current += 1;
+ ASSERT(regop == 6);
+ AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
+ current += 1;
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
@@ -1326,6 +1324,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
} else {
AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
}
+ } else if (opcode == 0xBD) {
+ AppendToBuffer("%s%c ", mnemonic, operand_size_code());
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("%s,", NameOfCPURegister(regop));
+ current += PrintRightOperand(current);
} else {
UnimplementedInstruction();
}
@@ -1368,6 +1372,8 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "movzxb";
case 0xB7:
return "movzxw";
+ case 0xBD:
+ return "bsr";
case 0xBE:
return "movsxb";
case 0xBF:
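
With the 0xBD entries added above, a REX.W-prefixed 0F BD now disassembles roughly as bsrq dst,src. A stand-alone reproduction of the two AppendToBuffer calls, assuming a 'q' size code and hypothetical operands:

    #include <cstdio>

    int main() {
      // Mirrors the new 0xBD branch: mnemonic plus size code, then "dst,src".
      char buffer[32];
      std::snprintf(buffer, sizeof(buffer), "%s%c %s,%s",
                    "bsr", 'q', "rax", "rbx");
      std::puts(buffer);  // prints: bsrq rax,rbx
      return 0;
    }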
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 6eb02a917..1fb77ffa6 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -77,6 +77,8 @@ class ExitFrameConstants : public AllStatic {
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
static const int kCallerSPDisplacement = kCallerPCOffset + kPCOnStackSize;
+
+ static const int kConstantPoolOffset = 0; // Not used
};
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 621eacc70..f0b943862 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -101,6 +101,23 @@ class JumpPatchSite BASE_EMBEDDED {
};
+static void EmitStackCheck(MacroAssembler* masm_,
+ int pointers = 0,
+ Register scratch = rsp) {
+ Isolate* isolate = masm_->isolate();
+ Label ok;
+ ASSERT(scratch.is(rsp) == (pointers == 0));
+ if (pointers != 0) {
+ __ movq(scratch, rsp);
+ __ subq(scratch, Immediate(pointers * kPointerSize));
+ }
+ __ CompareRoot(scratch, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ __ call(isolate->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+ __ bind(&ok);
+}
+
+
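
The new helper folds two call shapes into one: called with defaults it compares rsp itself against the stack limit, and called with a pointer count it probes whether the frame about to be allocated would cross the limit, using the scratch register for the computed address. A tiny stand-alone analogue of that contract (register names as plain strings, purely illustrative):

    #include <cassert>
    #include <cstring>

    // A scratch register must be supplied exactly when a nonzero frame size
    // is being probed; this mirrors the ASSERT in EmitStackCheck.
    static void CheckStackCheckArgs(int pointers, const char* scratch) {
      assert((std::strcmp(scratch, "rsp") == 0) == (pointers == 0));
    }

    int main() {
      CheckStackCheckArgs(0, "rsp");    // plain check: compare rsp to limit
      CheckStackCheckArgs(128, "rcx");  // probe rsp - 128 * kPointerSize first
      return 0;
    }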
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right, with the
// return address on top of them. The actual argument count matches the
@@ -118,6 +135,9 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+ InitializeFeedbackVector();
+
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
@@ -132,10 +152,10 @@ void FullCodeGenerator::Generate() {
}
#endif
- // Classic mode functions and builtins need to replace the receiver with the
+ // Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info->is_classic_mode() && !info->is_native()) {
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
// +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
@@ -168,9 +188,28 @@ void FullCodeGenerator::Generate() {
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
+ if (locals_count >= 128) {
+ EmitStackCheck(masm_, locals_count, rcx);
+ }
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
- for (int i = 0; i < locals_count; i++) {
- __ push(rdx);
+ const int kMaxPushes = 32;
+ if (locals_count >= kMaxPushes) {
+ int loop_iterations = locals_count / kMaxPushes;
+ __ movq(rcx, Immediate(loop_iterations));
+ Label loop_header;
+ __ bind(&loop_header);
+ // Do pushes.
+ for (int i = 0; i < kMaxPushes; i++) {
+ __ Push(rdx);
+ }
+ // Continue loop if not done.
+ __ decq(rcx);
+ __ j(not_zero, &loop_header, Label::kNear);
+ }
+ int remaining = locals_count % kMaxPushes;
+ // Emit the remaining pushes.
+ for (int i = 0; i < remaining; i++) {
+ __ Push(rdx);
}
}
}
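
This rewrite bounds straight-line prologue size: full blocks of kMaxPushes pushes run inside a counted loop and only the remainder is unrolled, with the large-frame stack probe above guarding the whole allocation. The arithmetic, as a runnable check for a hypothetical 100-local function:

    #include <cstdio>

    int main() {
      // Three counted blocks of 32 pushes, then four straight-line pushes.
      const int kMaxPushes = 32;
      int locals_count = 100;                           // hypothetical
      int loop_iterations = locals_count / kMaxPushes;  // 3
      int remaining = locals_count % kMaxPushes;        // 4
      std::printf("%d looped + %d unrolled = %d pushes\n",
                  loop_iterations * kMaxPushes, remaining, locals_count);
      return 0;
    }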
@@ -183,15 +222,15 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in rdi.
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ push(rdi);
+ __ Push(rdi);
__ Push(info->scope()->GetScopeInfo());
- __ CallRuntime(Runtime::kNewGlobalContext, 2);
+ __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in rax. It replaces the context passed to us.
@@ -225,28 +264,28 @@ void FullCodeGenerator::Generate() {
// case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
if (function_in_register) {
- __ push(rdi);
+ __ Push(rdi);
} else {
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ lea(rdx,
+ __ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(rdx);
+ __ Push(rdx);
__ Push(Smi::FromInt(num_parameters));
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
ArgumentsAccessStub::Type type;
- if (!is_classic_mode()) {
+ if (strict_mode() == STRICT) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
- type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
ArgumentsAccessStub stub(type);
__ CallStub(&stub);
@@ -272,7 +311,7 @@ void FullCodeGenerator::Generate() {
if (scope()->is_function_scope() && scope()->function() != NULL) {
VariableDeclaration* function = scope()->function();
ASSERT(function->proxy()->var()->mode() == CONST ||
- function->proxy()->var()->mode() == CONST_HARMONY);
+ function->proxy()->var()->mode() == CONST_LEGACY);
ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
VisitVariableDeclaration(function);
}
@@ -281,11 +320,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
- __ bind(&ok);
+ EmitStackCheck(masm_);
}
{ Comment cmnt(masm_, "[ Body");
@@ -360,7 +395,7 @@ void FullCodeGenerator::EmitReturnSequence() {
} else {
__ bind(&return_label_);
if (FLAG_trace) {
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
// Pretend that the exit is a backwards jump to the entry.
@@ -375,10 +410,10 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterDecrement(weight);
Label ok;
__ j(positive, &ok, Label::kNear);
- __ push(rax);
+ __ Push(rax);
__ call(isolate()->builtins()->InterruptCheck(),
RelocInfo::CODE_TARGET);
- __ pop(rax);
+ __ Pop(rax);
EmitProfilingCounterReset();
__ bind(&ok);
#ifdef DEBUG
@@ -391,7 +426,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// Do not use the leave instruction here because it is too short to
// patch with the code required by the debugger.
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
int no_frame_start = masm_->pc_offset();
int arguments_bytes = (info_->scope()->num_parameters() + 1) * kPointerSize;
@@ -429,7 +464,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand operand = codegen()->VarOperand(var, result_register());
- __ push(operand);
+ __ Push(operand);
}
@@ -638,8 +673,8 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
- CallIC(ic, NOT_CONTEXTUAL, condition->test_id());
- __ testq(result_register(), result_register());
+ CallIC(ic, condition->test_id());
+ __ testp(result_register(), result_register());
// The stub returns nonzero for true.
Split(not_zero, if_true, if_false, fall_through);
}
@@ -755,7 +790,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET;
+ bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
switch (variable->location()) {
case Variable::UNALLOCATED:
globals_->Add(variable->name(), zone());
@@ -787,7 +822,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
// Declaration nodes are always introduced in one of four modes.
ASSERT(IsDeclaredVariableMode(mode));
@@ -803,7 +838,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
} else {
__ Push(Smi::FromInt(0)); // Indicates no initial value.
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -853,11 +888,11 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ push(rsi);
+ __ Push(rsi);
__ Push(variable->name());
__ Push(Smi::FromInt(NONE));
VisitForStackValue(declaration->fun());
- __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
break;
}
}
@@ -924,10 +959,10 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(DeclareGlobalsFlags()));
- __ CallRuntime(Runtime::kDeclareGlobals, 3);
+ __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
// Return value is ignored.
}
@@ -935,7 +970,7 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
+ __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
// Return value is ignored.
}
@@ -978,10 +1013,10 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -991,7 +1026,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
- CallIC(ic, NOT_CONTEXTUAL, clause->CompareId());
+ CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo();
Label skip;
@@ -1003,7 +1038,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ jmp(clause->body_target());
__ bind(&skip);
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ jmp(clause->body_target());
@@ -1035,6 +1070,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
+ int slot = stmt->ForInFeedbackSlot();
SetStatementPosition(stmt);
Label loop, exit;
@@ -1048,7 +1084,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &exit);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
__ j(equal, &exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
@@ -1059,10 +1095,10 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
- __ push(rax);
+ __ Push(rax);
// Check for proxies.
Label call_runtime;
@@ -1084,7 +1120,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax); // Duplicate the enumerable object on the stack.
+ __ Push(rax); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a map from the runtime call, we can do a fast
@@ -1109,28 +1145,29 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(rax); // Map.
- __ push(rcx); // Enumeration cache.
- __ push(rdx); // Number of valid entries for the map in the enum cache.
+ __ Push(rax); // Map.
+ __ Push(rcx); // Enumeration cache.
+ __ Push(rdx); // Number of valid entries for the map in the enum cache.
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
__ bind(&no_descriptors);
- __ addq(rsp, Immediate(kPointerSize));
+ __ addp(rsp, Immediate(kPointerSize));
__ jmp(&exit);
// We got a fixed array in register rax. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
- Handle<Cell> cell = isolate()->factory()->NewCell(
- Handle<Object>(Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
- isolate()));
- RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
- __ Move(rbx, cell);
- __ Move(FieldOperand(rbx, Cell::kValueOffset),
- Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker));
+ Handle<Object> feedback = Handle<Object>(
+ Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker),
+ isolate());
+ StoreFeedbackVectorSlot(slot, feedback);
+ // No need for a write barrier; we are storing a Smi in the feedback vector.
+ __ Move(rbx, FeedbackVector());
+ __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
+ Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
__ movp(rcx, Operand(rsp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
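
The replaced block swaps a per-call-site heap Cell for a slot in a per-function feedback vector: the marker Smi now lives at a fixed index in a FixedArray, which is why no write barrier is needed. A stand-alone analogue of the slot-indexed scheme (marker values here are stand-ins, not V8's):

    #include <cstdio>
    #include <vector>

    int main() {
      // One vector per function, indexed by feedback slot, replacing a
      // separately allocated Cell per AST node.
      const int kForInFastCaseMarker = 1, kForInSlowCaseMarker = 2;
      std::vector<int> feedback_vector(8, kForInFastCaseMarker);
      int slot = 3;  // stand-in for stmt->ForInFeedbackSlot()
      feedback_vector[slot] = kForInSlowCaseMarker;  // Smi-like: no barrier
      std::printf("slot %d -> %d\n", slot, feedback_vector[slot]);
      return 0;
    }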
@@ -1138,17 +1175,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(above, &non_proxy);
__ Move(rbx, Smi::FromInt(0)); // Zero indicates proxy
__ bind(&non_proxy);
- __ push(rbx); // Smi
- __ push(rax); // Array
+ __ Push(rbx); // Smi
+ __ Push(rax); // Array
__ movp(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ push(rax); // Fixed array length (as smi).
+ __ Push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
__ movp(rax, Operand(rsp, 0 * kPointerSize)); // Get the current index.
- __ cmpq(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
+ __ cmpp(rax, Operand(rsp, 1 * kPointerSize)); // Compare to the array length.
__ j(above_equal, loop_statement.break_label());
// Get the current entry of the array into register rbx.
@@ -1167,7 +1204,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If not, we may have to filter the key.
Label update_each;
__ movp(rcx, Operand(rsp, 4 * kPointerSize));
- __ cmpq(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ cmpp(rdx, FieldOperand(rcx, HeapObject::kMapOffset));
__ j(equal, &update_each, Label::kNear);
// For proxies, no filtering is done.
@@ -1178,8 +1215,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Convert the entry to a string or null if it isn't a property
// anymore. If the property has been removed while iterating, we
// just skip it.
- __ push(rcx); // Enumerable.
- __ push(rbx); // Current entry.
+ __ Push(rcx); // Enumerable.
+ __ Push(rbx); // Current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ Cmp(rax, Smi::FromInt(0));
__ j(equal, loop_statement.continue_label());
@@ -1207,7 +1244,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Remove the pointers stored on the stack.
__ bind(loop_statement.break_label());
- __ addq(rsp, Immediate(5 * kPointerSize));
+ __ addp(rsp, Immediate(5 * kPointerSize));
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1238,7 +1275,7 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &done_convert);
__ bind(&convert);
- __ push(rax);
+ __ Push(rax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
@@ -1287,16 +1324,16 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode(), info->is_generator());
+ FastNewClosureStub stub(info->strict_mode(), info->is_generator());
__ Move(rbx, info);
__ CallStub(&stub);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(info);
__ Push(pretenure
? isolate()->factory()->true_value()
: isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(Runtime::kHiddenNewClosure, 3);
}
context()->Plug(rax);
}
@@ -1317,9 +1354,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
@@ -1331,7 +1368,7 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// If no outer scope calls eval, we do not need to check more
// context extensions. If we have reached an eval scope, we check
// all extensions from this point.
- if (!s->outer_scope_calls_non_strict_eval() || s->is_eval_scope()) break;
+ if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
s = s->outer_scope();
}
@@ -1346,10 +1383,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ LoadRoot(kScratchRegister, Heap::kNativeContextMapRootIndex);
__ bind(&next);
// Terminate at native context.
- __ cmpq(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(temp, HeapObject::kMapOffset));
__ j(equal, &fast, Label::kNear);
// Check that extension is NULL.
- __ cmpq(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(temp, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// Load next context in chain.
__ movp(temp, ContextOperand(temp, Context::PREVIOUS_INDEX));
@@ -1376,9 +1413,9 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
- if (s->calls_non_strict_eval()) {
+ if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX),
Immediate(0));
__ j(not_equal, slow);
}
@@ -1388,7 +1425,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
}
}
// Check that last extension is NULL.
- __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ __ cmpp(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
// This function is used only for loads, not stores, so it's safe to
@@ -1413,16 +1450,15 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movp(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == LET || local->mode() == CONST ||
+ local->mode() == CONST_LEGACY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
- if (local->mode() == CONST) {
+ if (local->mode() == CONST_LEGACY) {
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- } else { // LET || CONST_HARMONY
+ } else { // LET || CONST
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
}
}
__ jmp(done);
@@ -1439,7 +1475,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// variables.
switch (var->location()) {
case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
@@ -1452,7 +1488,8 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT: {
- Comment cmnt(masm_, var->IsContextSlot() ? "Context slot" : "Stack slot");
+ Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
+ : "[ Stack slot");
if (var->binding_needs_init()) {
// var->scope() may be NULL when the proxy is located in eval code and
// refers to a potential outside binding. Currently those bindings are
@@ -1484,7 +1521,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Check that we always have valid source position.
ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
ASSERT(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST &&
+ skip_init_check = var->mode() != CONST_LEGACY &&
var->initializer_position() < proxy->position();
}
@@ -1494,14 +1531,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
GetVar(rax, var);
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST_HARMONY) {
+ if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
} else {
 // Uninitialized const bindings outside of harmony mode are unholed.
- ASSERT(var->mode() == CONST);
+ ASSERT(var->mode() == CONST_LEGACY);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
@@ -1514,15 +1551,15 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
}
case Variable::LOOKUP: {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- Comment cmnt(masm_, "Lookup slot");
- __ push(rsi); // Context.
+ __ Push(rsi); // Context.
__ Push(var->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
__ bind(&done);
context()->Plug(rax);
break;
@@ -1549,11 +1586,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(expr->pattern());
__ Push(expr->flags());
- __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
__ movp(rbx, rax);
__ bind(&materialized);
@@ -1563,10 +1600,10 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ jmp(&allocated);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(rbx);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
@@ -1606,16 +1643,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
int properties_count = constant_properties->length() / 2;
- if ((FLAG_track_double_fields && expr->may_store_doubles()) ||
- expr->depth() > 1 || Serializer::enabled() ||
+ if (expr->may_store_doubles() || expr->depth() > 1 || Serializer::enabled() ||
flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_properties);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
} else {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -1643,7 +1679,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key();
Expression* value = property->value();
if (!result_saved) {
- __ push(rax); // Save result on the stack
+ __ Push(rax); // Save result on the stack
result_saved = true;
}
switch (property->kind()) {
@@ -1658,14 +1694,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->value());
__ movp(rdx, Operand(rsp, 0));
- CallStoreIC(NOT_CONTEXTUAL, key->LiteralFeedbackId());
+ CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
}
break;
}
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
@@ -1676,7 +1712,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
}
break;
case ObjectLiteral::Property::PROTOTYPE:
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetPrototype, 2);
@@ -1698,7 +1734,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ push(Operand(rsp, 0)); // Duplicate receiver.
+ __ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
@@ -1708,7 +1744,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
ASSERT(result_saved);
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ CallRuntime(Runtime::kToFastProperties, 1);
}
@@ -1764,11 +1800,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else if (expr->depth() > 1 || Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
__ Push(constant_elements);
__ Push(Smi::FromInt(flags));
- __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+ __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
@@ -1800,7 +1836,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(rax); // array literal
+ __ Push(rax); // array literal
__ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1830,7 +1866,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
}
if (result_saved) {
- __ addq(rsp, Immediate(kPointerSize)); // literal index
+ __ addp(rsp, Immediate(kPointerSize)); // literal index
context()->PlugTOS();
} else {
context()->Plug(rax);
@@ -1839,13 +1875,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+ ASSERT(expr->target()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ Assignment");
- // Invalid left-hand sides are rewritten to have a 'throw ReferenceError'
- // on the left-hand side.
- if (!expr->target()->IsValidLeftHandSide()) {
- VisitForEffect(expr->target());
- return;
- }
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -1867,7 +1899,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the accumulator.
VisitForAccumulatorValue(property->obj());
- __ push(result_register());
+ __ Push(result_register());
} else {
VisitForStackValue(property->obj());
}
@@ -1877,7 +1909,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
VisitForStackValue(property->obj());
VisitForAccumulatorValue(property->key());
__ movp(rdx, Operand(rsp, 0));
- __ push(rax);
+ __ Push(rax);
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1907,7 +1939,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(rax); // Left operand goes on the stack.
+ __ Push(rax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
@@ -1961,7 +1993,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
case Yield::SUSPEND:
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
- __ push(result_register());
+ __ Push(result_register());
// Fall through.
case Yield::INITIAL: {
Label suspend, continuation, post_runtime, resume;
@@ -1980,16 +2012,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ lea(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
- __ cmpq(rsp, rbx);
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kExpressionsOffset));
+ __ cmpp(rsp, rbx);
__ j(equal, &post_runtime);
- __ push(rax); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ Push(rax); // generator object
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
- __ pop(result_register());
+ __ Pop(result_register());
EmitReturnSequence();
__ bind(&resume);
@@ -2026,26 +2058,26 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(rcx, Heap::kthrow_stringRootIndex); // "throw"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // exception
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // exception
__ jmp(&l_call);
// try { received = %yield result }
// Shuffle the received result above a try handler and yield it without
// re-boxing.
__ bind(&l_try);
- __ pop(rax); // result
+ __ Pop(rax); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(rax); // result
+ __ Push(rax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
__ movp(rax, Operand(rsp, generator_object_depth));
- __ push(rax); // g
+ __ Push(rax); // g
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
@@ -2053,10 +2085,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
- __ pop(rax); // result
+ __ Pop(rax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in rax
__ PopTryHandler();
@@ -2064,16 +2096,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
__ LoadRoot(rcx, Heap::knext_stringRootIndex); // "next"
- __ push(rcx);
- __ push(Operand(rsp, 2 * kPointerSize)); // iter
- __ push(rax); // received
+ __ Push(rcx);
+ __ Push(Operand(rsp, 2 * kPointerSize)); // iter
+ __ Push(rax); // received
// result = receiver[f](arg);
__ bind(&l_call);
__ movp(rdx, Operand(rsp, kPointerSize));
__ movp(rax, Operand(rsp, 2 * kPointerSize));
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, TypeFeedbackId::None());
+ CallIC(ic, TypeFeedbackId::None());
__ movp(rdi, rax);
__ movp(Operand(rsp, 2 * kPointerSize), rdi);
CallFunctionStub stub(1, CALL_AS_METHOD);
@@ -2084,16 +2116,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// if (!result.done) goto l_try;
__ bind(&l_loop);
- __ push(rax); // save result
+ __ Push(rax); // save result
__ LoadRoot(rcx, Heap::kdone_stringRootIndex); // "done"
CallLoadIC(NOT_CONTEXTUAL); // result.done in rax
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ testq(result_register(), result_register());
+ __ testp(result_register(), result_register());
__ j(zero, &l_try);
// result.value
- __ pop(rax); // result
+ __ Pop(rax); // result
__ LoadRoot(rcx, Heap::kvalue_stringRootIndex); // "value"
CallLoadIC(NOT_CONTEXTUAL); // result.value in rax
context()->DropAndPlug(2, rax); // drop iter and g
@@ -2107,12 +2139,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
Expression *value,
JSGeneratorObject::ResumeMode resume_mode) {
// The value stays in rax, and is ultimately read by the resumed generator, as
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+ // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
// rbx will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(rbx);
+ __ Pop(rbx);
// Check generator state.
Label wrong_state, closed_state, done;
@@ -2128,7 +2160,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
// Push receiver.
- __ push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
+ __ Push(FieldOperand(rbx, JSGeneratorObject::kReceiverOffset));
// Push holes for arguments to generator function.
__ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -2138,9 +2170,9 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
Label push_argument_holes, push_frame;
__ bind(&push_argument_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &push_frame);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_argument_holes);
// Enter a new JavaScript frame, and initialize its slots as they were when
@@ -2150,10 +2182,10 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ call(&resume_frame);
__ jmp(&done);
__ bind(&resume_frame);
- __ push(rbp); // Caller's frame pointer.
+ __ pushq(rbp); // Caller's frame pointer.
__ movp(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS Function.
+ __ Push(rsi); // Callee's context.
+ __ Push(rdi); // Callee's JS Function.
// Load the operand stack size.
__ movp(rdx, FieldOperand(rbx, JSGeneratorObject::kOperandStackOffset));
@@ -2164,12 +2196,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// in directly.
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ cmpq(rdx, Immediate(0));
+ __ cmpp(rdx, Immediate(0));
__ j(not_zero, &slow_resume);
__ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
__ SmiToInteger64(rcx,
FieldOperand(rbx, JSGeneratorObject::kContinuationOffset));
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ Move(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
__ jmp(rdx);
@@ -2180,15 +2212,15 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
// up the stack and the handlers.
Label push_operand_holes, call_resume;
__ bind(&push_operand_holes);
- __ subq(rdx, Immediate(1));
+ __ subp(rdx, Immediate(1));
__ j(carry, &call_resume);
- __ push(rcx);
+ __ Push(rcx);
__ jmp(&push_operand_holes);
__ bind(&call_resume);
- __ push(rbx);
- __ push(result_register());
+ __ Push(rbx);
+ __ Push(result_register());
__ Push(Smi::FromInt(resume_mode));
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+ __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
__ Abort(kGeneratorFailedToResume);
@@ -2201,15 +2233,15 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
EmitCreateIteratorResult(true);
} else {
// Throw the provided value.
- __ push(rax);
- __ CallRuntime(Runtime::kThrow, 1);
+ __ Push(rax);
+ __ CallRuntime(Runtime::kHiddenThrow, 1);
}
__ jmp(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
- __ push(rbx);
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+ __ Push(rbx);
+ __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
__ bind(&done);
context()->Plug(result_register());
@@ -2227,13 +2259,13 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ bind(&gc_required);
__ Push(Smi::FromInt(map->instance_size()));
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
__ Move(rbx, map);
- __ pop(rcx);
+ __ Pop(rcx);
__ Move(rdx, isolate()->factory()->ToBoolean(done));
ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
@@ -2264,7 +2296,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- CallIC(ic, NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+ CallIC(ic, prop->PropertyFeedbackId());
}
@@ -2277,17 +2309,16 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
Label done, stub_call, smi_case;
- __ pop(rdx);
+ __ Pop(rdx);
__ movp(rcx, rax);
- __ or_(rax, rdx);
+ __ orp(rax, rdx);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(rax, &smi_case, Label::kNear);
__ bind(&stub_call);
__ movp(rax, rcx);
BinaryOpICStub stub(op, mode);
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -2333,23 +2364,17 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op,
OverwriteMode mode) {
- __ pop(rdx);
+ __ Pop(rdx);
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(isolate()), NOT_CONTEXTUAL,
- expr->BinaryOperationFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
}
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten by the parser to have a 'throw
- // ReferenceError' on the left-hand side.
- if (!expr->IsValidLeftHandSide()) {
- VisitForEffect(expr);
- return;
- }
+ ASSERT(expr->IsValidLeftHandSide());
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@@ -2370,22 +2395,22 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
case NAMED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForAccumulatorValue(prop->obj());
__ movp(rdx, rax);
- __ pop(rax); // Restore value.
+ __ Pop(rax); // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->value());
- CallStoreIC(NOT_CONTEXTUAL);
+ CallStoreIC();
break;
}
case KEYED_PROPERTY: {
- __ push(rax); // Preserve value.
+ __ Push(rax); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ movp(rcx, rax);
- __ pop(rdx);
- __ pop(rax); // Restore value.
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rdx);
+ __ Pop(rax); // Restore value.
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic);
@@ -2396,44 +2421,58 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
}
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+ Variable* var, MemOperand location) {
+ __ movp(location, rax);
+ if (var->IsContextSlot()) {
+ __ movp(rdx, rax);
+ __ RecordWriteContextSlot(
+ rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
+ }
+}
+
+
+void FullCodeGenerator::EmitCallStoreContextSlot(
+ Handle<String> name, StrictMode strict_mode) {
+ __ Push(rax); // Value.
+ __ Push(rsi); // Context.
+ __ Push(name);
+ __ Push(Smi::FromInt(strict_mode));
+ __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
+}
+
+
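
The two helpers above deduplicate the assignment paths that follow: every stack or context-slot store funnels through EmitStoreToStackLocalOrContextSlot so the context-slot write barrier cannot be forgotten at a call site, and every lookup-slot store funnels through the runtime call. A minimal stand-alone analogue of the barrier decision (types and output are illustrative):

    #include <cstdio>

    struct Var { bool is_context_slot; };

    // Every store goes through one place, so the barrier check is centralized.
    static void StoreToSlot(const Var& var) {
      std::puts("movp location, rax");
      if (var.is_context_slot) {
        std::puts("RecordWriteContextSlot(...)");  // keep the GC's marking honest
      }
    }

    int main() {
      StoreToSlot(Var{false});  // stack local: plain store
      StoreToSlot(Var{true});   // context slot: store + write barrier
      return 0;
    }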
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
__ movp(rdx, GlobalObjectOperand());
- CallStoreIC(CONTEXTUAL);
- } else if (op == Token::INIT_CONST) {
+ CallStoreIC();
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
ASSERT(!var->IsParameter()); // No const parameters.
- if (var->IsStackLocal()) {
+ if (var->IsLookupSlot()) {
+ __ Push(rax);
+ __ Push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+ } else {
+ ASSERT(var->IsStackLocal() || var->IsContextSlot());
Label skip;
- __ movp(rdx, StackOperand(var));
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
- __ movp(StackOperand(var), rax);
+ EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
- } else {
- ASSERT(var->IsContextSlot() || var->IsLookupSlot());
- // Like var declarations, const declarations are hoisted to function
- // scope. However, unlike var initializers, const initializers are
- // able to drill a hole to that function context, even from inside a
- // 'with' context. We thus bypass the normal static scope lookup for
- // var->IsContextSlot().
- __ push(rax);
- __ push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
}
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
if (var->IsLookupSlot()) {
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitCallStoreContextSlot(var->name(), strict_mode());
} else {
ASSERT(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
@@ -2442,20 +2481,18 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &assign, Label::kNear);
__ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
__ bind(&assign);
- __ movp(location, rax);
- if (var->IsContextSlot()) {
- __ movp(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (!var->is_const_mode() || op == Token::INIT_CONST_HARMONY) {
+ } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
// Assignment to var or initializing assignment to let/const
// in harmony mode.
- if (var->IsStackAllocated() || var->IsContextSlot()) {
+ if (var->IsLookupSlot()) {
+ EmitCallStoreContextSlot(var->name(), strict_mode());
+ } else {
+ ASSERT(var->IsStackAllocated() || var->IsContextSlot());
MemOperand location = VarOperand(var, rcx);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
@@ -2463,20 +2500,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ Check(equal, kLetBindingReInitialization);
}
- // Perform the assignment.
- __ movp(location, rax);
- if (var->IsContextSlot()) {
- __ movp(rdx, rax);
- __ RecordWriteContextSlot(
- rcx, Context::SlotOffset(var->index()), rdx, rbx, kDontSaveFPRegs);
- }
- } else {
- ASSERT(var->IsLookupSlot());
- __ push(rax); // Value.
- __ push(rsi); // Context.
- __ Push(var->name());
- __ Push(Smi::FromInt(language_mode()));
- __ CallRuntime(Runtime::kStoreContextSlot, 4);
+ EmitStoreToStackLocalOrContextSlot(var, location);
}
}
// Non-initializing assignments to consts are ignored.
@@ -2492,8 +2516,8 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- CallStoreIC(NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2503,14 +2527,14 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
- __ pop(rcx);
- __ pop(rdx);
+ __ Pop(rcx);
+ __ Pop(rdx);
// Record source code position before IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = is_classic_mode()
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->AssignmentFeedbackId());
+ CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
context()->Plug(rax);
@@ -2529,7 +2553,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
} else {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ pop(rdx);
+ __ Pop(rdx);
EmitKeyedPropertyLoad(expr);
context()->Plug(rax);
}
@@ -2537,10 +2561,8 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
void FullCodeGenerator::CallIC(Handle<Code> code,
- ContextualMode mode,
TypeFeedbackId ast_id) {
ic_total_count_++;
- ASSERT(mode != CONTEXTUAL || ast_id.IsNone());
__ call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2559,7 +2581,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
PrepareForBailout(callee, NO_REGISTERS);
}
// Push undefined as receiver. This is patched in the method prologue if it
- // is a classic mode method.
+ // is a sloppy mode method.
__ Push(isolate()->factory()->undefined_value());
flags = NO_CALL_FUNCTION_FLAGS;
} else {
@@ -2569,7 +2591,7 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
flags = CALL_AS_METHOD;
}
@@ -2613,7 +2635,7 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
// Load the arguments.
@@ -2650,15 +2672,15 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
SetSourcePosition(expr->position());
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized);
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallFeedbackSlot()));
// Record call targets in unoptimized code.
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET);
__ movp(rdi, Operand(rsp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub, expr->CallFeedbackId());
+ __ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -2670,23 +2692,23 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ push(Operand(rsp, arg_count * kPointerSize));
+ __ Push(Operand(rsp, arg_count * kPointerSize));
} else {
__ PushRoot(Heap::kUndefinedValueRootIndex);
}
// Push the receiver of the enclosing function and do runtime call.
StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
- __ push(args.GetReceiverOperand());
+ __ Push(args.GetReceiverOperand());
// Push the language mode.
- __ Push(Smi::FromInt(language_mode()));
+ __ Push(Smi::FromInt(strict_mode()));
// Push the start position of the scope the call resides in.
__ Push(Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5);
+ __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
}
@@ -2702,8 +2724,8 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Call::CallType call_type = expr->GetCallType(isolate());
if (call_type == Call::POSSIBLY_EVAL_CALL) {
- // In a call to eval, we first call %ResolvePossiblyDirectEval to
- // resolve the function we need to call and the receiver of the call.
+ // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+ // to resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -2718,7 +2740,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push a copy of the function (found below the arguments) and resolve
// eval.
- __ push(Operand(rsp, (arg_count + 1) * kPointerSize));
+ __ Push(Operand(rsp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(arg_count);
// The runtime call returns a pair of values in rax (function) and
@@ -2751,11 +2773,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ bind(&slow);
// Call the runtime to find the function to call (returned in rax) and
// the object holding it (returned in rdx).
- __ push(context_register());
+ __ Push(context_register());
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- __ push(rax); // Function.
- __ push(rdx); // Receiver.
+ __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+ __ Push(rax); // Function.
+ __ Push(rdx); // Receiver.
// If fast case code has been generated, emit code to push the function
// and receiver and have the slow path jump around this code.
@@ -2764,7 +2786,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ jmp(&call, Label::kNear);
__ bind(&done);
// Push function.
- __ push(rax);
+ __ Push(rax);
// The receiver is implicitly the global receiver. Indicate this by
// passing undefined to the call function stub.
__ PushRoot(Heap::kUndefinedValueRootIndex);
@@ -2830,10 +2852,17 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Record call targets in unoptimized code, but not in the snapshot.
Handle<Object> uninitialized =
- TypeFeedbackCells::UninitializedSentinel(isolate());
- Handle<Cell> cell = isolate()->factory()->NewCell(uninitialized);
- RecordTypeFeedbackCell(expr->CallNewFeedbackId(), cell);
- __ Move(rbx, cell);
+ TypeFeedbackInfo::UninitializedSentinel(isolate());
+ StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized);
+ if (FLAG_pretenuring_call_new) {
+ StoreFeedbackVectorSlot(expr->AllocationSiteFeedbackSlot(),
+ isolate()->factory()->NewAllocationSite());
+ ASSERT(expr->AllocationSiteFeedbackSlot() ==
+ expr->CallNewFeedbackSlot() + 1);
+ }
+
+ __ Move(rbx, FeedbackVector());
+ __ Move(rdx, Smi::FromInt(expr->CallNewFeedbackSlot()));
CallConstructStub stub(RECORD_CALL_TARGET);
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
@@ -2905,10 +2934,10 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
__ testb(FieldOperand(rbx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, if_false);
- __ movzxbq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpq(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, if_false);
- __ cmpq(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(below_equal, if_true, if_false, fall_through);
@@ -2998,20 +3027,20 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpq(rcx, Immediate(0));
+ __ cmpp(rcx, Immediate(0));
__ j(equal, &done);
__ LoadInstanceDescriptors(rbx, r8);
// rbx: descriptor array.
// rcx: valid entries in the descriptor array.
// Calculate the end of the descriptor array.
- __ imul(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
+ __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
SmiIndex index = masm_->SmiToIndex(rdx, rcx, kPointerSizeLog2);
- __ lea(rcx,
+ __ leap(rcx,
Operand(
r8, index.reg, index.scale, DescriptorArray::kFirstOffset));
// Calculate location of the first key name.
- __ addq(r8, Immediate(DescriptorArray::kFirstOffset));
+ __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
// internalized string "valueOf", the result is false.
__ jmp(&entry);
@@ -3019,15 +3048,15 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ movp(rdx, FieldOperand(r8, 0));
__ Cmp(rdx, isolate()->factory()->value_of_string());
__ j(equal, if_false);
- __ addq(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmpq(r8, rcx);
+ __ cmpp(r8, rcx);
__ j(not_equal, &loop);
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(rbx, Map::kBitField2Offset),
+ __ orp(FieldOperand(rbx, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ bind(&skip_lookup);
@@ -3035,12 +3064,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// If a valueOf property is not found on the object, check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testq(rcx, Immediate(kSmiTagMask));
+ __ testp(rcx, Immediate(kSmiTagMask));
__ j(zero, if_false);
__ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpq(rcx,
+ __ cmpp(rcx,
ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3087,8 +3116,8 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(rax, map, if_false, DO_SMI_CHECK);
__ cmpl(FieldOperand(rax, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- __ j(not_equal, if_false);
+ Immediate(0x1));
+ __ j(no_overflow, if_false);
__ cmpl(FieldOperand(rax, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
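
The new minus-zero test leans on a bit trick: the upper 32 bits of -0.0 are 0x80000000, and the subtraction that cmpl against Immediate(0x1) performs sets the overflow flag only for that one value, so j(no_overflow, if_false) filters everything else with a single compare. A small host-side check (not V8 code) of the underlying fact:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double minus_zero = -0.0;
  uint64_t bits;
  std::memcpy(&bits, &minus_zero, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // HeapNumber exponent word
  uint32_t lo = static_cast<uint32_t>(bits);        // HeapNumber mantissa word
  std::printf("hi=0x%08x lo=0x%08x\n", hi, lo);     // 0x80000000 / 0x00000000
  // cmpl(hi, 1) sets OF iff hi == INT32_MIN == 0x80000000:
  bool of_set = (hi == 0x80000000u);
  std::printf("overflow path taken only for -0.0's high word: %d\n", of_set);
  return 0;
}
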
@@ -3189,8 +3218,8 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false,
&if_true, &if_false, &fall_through);
- __ pop(rbx);
- __ cmpq(rax, rbx);
+ __ Pop(rbx);
+ __ cmpp(rax, rbx);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(equal, if_true, if_false, fall_through);
@@ -3310,7 +3339,7 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) {
if (CodeGenerator::ShouldGenerateLog(isolate(), args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
- __ CallRuntime(Runtime::kLog, 2);
+ __ CallRuntime(Runtime::kHiddenLog, 2);
}
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
@@ -3389,7 +3418,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(scratch, stamp_operand);
- __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
+ __ cmpp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
@@ -3405,7 +3434,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
__ bind(&not_date_object);
- __ CallRuntime(Runtime::kThrowNotDateError, 0);
+ __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
__ bind(&done);
context()->Plug(rax);
}
@@ -3422,8 +3451,8 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3455,8 +3484,8 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
VisitForStackValue(args->at(1)); // index
VisitForStackValue(args->at(2)); // value
VisitForAccumulatorValue(args->at(0)); // string
- __ pop(value);
- __ pop(index);
+ __ Pop(value);
+ __ Pop(index);
if (FLAG_debug_code) {
__ Check(__ CheckSmi(value), kNonSmiValue);
@@ -3495,7 +3524,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(rbx); // rax = value. rbx = object.
+ __ Pop(rbx); // rax = value. rbx = object.
Label done;
// If the object is a smi, return the value.
@@ -3560,7 +3589,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
Register index = rax;
Register result = rdx;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3607,7 +3636,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
Register scratch = rdx;
Register result = rax;
- __ pop(object);
+ __ Pop(object);
Label need_conversion;
Label index_out_of_range;
@@ -3649,7 +3678,7 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- __ pop(rdx);
+ __ Pop(rdx);
StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
context()->Plug(rax);
@@ -3713,7 +3742,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ jmp(&done);
__ bind(&runtime);
- __ push(rax);
+ __ Push(rax);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
@@ -3728,8 +3757,8 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(2));
- __ pop(rbx);
- __ pop(rcx);
+ __ Pop(rbx);
+ __ Pop(rcx);
__ CallStub(&stub);
context()->Plug(rax);
}
@@ -3770,7 +3799,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// tmp now holds finger offset as a smi.
SmiIndex index =
__ SmiToIndex(kScratchRegister, tmp, kPointerSizeLog2);
- __ cmpq(key, FieldOperand(cache,
+ __ cmpp(key, FieldOperand(cache,
index.reg,
index.scale,
FixedArray::kHeaderSize));
@@ -3783,9 +3812,9 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
- __ push(cache);
- __ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ Push(cache);
+ __ Push(key);
+ __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
__ bind(&done);
context()->Plug(rax);
@@ -3861,7 +3890,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Separator operand is already pushed. Make room for the two
// other stack fields, and clear the direction flag in anticipation
// of calling CopyBytes.
- __ subq(rsp, Immediate(2 * kPointerSize));
+ __ subp(rsp, Immediate(2 * kPointerSize));
__ cld();
// Check that the array is a JSArray
__ JumpIfSmi(array, &bailout);
@@ -3899,7 +3928,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Live loop registers: index(int32), array_length(int32), string(String*),
// scratch, string_length(int32), elements(FixedArray*).
if (generate_debug_code_) {
- __ cmpq(index, array_length);
+ __ cmpp(index, array_length);
__ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
}
__ bind(&loop);
@@ -3975,7 +4004,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movp(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+ __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movp(string, separator_operand);
__ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
@@ -4003,7 +4032,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4038,7 +4067,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Copy the separator character to the result.
__ movb(Operand(result_pos, 0), scratch);
- __ incq(result_pos);
+ __ incp(result_pos);
__ bind(&loop_2_entry);
// Get string = array[index].
@@ -4047,7 +4076,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FixedArray::kHeaderSize));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
@@ -4063,16 +4092,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// count from -array_length to zero, so we don't need to maintain
// a loop limit.
__ movl(index, array_length_operand);
- __ lea(elements, FieldOperand(elements, index, times_pointer_size,
+ __ leap(elements, FieldOperand(elements, index, times_pointer_size,
FixedArray::kHeaderSize));
- __ neg(index);
+ __ negq(index);
// Replace separator string with pointer to its first character, and
// make scratch be its length.
__ movp(string, separator_operand);
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movp(separator_operand, string);
@@ -4098,7 +4127,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movp(string, Operand(elements, index, times_pointer_size, 0));
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
- __ lea(string,
+ __ leap(string,
FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
@@ -4109,15 +4138,15 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&return_result);
// Drop temp values from the stack, and restore context register.
- __ addq(rsp, Immediate(3 * kPointerSize));
+ __ addp(rsp, Immediate(3 * kPointerSize));
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
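
For reference, a scalar model of the fast path EmitFastAsciiArrayJoin implements above — a length pass, a single allocation, then byte copies with the separator interleaved. Plain C++ assuming one-byte strings throughout; not V8 code:

#include <string>
#include <vector>

// Mirrors the assembly's structure: sum the lengths (loop 1), allocate the
// result once (AllocateAsciiString), then copy payloads and separators
// (loops 2/3 via CopyBytes).
std::string FastAsciiArrayJoin(const std::vector<std::string>& elements,
                               const std::string& separator) {
  size_t total = 0;
  for (const std::string& s : elements) total += s.size();
  if (!elements.empty()) total += separator.size() * (elements.size() - 1);
  std::string result;
  result.reserve(total);  // one allocation, as in the generated code
  for (size_t i = 0; i < elements.size(); ++i) {
    if (i != 0) result += separator;
    result += elements[i];
  }
  return result;
}
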
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (name->length() > 0 && name->Get(0) == '_') {
+ if (expr->function() != NULL &&
+ expr->function()->intrinsic_type == Runtime::INLINE) {
Comment cmnt(masm_, "[ InlineRuntimeCall");
EmitInlineRuntimeCall(expr);
return;
@@ -4130,7 +4159,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as receiver.
__ movp(rax, GlobalObjectOperand());
- __ push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+ __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
// Load the function from the receiver.
__ movp(rax, Operand(rsp, 0));
@@ -4138,7 +4167,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
// Push the target function under the receiver.
- __ push(Operand(rsp, 0));
+ __ Push(Operand(rsp, 0));
__ movp(Operand(rsp, kPointerSize), rax);
// Push the arguments ("left-to-right").
@@ -4179,20 +4208,18 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ Push(Smi::FromInt(strict_mode_flag));
+ __ Push(Smi::FromInt(strict_mode()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
- ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
+ ASSERT(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
- __ push(GlobalObjectOperand());
+ __ Push(GlobalObjectOperand());
__ Push(var->name());
- __ Push(Smi::FromInt(kNonStrictMode));
+ __ Push(Smi::FromInt(SLOPPY));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
@@ -4203,9 +4230,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- __ push(context_register());
+ __ Push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteContextSlot, 2);
+ __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
context()->Plug(rax);
}
} else {
@@ -4286,16 +4313,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ ASSERT(expr->expression()->IsValidLeftHandSide());
+
Comment cmnt(masm_, "[ CountOperation");
SetSourcePosition(expr->position());
- // Invalid left-hand-sides are rewritten to have a 'throw
- // ReferenceError' as the left-hand side.
- if (!expr->expression()->IsValidLeftHandSide()) {
- VisitForEffect(expr->expression());
- return;
- }
-
// Expression can only be a property, a global or a (parameter or local)
// slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -4320,13 +4342,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
}
if (assign_type == NAMED_PROPERTY) {
VisitForAccumulatorValue(prop->obj());
- __ push(rax); // Copy of receiver, needed for later store.
+ __ Push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
__ movp(rdx, Operand(rsp, 0)); // Leave receiver on stack
- __ push(rax); // Copy of key, needed for later store.
+ __ Push(rax); // Copy of key, needed for later store.
EmitKeyedPropertyLoad(prop);
}
}
@@ -4354,7 +4376,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
@@ -4389,7 +4411,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(rax);
+ __ Push(rax);
break;
case NAMED_PROPERTY:
__ movp(Operand(rsp, kPointerSize), rax);
@@ -4409,9 +4431,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ movp(rdx, rax);
__ Move(rax, Smi::FromInt(1));
BinaryOpICStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(isolate()),
- NOT_CONTEXTUAL,
- expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4441,8 +4461,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->value());
- __ pop(rdx);
- CallStoreIC(NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ __ Pop(rdx);
+ CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4454,12 +4474,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break;
}
case KEYED_PROPERTY: {
- __ pop(rcx);
- __ pop(rdx);
- Handle<Code> ic = is_classic_mode()
+ __ Pop(rcx);
+ __ Pop(rdx);
+ Handle<Code> ic = strict_mode() == SLOPPY
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
- CallIC(ic, NOT_CONTEXTUAL, expr->CountStoreFeedbackId());
+ CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4480,7 +4500,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
ASSERT(!context()->IsTest());
if (proxy != NULL && proxy->var()->IsUnallocated()) {
- Comment cmnt(masm_, "Global variable");
+ Comment cmnt(masm_, "[ Global variable");
__ Move(rcx, proxy->name());
__ movp(rax, GlobalObjectOperand());
// Use a regular load, not a contextual load, to avoid a reference
@@ -4489,6 +4509,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
PrepareForBailout(expr, TOS_REG);
context()->Plug(rax);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+ Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -4496,9 +4517,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- __ push(rsi);
+ __ Push(rsi);
__ Push(proxy->name());
- __ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
@@ -4621,7 +4642,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
// The stub returns 0 for true.
Split(zero, if_true, if_false, fall_through);
break;
@@ -4630,16 +4651,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cc = CompareIC::ComputeCondition(op);
- __ pop(rdx);
+ __ Pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
__ movp(rcx, rdx);
- __ or_(rcx, rax);
+ __ orp(rcx, rax);
patch_site.EmitJumpIfNotSmi(rcx, &slow_case, Label::kNear);
- __ cmpq(rdx, rax);
+ __ cmpp(rdx, rax);
Split(cc, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -4647,11 +4668,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
+ CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
Split(cc, if_true, if_false, fall_through);
}
}
@@ -4682,8 +4703,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(equal, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
- CallIC(ic, NOT_CONTEXTUAL, expr->CompareOperationFeedbackId());
- __ testq(rax, rax);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ testp(rax, rax);
Split(not_zero, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -4730,10 +4751,10 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ push(ContextOperand(rsi, Context::CLOSURE_INDEX));
+ __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
} else {
ASSERT(declaration_scope->is_function_scope());
- __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
}
@@ -4748,29 +4769,29 @@ void FullCodeGenerator::EnterFinallyBlock() {
// Cook the return address on top of the stack (as a smi-encoded Code* delta).
__ PopReturnAddressTo(rdx);
__ Move(rcx, masm_->CodeObject());
- __ subq(rdx, rcx);
+ __ subp(rdx, rcx);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
// Store result register while executing finally block.
- __ push(result_register());
+ __ Push(result_register());
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Load(rdx, has_pending_message);
__ Integer32ToSmi(rdx, rdx);
- __ push(rdx);
+ __ Push(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Load(rdx, pending_message_script);
- __ push(rdx);
+ __ Push(rdx);
}
@@ -4778,30 +4799,30 @@ void FullCodeGenerator::ExitFinallyBlock() {
ASSERT(!result_register().is(rdx));
ASSERT(!result_register().is(rcx));
// Restore pending message from stack.
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ Store(pending_message_script, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ Store(has_pending_message, rdx);
- __ pop(rdx);
+ __ Pop(rdx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Store(pending_message_obj, rdx);
// Restore result register from stack.
- __ pop(result_register());
+ __ Pop(result_register());
// Uncook return address.
- __ pop(rdx);
+ __ Pop(rdx);
__ SmiToInteger32(rdx, rdx);
__ Move(rcx, masm_->CodeObject());
- __ addq(rdx, rcx);
+ __ addp(rdx, rcx);
__ jmp(rdx);
}
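
The cook/uncook pair exists because the finally block may trigger a GC that moves the Code object: only an offset lives on the stack, never a raw return address. A minimal sketch of the round trip (pointer arithmetic only; the smi tagging done by the real code is omitted):

#include <cassert>
#include <cstdint>

int main() {
  uintptr_t code_object = 0x100000;     // stand-in for masm_->CodeObject()
  uintptr_t return_address = 0x100428;  // raw address inside that code
  // EnterFinallyBlock: store only the delta (smi-encoded in the real code).
  intptr_t cooked = static_cast<intptr_t>(return_address - code_object);
  // ...the code object may move while the finally block runs...
  uintptr_t moved_code_object = 0x200000;
  // ExitFinallyBlock: rebase the delta onto the (possibly moved) code object.
  uintptr_t uncooked = moved_code_object + static_cast<uintptr_t>(cooked);
  assert(uncooked == 0x200428);
  return 0;
}
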
@@ -4876,6 +4897,7 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
}
Assembler::set_target_address_at(call_target_address,
+ unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
@@ -4893,20 +4915,23 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
if (*jns_instr_address == kJnsInstruction) {
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return INTERRUPT;
}
ASSERT_EQ(kNopByteOne, *jns_instr_address);
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- if (Assembler::target_address_at(call_target_address) ==
+ if (Assembler::target_address_at(call_target_address,
+ unoptimized_code) ==
isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
- Assembler::target_address_at(call_target_address));
+ Assembler::target_address_at(call_target_address,
+ unoptimized_code));
return OSR_AFTER_STACK_CHECK;
}
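
GetBackEdgeState's decision tree, modeled on the host (the jns opcode byte is real x64; everything else is an illustrative stand-in, not the V8 API):

#include <cstdint>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

// An unpatched back edge still starts with jns (opcode 0x79); once patched
// to nops, the call target tells the two OSR flavors apart.
BackEdgeState Classify(uint8_t jns_instr_byte, const void* call_target,
                       const void* on_stack_replacement_entry) {
  const uint8_t kJnsOpcode = 0x79;
  if (jns_instr_byte == kJnsOpcode) return INTERRUPT;
  return call_target == on_stack_replacement_entry ? ON_STACK_REPLACEMENT
                                                   : OSR_AFTER_STACK_CHECK;
}
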
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index c76eca04d..ea118d076 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -212,7 +212,7 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
// Store the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lea(scratch1, Operand(elements,
+ __ leap(scratch1, Operand(elements,
scratch1,
times_pointer_size,
kValueOffset - kHeapObjectTag));
@@ -424,9 +424,9 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shr(rcx, Immediate(KeyedLookupCache::kMapHashShift));
__ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
__ shr(rdi, Immediate(String::kHashShift));
- __ xor_(rcx, rdi);
+ __ xorp(rcx, rdi);
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- __ and_(rcx, Immediate(mask));
+ __ andp(rcx, Immediate(mask));
// Load the key (consisting of map and internalized string) from the cache and
// check for a match.
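
The probe index computed above is the xor of the shifted map bits and the shifted name hash, masked to the cache capacity. As a host-side model (the shift and mask values are parameters here, illustrative only):

#include <cstdint>

// Model of the shr/shr/xor/and sequence from the hunk above.
uint32_t KeyedLookupProbeIndex(uint32_t map_bits, uint32_t hash_field,
                               int map_hash_shift, int string_hash_shift,
                               uint32_t mask) {
  uint32_t h =
      (map_bits >> map_hash_shift) ^ (hash_field >> string_hash_shift);
  return h & mask;  // mask == kCapacityMask & kHashMask in the real code
}
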
@@ -442,17 +442,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ shl(rdi, Immediate(kPointerSizeLog2 + 1));
__ LoadAddress(kScratchRegister, cache_keys);
int off = kPointerSize * i * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &try_next_entry);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(equal, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
- __ cmpq(rbx, Operand(kScratchRegister, rdi, times_1, off));
+ __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
__ j(not_equal, &slow);
- __ cmpq(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+ __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
__ j(not_equal, &slow);
// Get field offset, which is a 32-bit integer.
@@ -467,8 +467,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
__ LoadAddress(kScratchRegister, cache_field_offsets);
__ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
- __ subq(rdi, rcx);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+ __ subp(rdi, rcx);
__ j(above_equal, &property_array_property);
if (i != 0) {
__ jmp(&load_in_object_property);
@@ -477,8 +477,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
- __ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
- __ addq(rcx, rdi);
+ __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
+ __ addp(rcx, rdi);
__ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
__ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
__ ret(0);
@@ -571,8 +571,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
// Everything is fine, call runtime.
__ PopReturnAddressTo(rcx);
- __ push(rdx); // receiver
- __ push(rax); // key
+ __ Push(rdx); // receiver
+ __ Push(rax); // key
__ PushReturnAddressFrom(rcx);
// Perform tail call to the entry.
@@ -734,7 +734,7 @@ static void KeyedStoreGenerateGenericHelper(
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -852,14 +852,14 @@ static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
// Load the elements into scratch1 and check its map. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
- Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
__ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments.
__ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
__ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
- __ cmpq(key, scratch2);
+ __ cmpp(key, scratch2);
__ j(greater_equal, unmapped_case);
// Load element index and check whether it is the hole.
@@ -899,7 +899,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
__ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
__ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
- __ cmpq(key, scratch);
+ __ cmpp(key, scratch);
__ j(greater_equal, slow_case);
__ SmiToInteger64(scratch, key);
return FieldOperand(backing_store,
@@ -909,7 +909,7 @@ static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
}
-void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -934,7 +934,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -945,7 +945,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand mapped_location = GenerateMappedArgumentsLookup(
masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
__ movp(mapped_location, rax);
- __ lea(r9, mapped_location);
+ __ leap(r9, mapped_location);
__ movp(r8, rax);
__ RecordWrite(rbx,
r9,
@@ -959,7 +959,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
Operand unmapped_location =
GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
__ movp(unmapped_location, rax);
- __ lea(r9, unmapped_location);
+ __ leap(r9, unmapped_location);
__ movp(r8, rax);
__ RecordWrite(rbx,
r9,
@@ -973,8 +973,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
}
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_state) {
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : receiver
// -- rcx : name
@@ -982,9 +981,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_state,
- Code::NORMAL, Code::LOAD_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rax, rcx, rbx, rdx);
@@ -1024,8 +1021,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1043,8 +1040,8 @@ void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rax); // receiver
- __ push(rcx); // name
+ __ Push(rax); // receiver
+ __ Push(rcx); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1063,8 +1060,8 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
__ IncrementCounter(counters->keyed_load_miss(), 1);
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1082,8 +1079,8 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rax); // name
+ __ Push(rdx); // receiver
+ __ Push(rax); // name
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1091,8 +1088,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
- ExtraICState extra_ic_state) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1101,9 +1097,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
// -----------------------------------
// Get the receiver from the stack and probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(
- Code::HANDLER, MONOMORPHIC, extra_ic_state,
- Code::NORMAL, Code::STORE_IC);
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
masm, flags, rdx, rcx, rbx, no_reg);
@@ -1121,9 +1115,9 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // name
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // name
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Perform tail call to the entry.
@@ -1157,7 +1151,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -1165,9 +1159,9 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -- rsp[0] : return address
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx);
- __ push(rcx);
- __ push(rax);
+ __ Push(rdx);
+ __ Push(rcx);
+ __ Push(rax);
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode));
__ PushReturnAddressFrom(rbx);
@@ -1178,7 +1172,7 @@ void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
- StrictModeFlag strict_mode) {
+ StrictMode strict_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1187,9 +1181,9 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ Push(Smi::FromInt(NONE)); // PropertyAttributes
__ Push(Smi::FromInt(strict_mode)); // Strict mode.
__ PushReturnAddressFrom(rbx);
@@ -1208,9 +1202,9 @@ void StoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1228,9 +1222,9 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
@@ -1248,9 +1242,9 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
// -----------------------------------
__ PopReturnAddressTo(rbx);
- __ push(rdx); // receiver
- __ push(rcx); // key
- __ push(rax); // value
+ __ Push(rdx); // receiver
+ __ Push(rcx); // key
+ __ Push(rax); // value
__ PushReturnAddressFrom(rbx);
// Do tail-call to runtime routine.
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 2cb09325f..894a4dd3a 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -87,7 +87,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
- RegisterDependentCodeForEmbeddedMaps(code);
+ if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
PopulateDeoptimizationData(code);
info()->CommitDependencies(code);
}
@@ -154,10 +154,10 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
- // Classic mode functions need to replace the receiver with the global proxy
+ // Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
if (info_->this_has_uses() &&
- info_->is_classic_mode() &&
+ info_->strict_mode() == SLOPPY &&
!info_->is_native()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
@@ -187,11 +187,11 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
- __ push(rax);
+ __ Push(rax);
__ Set(rax, slots);
__ movq(kScratchRegister, kSlotsZapValue);
Label loop;
@@ -200,9 +200,9 @@ bool LCodeGen::GeneratePrologue() {
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
- __ pop(rax);
+ __ Pop(rax);
} else {
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
@@ -222,8 +222,8 @@ bool LCodeGen::GeneratePrologue() {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ push(rdi);
- __ CallRuntime(Runtime::kNewFunctionContext, 1);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
// Context is returned in rax. It replaces the context passed to us.
@@ -269,17 +269,36 @@ void LCodeGen::GenerateOsrPrologue() {
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
ASSERT(slots >= 0);
- __ subq(rsp, Immediate(slots * kPointerSize));
+ __ subp(rsp, Immediate(slots * kPointerSize));
}
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+ if (instr->IsCall()) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ }
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
}
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+ if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
+ if (instr->result()->IsRegister()) {
+ Register result_reg = ToRegister(instr->result());
+ __ movsxlq(result_reg, result_reg);
+ } else {
+ // Sign-extend the 32-bit result in the stack slot.
+ ASSERT(instr->result()->IsStackSlot());
+ Operand src = ToOperand(instr->result());
+ __ movsxlq(kScratchRegister, src);
+ __ movq(src, kScratchRegister);
+ }
+ }
+}
+
+
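
The post-instruction hook above exists because optimized x64 code keeps some int32 results in 64-bit registers or slots: movsxlq replicates bit 31 into the upper word. In scalar terms (illustrative, not V8 code):

#include <cstdint>

// What movsxlq does to a 32-bit result living in a 64-bit register/slot.
int64_t SignExtend32To64(uint64_t raw) {
  return static_cast<int64_t>(static_cast<int32_t>(raw & 0xFFFFFFFFu));
}
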
bool LCodeGen::GenerateJumpTable() {
Label needs_frame;
if (jump_table_.length() > 0) {
@@ -303,15 +322,15 @@ bool LCodeGen::GenerateJumpTable() {
} else {
__ bind(&needs_frame);
__ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
- __ push(rsi);
+ __ Push(rsi);
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
ASSERT(info()->IsStub());
__ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
+ __ Push(rsi);
__ movp(rsi, MemOperand(rsp, kPointerSize));
__ call(kScratchRegister);
}
@@ -335,7 +354,8 @@ bool LCodeGen::GenerateDeferredCode() {
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
- RecordAndWritePosition(value->position());
+ RecordAndWritePosition(
+ chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
@@ -349,10 +369,10 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(info()->IsStub());
frame_is_built_ = true;
// Build the frame in such a way that rsi isn't trashed.
- __ push(rbp); // Caller's frame pointer.
- __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ pushq(rbp); // Caller's frame pointer.
+ __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
- __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ __ leap(rbp, Operand(rsp, 2 * kPointerSize));
Comment(";;; Deferred code");
}
code->Generate();
@@ -362,7 +382,7 @@ bool LCodeGen::GenerateDeferredCode() {
ASSERT(frame_is_built_);
frame_is_built_ = false;
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
}
__ jmp(code->exit());
}
@@ -405,20 +425,18 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+ return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
-bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsSmi();
+ chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}
-bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
- return op->IsConstantOperand() &&
- chunk_->LookupLiteralRepresentation(op).IsTagged();
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+ return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
@@ -577,10 +595,6 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
- } else if (op->IsArgument()) {
- ASSERT(is_tagged);
- int src_index = GetStackSlotCount() + op->index();
- translation->StoreStackSlot(src_index);
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
@@ -725,7 +739,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfq();
- __ push(rax);
+ __ Push(rax);
Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
__ movl(rax, count_operand);
__ subl(rax, Immediate(1));
@@ -733,13 +747,13 @@ void LCodeGen::DeoptimizeIf(Condition cc,
if (FLAG_trap_on_deopt) __ int3();
__ movl(rax, Immediate(FLAG_deopt_every_n_times));
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
ASSERT(frame_is_built_);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ movl(count_operand, rax);
- __ pop(rax);
+ __ Pop(rax);
__ popfq();
}
@@ -798,6 +812,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+ data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+ if (info_->IsOptimizing()) {
+ // Reference to shared function info does not change between phases.
+ AllowDeferredHandleDereference allow_handle_dereference;
+ data->SetSharedFunctionInfo(*info_->shared_info());
+ } else {
+ data->SetSharedFunctionInfo(Smi::FromInt(0));
+ }
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
@@ -985,281 +1007,324 @@ void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
}
-void LCodeGen::DoModI(LModI* instr) {
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
+
+ // Theoretically, a variation of the branch-free code for integer division by
+ // a power of 2 (calculating the remainder via an additional multiplication
+ // (which gets simplified to an 'and') and subtraction) should be faster, and
+ // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+ // indicate that positive dividends are heavily favored, so the branching
+ // version performs better.
HMod* hmod = instr->hydrogen();
- HValue* left = hmod->left();
- HValue* right = hmod->right();
- if (hmod->RightIsPowerOf2()) {
- // TODO(svenpanne) We should really do the strength reduction on the
- // Hydrogen level.
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(ToRegister(instr->result())));
-
- // Note: The code below even works when right contains kMinInt.
- int32_t divisor = Abs(right->GetInteger32Constant());
-
- Label left_is_not_negative, done;
- if (left->CanBeNegative()) {
- __ testl(left_reg, left_reg);
- __ j(not_sign, &left_is_not_negative, Label::kNear);
- __ negl(left_reg);
- __ andl(left_reg, Immediate(divisor - 1));
- __ negl(left_reg);
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ jmp(&done, Label::kNear);
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ Label dividend_is_not_negative, done;
+ if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+ __ testl(dividend, dividend);
+ __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+ // Note that this is correct even for kMinInt operands.
+ __ negl(dividend);
+ __ andl(dividend, Immediate(mask));
+ __ negl(dividend);
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
}
+ __ jmp(&done, Label::kNear);
+ }
- __ bind(&left_is_not_negative);
- __ andl(left_reg, Immediate(divisor - 1));
- __ bind(&done);
- } else {
- Register left_reg = ToRegister(instr->left());
- ASSERT(left_reg.is(rax));
- Register right_reg = ToRegister(instr->right());
- ASSERT(!right_reg.is(rax));
- ASSERT(!right_reg.is(rdx));
- Register result_reg = ToRegister(instr->result());
- ASSERT(result_reg.is(rdx));
+ __ bind(&dividend_is_not_negative);
+ __ andl(dividend, Immediate(mask));
+ __ bind(&done);
+}
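
A scalar model of the branching sequence just emitted, with the wrapping arithmetic written out explicitly, since negl wraps where C++ signed negation would overflow (not V8 code):

#include <cstdint>

// mask = |divisor| - 1; negative dividends are negated around the 'and' so
// the remainder keeps the dividend's sign, matching JS '%'.
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  uint32_t mask = divisor < 0 ? static_cast<uint32_t>(-(divisor + 1))
                              : static_cast<uint32_t>(divisor - 1);
  uint32_t u = static_cast<uint32_t>(dividend);
  if (dividend < 0) {
    u = (0u - u) & mask;              // negl; andl -- correct even for kMinInt
    return -static_cast<int32_t>(u);  // negl; a zero here is the -0 deopt case
  }
  return static_cast<int32_t>(u & mask);
}
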
- Label done;
- // Check for x % 0, idiv would signal a divide error. We have to
- // deopt in this case because we can't return a NaN.
- if (right->CanBeZero()) {
- __ testl(right_reg, right_reg);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for kMinInt % -1, idiv would signal a divide error. We
- // have to deopt if we care about -0, because we can't return that.
- if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
- Label no_overflow_possible;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &no_overflow_possible, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(equal, instr->environment());
- } else {
- __ j(not_equal, &no_overflow_possible, Label::kNear);
- __ Set(result_reg, 0);
- __ jmp(&done, Label::kNear);
- }
- __ bind(&no_overflow_possible);
- }
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rax));
- // Sign extend dividend in eax into edx:eax, since we are using only the low
- // 32 bits of the values.
- __ cdq();
-
- // If we care about -0, test if the dividend is <0 and the result is 0.
- if (left->CanBeNegative() &&
- hmod->CanBeZero() &&
- hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label positive_left;
- __ testl(left_reg, left_reg);
- __ j(not_sign, &positive_left, Label::kNear);
- __ idivl(right_reg);
- __ testl(result_reg, result_reg);
- DeoptimizeIf(zero, instr->environment());
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
+ return;
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ __ imull(rdx, rdx, Immediate(Abs(divisor)));
+ __ movl(rax, dividend);
+ __ subl(rax, rdx);
+
+ // Check for negative zero.
+ HMod* hmod = instr->hydrogen();
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label remainder_not_zero;
+ __ j(not_zero, &remainder_not_zero, Label::kNear);
+ __ cmpl(dividend, Immediate(0));
+ DeoptimizeIf(less, instr->environment());
+ __ bind(&remainder_not_zero);
+ }
+}
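
The shape of DoModByConstI in scalar form: recover the remainder from the truncating quotient as n - q * |d|. Plain '/' stands in for TruncatingDiv's multiply-by-magic-number sequence (not V8 code):

#include <cstdint>

// Assumes divisor != 0 (that case deoptimizes above) and divisor != kMinInt.
int32_t ModByConst(int32_t dividend, int32_t divisor) {
  int32_t abs_d = divisor < 0 ? -divisor : divisor;
  int32_t q = dividend / abs_d;      // TruncatingDiv(dividend, Abs(divisor))
  int32_t r = dividend - q * abs_d;  // imull rdx, rdx, |d|; movl; subl
  // r == 0 with dividend < 0 is the kBailoutOnMinusZero deopt: JS says -0.
  return r;
}
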
+
+
+void LCodeGen::DoModI(LModI* instr) {
+ HMod* hmod = instr->hydrogen();
+
+ Register left_reg = ToRegister(instr->left());
+ ASSERT(left_reg.is(rax));
+ Register right_reg = ToRegister(instr->right());
+ ASSERT(!right_reg.is(rax));
+ ASSERT(!right_reg.is(rdx));
+ Register result_reg = ToRegister(instr->result());
+ ASSERT(result_reg.is(rdx));
+
+ Label done;
+ // Check for x % 0, idiv would signal a divide error. We have to
+ // deopt in this case because we can't return a NaN.
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(right_reg, right_reg);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ // Check for kMinInt % -1, idiv would signal a divide error. We
+ // have to deopt if we care about -0, because we can't return that.
+ if (hmod->CheckFlag(HValue::kCanOverflow)) {
+ Label no_overflow_possible;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &no_overflow_possible, Label::kNear);
+ __ cmpl(right_reg, Immediate(-1));
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(equal, instr->environment());
+ } else {
+ __ j(not_equal, &no_overflow_possible, Label::kNear);
+ __ Set(result_reg, 0);
__ jmp(&done, Label::kNear);
- __ bind(&positive_left);
}
+ __ bind(&no_overflow_possible);
+ }
+
+ // Sign extend dividend in eax into edx:eax, since we are using only the low
+ // 32 bits of the values.
+ __ cdq();
+
+ // If we care about -0, test if the dividend is <0 and the result is 0.
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label positive_left;
+ __ testl(left_reg, left_reg);
+ __ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
- __ bind(&done);
+ __ testl(result_reg, result_reg);
+ DeoptimizeIf(zero, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&positive_left);
}
+ __ idivl(right_reg);
+ __ bind(&done);
}
-void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
- ASSERT(instr->right()->IsConstantOperand());
-
- const Register dividend = ToRegister(instr->left());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- const Register result = ToRegister(instr->result());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(dividend.is(ToRegister(instr->result())));
- switch (divisor) {
- case 0:
- DeoptimizeIf(no_condition, instr->environment());
+ // If the divisor is positive, things are easy: There can be no deopts and we
+ // can simply do an arithmetic right shift.
+ if (divisor == 1) return;
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (divisor > 1) {
+ __ sarl(dividend, Immediate(shift));
return;
+ }
- case 1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
+ // If the divisor is negative, we have to negate and handle edge cases.
+ Label not_kmin_int, done;
+ __ negl(dividend);
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ DeoptimizeIf(zero, instr->environment());
+ }
+ if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+ // Note that we could emit branch-free code, but that would need one more
+ // register.
+ __ j(no_overflow, &not_kmin_int, Label::kNear);
+ if (divisor == -1) {
+ DeoptimizeIf(no_condition, instr->environment());
+ } else {
+ __ movl(dividend, Immediate(kMinInt / divisor));
+ __ jmp(&done, Label::kNear);
}
- return;
+ }
+ __ bind(&not_kmin_int);
+ __ sarl(dividend, Immediate(shift));
+ __ bind(&done);
+}
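
In scalar form, the flooring-by-power-of-two logic above. Portable C++ sketch; '>>' on a negative value is taken to be arithmetic, which is what sarl guarantees the generated code (not V8 code):

#include <cstdint>

static int Log2OfAbs(int32_t d) {  // plays the role of WhichPowerOf2Abs
  uint32_t a =
      d < 0 ? 0u - static_cast<uint32_t>(d) : static_cast<uint32_t>(d);
  int shift = 0;
  while (a > 1) { a >>= 1; ++shift; }
  return shift;
}

// Preconditions mirror the compiled case: |d| is a power of two, and the
// n == kMinInt with d == -1 combination deoptimizes instead of reaching here.
int32_t FlooringDivByPowerOf2(int32_t n, int32_t d) {
  if (d == 1) return n;
  int shift = Log2OfAbs(d);
  if (d > 1) return n >> shift;  // sarl alone already floors for positive d
  // d < 0: n == 0 would be JS -0 (the kBailoutOnMinusZero deopt case).
  if (n == INT32_MIN) return 1 << (31 - shift);  // precomputed kMinInt / d
  return (-n) >> shift;                          // negl; sarl
}
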
- case -1:
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ negl(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- DeoptimizeIf(overflow, instr->environment());
- }
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- uint32_t divisor_abs = abs(divisor);
- if (IsPowerOf2(divisor_abs)) {
- int32_t power = WhichPowerOf2(divisor_abs);
- if (divisor < 0) {
- __ movsxlq(result, dividend);
- __ neg(result);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(zero, instr->environment());
- }
- __ sar(result, Immediate(power));
- } else {
- if (!result.is(dividend)) {
- __ movl(result, dividend);
- }
- __ sarl(result, Immediate(power));
- }
- } else {
- Register reg1 = ToRegister(instr->temp());
- Register reg2 = ToRegister(instr->result());
-
- // Find b which: 2^b < divisor_abs < 2^(b+1).
- unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
- unsigned shift = 32 + b; // Precision +1bit (effectively).
- double multiplier_f =
- static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
- int64_t multiplier;
- if (multiplier_f - std::floor(multiplier_f) < 0.5) {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f));
- } else {
- multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
- }
- // The multiplier is a uint32.
- ASSERT(multiplier > 0 &&
- multiplier < (static_cast<int64_t>(1) << 32));
- // The multiply is int64, so sign-extend to r64.
- __ movsxlq(reg1, dividend);
- if (divisor < 0 &&
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ neg(reg1);
- DeoptimizeIf(zero, instr->environment());
- }
- __ Set(reg2, multiplier);
- // Result just fit in r64, because it's int32 * uint32.
- __ imul(reg2, reg1);
+ // Check for (0 / -x) that will produce negative zero.
+ HMathFloorOfDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
- __ addq(reg2, Immediate(1 << 30));
- __ sar(reg2, Immediate(shift));
+ // Easy case: we need no dynamic check for the dividend, and the flooring
+ // division is the same as the truncating division.
+ if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ return;
}
+
+ // In the general case we may need to adjust before and after the truncating
+ // division to get a flooring division.
+ Register temp = ToRegister(instr->temp3());
+ ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+ Label needs_adjustment, done;
+ __ cmpl(dividend, Immediate(0));
+ __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ jmp(&done, Label::kNear);
+ __ bind(&needs_adjustment);
+ __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+ __ TruncatingDiv(temp, Abs(divisor));
+ if (divisor < 0) __ negl(rdx);
+ __ decl(rdx);
+ __ bind(&done);
}
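The adjustment path in DoFlooringDivByConstI biases a dividend that truncation would round the wrong way by +/-1 and then decrements the quotient, avoiding any remainder computation. A small C++ sketch of the equivalence, with plain division standing in for TruncatingDiv on Abs(divisor) plus the sign flip (the +/-1 bias may wrap at the int32 extremes, exactly as the leal above does):

#include <cstdint>

int32_t FlooringDivByConst(int32_t x, int32_t divisor) {
  bool same_sign = (divisor > 0 && x >= 0) || (divisor < 0 && x <= 0);
  if (same_sign) return x / divisor;  // truncation == flooring here
  // needs_adjustment: bias one step toward zero, divide, subtract one.
  return (x + (divisor > 0 ? 1 : -1)) / divisor - 1;
}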
-void LCodeGen::DoDivI(LDivI* instr) {
- if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ Register result = ToRegister(instr->result());
+ ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
+ ASSERT(!result.is(dividend));
+
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Deoptimize if remainder will not be 0.
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1) {
+ int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+ __ testl(dividend, Immediate(mask));
+ DeoptimizeIf(not_zero, instr->environment());
+ }
+ __ Move(result, dividend);
+ int32_t shift = WhichPowerOf2Abs(divisor);
+ if (shift > 0) {
+ // The arithmetic shift is always OK; the 'if' is only an optimization.
+ if (shift > 1) __ sarl(result, Immediate(31));
+ __ shrl(result, Immediate(32 - shift));
+ __ addl(result, dividend);
+ __ sarl(result, Immediate(shift));
+ }
+ if (divisor < 0) __ negl(result);
+}
- if (test_value != 0) {
- if (instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
- Label done, negative;
- __ cmpl(dividend, Immediate(0));
- __ j(less, &negative, Label::kNear);
- __ sarl(dividend, Immediate(power));
- if (divisor < 0) __ negl(dividend);
- __ jmp(&done, Label::kNear);
-
- __ bind(&negative);
- __ negl(dividend);
- __ sarl(dividend, Immediate(power));
- if (divisor > 0) __ negl(dividend);
- __ bind(&done);
- return; // Don't fall through to "__ neg" below.
- } else {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
- }
- if (divisor < 0) __ negl(dividend);
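The sarl/shrl/addl sequence in DoDivByPowerOf2I above is the classic bias trick for round-toward-zero division: a negative dividend gets 2^shift - 1 added before the final arithmetic shift, and the bias bits are manufactured from the sign. A C++ sketch, assuming shift >= 1 as the emitted guard ensures (a negative divisor then just negates the quotient, like the trailing negl):

#include <cstdint>

int32_t TruncatingDivByPowerOf2(int32_t x, int32_t shift) {
  uint32_t sign = static_cast<uint32_t>(x >> 31);            // sarl 31: 0 or ~0
  int32_t bias = static_cast<int32_t>(sign >> (32 - shift)); // shrl: 2^shift-1
  return (x + bias) >> shift;                                // addl + sarl
}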
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+ Register dividend = ToRegister(instr->dividend());
+ int32_t divisor = instr->divisor();
+ ASSERT(ToRegister(instr->result()).is(rdx));
+ if (divisor == 0) {
+ DeoptimizeIf(no_condition, instr->environment());
return;
}
- LOperand* right = instr->right();
- ASSERT(ToRegister(instr->result()).is(rax));
- ASSERT(ToRegister(instr->left()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rax));
- ASSERT(!ToRegister(instr->right()).is(rdx));
+ // Check for (0 / -x) that will produce negative zero.
+ HDiv* hdiv = instr->hydrogen();
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+
+ __ TruncatingDiv(dividend, Abs(divisor));
+ if (divisor < 0) __ negp(rdx);
+
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ __ movl(rax, rdx);
+ __ imull(rax, rax, Immediate(divisor));
+ __ subl(rax, dividend);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+}
+
- Register left_reg = rax;
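TruncatingDiv, used by DoDivByConstI and the flooring variants above, is Granlund/Montgomery-style division by a constant: multiply by a precomputed magic number, keep the high 32 bits, and correct for negative dividends. A hedged sketch for divisor 3 (0x55555556 is the textbook magic constant for 3; the real helper derives one per divisor), plus the exactness check DoDivByConstI deopts on:

#include <cstdint>

int32_t TruncatingDivBy3(int32_t x) {
  int64_t product = static_cast<int64_t>(x) * 0x55555556LL;  // widening imul
  int32_t q = static_cast<int32_t>(product >> 32);           // high word
  return q + (static_cast<uint32_t>(x) >> 31);               // fix up x < 0
}

bool DivisionWasExact(int32_t x, int32_t divisor, int32_t quotient) {
  return quotient * divisor == x;  // imull + subl + DeoptimizeIf(not_equal)
}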
+void LCodeGen::DoDivI(LDivI* instr) {
+ HBinaryOperation* hdiv = instr->hydrogen();
+ Register dividend = ToRegister(instr->left());
+ Register divisor = ToRegister(instr->right());
+ Register remainder = ToRegister(instr->temp());
+ Register result = ToRegister(instr->result());
+ ASSERT(dividend.is(rax));
+ ASSERT(remainder.is(rdx));
+ ASSERT(result.is(rax));
+ ASSERT(!divisor.is(rax));
+ ASSERT(!divisor.is(rdx));
// Check for x / 0.
- Register right_reg = ToRegister(right);
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+ __ testl(divisor, divisor);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- Label left_not_zero;
- __ testl(left_reg, left_reg);
- __ j(not_zero, &left_not_zero, Label::kNear);
- __ testl(right_reg, right_reg);
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ Label dividend_not_zero;
+ __ testl(dividend, dividend);
+ __ j(not_zero, &dividend_not_zero, Label::kNear);
+ __ testl(divisor, divisor);
DeoptimizeIf(sign, instr->environment());
- __ bind(&left_not_zero);
+ __ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
- if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
- Label left_not_min_int;
- __ cmpl(left_reg, Immediate(kMinInt));
- __ j(not_zero, &left_not_min_int, Label::kNear);
- __ cmpl(right_reg, Immediate(-1));
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+ Label dividend_not_min_int;
+ __ cmpl(dividend, Immediate(kMinInt));
+ __ j(not_zero, &dividend_not_min_int, Label::kNear);
+ __ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr->environment());
- __ bind(&left_not_min_int);
+ __ bind(&dividend_not_min_int);
}
- // Sign extend to rdx.
+ // Sign extend to rdx (= remainder).
__ cdq();
- __ idivl(right_reg);
+ __ idivl(divisor);
- if (instr->is_flooring()) {
+ if (hdiv->IsMathFloorOfDiv()) {
Label done;
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
__ j(zero, &done, Label::kNear);
- __ xorl(rdx, right_reg);
- __ sarl(rdx, Immediate(31));
- __ addl(rax, rdx);
+ __ xorl(remainder, divisor);
+ __ sarl(remainder, Immediate(31));
+ __ addl(result, remainder);
__ bind(&done);
- } else if (!instr->hydrogen()->CheckFlag(
- HInstruction::kAllUsesTruncatingToInt32)) {
+ } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
+ __ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr->environment());
}
}
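The xorl/sarl/addl tail of DoDivI is a branch-light encoding of the standard flooring fix-up once idivl has produced both quotient and remainder: if the remainder is non-zero and its sign differs from the divisor's, truncation rounded toward zero instead of down. In C++ terms:

#include <cstdint>

int32_t FloorQuotient(int32_t quotient, int32_t remainder, int32_t divisor) {
  if (remainder == 0) return quotient;        // testl + j(zero, &done)
  int32_t fix = (remainder ^ divisor) >> 31;  // xorl + sarl 31: -1 or 0
  return quotient + fix;                      // addl
}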
@@ -1323,14 +1388,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToOperand(right));
+ __ imulp(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
- __ imul(left, ToRegister(right));
+ __ imulp(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
}
@@ -1344,7 +1409,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ testq(left, left);
+ __ testp(left, left);
} else {
__ testl(left, left);
}
@@ -1360,7 +1425,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToOperand(right));
+ __ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
@@ -1368,7 +1433,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ or_(kScratchRegister, ToRegister(right));
+ __ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
@@ -1408,13 +1473,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToOperand(right));
+ __ andp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToOperand(right));
+ __ orp(ToRegister(left), ToOperand(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToOperand(right));
+ __ xorp(ToRegister(left), ToOperand(right));
break;
default:
UNREACHABLE();
@@ -1424,13 +1489,13 @@ void LCodeGen::DoBitI(LBitI* instr) {
ASSERT(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(ToRegister(left), ToRegister(right));
+ __ andp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_OR:
- __ or_(ToRegister(left), ToRegister(right));
+ __ orp(ToRegister(left), ToRegister(right));
break;
case Token::BIT_XOR:
- __ xor_(ToRegister(left), ToRegister(right));
+ __ xorp(ToRegister(left), ToRegister(right));
break;
default:
UNREACHABLE();
@@ -1518,13 +1583,13 @@ void LCodeGen::DoSubI(LSubI* instr) {
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else if (right->IsRegister()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToRegister(right));
+ __ subp(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToRegister(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ subq(ToRegister(left), ToOperand(right));
+ __ subp(ToRegister(left), ToOperand(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
@@ -1601,7 +1666,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(kScratchRegister, stamp_operand);
- __ cmpq(kScratchRegister, FieldOperand(object,
+ __ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
@@ -1642,17 +1707,17 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
- __ push(string);
+ __ Push(string);
__ movp(string, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(string, FieldOperand(string, Map::kInstanceTypeOffset));
+ __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+ __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
- __ pop(string);
+ __ Pop(string);
}
Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
@@ -1706,44 +1771,44 @@ void LCodeGen::DoAddI(LAddI* instr) {
LOperand* right = instr->right();
Representation target_rep = instr->hydrogen()->representation();
- bool is_q = target_rep.IsSmi() || target_rep.IsExternal();
+ bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
int32_t offset = ToInteger32(LConstantOperand::cast(right));
- if (is_q) {
- __ lea(ToRegister(instr->result()),
- MemOperand(ToRegister(left), offset));
+ if (is_p) {
+ __ leap(ToRegister(instr->result()),
+ MemOperand(ToRegister(left), offset));
} else {
__ leal(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
}
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
- if (is_q) {
- __ lea(ToRegister(instr->result()), address);
+ if (is_p) {
+ __ leap(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
}
}
} else {
if (right->IsConstantOperand()) {
- if (is_q) {
- __ addq(ToRegister(left),
+ if (is_p) {
+ __ addp(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
} else {
__ addl(ToRegister(left),
Immediate(ToInteger32(LConstantOperand::cast(right))));
}
} else if (right->IsRegister()) {
- if (is_q) {
- __ addq(ToRegister(left), ToRegister(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
- if (is_q) {
- __ addq(ToRegister(left), ToOperand(right));
+ if (is_p) {
+ __ addp(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
@@ -1776,7 +1841,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_reg);
+ __ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
@@ -1785,7 +1850,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
- __ cmpq(left_reg, right_op);
+ __ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
@@ -1924,7 +1989,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (r.IsSmi()) {
ASSERT(!info()->IsStub());
Register reg = ToRegister(instr->value());
- __ testq(reg, reg);
+ __ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
ASSERT(!info()->IsStub());
@@ -1956,7 +2021,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
ASSERT(!info()->IsStub());
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
@@ -2016,7 +2081,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
- __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+ __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
@@ -2139,9 +2204,9 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
cc = ReverseCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
- __ cmpq(ToRegister(left), ToRegister(right));
+ __ cmpp(ToRegister(left), ToRegister(right));
} else {
- __ cmpq(ToRegister(left), ToOperand(right));
+ __ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
@@ -2164,7 +2229,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
- __ cmpq(left, right);
+ __ cmpp(left, right);
}
EmitBranch(instr, equal);
}
@@ -2182,9 +2247,9 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
@@ -2210,8 +2275,8 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
__ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
- Immediate(0x80000000));
- EmitFalseBranch(instr, not_equal);
+ Immediate(0x1));
+ EmitFalseBranch(instr, no_overflow);
__ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
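Several hunks in this patch replace cmpl(reg, Immediate(0x80000000)) with cmpl(reg, Immediate(1)) and branch on overflow instead of equality. The trick: cmp computes reg - 1, and that subtraction signed-overflows for exactly one input, kMinInt, so OF tests for 0x80000000 without the wide immediate. A sketch of the flag condition (wrap-around arithmetic mimics the ALU):

#include <cstdint>

// What the overflow flag observes after cmpl(reg, Immediate(1)).
bool CmpOneSetsOverflow(int32_t reg) {
  int32_t result = static_cast<int32_t>(static_cast<uint32_t>(reg) - 1u);
  return reg < 0 && result >= 0;  // only reg == INT32_MIN (0x80000000)
}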
@@ -2318,7 +2383,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
- __ testq(rax, rax);
+ __ testp(rax, rax);
EmitBranch(instr, condition);
}
@@ -2411,8 +2476,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// actual type and do a signed compare with the width of the type range.
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
- __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+ __ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
@@ -2470,11 +2535,11 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
InstanceofStub stub(InstanceofStub::kNoFlags);
- __ push(ToRegister(instr->left()));
- __ push(ToRegister(instr->right()));
+ __ Push(ToRegister(instr->left()));
+ __ Push(ToRegister(instr->right()));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2520,7 +2585,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
__ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
- __ cmpq(map, Operand(kScratchRegister, 0));
+ __ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
@@ -2557,14 +2622,14 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
InstanceofStub stub(flags);
- __ push(ToRegister(instr->value()));
+ __ Push(ToRegister(instr->value()));
__ Push(instr->function());
static const int kAdditionalDelta = 10;
int delta =
masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
ASSERT(delta >= 0);
- __ push_imm32(delta);
+ __ PushImm32(delta);
// We are pushing three values on the stack but recording a
// safepoint with two arguments because stub is going to
@@ -2582,7 +2647,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// PushSafepointRegisterScope.
__ movp(kScratchRegister, rax);
}
- __ testq(kScratchRegister, kScratchRegister);
+ __ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
@@ -2603,7 +2668,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
Condition condition = TokenToCondition(op, false);
Label true_value, done;
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
@@ -2619,7 +2684,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
- __ push(rax);
+ __ Push(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
@@ -2629,7 +2694,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ movp(rsp, rbp);
- __ pop(rbp);
+ __ popq(rbp);
no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
@@ -2642,7 +2707,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
__ shl(reg, Immediate(kPointerSizeLog2));
- __ addq(rsp, reg);
+ __ addp(rsp, reg);
__ jmp(return_addr_reg);
}
if (no_frame_start != -1) {
@@ -2785,6 +2850,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
Representation representation = access.representation();
if (representation.IsSmi() &&
instr->hydrogen()->representation().IsInteger32()) {
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(object, offset), representation);
+ __ AssertSmi(scratch);
+#endif
+
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
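The new DEBUG block documents what this optimization relies on: with 32-bit smis on x64 the payload sits in the upper half of the tagged word, so an Integer32 load can read four bytes at offset + 4 instead of untagging. A little-endian C++ sketch (hypothetical helper, not V8 code):

#include <cstdint>
#include <cstring>

int32_t LoadInt32FromSmiSlot(const void* slot) {  // slot: 8-byte tagged word
  int32_t value;
  std::memcpy(&value, static_cast<const uint8_t*>(slot) + 4, sizeof(value));
  return value;  // the payload; the zero tag occupies the low half
}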
@@ -2861,9 +2932,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
- StackArgumentsAccessor args(arguments, const_length,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(result, args.GetArgumentOperand(const_index));
+ if (const_index >= 0 && const_index < const_length) {
+ StackArgumentsAccessor args(arguments, const_length,
+ ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(result, args.GetArgumentOperand(const_index));
+ } else if (FLAG_debug_code) {
+ __ int3();
+ }
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
@@ -2883,19 +2958,6 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
int base_offset = instr->is_fixed_typed_array()
? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
: 0;
@@ -2925,7 +2987,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
- __ movzxbq(result, operand);
+ __ movzxbp(result, operand);
break;
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
@@ -2933,7 +2995,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
- __ movzxwq(result, operand);
+ __ movzxwp(result, operand);
break;
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
@@ -2958,7 +3020,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2969,19 +3031,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
@@ -3009,20 +3058,6 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
HLoadKeyed* hinstr = instr->hydrogen();
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (hinstr->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
bool requires_hole_check = hinstr->RequiresHoleCheck();
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->representation();
@@ -3030,6 +3065,17 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (representation.IsInteger32() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
ASSERT(!requires_hole_check);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3108,7 +3154,7 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
- __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
+ __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
@@ -3139,9 +3185,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
// If no arguments adaptor frame the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
- __ cmpq(rbp, ToRegister(instr->elements()));
+ __ cmpp(rbp, ToRegister(instr->elements()));
} else {
- __ cmpq(rbp, ToOperand(instr->elements()));
+ __ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
@@ -3221,10 +3267,10 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// Copy the arguments to this function possibly from the
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
- __ cmpq(length, Immediate(kArgumentsLimit));
+ __ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr->environment());
- __ push(receiver);
+ __ Push(receiver);
__ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
@@ -3236,7 +3282,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ bind(&loop);
StackArgumentsAccessor args(elements, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ push(args.GetArgumentOperand(0));
+ __ Push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
@@ -3281,10 +3327,10 @@ void LCodeGen::DoContext(LContext* instr) {
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
- __ push(rsi); // The context is the first argument.
+ __ Push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
- CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+ CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
}
@@ -3318,7 +3364,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
- __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ __ Call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
@@ -3349,7 +3395,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
ASSERT(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
- __ addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
@@ -3383,7 +3429,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
} else {
Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
generator.BeforeCall(__ CallSize(target));
- __ call(target);
+ __ Call(target);
}
generator.AfterCall();
}
@@ -3416,7 +3462,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
CallRuntimeFromDeferred(
- Runtime::kAllocateHeapNumber, 0, instr, instr->context());
+ Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
@@ -3446,10 +3492,10 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
- __ testq(input_reg, input_reg);
+ __ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
- __ neg(input_reg); // Sets flags.
+ __ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr->environment());
__ bind(&is_positive);
}
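EmitSmiMathAbs operates on the tagged value directly: negating a negative smi yields its tagged absolute value, and the only input still negative after negp is the minimum smi, whose absolute value is not representable, hence the deopt. A sketch with explicit wrap-around to mirror negp:

#include <cstdint>

int64_t SmiAbs(int64_t tagged, bool* deopt) {
  if (tagged >= 0) return tagged;  // j(not_sign, &is_positive)
  int64_t negated = static_cast<int64_t>(-static_cast<uint64_t>(tagged));
  if (negated < 0) *deopt = true;  // minimum smi: DeoptimizeIf(negative)
  return negated;
}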
@@ -3509,8 +3555,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
}
__ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
} else {
Label negative_sign, done;
// Deoptimize on unordered.
@@ -3534,8 +3580,8 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
- DeoptimizeIf(equal, instr->environment());
+ __ cmpl(output_reg, Immediate(0x1));
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, Label::kNear);
// Non-zero negative reaches here.
@@ -3572,9 +3618,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ jmp(&done, dist);
__ bind(&below_one_half);
@@ -3589,9 +3635,9 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
- __ cmpl(output_reg, Immediate(0x80000000));
+ __ cmpl(output_reg, Immediate(0x1));
__ RecordComment("D2I conversion overflow");
- DeoptimizeIf(equal, instr->environment());
+ DeoptimizeIf(overflow, instr->environment());
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
@@ -3721,17 +3767,31 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
__ jmp(&done, Label::kNear);
__ bind(&positive);
__ fldln2();
- __ subq(rsp, Immediate(kDoubleSize));
+ __ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
__ movsd(input_reg, Operand(rsp, 0));
- __ addq(rsp, Immediate(kDoubleSize));
+ __ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
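The x87 sequence in DoMathLog computes ln(x) as log2(x) * ln(2): fldln2 pushes the constant ln(2) and fyl2x leaves st1 * log2(st0) on the stack; the trip through memory only exists because the value lives in an SSE register. Numerically:

#include <cmath>

double LnViaFyl2x(double x) {
  return std::log2(x) * std::log(2.0);  // fldln2 ; fyl2x
}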
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+ Register input = ToRegister(instr->value());
+ Register result = ToRegister(instr->result());
+ Label not_zero_input;
+ __ bsrl(result, input);
+
+ __ j(not_zero, &not_zero_input);
+ __ Set(result, 63); // 63^31 == 32
+
+ __ bind(&not_zero_input);
+ __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
+
+
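DoMathClz32 is new here. bsrl yields the index of the highest set bit but sets ZF (and leaves its destination undefined) for input 0, so the code pre-loads 63 for that case; xor with 31 then maps index i to 31 - i, and 63 to 32. A C++ sketch, with the GCC/Clang builtin standing in for bsrl:

#include <cstdint>

int Clz32(uint32_t x) {
  if (x == 0) return 63 ^ 31;       // the Set(result, 63) path: == 32
  int bsr = 31 - __builtin_clz(x);  // index of highest set bit, like bsrl
  return bsr ^ 31;                  // 31 ^ i == 31 - i for i in [0, 31]
}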
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(rsi));
ASSERT(ToRegister(instr->function()).is(rdi));
@@ -3771,8 +3831,7 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
__ Set(rax, instr->arity());
// No cell in ebx for construct type feedback in optimized code
- Handle<Object> undefined_value(isolate()->factory()->undefined_value());
- __ Move(rbx, undefined_value);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3784,7 +3843,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
- __ Move(rbx, factory()->undefined_value());
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
@@ -3801,7 +3860,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
// We might need a change here
// look at the first argument
__ movp(rcx, Operand(rsp, 0));
- __ testq(rcx, rcx);
+ __ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
@@ -3830,7 +3889,7 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
- __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+ __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
__ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
@@ -3840,10 +3899,10 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
LConstantOperand* offset = LConstantOperand::cast(instr->offset());
- __ lea(result, Operand(base, ToInteger32(offset)));
+ __ leap(result, Operand(base, ToInteger32(offset)));
} else {
Register offset = ToRegister(instr->offset());
- __ lea(result, Operand(base, offset, times_1, 0));
+ __ leap(result, Operand(base, offset, times_1, 0));
}
}
@@ -3860,7 +3919,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
ASSERT(value.is(rax));
- ASSERT(!access.representation().IsSpecialization());
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
@@ -3872,19 +3930,16 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register object = ToRegister(instr->object());
Handle<Map> transition = instr->transition();
+ SmiCheck check_needed = hinstr->value()->IsHeapObject()
+ ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- if (FLAG_track_fields && representation.IsSmi()) {
+ ASSERT(!(representation.IsSmi() &&
+ instr->value()->IsConstantOperand() &&
+ !IsInteger32Constant(LConstantOperand::cast(instr->value()))));
+ if (representation.IsHeapObject()) {
if (instr->value()->IsConstantOperand()) {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (!IsInteger32Constant(operand_value) &&
- !IsSmiConstant(operand_value)) {
- DeoptimizeIf(no_condition, instr->environment());
- }
- }
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
- if (instr->value()->IsConstantOperand()) {
- LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
- if (IsInteger32Constant(operand_value)) {
+ if (chunk_->LookupConstant(operand_value)->HasSmiValue()) {
DeoptimizeIf(no_condition, instr->environment());
}
} else {
@@ -3892,6 +3947,9 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register value = ToRegister(instr->value());
Condition cc = masm()->CheckSmi(value);
DeoptimizeIf(cc, instr->environment());
+
+ // We know that value is a smi now, so we can omit the check below.
+ check_needed = OMIT_SMI_CHECK;
}
}
} else if (representation.IsDouble()) {
@@ -3922,9 +3980,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
}
// Do the store.
- SmiCheck check_needed = hinstr->value()->IsHeapObject()
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
-
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
@@ -3934,6 +3989,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (representation.IsSmi() &&
hinstr->value()->representation().IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch, FieldOperand(write_register, offset), representation);
+ __ AssertSmi(scratch);
+#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -3986,8 +4046,7 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
__ Move(rcx, instr->hydrogen()->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(),
- instr->strict_mode_flag());
+ Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4026,7 +4085,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
Register reg2 = ToRegister(instr->index());
if (representation.IsSmi()) {
- __ cmpq(reg, reg2);
+ __ cmpp(reg, reg2);
} else {
__ cmpl(reg, reg2);
}
@@ -4043,7 +4102,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
} else {
if (representation.IsSmi()) {
- __ cmpq(length, ToRegister(instr->index()));
+ __ cmpp(length, ToRegister(instr->index()));
} else {
__ cmpl(length, ToRegister(instr->index()));
}
@@ -4057,19 +4116,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
int base_offset = instr->is_fixed_typed_array()
? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
: 0;
@@ -4122,7 +4168,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4133,20 +4179,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -4174,26 +4206,23 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (hinstr->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
int offset = FixedArray::kHeaderSize - kHeapObjectTag;
Representation representation = hinstr->value()->representation();
if (representation.IsInteger32()) {
ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+#ifdef DEBUG
+ Register scratch = kScratchRegister;
+ __ Load(scratch,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ offset,
+ instr->additional_index()),
+ Representation::Smi());
+ __ AssertSmi(scratch);
+#endif
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
@@ -4234,7 +4263,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
+ __ leap(key_reg, operand);
__ RecordWrite(elements,
key_reg,
value,
@@ -4262,7 +4291,7 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(rcx));
ASSERT(ToRegister(instr->value()).is(rax));
- Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
+ Handle<Code> ic = instr->strict_mode() == STRICT
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
@@ -4360,7 +4389,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
- __ push(string);
+ __ Push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
@@ -4370,10 +4399,10 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
} else {
Register index = ToRegister(instr->index());
__ Integer32ToSmi(index, index);
- __ push(index);
+ __ Push(index);
}
CallRuntimeFromDeferred(
- Runtime::kStringCharCodeAt, 2, instr, instr->context());
+ Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
@@ -4425,7 +4454,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
- __ push(char_code);
+ __ Push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -4444,18 +4473,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
}
-void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange()) {
- DeoptimizeIf(overflow, instr->environment());
- }
-}
-
-
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4467,22 +4484,6 @@ void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
}
-void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister());
- LOperand* output = instr->result();
- if (!instr->hydrogen()->value()->HasRange() ||
- !instr->hydrogen()->value()->range()->IsInSmiRange() ||
- instr->hydrogen()->value()->range()->upper() == kMaxInt) {
- // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
- // interval, so we treat kMaxInt as a sentinel for this entire interval.
- __ testl(ToRegister(input), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- __ Integer32ToSmi(ToRegister(output), ToRegister(input));
-}
-
-
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister() && input->Equals(instr->result()));
@@ -4518,15 +4519,11 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
- Label slow;
+ Label done, slow;
Register reg = ToRegister(instr->value());
- Register tmp = reg.is(rax) ? rcx : rax;
- XMMRegister temp_xmm = ToDoubleRegister(instr->temp());
-
- // Preserve the value of all registers.
- PushSafepointRegistersScope scope(this);
+ Register tmp = ToRegister(instr->temp1());
+ XMMRegister temp_xmm = ToDoubleRegister(instr->temp2());
- Label done;
// Load value into temp_xmm which will be preserved across potential call to
// runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
// XMM registers on x64).
@@ -4540,29 +4537,31 @@ void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
+ {
+ // Put a valid pointer value in the stack slot where the result
+ // register is stored, as this register is in the pointer map, but contains
+ // an integer value.
+ __ Set(reg, 0);
- // Put a valid pointer value in the stack slot where the result
- // register is stored, as this register is in the pointer map, but contains an
- // integer value.
- __ StoreToSafepointRegisterSlot(reg, Immediate(0));
-
- // NumberTagU uses the context from the frame, rather than
- // the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
- // The corresponding HChange instructions are added in a phase that does
- // not have easy access to the local context.
- __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
- RecordSafepointWithRegisters(
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ // Preserve the value of all registers.
+ PushSafepointRegistersScope scope(this);
- if (!reg.is(rax)) __ movp(reg, rax);
+ // NumberTagU uses the context from the frame, rather than
+ // the environment's HContext or HInlinedContext value.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
+ // The corresponding HChange instructions are added in a phase that does
+ // not have easy access to the local context.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+ __ StoreToSafepointRegisterSlot(reg, rax);
+ }
// Done. Store the value in temp_xmm into the allocated heap number.
__ bind(&done);
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
- __ StoreToSafepointRegisterSlot(reg, reg);
}
@@ -4605,11 +4604,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this);
// NumberTagD uses the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
- // They only call Runtime::kAllocateHeapNumber.
+ // They only call Runtime::kHiddenAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ movp(kScratchRegister, rax);
@@ -4619,10 +4618,19 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- ASSERT(instr->value()->Equals(instr->result()));
+ HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
- ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ Integer32ToSmi(input, input);
+ Register output = ToRegister(instr->result());
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ hchange->value()->CheckFlag(HValue::kUint32)) {
+ __ testl(input, input);
+ DeoptimizeIf(sign, instr->environment());
+ }
+ __ Integer32ToSmi(output, input);
+ if (hchange->CheckFlag(HValue::kCanOverflow) &&
+ !hchange->value()->CheckFlag(HValue::kUint32)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
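The reworked DoSmiTag separates the two overflow cases: a uint32 value cannot be tagged if its top bit is set (it would come out as a negative smi), which the testl/sign check catches up front, while the signed case keeps a deopt on the tag's own overflow flag for configurations with a narrower smi payload. With the 32-bit payload used on x64 the tag itself is a plain shift; a sketch under that assumption:

#include <cstdint>

bool Uint32FitsInSmi(uint32_t value) {
  return (value & 0x80000000u) == 0;         // testl + DeoptimizeIf(sign)
}

int64_t SmiTag(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // Integer32ToSmi, kSmiShift == 32
}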
@@ -4916,13 +4924,13 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
- __ push(object);
+ __ Push(object);
__ Set(rsi, 0);
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
- __ testq(rax, Immediate(kSmiTagMask));
+ __ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr->environment());
}
@@ -5011,7 +5019,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr->environment());
- __ movp(input_reg, Immediate(0));
+ __ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
// Heap number
@@ -5029,6 +5037,30 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+ XMMRegister value_reg = ToDoubleRegister(instr->value());
+ Register result_reg = ToRegister(instr->result());
+ if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+ __ movq(result_reg, value_reg);
+ __ shr(result_reg, Immediate(32));
+ } else {
+ __ movd(result_reg, value_reg);
+ }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+ Register hi_reg = ToRegister(instr->hi());
+ Register lo_reg = ToRegister(instr->lo());
+ XMMRegister result_reg = ToDoubleRegister(instr->result());
+ XMMRegister xmm_scratch = double_scratch0();
+ __ movd(result_reg, hi_reg);
+ __ psllq(result_reg, 32);
+ __ movd(xmm_scratch, lo_reg);
+ __ orps(result_reg, xmm_scratch);
+}
+
+
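DoDoubleBits and DoConstructDouble move raw IEEE-754 halves between general-purpose and XMM registers without converting anything; in portable C++ the pair is a bit cast each way:

#include <cstdint>
#include <cstring>

uint32_t DoubleHighBits(double d) {          // the HDoubleBits::HIGH case
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));      // movq xmm -> gp
  return static_cast<uint32_t>(bits >> 32);  // shr 32
}

double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;  // movd/psllq/orps
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}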
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate V8_FINAL : public LDeferredCode {
public:
@@ -5108,7 +5140,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register size = ToRegister(instr->size());
ASSERT(!size.is(result));
__ Integer32ToSmi(size, size);
- __ push(size);
+ __ Push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
@@ -5128,14 +5160,14 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+ Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
ASSERT(ToRegister(instr->value()).is(rax));
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
@@ -5156,11 +5188,11 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// Create regexp literal using runtime function
// Result will be in rax.
- __ push(rcx);
+ __ Push(rcx);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
- CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
__ movp(rbx, rax);
__ bind(&materialized);
@@ -5170,10 +5202,10 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
- __ push(rbx);
+ __ Push(rbx);
__ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
- __ pop(rbx);
+ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+ __ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
@@ -5197,16 +5229,16 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
// space for nested functions that don't need literals cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(instr->hydrogen()->language_mode(),
+ FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
instr->hydrogen()->is_generator());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
- __ push(rsi);
+ __ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
__ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
+ CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
}
}
@@ -5224,9 +5256,9 @@ void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
if (operand->IsConstantOperand()) {
__ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
- __ push(ToRegister(operand));
+ __ Push(ToRegister(operand));
} else {
- __ push(ToOperand(operand));
+ __ Push(ToOperand(operand));
}
}
@@ -5365,7 +5397,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5401,7 +5433,7 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
@@ -5437,10 +5469,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
- EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(&done);
- RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
} else {
ASSERT(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
@@ -5481,7 +5510,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmpq(rax, null_value);
+ __ cmpp(rax, null_value);
DeoptimizeIf(equal, instr->environment());
Condition cc = masm()->CheckSmi(rax);
@@ -5499,7 +5528,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(rax);
+ __ Push(rax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
@@ -5532,7 +5561,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
- __ cmpq(ToRegister(instr->map()),
+ __ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr->environment());
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 431f77b23..37807ede0 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -86,12 +86,12 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
bool IsInteger32Constant(LConstantOperand* op) const;
+ bool IsDehoistedKeyConstant(LConstantOperand* op) const;
bool IsSmiConstant(LConstantOperand* op) const;
int32_t ToInteger32(LConstantOperand* op) const;
Smi* ToSmi(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
ExternalReference ToExternalReference(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
Operand ToOperand(LOperand* op) const;
@@ -130,9 +130,7 @@ class LCodeGen: public LCodeGenBase {
#undef DECLARE_DO
private:
- StrictModeFlag strict_mode_flag() const {
- return info()->is_classic_mode() ? kNonStrictMode : kStrictMode;
- }
+ StrictMode strict_mode() const { return info()->strict_mode(); }
LPlatformChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
@@ -160,6 +158,7 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should
// continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+ void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
bool GeneratePrologue();
bool GenerateDeferredCode();
bool GenerateJumpTable();
diff --git a/deps/v8/src/x64/lithium-gap-resolver-x64.cc b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
index c3bfd9e61..7c7fc29e0 100644
--- a/deps/v8/src/x64/lithium-gap-resolver-x64.cc
+++ b/deps/v8/src/x64/lithium-gap-resolver-x64.cc
@@ -198,7 +198,14 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- __ Set(dst, static_cast<uint32_t>(cgen_->ToInteger32(constant_source)));
+ int32_t constant = cgen_->ToInteger32(constant_source);
+ // Do sign extension only for constants used as de-hoisted array keys.
+ // Others only need zero extension, which saves 2 bytes.
+ if (cgen_->IsDehoistedKeyConstant(constant_source)) {
+ __ Set(dst, constant);
+ } else {
+ __ Set(dst, static_cast<uint32_t>(constant));
+ }
} else {
__ Move(dst, cgen_->ToHandle(constant_source));
}
@@ -218,8 +225,7 @@ void LGapResolver::EmitMove(int index) {
if (cgen_->IsSmiConstant(constant_source)) {
__ Move(dst, cgen_->ToSmi(constant_source));
} else if (cgen_->IsInteger32Constant(constant_source)) {
- // Zero top 32 bits of a 64 bit spill slot that holds a 32 bit untagged
- // value.
+ // Do sign extension to 64 bits when storing into a stack slot.
__ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
} else {
__ Move(kScratchRegister, cgen_->ToHandle(constant_source));
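
The comment distinction above is worth spelling out. A de-hoisted array key is a 32-bit value that feeds 64-bit address arithmetic, so a negative constant must be sign-extended into the full register; any other int32 constant can be zero-extended, which on x64 uses the shorter movl encoding without a REX.W prefix (the 2 bytes the comment mentions). A minimal standalone sketch of the difference, in plain C++ rather than V8 code:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t key = -1;  // a de-hoisted key may be negative
      uint64_t zero_ext = static_cast<uint32_t>(key);  // movl-style: 0x00000000ffffffff
      int64_t  sign_ext = static_cast<int64_t>(key);   // movq-style: 0xffffffffffffffff
      std::printf("%016llx vs %016llx\n",
                  static_cast<unsigned long long>(zero_ext),
                  static_cast<unsigned long long>(sign_ext));
      return 0;
    }

Used as a 64-bit offset, the zero-extended form points roughly 4 GB past the intended (negative) index, which is why the gap resolver now checks IsDehoistedKeyConstant before picking the cheaper encoding.
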
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 1f2b1e98e..8c4f24e8f 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -175,6 +175,19 @@ bool LGoto::HasInterestingComment(LCodeGen* gen) const {
}
+template<int R>
+bool LTemplateResultInstruction<R>::MustSignExtendResult(
+ LPlatformChunk* chunk) const {
+ HValue* hvalue = this->hydrogen_value();
+
+ if (hvalue == NULL) return false;
+ if (!hvalue->representation().IsInteger32()) return false;
+ if (hvalue->HasRange() && !hvalue->range()->CanBeNegative()) return false;
+
+ return chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
+}
+
+
void LGoto::PrintDataTo(StringStream* stream) {
stream->Add("B%d", block_id());
}
@@ -947,18 +960,20 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
if (goto_instr != NULL) return goto_instr;
HValue* value = instr->value();
- LBranch* result = new(zone()) LBranch(UseRegister(value));
- // Tagged values that are not known smis or booleans require a
- // deoptimization environment. If the instruction is generic no
- // environment is needed since all cases are handled.
- ToBooleanStub::Types expected = instr->expected_input_types();
- Representation rep = value->representation();
+ Representation r = value->representation();
HType type = value->type();
- if (rep.IsTagged() && !type.IsSmi() && !type.IsBoolean() &&
- !expected.IsGeneric()) {
- return AssignEnvironment(result);
+ ToBooleanStub::Types expected = instr->expected_input_types();
+ if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+ bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+ type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+ LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+ if (!easy_case &&
+ ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+ !expected.IsGeneric())) {
+ branch = AssignEnvironment(branch);
}
- return result;
+ return branch;
}
@@ -1117,6 +1132,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
+ case kMathClz32: return DoMathClz32(instr);
default:
UNREACHABLE();
return NULL;
@@ -1142,8 +1158,12 @@ LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* context = UseAny(instr->context());
LOperand* input = UseRegisterAtStart(instr->value());
- LMathAbs* result = new(zone()) LMathAbs(context, input);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ LInstruction* result =
+ DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+ Representation r = instr->value()->representation();
+ if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+ if (!r.IsDouble()) result = AssignEnvironment(result);
+ return result;
}
@@ -1155,6 +1175,13 @@ LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
}
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathClz32* result = new(zone()) LMathClz32(input);
+ return DefineAsRegister(result);
+}
+
+
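
For reference, the semantics DoMathClz32 must lower match ES6 Math.clz32: the number of leading zero bits in the uint32 representation of the input, with 0 mapping to 32. A plain C++ restatement (an illustration, not the lithium code itself):

    #include <cstdint>

    // __builtin_clz is undefined for 0, so that case is handled explicitly.
    int Clz32(uint32_t x) {
      return x == 0 ? 32 : __builtin_clz(x);
    }

On x64 this maps to a single bsr (or lzcnt where available), which fits the simple register-in, register-out allocation above.
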
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
@@ -1246,24 +1273,72 @@ LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
}
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+ (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+ divisor != 1 && divisor != -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
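
DoDivByPowerOf2I exists because truncating division by 2^k never needs idiv. The underlying scalar trick, sketched in portable C++ for a positive divisor (names are illustrative; the generated code additionally covers the negative-divisor, minus-zero, and kMinInt/-1 cases the environment above guards):

    #include <cassert>
    #include <cstdint>

    int32_t DivByPowerOf2(int32_t dividend, int32_t divisor) {
      assert(divisor > 0 && (divisor & (divisor - 1)) == 0);
      int shift = __builtin_ctz(static_cast<uint32_t>(divisor));
      // Bias negative dividends by divisor - 1 so the arithmetic shift
      // rounds toward zero (JS semantics) instead of toward -infinity.
      int32_t bias = (dividend >> 31) & (divisor - 1);
      return (dividend + bias) >> shift;
    }

For example, DivByPowerOf2(-7, 4) computes (-7 + 3) >> 2 == -1, where a bare shift would give -2.
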
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+ dividend, divisor, temp1, temp2), rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
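
DoDivByConstI is the classic multiply-by-reciprocal transformation, which is why it pins rax and rdx: the widening imul leaves the high half of the product in rdx. A hand-checkable sketch for divisor 10 (the magic constant is precomputed here; a real implementation derives one per divisor):

    #include <cstdint>

    int32_t DivBy10(int32_t dividend) {
      // 0x66666667 == ceil(2^34 / 10); keep bits 34 and up of the product.
      int64_t product = static_cast<int64_t>(dividend) * 0x66666667LL;
      int32_t quotient = static_cast<int32_t>(product >> 34);
      quotient += static_cast<uint32_t>(dividend) >> 31;  // +1 for negatives
      return quotient;
    }

DivBy10(19) yields 1 and DivBy10(-19) yields -1, matching truncating division.
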
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LDivI(
+ dividend, divisor, temp), rax);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanOverflow) ||
+ (!instr->IsMathFloorOfDiv() &&
+ !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(instr->left()->representation().Equals(instr->representation()));
- ASSERT(instr->right()->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
+ return DoDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoDivByConstI(instr);
+ } else {
+ return DoDivI(instr);
}
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(result, rax));
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else {
@@ -1272,74 +1347,114 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
}
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+ dividend, divisor));
+ if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+ (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
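
The flooring variant is cheaper still: an arithmetic right shift already rounds toward negative infinity, which is exactly Math.floor(a / b) for a positive power-of-two divisor. A one-line sketch:

    #include <cstdint>

    // Math.floor(dividend / 2^shift) for a positive power-of-two divisor:
    // sar needs no bias correction at all.
    int32_t FlooringDivByPowerOf2(int32_t dividend, int shift) {
      return dividend >> shift;
    }

Only the minus-zero case (negative divisor) and kMinInt with divisor -1 still need the deopt environment assigned above.
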
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LOperand* temp3 =
+ ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+ (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+ NULL : TempRegister();
+ LInstruction* result =
+ DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+ divisor,
+ temp1,
+ temp2,
+ temp3),
+ rdx);
+ if (divisor == 0 ||
+ (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- if (!right->IsConstant()) {
- ASSERT(right->representation().IsInteger32());
- // The temporary operand is necessary to ensure that right is not allocated
- // into rdx.
- LOperand* temp = FixedTemp(rdx);
- LOperand* dividend = UseFixed(instr->left(), rax);
- LOperand* divisor = UseRegister(instr->right());
- LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
- return AssignEnvironment(DefineFixed(flooring_div, rax));
- }
-
- ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
- LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
- int32_t divisor_si = HConstant::cast(right)->Integer32Value();
- if (divisor_si == 0) {
- LOperand* dividend = UseRegister(instr->left());
- return AssignEnvironment(DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL)));
- } else if (IsPowerOf2(abs(divisor_si))) {
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, NULL));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ if (instr->RightIsPowerOf2()) {
+ return DoFlooringDivByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoFlooringDivByConstI(instr);
} else {
- // use two r64
- LOperand* dividend = UseRegisterAtStart(instr->left());
- LOperand* temp = TempRegister();
- LInstruction* result = DefineAsRegister(
- new(zone()) LMathFloorOfDiv(dividend, divisor, temp));
- return divisor_si < 0 ? AssignEnvironment(result) : result;
+ return DoDivI(instr);
}
}
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegisterAtStart(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+ dividend, divisor));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
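
LModByPowerOf2I replaces idiv with a mask. JS % takes the sign of the dividend, so the mask is applied to the magnitude and the sign restored afterwards; a sketch, widened to 64 bits so negating INT32_MIN stays well defined:

    #include <cstdint>

    // dividend % divisor with JS semantics, divisor a positive power of two.
    int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
      int32_t mask = divisor - 1;
      int64_t d = dividend;
      return d < 0 ? -static_cast<int32_t>((-d) & mask)
                   : static_cast<int32_t>(d & mask);
    }

ModByPowerOf2(-7, 4) is -3, not 1. A negative dividend whose remainder is 0 should produce -0 in JS, which int32 cannot represent; that is the kBailoutOnMinusZero deopt above.
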
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseRegister(instr->left());
+ int32_t divisor = instr->right()->GetInteger32Constant();
+ LOperand* temp1 = FixedTemp(rax);
+ LOperand* temp2 = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+ dividend, divisor, temp1, temp2), rax);
+ if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+ ASSERT(instr->representation().IsSmiOrInteger32());
+ ASSERT(instr->left()->representation().Equals(instr->representation()));
+ ASSERT(instr->right()->representation().Equals(instr->representation()));
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LOperand* temp = FixedTemp(rdx);
+ LInstruction* result = DefineFixed(new(zone()) LModI(
+ dividend, divisor, temp), rdx);
+ if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+ instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
+
+
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
- HValue* left = instr->left();
- HValue* right = instr->right();
if (instr->representation().IsSmiOrInteger32()) {
- ASSERT(left->representation().Equals(instr->representation()));
- ASSERT(right->representation().Equals(instr->representation()));
if (instr->RightIsPowerOf2()) {
- ASSERT(!right->CanBeZero());
- LModI* mod = new(zone()) LModI(UseRegisterAtStart(left),
- UseOrConstant(right),
- NULL);
- LInstruction* result = DefineSameAsFirst(mod);
- return (left->CanBeNegative() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero))
- ? AssignEnvironment(result)
- : result;
+ return DoModByPowerOf2I(instr);
+ } else if (instr->right()->IsConstant()) {
+ return DoModByConstI(instr);
} else {
- // The temporary operand is necessary to ensure that right is not
- // allocated into edx.
- LModI* mod = new(zone()) LModI(UseFixed(left, rax),
- UseRegister(right),
- FixedTemp(rdx));
- LInstruction* result = DefineFixed(mod, rdx);
- return (right->CanBeZero() ||
- (left->RangeCanInclude(kMinInt) &&
- right->RangeCanInclude(-1) &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)) ||
- (left->CanBeNegative() &&
- instr->CanBeZero() &&
- instr->CheckFlag(HValue::kBailoutOnMinusZero)))
- ? AssignEnvironment(result)
- : result;
+ return DoModI(instr);
}
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MOD, instr);
@@ -1702,8 +1817,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
if (from.IsTagged()) {
if (to.IsDouble()) {
LOperand* value = UseRegister(instr->value());
- LNumberUntagD* res = new(zone()) LNumberUntagD(value);
- return AssignEnvironment(DefineAsRegister(res));
+ LInstruction* res = DefineAsRegister(new(zone()) LNumberUntagD(value));
+ if (!instr->value()->representation().IsSmi()) {
+ res = AssignEnvironment(res);
+ }
+ return res;
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1720,8 +1838,13 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
bool truncating = instr->CanTruncateToInt32();
LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ LInstruction* res =
+ DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+ if (!instr->value()->representation().IsSmi()) {
+ // Note: Only deopts in deferred code.
+ res = AssignEnvironment(res);
+ }
+ return res;
}
}
} else if (from.IsDouble()) {
@@ -1741,41 +1864,37 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else {
ASSERT(to.IsInteger32());
LOperand* value = UseRegister(instr->value());
- return AssignEnvironment(
- DefineAsRegister(new(zone()) LDoubleToI(value)));
+ LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+ if (!instr->CanTruncateToInt32()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
} else if (from.IsInteger32()) {
info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- } else if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new(zone()) LSmiTag(value));
+ if (!instr->CheckFlag(HValue::kCanOverflow)) {
+ return DefineAsRegister(new(zone()) LSmiTag(value));
+ } else if (val->CheckFlag(HInstruction::kUint32)) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = FixedTemp(xmm1);
+ LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+ return AssignPointerMap(DefineSameAsFirst(result));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignPointerMap(DefineSameAsFirst(result));
}
} else if (to.IsSmi()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
- LInstruction* result = NULL;
- if (val->CheckFlag(HInstruction::kUint32)) {
- result = DefineAsRegister(new(zone()) LUint32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange() &&
- val->range()->upper() != kMaxInt) {
- return result;
- }
- } else {
- result = DefineAsRegister(new(zone()) LInteger32ToSmi(value));
- if (val->HasRange() && val->range()->IsInSmiRange()) {
- return result;
- }
+ LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ ASSERT(val->CheckFlag(HValue::kUint32));
+ result = AssignEnvironment(result);
}
- return AssignEnvironment(result);
+ return result;
} else {
if (instr->value()->CheckFlag(HInstruction::kUint32)) {
LOperand* temp = FixedTemp(xmm1);
@@ -1826,6 +1945,7 @@ LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
}
LCheckMaps* result = new(zone()) LCheckMaps(value);
if (!instr->CanOmitMapChecks()) {
+ // Note: Only deopts in deferred code.
AssignEnvironment(result);
if (instr->has_migration_target()) return AssignPointerMap(result);
}
@@ -1852,6 +1972,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
}
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+ HValue* value = instr->value();
+ ASSERT(value->representation().IsDouble());
+ return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+ LOperand* lo = UseRegister(instr->lo());
+ LOperand* hi = UseRegister(instr->hi());
+ return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
@@ -1911,7 +2045,10 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
DefineAsRegister(new(zone()) LLoadContextSlot(context));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1928,7 +2065,10 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
temp = NULL;
}
LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
+ if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
@@ -1969,32 +2109,51 @@ LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
}
+void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) {
+ BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
+ if (dehoisted_key_ids->Contains(candidate->id())) return;
+ dehoisted_key_ids->Add(candidate->id());
+ if (!candidate->IsPhi()) return;
+ for (int i = 0; i < candidate->OperandCount(); ++i) {
+ FindDehoistedKeyDefinitions(candidate->OperandAt(i));
+ }
+}
+
+
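
FindDehoistedKeyDefinitions is a small transitive-closure walk: mark the key's id, and if the key is a phi, mark everything that can flow into it. Because the bit vector doubles as the visited set, cyclic phi graphs terminate. The same pattern in standalone form (a hypothetical Value type, not V8's HValue):

    #include <unordered_set>
    #include <vector>

    struct Value {
      int id;
      bool is_phi;
      std::vector<Value*> operands;
    };

    void MarkDehoisted(Value* v, std::unordered_set<int>* marked) {
      if (!marked->insert(v->id).second) return;  // already visited: stop
      if (!v->is_phi) return;                     // non-phis are leaves
      for (Value* op : v->operands) MarkDehoisted(op, marked);
    }
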
LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32());
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LInstruction* result = NULL;
+
+ if (instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
if (!instr->is_typed_elements()) {
LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
} else {
ASSERT(
(instr->representation().IsInteger32() &&
- !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+ !(IsDoubleOrFloatElementsKind(elements_kind))) ||
(instr->representation().IsDouble() &&
- (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+ (IsDoubleOrFloatElementsKind(elements_kind))));
LOperand* backing_store = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(backing_store, key);
+ result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
}
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UINT32_ELEMENTS) ||
- (elements_kind == UINT32_ELEMENTS);
- // An unsigned int array load might overflow and cause a deopt, make sure it
- // has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+ // see LCodeGen::DoLoadKeyedExternalArray
+ ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+ elements_kind == UINT32_ELEMENTS) &&
+ !instr->CheckFlag(HInstruction::kUint32)) :
+ // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+ // LCodeGen::DoLoadKeyedFixedArray
+ instr->RequiresHoleCheck()) {
+ result = AssignEnvironment(result);
+ }
+ return result;
}
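
The uint32 condition above exists because an element greater than INT32_MAX cannot survive in the int32 representation optimized code assumes, unless every use already treats the value as uint32 (the kUint32 flag). A two-line illustration of the representation loss:

    #include <cstdint>

    int main() {
      uint32_t element = 0x90000000u;                    // > INT32_MAX
      int32_t as_int32 = static_cast<int32_t>(element);  // now negative
      return as_int32 < 0;  // returns 1: the value changed, hence the deopt
    }
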
@@ -2012,6 +2171,10 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
+ if (instr->IsDehoisted()) {
+ FindDehoistedKeyDefinitions(instr->key());
+ }
+
if (!instr->is_typed_elements()) {
ASSERT(instr->elements()->representation().IsTagged());
bool needs_write_barrier = instr->NeedsWriteBarrier();
@@ -2022,7 +2185,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
Representation value_representation = instr->value()->representation();
if (value_representation.IsDouble()) {
object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
+ val = UseRegisterAtStart(instr->value());
key = UseRegisterOrConstantAtStart(instr->key());
} else {
ASSERT(value_representation.IsSmiOrTagged() ||
@@ -2133,7 +2296,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool can_be_constant = instr->value()->IsConstant() &&
HConstant::cast(instr->value())->NotInNewSpace() &&
- !(FLAG_track_double_fields && instr->field_representation().IsDouble());
+ !instr->field_representation().IsDouble();
LOperand* val;
if (needs_write_barrier) {
@@ -2142,10 +2305,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
val = UseFixed(instr->value(), rax);
} else if (can_be_constant) {
val = UseRegisterOrConstant(instr->value());
- } else if (FLAG_track_fields && instr->field_representation().IsSmi()) {
+ } else if (instr->field_representation().IsSmi()) {
val = UseRegister(instr->value());
- } else if (FLAG_track_double_fields &&
- instr->field_representation().IsDouble()) {
+ } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value());
} else {
val = UseRegister(instr->value());
@@ -2156,12 +2318,13 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LOperand* temp = (!is_in_object || needs_write_barrier ||
needs_write_barrier_for_map) ? TempRegister() : NULL;
- LStoreNamedField* result = new(zone()) LStoreNamedField(obj, val, temp);
- if (FLAG_track_heap_object_fields &&
- instr->field_representation().IsHeapObject()) {
- if (!instr->value()->type().IsHeapObject()) {
- return AssignEnvironment(result);
- }
+ LInstruction* result = new(zone()) LStoreNamedField(obj, val, temp);
+ if (!instr->access().IsExternalMemory() &&
+ instr->field_representation().IsHeapObject() &&
+ (val->IsConstantOperand()
+ ? HConstant::cast(instr->value())->HasSmiValue()
+ : !instr->value()->type().IsHeapObject())) {
+ result = AssignEnvironment(result);
}
return result;
}
@@ -2193,7 +2356,7 @@ LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
LOperand* context = UseAny(instr->context());
LStringCharCodeAt* result =
new(zone()) LStringCharCodeAt(context, string, index);
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+ return AssignPointerMap(DefineAsRegister(result));
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 36b274440..9d9ac1ea1 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -80,17 +80,23 @@ class LCodeGen;
V(ConstantI) \
V(ConstantS) \
V(ConstantT) \
+ V(ConstructDouble) \
V(Context) \
V(DateField) \
V(DebugBreak) \
V(DeclareGlobals) \
V(Deoptimize) \
+ V(DivByConstI) \
+ V(DivByPowerOf2I) \
V(DivI) \
+ V(DoubleBits) \
V(DoubleToI) \
V(DoubleToSmi) \
V(Drop) \
V(DummyUse) \
V(Dummy) \
+ V(FlooringDivByConstI) \
+ V(FlooringDivByPowerOf2I) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
V(FunctionLiteral) \
@@ -103,7 +109,6 @@ class LCodeGen;
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
- V(Integer32ToSmi) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsObjectAndBranch) \
@@ -124,14 +129,16 @@ class LCodeGen;
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
+ V(MathClz32) \
V(MathExp) \
V(MathFloor) \
- V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSqrt) \
+ V(ModByConstI) \
+ V(ModByPowerOf2I) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -170,7 +177,6 @@ class LCodeGen;
V(Typeof) \
V(TypeofIsAndBranch) \
V(Uint32ToDouble) \
- V(Uint32ToSmi) \
V(UnknownOSRValue) \
V(WrapReceiver)
@@ -265,6 +271,10 @@ class LInstruction : public ZoneObject {
virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+ virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
+ return false;
+ }
+
#ifdef DEBUG
void VerifyCall();
#endif
@@ -300,6 +310,9 @@ class LTemplateResultInstruction : public LInstruction {
void set_result(LOperand* operand) { results_[0] = operand; }
LOperand* result() const { return results_[0]; }
+ virtual bool MustSignExtendResult(
+ LPlatformChunk* chunk) const V8_FINAL V8_OVERRIDE;
+
protected:
EmbeddedContainer<LOperand*, R> results_;
};
@@ -614,6 +627,49 @@ class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
};
+class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LModByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -631,6 +687,49 @@ class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
};
+class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+ LDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+ int32_t divisor_;
+};
+
+
class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -643,29 +742,55 @@ class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
- bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
-
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
- DECLARE_HYDROGEN_ACCESSOR(Div)
+ DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
};
-class LMathFloorOfDiv V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
- LMathFloorOfDiv(LOperand* left,
- LOperand* right,
- LOperand* temp = NULL) {
- inputs_[0] = left;
- inputs_[1] = right;
- temps_[0] = temp;
+ LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
}
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
- DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv, "math-floor-of-div")
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+ "flooring-div-by-power-of-2-i")
DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+ LFlooringDivByConstI(LOperand* dividend,
+ int32_t divisor,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = dividend;
+ divisor_ = divisor;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* dividend() { return inputs_[0]; }
+ int32_t divisor() const { return divisor_; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* temp3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+ DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+ int32_t divisor_;
};
@@ -762,6 +887,18 @@ class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
+class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathClz32(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
@@ -1829,19 +1966,6 @@ class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LInteger32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LInteger32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Integer32ToSmi, "int32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
public:
explicit LUint32ToDouble(LOperand* value, LOperand* temp) {
@@ -1856,19 +1980,6 @@ class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LUint32ToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LUint32ToSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(Uint32ToSmi, "uint32-to-smi")
- DECLARE_HYDROGEN_ACCESSOR(Change)
-};
-
-
class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LNumberTagI(LOperand* value) {
@@ -1881,15 +1992,17 @@ class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
- temps_[0] = temp;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
}
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
};
@@ -1966,6 +2079,7 @@ class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2041,7 +2155,7 @@ class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
Handle<Object> name() const { return hydrogen()->name(); }
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2096,7 +2210,7 @@ class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
- StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
+ StrictMode strict_mode() { return hydrogen()->strict_mode(); }
};
@@ -2300,6 +2414,33 @@ class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
};
+class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDoubleBits(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+ DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+ LConstructDouble(LOperand* hi, LOperand* lo) {
+ inputs_[0] = hi;
+ inputs_[1] = lo;
+ }
+
+ LOperand* hi() { return inputs_[0]; }
+ LOperand* lo() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
@@ -2492,10 +2633,18 @@ class LChunkBuilder;
class LPlatformChunk V8_FINAL : public LChunk {
public:
LPlatformChunk(CompilationInfo* info, HGraph* graph)
- : LChunk(info, graph) { }
+ : LChunk(info, graph),
+ dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { }
int GetNextSpillIndex(RegisterKind kind);
LOperand* GetNextSpillSlot(RegisterKind kind);
+ BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; }
+ bool IsDehoistedKey(HValue* value) {
+ return dehoisted_key_ids_.Contains(value->id());
+ }
+
+ private:
+ BitVector dehoisted_key_ids_;
};
@@ -2529,6 +2678,15 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+ LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+ LInstruction* DoDivByPowerOf2I(HDiv* instr);
+ LInstruction* DoDivByConstI(HDiv* instr);
+ LInstruction* DoDivI(HBinaryOperation* instr);
+ LInstruction* DoModByPowerOf2I(HMod* instr);
+ LInstruction* DoModByConstI(HMod* instr);
+ LInstruction* DoModI(HMod* instr);
+ LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+ LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
private:
enum Status {
@@ -2637,6 +2795,7 @@ class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
HArithmeticBinaryOperation* instr);
LInstruction* DoArithmeticT(Token::Value op,
HBinaryOperation* instr);
+ void FindDehoistedKeyDefinitions(HValue* candidate);
LPlatformChunk* chunk_;
CompilationInfo* info_;
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 4c19fced6..6f313f7a6 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -128,7 +128,7 @@ void MacroAssembler::LoadAddress(Register destination,
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
+ leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
return;
}
}
@@ -145,7 +145,7 @@ int MacroAssembler::LoadAddressSize(ExternalReference source) {
intptr_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
Serializer::TooLateToEnableNow();
- // Operand is lea(scratch, Operand(kRootRegister, delta));
+ // Operand is leap(scratch, Operand(kRootRegister, delta));
// Opcodes : REX.W 8D ModRM Disp8/Disp32 - 4 or 7.
int size = 4;
if (!is_int8(static_cast<int32_t>(delta))) {
@@ -165,11 +165,11 @@ void MacroAssembler::PushAddress(ExternalReference source) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
- push(Immediate(static_cast<int32_t>(address)));
+ Push(Immediate(static_cast<int32_t>(address)));
return;
}
LoadAddress(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
@@ -200,13 +200,13 @@ void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
ASSERT(root_array_available_);
- push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
+ Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
ASSERT(root_array_available_);
- cmpq(with, Operand(kRootRegister,
+ cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
@@ -216,7 +216,7 @@ void MacroAssembler::CompareRoot(const Operand& with,
ASSERT(root_array_available_);
ASSERT(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
- cmpq(with, kScratchRegister);
+ cmpp(with, kScratchRegister);
}
@@ -236,13 +236,13 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
// Store pointer to buffer.
movp(Operand(scratch, 0), addr);
// Increment buffer top.
- addq(scratch, Immediate(kPointerSize));
+ addp(scratch, Immediate(kPointerSize));
// Write back new top of buffer.
StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
// Call stub on end of buffer.
Label done;
// Check for end of buffer.
- testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
+ testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
if (and_then == kReturnAtEnd) {
Label buffer_overflowed;
j(not_equal, &buffer_overflowed, Label::kNear);
@@ -276,13 +276,13 @@ void MacroAssembler::InNewSpace(Register object,
// and the running system.
if (scratch.is(object)) {
Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
- and_(scratch, kScratchRegister);
+ andp(scratch, kScratchRegister);
} else {
Move(scratch, ExternalReference::new_space_mask(isolate()));
- and_(scratch, object);
+ andp(scratch, object);
}
Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
- cmpq(scratch, kScratchRegister);
+ cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
@@ -291,11 +291,11 @@ void MacroAssembler::InNewSpace(Register object,
Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
Assembler::RelocInfoNone());
if (scratch.is(object)) {
- addq(scratch, kScratchRegister);
+ addp(scratch, kScratchRegister);
} else {
- lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ leap(scratch, Operand(object, kScratchRegister, times_1, 0));
}
- and_(scratch,
+ andp(scratch,
Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
j(cc, branch, distance);
}
@@ -323,7 +323,7 @@ void MacroAssembler::RecordWriteField(
  // of the object, so the offset must be a multiple of kPointerSize.
ASSERT(IsAligned(offset, kPointerSize));
- lea(dst, FieldOperand(object, offset));
+ leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
@@ -363,7 +363,7 @@ void MacroAssembler::RecordWriteArray(Register object,
// Array access: calculate the destination address. Index is not a smi.
Register dst = index;
- lea(dst, Operand(object, index, times_pointer_size,
+ leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
RecordWrite(
@@ -398,7 +398,7 @@ void MacroAssembler::RecordWrite(Register object,
if (emit_debug_code()) {
Label ok;
- cmpq(value, Operand(address, 0));
+ cmpp(value, Operand(address, 0));
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
@@ -483,7 +483,7 @@ void MacroAssembler::CheckStackAlignment() {
if (frame_alignment > kPointerSize) {
ASSERT(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
- testq(rsp, Immediate(frame_alignment_mask));
+ testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
// Abort if stack is not aligned.
int3();
@@ -505,17 +505,8 @@ void MacroAssembler::NegativeZeroTest(Register result,
void MacroAssembler::Abort(BailoutReason reason) {
- // We want to pass the msg string like a smi to avoid GC
- // problems, however msg is not guaranteed to be aligned
- // properly. Instead, we pass an aligned pointer that is
- // a proper v8 smi, but also pass the alignment difference
- // from the real pointer as a smi.
- const char* msg = GetBailoutReason(reason);
- intptr_t p1 = reinterpret_cast<intptr_t>(msg);
- intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
- // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
- ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
+ const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
RecordComment("Abort message: ");
RecordComment(msg);
@@ -527,21 +518,18 @@ void MacroAssembler::Abort(BailoutReason reason) {
}
#endif
- push(rax);
- Move(kScratchRegister, reinterpret_cast<Smi*>(p0),
- Assembler::RelocInfoNone());
- push(kScratchRegister);
- Move(kScratchRegister, Smi::FromInt(static_cast<int>(p1 - p0)),
+ Push(rax);
+ Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
Assembler::RelocInfoNone());
- push(kScratchRegister);
+ Push(kScratchRegister);
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
} else {
- CallRuntime(Runtime::kAbort, 2);
+ CallRuntime(Runtime::kAbort, 1);
}
// Control will not return here.
int3();
@@ -572,7 +560,7 @@ bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
void MacroAssembler::IllegalOperation(int num_arguments) {
if (num_arguments > 0) {
- addq(rsp, Immediate(num_arguments * kPointerSize));
+ addp(rsp, Immediate(num_arguments * kPointerSize));
}
LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
@@ -588,7 +576,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
// the slow case, converting the key to a smi is always valid.
// key: string key
// hash: key's hash field, including its array index value.
- and_(hash, Immediate(String::kArrayIndexValueMask));
+ andp(hash, Immediate(String::kArrayIndexValueMask));
shr(hash, Immediate(String::kHashShift));
// Here we actually clobber the key which will be used if calling into
// runtime later. However as the new key is the numeric value of a string key
@@ -757,7 +745,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
// previous handle scope.
subl(Operand(base_reg, kLevelOffset), Immediate(1));
movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
- cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
+ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
j(not_equal, &delete_allocated_handles);
bind(&leave_exit_frame);
@@ -812,7 +800,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kPromoteScheduledException, 0);
+ CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
}
jmp(&exception_handled);
@@ -893,12 +881,12 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
for (int i = 0; i < kNumberOfSavedRegs; i++) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- push(reg);
+ pushq(reg);
}
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
@@ -916,12 +904,12 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
- pop(reg);
+ popq(reg);
}
}
}
@@ -984,12 +972,17 @@ void MacroAssembler::Set(Register dst, int64_t x) {
}
-void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(dst, Immediate(static_cast<int32_t>(x)));
+void MacroAssembler::Set(const Operand& dst, intptr_t x) {
+ if (kPointerSize == kInt64Size) {
+ if (is_int32(x)) {
+ movp(dst, Immediate(static_cast<int32_t>(x)));
+ } else {
+ Set(kScratchRegister, x);
+ movp(dst, kScratchRegister);
+ }
} else {
- Set(kScratchRegister, x);
- movq(dst, kScratchRegister);
+ ASSERT(kPointerSize == kInt32Size);
+ movp(dst, Immediate(static_cast<int32_t>(x)));
}
}
@@ -1009,7 +1002,7 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(dst, kScratchRegister);
+ xorq(dst, kScratchRegister);
} else {
Move(dst, src);
}
@@ -1021,7 +1014,7 @@ void MacroAssembler::SafePush(Smi* src) {
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
Push(Smi::FromInt(src->value() ^ jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
- xor_(Operand(rsp, 0), kScratchRegister);
+ xorq(Operand(rsp, 0), kScratchRegister);
} else {
Push(src);
}
@@ -1059,24 +1052,28 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
switch (uvalue) {
case 9:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
break;
case 8:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
break;
case 4:
xorl(dst, dst);
- lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
break;
case 5:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
break;
case 3:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
break;
case 2:
- lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+ leap(dst,
+ Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
break;
case 1:
movp(dst, kSmiConstantRegister);
@@ -1089,7 +1086,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
return;
}
if (negative) {
- neg(dst);
+ negp(dst);
}
}
@@ -1158,14 +1155,14 @@ void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
void MacroAssembler::SmiTest(Register src) {
AssertSmi(src);
- testq(src, src);
+ testp(src, src);
}
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
AssertSmi(smi1);
AssertSmi(smi2);
- cmpq(smi1, smi2);
+ cmpp(smi1, smi2);
}
@@ -1178,10 +1175,10 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
void MacroAssembler::Cmp(Register dst, Smi* src) {
ASSERT(!dst.is(kScratchRegister));
if (src->value() == 0) {
- testq(dst, dst);
+ testp(dst, dst);
} else {
Register constant_reg = GetSmiConstant(src);
- cmpq(dst, constant_reg);
+ cmpp(dst, constant_reg);
}
}
@@ -1189,14 +1186,14 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
AssertSmi(dst);
AssertSmi(src);
- cmpq(dst, src);
+ cmpp(dst, src);
}
@@ -1210,7 +1207,7 @@ void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
ASSERT(!dst.AddressUsesRegister(smi_reg));
- cmpq(dst, smi_reg);
+ cmpp(dst, smi_reg);
}
@@ -1258,12 +1255,12 @@ void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
ASSERT(!src1.is(kScratchRegister));
ASSERT(!src2.is(kScratchRegister));
movp(kScratchRegister, src1);
- or_(kScratchRegister, src2);
+ orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
movp(dst, kScratchRegister);
} else {
movp(dst, src1);
- or_(dst, src2);
+ orp(dst, src2);
JumpIfNotSmi(dst, on_not_smis, near_jump);
}
}
@@ -1310,7 +1307,7 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
return CheckNonNegativeSmi(first);
}
movp(kScratchRegister, first);
- or_(kScratchRegister, second);
+ orp(kScratchRegister, second);
rol(kScratchRegister, Immediate(1));
testl(kScratchRegister, Immediate(3));
return zero;
@@ -1339,7 +1336,7 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
Condition MacroAssembler::CheckIsMinSmi(Register src) {
ASSERT(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
- cmpq(src, kSmiConstantRegister);
+ cmpp(src, kSmiConstantRegister);
return overflow;
}
@@ -1456,39 +1453,39 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
ASSERT(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
- addq(dst, kSmiConstantRegister);
+ addp(dst, kSmiConstantRegister);
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
Register constant_reg = GetSmiConstant(constant);
- addq(dst, constant_reg);
+ addp(dst, constant_reg);
return;
}
} else {
switch (constant->value()) {
case 1:
- lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
return;
case 2:
- lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
return;
case 4:
- lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
return;
case 8:
- lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
+ leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
return;
default:
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
return;
}
}
@@ -1515,16 +1512,16 @@ void MacroAssembler::SmiAddConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1538,7 +1535,7 @@ void MacroAssembler::SmiAddConstant(Register dst,
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1552,17 +1549,17 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- subq(dst, constant_reg);
+ subp(dst, constant_reg);
} else {
if (constant->value() == Smi::kMinValue) {
LoadSmiConstant(dst, constant);
// Adding and subtracting the min-value gives the same result; it only
// differs on the overflow bit, which we don't check here.
- addq(dst, src);
+ addp(dst, src);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
- addq(dst, src);
+ addp(dst, src);
}
}
}
@@ -1581,16 +1578,16 @@ void MacroAssembler::SmiSubConstant(Register dst,
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
Label done;
j(no_overflow, &done, Label::kNear);
- addq(dst, kScratchRegister);
+ addp(dst, kScratchRegister);
jmp(bailout_label, near_jump);
bind(&done);
} else {
@@ -1607,12 +1604,12 @@ void MacroAssembler::SmiSubConstant(Register dst,
ASSERT(!dst.is(kScratchRegister));
movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
- subq(dst, kScratchRegister);
+ subp(dst, kScratchRegister);
j(overflow, bailout_label, near_jump);
} else {
// Subtract by adding the negation.
LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
- addq(dst, src);
+ addp(dst, src);
j(overflow, bailout_label, near_jump);
}
}
@@ -1626,15 +1623,15 @@ void MacroAssembler::SmiNeg(Register dst,
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
movp(kScratchRegister, src);
- neg(dst); // Low 32 bits are retained as zero by negation.
+ negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
j(not_equal, on_smi_result, near_jump);
movp(src, kScratchRegister);
} else {
movp(dst, src);
- neg(dst);
- cmpq(dst, src);
+ negp(dst);
+ cmpp(dst, src);
// If the result is zero or Smi::kMinValue, negation failed to create a smi.
j(not_equal, on_smi_result, near_jump);
}
@@ -1650,15 +1647,15 @@ static void SmiAddHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1694,12 +1691,12 @@ void MacroAssembler::SmiAdd(Register dst,
if (!dst.is(src1)) {
if (emit_debug_code()) {
movp(kScratchRegister, src1);
- addq(kScratchRegister, src2);
+ addp(kScratchRegister, src2);
Check(no_overflow, kSmiAdditionOverflow);
}
- lea(dst, Operand(src1, src2, times_1, 0));
+ leap(dst, Operand(src1, src2, times_1, 0));
} else {
- addq(dst, src2);
+ addp(dst, src2);
Assert(no_overflow, kSmiAdditionOverflow);
}
}
@@ -1714,15 +1711,15 @@ static void SmiSubHelper(MacroAssembler* masm,
Label::Distance near_jump) {
if (dst.is(src1)) {
Label done;
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(no_overflow, &done, Label::kNear);
// Restore src1.
- masm->addq(dst, src2);
+ masm->addp(dst, src2);
masm->jmp(on_not_smi_result, near_jump);
masm->bind(&done);
} else {
masm->movp(dst, src1);
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->j(overflow, on_not_smi_result, near_jump);
}
}
@@ -1760,7 +1757,7 @@ static void SmiSubNoOverflowHelper(MacroAssembler* masm,
if (!dst.is(src1)) {
masm->movp(dst, src1);
}
- masm->subq(dst, src2);
+ masm->subp(dst, src2);
masm->Assert(no_overflow, kSmiSubtractionOverflow);
}
@@ -1792,17 +1789,17 @@ void MacroAssembler::SmiMul(Register dst,
Label failure, zero_correct_result;
movp(kScratchRegister, src1); // Create backup for later testing.
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, &failure, Label::kNear);
// Check for a negative zero result. If the product is zero and one
// argument is negative, go to the slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
movp(dst, kScratchRegister);
- xor_(dst, src2);
+ xorp(dst, src2);
// Result was positive zero.
j(positive, &zero_correct_result, Label::kNear);
@@ -1816,17 +1813,17 @@ void MacroAssembler::SmiMul(Register dst,
bind(&correct_result);
} else {
SmiToInteger64(dst, src1);
- imul(dst, src2);
+ imulp(dst, src2);
j(overflow, on_not_smi_result, near_jump);
// Check for a negative zero result. If the product is zero and one
// argument is negative, go to the slow case.
Label correct_result;
- testq(dst, dst);
+ testp(dst, dst);
j(not_zero, &correct_result, Label::kNear);
// One of src1 and src2 is zero; check whether the other is
// negative.
movp(kScratchRegister, src1);
- xor_(kScratchRegister, src2);
+ xorp(kScratchRegister, src2);
j(negative, on_not_smi_result, near_jump);
bind(&correct_result);
}
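
The xorp of the two factors is the sign test behind the zero-product bailout: a zero result is only a valid smi if neither factor was negative, because JavaScript distinguishes +0 from -0 and -0 cannot be represented as a smi. A sketch of the check (not V8 code):

// Sketch, not V8 code: the zero-product sign test used by SmiMul.
#include <cassert>
#include <cstdint>

bool NeedsSlowPathForZeroProduct(int64_t a, int64_t b) {
  if (a * b != 0) return false;  // testp(dst, dst); j(not_zero, &correct_result)
  return (a ^ b) < 0;            // xorp(...); j(negative, ...): signs differ
}

int main() {
  assert(!NeedsSlowPathForZeroProduct(0, 5));   // +0 stays a smi
  assert(NeedsSlowPathForZeroProduct(0, -5));   // -0 needs a heap number
  assert(!NeedsSlowPathForZeroProduct(3, 4));   // non-zero product: no check
  return 0;
}
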
@@ -1846,7 +1843,7 @@ void MacroAssembler::SmiDiv(Register dst,
ASSERT(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
@@ -1863,7 +1860,7 @@ void MacroAssembler::SmiDiv(Register dst,
Label safe_div;
testl(rax, Immediate(0x7fffffff));
j(not_zero, &safe_div, Label::kNear);
- testq(src2, src2);
+ testp(src2, src2);
if (src1.is(rax)) {
j(positive, &safe_div, Label::kNear);
movp(src1, kScratchRegister);
@@ -1909,7 +1906,7 @@ void MacroAssembler::SmiMod(Register dst,
ASSERT(!src1.is(rdx));
ASSERT(!src1.is(src2));
- testq(src2, src2);
+ testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
if (src1.is(rax)) {
@@ -1945,7 +1942,7 @@ void MacroAssembler::SmiMod(Register dst,
Label smi_result;
testl(rdx, rdx);
j(not_zero, &smi_result, Label::kNear);
- testq(src1, src1);
+ testp(src1, src1);
j(negative, on_not_smi_result, near_jump);
bind(&smi_result);
Integer32ToSmi(dst, rdx);
@@ -1958,11 +1955,11 @@ void MacroAssembler::SmiNot(Register dst, Register src) {
// Set tag and padding bits before negating, so that they are zero afterwards.
movl(kScratchRegister, Immediate(~0));
if (dst.is(src)) {
- xor_(dst, kScratchRegister);
+ xorp(dst, kScratchRegister);
} else {
- lea(dst, Operand(src, kScratchRegister, times_1, 0));
+ leap(dst, Operand(src, kScratchRegister, times_1, 0));
}
- not_(dst);
+ notp(dst);
}
@@ -1971,7 +1968,7 @@ void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
movp(dst, src1);
}
- and_(dst, src2);
+ andp(dst, src2);
}
@@ -1981,10 +1978,10 @@ void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
} else if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- and_(dst, constant_reg);
+ andp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- and_(dst, src);
+ andp(dst, src);
}
}
@@ -1994,7 +1991,7 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- or_(dst, src2);
+ orp(dst, src2);
}
@@ -2002,10 +1999,10 @@ void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- or_(dst, constant_reg);
+ orp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- or_(dst, src);
+ orp(dst, src);
}
}
@@ -2015,7 +2012,7 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
ASSERT(!src1.is(src2));
movp(dst, src1);
}
- xor_(dst, src2);
+ xorp(dst, src2);
}
@@ -2023,10 +2020,10 @@ void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
ASSERT(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
- xor_(dst, constant_reg);
+ xorp(dst, constant_reg);
} else {
LoadSmiConstant(dst, constant);
- xor_(dst, src);
+ xorp(dst, src);
}
}
@@ -2067,7 +2064,7 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
} else {
movp(dst, src);
if (shift_value == 0) {
- testq(dst, dst);
+ testp(dst, dst);
j(negative, on_not_smi_result, near_jump);
}
shr(dst, Immediate(shift_value + kSmiShift));
@@ -2086,7 +2083,7 @@ void MacroAssembler::SmiShiftLeft(Register dst,
}
SmiToInteger32(rcx, src2);
// The shift amount is specified by the lower 5 bits, not the six used by the shl opcode.
- and_(rcx, Immediate(0x1f));
+ andq(rcx, Immediate(0x1f));
shl_cl(dst);
}
@@ -2175,7 +2172,7 @@ void MacroAssembler::SelectNonSmi(Register dst,
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
- and_(kScratchRegister, src1);
+ andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
// If non-zero then both are smis.
j(not_zero, on_not_smis, near_jump);
@@ -2183,13 +2180,13 @@ void MacroAssembler::SelectNonSmi(Register dst,
// Exactly one operand is a smi.
ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
- subq(kScratchRegister, Immediate(1));
+ subp(kScratchRegister, Immediate(1));
// If src1 is a smi, the scratch register is now all 1s, else it is all 0s.
movp(dst, src1);
- xor_(dst, src2);
- and_(dst, kScratchRegister);
+ xorp(dst, src2);
+ andp(dst, kScratchRegister);
// If src1 is a smi, dst holds src1 ^ src2, else it is zero.
- xor_(dst, src1);
+ xorp(dst, src1);
// If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
}
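
SelectNonSmi picks the non-smi operand without a branch: with kSmiTag == 0, (src1 & kSmiTagMask) - 1 yields an all-ones mask exactly when src1 is a smi, and the xor/and/xor sequence then selects src2 in that case and src1 otherwise. A sketch under those assumptions (not V8 code):

// Sketch, not V8 code: branch-free select of the non-smi operand,
// assuming kSmiTag == 0 and kSmiTagMask == 1.
#include <cassert>
#include <cstdint>

uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
  uint64_t mask = (src1 & 1) - 1;  // all 1s iff src1 is a smi (low bit clear)
  uint64_t dst = src1 ^ src2;      // xorp(dst, src2)
  dst &= mask;                     // andp(dst, kScratchRegister)
  return dst ^ src1;               // xorp(dst, src1): src2 if smi, else src1
}

int main() {
  uint64_t smi = uint64_t{7} << 32;  // smi: low tag bit clear
  uint64_t heap = 0x1001;            // heap object: low tag bit set
  assert(SelectNonSmi(smi, heap) == heap);
  assert(SelectNonSmi(heap, smi) == heap);
  return 0;
}
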
@@ -2219,7 +2216,7 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
if (!dst.is(src)) {
movq(dst, src);
}
- neg(dst);
+ negq(dst);
if (shift < kSmiShift) {
sar(dst, Immediate(kSmiShift - shift));
} else {
@@ -2238,10 +2235,10 @@ void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
+ Push(Immediate(static_cast<int32_t>(smi)));
} else {
Register constant = GetSmiConstant(source);
- push(constant);
+ Push(constant);
}
}
@@ -2251,22 +2248,22 @@ void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
// High bits.
shr(src, Immediate(64 - kSmiShift));
shl(src, Immediate(kSmiShift));
- push(src);
+ Push(src);
// Low bits.
shl(scratch, Immediate(kSmiShift));
- push(scratch);
+ Push(scratch);
}
void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
- pop(scratch);
+ Pop(scratch);
// Low bits.
shr(scratch, Immediate(kSmiShift));
- pop(dst);
+ Pop(dst);
shr(dst, Immediate(kSmiShift));
// High bits.
shl(dst, Immediate(64 - kSmiShift));
- or_(dst, scratch);
+ orp(dst, scratch);
}
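
PushInt64AsTwoSmis and PopInt64AsTwoSmis smuggle a raw 64-bit word through the stack as two valid smis, splitting it at kSmiShift and reassembling it on the way out. A round-trip sketch (not V8 code), assuming the default kSmiShift of 32:

// Sketch, not V8 code: the PushInt64AsTwoSmis / PopInt64AsTwoSmis round trip.
#include <cassert>
#include <cstdint>

const int kSmiShift = 32;

void Split(uint64_t src, uint64_t* high_smi, uint64_t* low_smi) {
  *high_smi = (src >> (64 - kSmiShift)) << kSmiShift;  // shr then shl: high half
  *low_smi = src << kSmiShift;                         // shl: low half as a smi
}

uint64_t Join(uint64_t high_smi, uint64_t low_smi) {
  uint64_t low = low_smi >> kSmiShift;                          // untag low half
  uint64_t high = (high_smi >> kSmiShift) << (64 - kSmiShift);  // reposition high
  return high | low;                                            // orp(dst, scratch)
}

int main() {
  const uint64_t value = 0x123456789abcdef0ull;
  uint64_t hi, lo;
  Split(value, &hi, &lo);
  assert(Join(hi, lo) == value);
  return 0;
}
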
@@ -2296,7 +2293,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
SmiToInteger32(
mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
shrl(mask, Immediate(1));
- subq(mask, Immediate(1)); // Make mask.
+ subp(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
@@ -2312,8 +2309,8 @@ void MacroAssembler::LookupNumberStringCache(Register object,
STATIC_ASSERT(8 == kDoubleSize);
movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- and_(scratch, mask);
+ xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+ andp(scratch, mask);
// Each entry in the string cache consists of two pointer-sized fields,
// but the times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by the addressing mode on the x64 platform.
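
The xorp/andp pair above is the cache hash for heap numbers: xor the two 32-bit halves of the double's bit pattern, then mask to the cache size. A sketch of that hash (not V8 code; the mask value below is an assumption for illustration):

// Sketch, not V8 code: the number-string-cache hash for heap numbers.
#include <cassert>
#include <cstdint>
#include <cstring>

uint32_t HeapNumberHash(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));           // the HeapNumber payload
  uint32_t low = static_cast<uint32_t>(bits);         // kValueOffset
  uint32_t high = static_cast<uint32_t>(bits >> 32);  // kValueOffset + 4
  return (high ^ low) & mask;                         // xorp; andp(scratch, mask)
}

int main() {
  const uint32_t kMask = 127;  // assumed: 128-entry cache, mask = entries - 1
  assert(HeapNumberHash(1.5, kMask) < 128);
  // Identical doubles hash identically, which is all the cache probe needs.
  assert(HeapNumberHash(2.25, kMask) == HeapNumberHash(2.25, kMask));
  return 0;
}
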
@@ -2336,7 +2333,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
bind(&is_smi);
SmiToInteger32(scratch, object);
- and_(scratch, mask);
+ andp(scratch, mask);
// Each entry in string cache consists of two pointer sized fields,
// but times_twice_pointer_size (multiplication by 16) scale factor
// is not supported by addrmode on x64 platform.
@@ -2344,7 +2341,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
shl(scratch, Immediate(kPointerSizeLog2 + 1));
// Check if the entry is the smi we are looking for.
- cmpq(object,
+ cmpp(object,
FieldOperand(number_string_cache,
index,
times_1,
@@ -2401,7 +2398,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
@@ -2448,7 +2445,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
- lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
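
In both hunks above, the leap with times_8 folds two masked instance-type bytes into one compare: scratch1 + scratch2 * 8 places the second field three bits above the first, and the preceding ASSERT_EQ guarantees the two fields cannot overlap or carry into each other. A sketch with illustrative (not authoritative) mask and tag values:

// Sketch, not V8 code: one compare for two masked instance types.
#include <cassert>
#include <cstdint>

bool BothFlatAscii(uint32_t type1, uint32_t type2,
                   uint32_t mask, uint32_t tag) {
  uint32_t a = type1 & mask;            // andl(scratch1, Immediate(mask))
  uint32_t b = type2 & mask;            // andl(scratch2, Immediate(mask))
  uint32_t combined = a + b * 8;        // leap(scratch1, Operand(a, b, times_8, 0))
  return combined == tag + (tag << 3);  // a single cmpl covers both strings
}

int main() {
  const uint32_t kMask = 0x87, kTag = 0x04;  // assumed values for illustration
  assert((kMask & (kMask << 3)) == 0);  // the ASSERT_EQ precondition: no overlap
  assert(BothFlatAscii(0x04, 0x04, kMask, kTag));
  assert(!BothFlatAscii(0x04, 0x05, kMask, kTag));
  return 0;
}
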
@@ -2520,7 +2517,7 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2531,7 +2528,7 @@ void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
Cmp(dst, Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ cmpp(dst, kScratchRegister);
}
}
@@ -2542,7 +2539,7 @@ void MacroAssembler::Push(Handle<Object> source) {
Push(Smi::cast(*source));
} else {
MoveHeapObject(kScratchRegister, source);
- push(kScratchRegister);
+ Push(kScratchRegister);
}
}
@@ -2574,7 +2571,87 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
- addq(rsp, Immediate(stack_elements * kPointerSize));
+ addp(rsp, Immediate(stack_elements * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::Push(Register src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ // x32 uses 64-bit push for rbp in the prologue.
+ ASSERT(src.code() != rbp.code());
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), src);
+ }
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+ if (kPointerSize == kInt64Size) {
+ pushq(src);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, src);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), kScratchRegister);
+ }
+}
+
+
+void MacroAssembler::Push(Immediate value) {
+ if (kPointerSize == kInt64Size) {
+ pushq(value);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), value);
+ }
+}
+
+
+void MacroAssembler::PushImm32(int32_t imm32) {
+ if (kPointerSize == kInt64Size) {
+ pushq_imm32(imm32);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ leal(rsp, Operand(rsp, -4));
+ movp(Operand(rsp, 0), Immediate(imm32));
+ }
+}
+
+
+void MacroAssembler::Pop(Register dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ // x32 uses 64-bit pop for rbp in the epilogue.
+ ASSERT(dst.code() != rbp.code());
+ movp(dst, Operand(rsp, 0));
+ leal(rsp, Operand(rsp, 4));
+ }
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+ if (kPointerSize == kInt64Size) {
+ popq(dst);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ Register scratch = dst.AddressUsesRegister(kScratchRegister)
+ ? kSmiConstantRegister : kScratchRegister;
+ movp(scratch, Operand(rsp, 0));
+ movp(dst, scratch);
+ leal(rsp, Operand(rsp, 4));
+ if (scratch.is(kSmiConstantRegister)) {
+ // Restore kSmiConstantRegister.
+ movp(kSmiConstantRegister,
+ reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
+ Assembler::RelocInfoNone());
+ }
}
}
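
The new Push/Pop bodies exist for the x32 ABI, where kPointerSize is 4 but the hardware pushq/popq always move rsp by 8; the macros therefore adjust rsp by hand with leal (which, like pushq, leaves the flags untouched) and store or load with a pointer-sized movp. A rough emulation of that path on a byte array (not V8 code), to show the intended stack layout:

// Sketch, not V8 code: the x32 Push/Pop path emulated on a byte stack.
#include <cassert>
#include <cstdint>
#include <cstring>

const int kPointerSize = 4;  // x32: kInt32Size

void Push(uint8_t* stack, int* rsp, uint32_t value) {
  *rsp -= kPointerSize;                             // leal(rsp, Operand(rsp, -4))
  std::memcpy(stack + *rsp, &value, kPointerSize);  // movp(Operand(rsp, 0), src)
}

uint32_t Pop(uint8_t* stack, int* rsp) {
  uint32_t value;
  std::memcpy(&value, stack + *rsp, kPointerSize);  // movp(dst, Operand(rsp, 0))
  *rsp += kPointerSize;                             // leal(rsp, Operand(rsp, 4))
  return value;
}

int main() {
  uint8_t stack[64];
  int rsp = sizeof(stack);
  Push(stack, &rsp, 0xdeadbeef);
  assert(Pop(stack, &rsp) == 0xdeadbeef);
  assert(rsp == static_cast<int>(sizeof(stack)));
  return 0;
}
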
@@ -2592,6 +2669,17 @@ void MacroAssembler::Jump(ExternalReference ext) {
}
+void MacroAssembler::Jump(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ jmp(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ jmp(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
Move(kScratchRegister, destination, rmode);
jmp(kScratchRegister);
@@ -2623,6 +2711,17 @@ void MacroAssembler::Call(ExternalReference ext) {
}
+void MacroAssembler::Call(const Operand& op) {
+ if (kPointerSize == kInt64Size) {
+ call(op);
+ } else {
+ ASSERT(kPointerSize == kInt32Size);
+ movp(kScratchRegister, op);
+ call(kScratchRegister);
+ }
+}
+
+
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(destination);
@@ -2651,26 +2750,26 @@ void MacroAssembler::Call(Handle<Code> code_object,
void MacroAssembler::Pushad() {
- push(rax);
- push(rcx);
- push(rdx);
- push(rbx);
+ Push(rax);
+ Push(rcx);
+ Push(rdx);
+ Push(rbx);
// Not pushing rsp or rbp.
- push(rsi);
- push(rdi);
- push(r8);
- push(r9);
+ Push(rsi);
+ Push(rdi);
+ Push(r8);
+ Push(r9);
// r10 is kScratchRegister.
- push(r11);
+ Push(r11);
// r12 is kSmiConstantRegister.
// r13 is kRootRegister.
- push(r14);
- push(r15);
+ Push(r14);
+ Push(r15);
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, -sp_delta));
+ leap(rsp, Operand(rsp, -sp_delta));
}
@@ -2678,23 +2777,23 @@ void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
- lea(rsp, Operand(rsp, sp_delta));
- pop(r15);
- pop(r14);
- pop(r11);
- pop(r9);
- pop(r8);
- pop(rdi);
- pop(rsi);
- pop(rbx);
- pop(rdx);
- pop(rcx);
- pop(rax);
+ leap(rsp, Operand(rsp, sp_delta));
+ Pop(r15);
+ Pop(r14);
+ Pop(r11);
+ Pop(r9);
+ Pop(r8);
+ Pop(rdi);
+ Pop(rsi);
+ Pop(rbx);
+ Pop(rdx);
+ Pop(rcx);
+ Pop(rax);
}
void MacroAssembler::Dropad() {
- addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
+ addp(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
}
@@ -2759,23 +2858,23 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// The frame pointer does not point to a JS frame so we save NULL for
// rbp. We expect the code throwing an exception to check rbp before
// dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
+ pushq(Immediate(0)); // NULL frame pointer.
Push(Smi::FromInt(0)); // No context.
} else {
- push(rbp);
- push(rsi);
+ pushq(rbp);
+ Push(rsi);
}
// Push the state and the code object.
unsigned state =
StackHandler::IndexField::encode(handler_index) |
StackHandler::KindField::encode(kind);
- push(Immediate(state));
+ Push(Immediate(state));
Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- push(ExternalOperand(handler_address));
+ Push(ExternalOperand(handler_address));
// Set this new handler as the current one.
movp(ExternalOperand(handler_address), rsp);
}
@@ -2784,8 +2883,8 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
void MacroAssembler::PopTryHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- pop(ExternalOperand(handler_address));
- addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+ Pop(ExternalOperand(handler_address));
+ addp(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
}
@@ -2798,7 +2897,7 @@ void MacroAssembler::JumpToHandlerEntry() {
movp(rdx,
FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
SmiToInteger64(rdx, rdx);
- lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
jmp(rdi);
}
@@ -2821,21 +2920,21 @@ void MacroAssembler::Throw(Register value) {
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
movp(rsp, ExternalOperand(handler_address));
// Restore the next handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Restore the context and frame pointer.
- pop(rsi); // Context.
- pop(rbp); // Frame pointer.
+ Pop(rsi); // Context.
+ popq(rbp); // Frame pointer.
// If the handler is a JS frame, restore the context to the frame.
// (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
// rbp or rsi.
Label skip;
- testq(rsi, rsi);
+ testp(rsi, rsi);
j(zero, &skip, Label::kNear);
movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
bind(&skip);
@@ -2875,15 +2974,15 @@ void MacroAssembler::ThrowUncatchable(Register value) {
j(not_zero, &fetch_next);
// Set the top handler address to next handler past the top ENTRY handler.
- pop(ExternalOperand(handler_address));
+ Pop(ExternalOperand(handler_address));
// Remove the code object and state, compute the handler address in rdi.
- pop(rdi); // Code object.
- pop(rdx); // Offset and state.
+ Pop(rdi); // Code object.
+ Pop(rdx); // Offset and state.
// Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(rsi);
- pop(rbp);
+ Pop(rsi);
+ popq(rbp);
JumpToHandlerEntry();
}
@@ -2899,7 +2998,7 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
ret(bytes_dropped);
} else {
PopReturnAddressTo(scratch);
- addq(rsp, Immediate(bytes_dropped));
+ addp(rsp, Immediate(bytes_dropped));
PushReturnAddressFrom(scratch);
ret(0);
}
@@ -3059,10 +3158,10 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
cvtsd2si(result_reg, input_reg);
testl(result_reg, Immediate(0xFFFFFF00));
j(zero, &done, Label::kNear);
- cmpl(result_reg, Immediate(0x80000000));
- j(equal, &conv_failure, Label::kNear);
+ cmpl(result_reg, Immediate(1));
+ j(overflow, &conv_failure, Label::kNear);
movl(result_reg, Immediate(0));
- setcc(above, result_reg);
+ setcc(sign, result_reg);
subl(result_reg, Immediate(1));
andl(result_reg, Immediate(255));
jmp(&done, Label::kNear);
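
The rewritten clamp drops the 0x80000000 sentinel compare: cvtsd2si signals an unconvertible input by producing INT32_MIN, and cmpl(result_reg, Immediate(1)) sets the overflow flag for exactly that value, so no large immediate is needed. The same compare's sign flag then separates negative inputs (clamp to 0) from too-large ones (clamp to 255). A sketch of the flag logic (not V8 code), assuming the sentinel behaviour described:

// Sketch, not V8 code: the flag logic after cmpl(result_reg, Immediate(1)).
#include <cassert>
#include <cstdint>

uint8_t ClampOutOfRange(int32_t v) {     // v not in [0, 255] and v != INT32_MIN
  bool sign = (v - 1) < 0;               // SF after the cmpl
  int32_t r = (sign ? 1 : 0) - 1;        // movl 0; setcc(sign); subl 1
  return static_cast<uint8_t>(r & 255);  // andl(result_reg, Immediate(255))
}

int main() {
  assert(ClampOutOfRange(-5) == 0);      // negative input clamps to 0
  assert(ClampOutOfRange(300) == 255);   // too-large input clamps to 255
  return 0;
}
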
@@ -3099,16 +3198,15 @@ void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
Label done;
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2siq(result_reg, xmm0);
- Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
// Slow case.
if (input_reg.is(result_reg)) {
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), xmm0);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
} else {
SlowTruncateToI(result_reg, input_reg);
}
@@ -3121,14 +3219,13 @@ void MacroAssembler::TruncateDoubleToI(Register result_reg,
XMMRegister input_reg) {
Label done;
cvttsd2siq(result_reg, input_reg);
- movq(kScratchRegister, V8_INT64_C(0x8000000000000000));
- cmpq(result_reg, kScratchRegister);
- j(not_equal, &done, Label::kNear);
+ cmpq(result_reg, Immediate(1));
+ j(no_overflow, &done, Label::kNear);
- subq(rsp, Immediate(kDoubleSize));
+ subp(rsp, Immediate(kDoubleSize));
movsd(MemOperand(rsp, 0), input_reg);
SlowTruncateToI(result_reg, rsp, 0);
- addq(rsp, Immediate(kDoubleSize));
+ addp(rsp, Immediate(kDoubleSize));
bind(&done);
}
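
Same trick in its 64-bit form: cvttsd2siq produces INT64_MIN on failure, and subtracting 1 overflows a signed 64-bit value only there, so cmpq(result_reg, Immediate(1)) plus j(no_overflow) replaces materializing the ten-byte 0x8000000000000000 constant. A sketch using a GCC/Clang builtin to model the overflow flag (an assumption about the toolchain, not V8 code):

// Sketch, not V8 code: modelling the OF of cmpq(result_reg, Immediate(1)).
#include <cassert>
#include <cstdint>

bool ConversionFailed(int64_t result) {
  int64_t unused;
  // Overflow of result - 1 happens only at the failure sentinel INT64_MIN.
  return __builtin_sub_overflow(result, int64_t{1}, &unused);
}

int main() {
  assert(ConversionFailed(INT64_MIN));   // the cvttsd2siq failure value
  assert(!ConversionFailed(0));
  assert(!ConversionFailed(INT64_MAX));
  return 0;
}
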
@@ -3204,15 +3301,15 @@ void MacroAssembler::Throw(BailoutReason reason) {
}
#endif
- push(rax);
+ Push(rax);
Push(Smi::FromInt(reason));
if (!has_frame_) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
} else {
- CallRuntime(Runtime::kThrowMessage, 1);
+ CallRuntime(Runtime::kHiddenThrowMessage, 1);
}
// Control will not return here.
int3();
@@ -3244,7 +3341,7 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
movp(dst, FieldOperand(map, Map::kBitField3Offset));
Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- and_(dst, kScratchRegister);
+ andp(dst, kScratchRegister);
}
@@ -3315,10 +3412,10 @@ void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAString);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, FIRST_NONSTRING_TYPE);
- pop(object);
+ Pop(object);
Check(below, kOperandIsNotAString);
}
}
@@ -3328,22 +3425,35 @@ void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
testb(object, Immediate(kSmiTagMask));
Check(not_equal, kOperandIsASmiAndNotAName);
- push(object);
+ Push(object);
movp(object, FieldOperand(object, HeapObject::kMapOffset));
CmpInstanceType(object, LAST_NAME_TYPE);
- pop(object);
+ Pop(object);
Check(below_equal, kOperandIsNotAName);
}
}
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
+ if (emit_debug_code()) {
+ Label done_checking;
+ AssertNotSmi(object);
+ Cmp(object, isolate()->factory()->undefined_value());
+ j(equal, &done_checking);
+ Cmp(FieldOperand(object, 0), isolate()->factory()->allocation_site_map());
+ Assert(equal, kExpectedUndefinedOrCell);
+ bind(&done_checking);
+ }
+}
+
+
void MacroAssembler::AssertRootValue(Register src,
Heap::RootListIndex root_value_index,
BailoutReason reason) {
if (emit_debug_code()) {
ASSERT(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
- cmpq(src, kScratchRegister);
+ cmpp(src, kScratchRegister);
Check(equal, reason);
}
}
@@ -3591,14 +3701,14 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
- cmpq(expected.reg(), Immediate(actual.immediate()));
+ cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
ASSERT(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
- cmpq(expected.reg(), actual.reg());
+ cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
ASSERT(actual.reg().is(rax));
ASSERT(expected.reg().is(rbx));
@@ -3609,7 +3719,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (!code_constant.is_null()) {
Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
- addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
} else if (!code_register.is(rdx)) {
movp(rdx, code_register);
}
@@ -3631,9 +3741,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
if (frame_mode == BUILD_STUB_FRAME) {
- push(rbp); // Caller's frame pointer.
+ pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- push(rsi); // Callee's context.
+ Push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
} else {
PredictableCodeSizeScope predictible_code_size_scope(this,
@@ -3644,27 +3754,27 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
RelocInfo::CODE_AGE_SEQUENCE);
Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
- push(rbp); // Caller's frame pointer.
+ pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
- push(rsi); // Callee's context.
- push(rdi); // Callee's JS function.
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
}
}
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- push(rbp);
+ pushq(rbp);
movp(rbp, rsp);
- push(rsi); // Context.
+ Push(rsi); // Context.
Push(Smi::FromInt(type));
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister);
+ Push(kScratchRegister);
if (emit_debug_code()) {
Move(kScratchRegister,
isolate()->factory()->undefined_value(),
RelocInfo::EMBEDDED_OBJECT);
- cmpq(Operand(rsp, 0), kScratchRegister);
+ cmpp(Operand(rsp, 0), kScratchRegister);
Check(not_equal, kCodeObjectNotProperlyPatched);
}
}
@@ -3673,11 +3783,11 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (emit_debug_code()) {
Move(kScratchRegister, Smi::FromInt(type));
- cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
+ cmpp(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
Check(equal, kStackFrameTypesMustMatch);
}
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
}
@@ -3688,14 +3798,14 @@ void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
kFPOnStackSize + kPCOnStackSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
- push(rbp);
+ pushq(rbp);
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
- push(Immediate(0)); // Saved entry sp, patched before call.
+ Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
- push(kScratchRegister); // Accessed from EditFrame::code_slot.
+ Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
if (save_rax) {
@@ -3717,14 +3827,14 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (save_doubles) {
int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
- subq(rsp, Immediate(space));
+ subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
- subq(rsp, Immediate(arg_stack_space * kRegisterSize));
+ subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
@@ -3732,7 +3842,7 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
if (kFrameAlignment > 0) {
ASSERT(IsPowerOf2(kFrameAlignment));
ASSERT(is_int8(kFrameAlignment));
- and_(rsp, Immediate(-kFrameAlignment));
+ andp(rsp, Immediate(-kFrameAlignment));
}
// Patch the saved entry sp.
@@ -3746,7 +3856,7 @@ void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
// Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, r14, times_pointer_size, offset));
+ leap(r15, Operand(rbp, r14, times_pointer_size, offset));
EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
@@ -3774,7 +3884,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Drop everything up to and including the arguments and the receiver
// from the caller stack.
- lea(rsp, Operand(r15, 1 * kPointerSize));
+ leap(rsp, Operand(r15, 1 * kPointerSize));
PushReturnAddressFrom(rcx);
@@ -3784,7 +3894,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
movp(rsp, rbp);
- pop(rbp);
+ popq(rbp);
LeaveExitFrameEpilogue(restore_context);
}
@@ -3821,7 +3931,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmpq(scratch, Immediate(0));
+ cmpp(scratch, Immediate(0));
Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
}
// Load the native context of the current context.
@@ -3838,7 +3948,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
}
// Check if both contexts are the same.
- cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmpp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens.
@@ -3849,7 +3959,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// Preserve original value of holder_reg.
- push(holder_reg);
+ Push(holder_reg);
movp(holder_reg,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
CompareRoot(holder_reg, Heap::kNullValueRootIndex);
@@ -3859,7 +3969,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
movp(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
- pop(holder_reg);
+ Pop(holder_reg);
}
movp(kScratchRegister,
@@ -3867,7 +3977,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
int token_offset =
Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
movp(scratch, FieldOperand(scratch, token_offset));
- cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
+ cmpp(scratch, FieldOperand(kScratchRegister, token_offset));
j(not_equal, miss);
bind(&same_contexts);
@@ -3958,14 +4068,14 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
if (i > 0) {
addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
}
- and_(r2, r1);
+ andp(r2, r1);
// Scale the index by multiplying by the entry size.
ASSERT(SeededNumberDictionary::kEntrySize == 3);
- lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
+ leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
// Check if the key matches.
- cmpq(key, FieldOperand(elements,
+ cmpp(key, FieldOperand(elements,
r2,
times_pointer_size,
SeededNumberDictionary::kElementsStartOffset));
@@ -4005,7 +4115,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result,
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
- cmpq(result, top_operand);
+ cmpp(result, top_operand);
Check(equal, kUnexpectedAllocationTop);
#endif
return;
@@ -4026,7 +4136,7 @@ void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
if (emit_debug_code()) {
- testq(result_end, Immediate(kObjectAlignmentMask));
+ testp(result_end, Immediate(kObjectAlignmentMask));
Check(zero, kUnalignedAllocationInNewSpace);
}
@@ -4086,10 +4196,10 @@ void MacroAssembler::Allocate(int object_size,
if (!top_reg.is(result)) {
movp(top_reg, result);
}
- addq(top_reg, Immediate(object_size));
+ addp(top_reg, Immediate(object_size));
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(top_reg, limit_operand);
+ cmpp(top_reg, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4098,14 +4208,14 @@ void MacroAssembler::Allocate(int object_size,
bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
if (tag_result) {
- subq(result, Immediate(object_size - kHeapObjectTag));
+ subp(result, Immediate(object_size - kHeapObjectTag));
} else {
- subq(result, Immediate(object_size));
+ subp(result, Immediate(object_size));
}
} else if (tag_result) {
// Tag the result if requested.
ASSERT(kHeapObjectTag == 1);
- incq(result);
+ incp(result);
}
}
@@ -4119,7 +4229,7 @@ void MacroAssembler::Allocate(int header_size,
Label* gc_required,
AllocationFlags flags) {
ASSERT((flags & SIZE_IN_WORDS) == 0);
- lea(result_end, Operand(element_count, element_size, header_size));
+ leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
@@ -4162,10 +4272,10 @@ void MacroAssembler::Allocate(Register object_size,
if (!object_size.is(result_end)) {
movp(result_end, object_size);
}
- addq(result_end, result);
+ addp(result_end, result);
j(carry, gc_required);
Operand limit_operand = ExternalOperand(allocation_limit);
- cmpq(result_end, limit_operand);
+ cmpp(result_end, limit_operand);
j(above, gc_required);
// Update allocation top.
@@ -4173,7 +4283,7 @@ void MacroAssembler::Allocate(Register object_size,
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ addp(result, Immediate(kHeapObjectTag));
}
}
@@ -4183,10 +4293,10 @@ void MacroAssembler::UndoAllocationInNewSpace(Register object) {
ExternalReference::new_space_allocation_top_address(isolate());
// Make sure the object has no tag before resetting top.
- and_(object, Immediate(~kHeapObjectTagMask));
+ andp(object, Immediate(~kHeapObjectTagMask));
Operand top_operand = ExternalOperand(new_space_allocation_top);
#ifdef DEBUG
- cmpq(object, top_operand);
+ cmpp(object, top_operand);
Check(below, kUndoAllocationOfNonAllocatedMemory);
#endif
movp(top_operand, object);
@@ -4217,11 +4327,11 @@ void MacroAssembler::AllocateTwoByteString(Register result,
kObjectAlignmentMask;
ASSERT(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
- lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
+ leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate two byte string in new space.
@@ -4256,10 +4366,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
- addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
- and_(scratch1, Immediate(~kObjectAlignmentMask));
+ addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
+ andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
- subq(scratch1, Immediate(kHeaderAlignment));
+ subp(scratch1, Immediate(kHeaderAlignment));
}
// Allocate ASCII string in new space.
@@ -4405,12 +4515,12 @@ void MacroAssembler::CopyBytes(Register destination,
// at the end of the ranges.
movp(scratch, length);
shrl(length, Immediate(kPointerSizeLog2));
- repmovsq();
+ repmovsp();
// Move remaining bytes of length.
andl(scratch, Immediate(kPointerSize - 1));
movp(length, Operand(source, scratch, times_1, -kPointerSize));
movp(Operand(destination, scratch, times_1, -kPointerSize), length);
- addq(destination, scratch);
+ addp(destination, scratch);
if (min_length <= kLongStringLimit) {
jmp(&done, Label::kNear);
@@ -4426,7 +4536,7 @@ void MacroAssembler::CopyBytes(Register destination,
// Move remaining bytes of length.
movp(scratch, Operand(source, length, times_1, -kPointerSize));
movp(Operand(destination, length, times_1, -kPointerSize), scratch);
- addq(destination, length);
+ addp(destination, length);
jmp(&done, Label::kNear);
bind(&short_string);
@@ -4438,8 +4548,8 @@ void MacroAssembler::CopyBytes(Register destination,
bind(&short_loop);
movb(scratch, Operand(source, 0));
movb(Operand(destination, 0), scratch);
- incq(source);
- incq(destination);
+ incp(source);
+ incp(destination);
decl(length);
j(not_zero, &short_loop);
}
@@ -4455,9 +4565,9 @@ void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
jmp(&entry);
bind(&loop);
movp(Operand(start_offset, 0), filler);
- addq(start_offset, Immediate(kPointerSize));
+ addp(start_offset, Immediate(kPointerSize));
bind(&entry);
- cmpq(start_offset, end_offset);
+ cmpp(start_offset, end_offset);
j(less, &loop);
}
@@ -4505,7 +4615,7 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
int offset = expected_kind * kPointerSize +
FixedArrayBase::kHeaderSize;
- cmpq(map_in_out, FieldOperand(scratch, offset));
+ cmpp(map_in_out, FieldOperand(scratch, offset));
j(not_equal, no_map_match);
// Use the transitioned cached map.
@@ -4515,30 +4625,6 @@ void MacroAssembler::LoadTransitionedArrayMapConditional(
}
-void MacroAssembler::LoadInitialArrayMap(
- Register function_in, Register scratch,
- Register map_out, bool can_have_holes) {
- ASSERT(!function_in.is(map_out));
- Label done;
- movp(map_out, FieldOperand(function_in,
- JSFunction::kPrototypeOrInitialMapOffset));
- if (!FLAG_smi_only_arrays) {
- ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- kind,
- map_out,
- scratch,
- &done);
- } else if (can_have_holes) {
- LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
- FAST_HOLEY_SMI_ELEMENTS,
- map_out,
- scratch,
- &done);
- }
- bind(&done);
-}
-
#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
@@ -4556,15 +4642,6 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
-void MacroAssembler::LoadArrayFunction(Register function) {
- movp(function,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
- movp(function,
- Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
-}
-
-
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
@@ -4608,13 +4685,13 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
Abort(kNonObject);
bind(&is_object);
- push(value);
+ Push(value);
movp(value, FieldOperand(string, HeapObject::kMapOffset));
- movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+ movzxbp(value, FieldOperand(value, Map::kInstanceTypeOffset));
andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- cmpq(value, Immediate(encoding_mask));
- pop(value);
+ cmpp(value, Immediate(encoding_mask));
+ Pop(value);
Check(equal, kUnexpectedStringType);
// The index is assumed to be untagged coming in; tag it to compare with the
@@ -4642,8 +4719,8 @@ void MacroAssembler::PrepareCallCFunction(int num_arguments) {
ASSERT(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
- and_(rsp, Immediate(-frame_alignment));
+ subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
+ andp(rsp, Immediate(-frame_alignment));
movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister);
}
@@ -4712,10 +4789,10 @@ void MacroAssembler::CheckPageFlag(
Label::Distance condition_met_distance) {
ASSERT(cc == zero || cc == not_zero);
if (scratch.is(object)) {
- and_(scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
movp(scratch, Immediate(~Page::kPageAlignmentMask));
- and_(scratch, object);
+ andp(scratch, object);
}
if (mask < (1 << kBitsPerByte)) {
testb(Operand(scratch, MemoryChunk::kFlagsOffset),
@@ -4734,7 +4811,7 @@ void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
Move(scratch, map);
movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
SmiToInteger32(scratch, scratch);
- and_(scratch, Immediate(Map::Deprecated::kMask));
+ andp(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
@@ -4754,10 +4831,10 @@ void MacroAssembler::JumpIfBlack(Register object,
movp(rcx, mask_scratch);
// Make rcx into a mask that covers both marking bits using the operation
// rcx = mask | (mask << 1).
- lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
+ leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
// Note that we are using a 4-byte aligned 8-byte load.
- and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
- cmpq(mask_scratch, rcx);
+ andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ cmpp(mask_scratch, rcx);
j(equal, on_black, on_black_distance);
}
@@ -4791,19 +4868,19 @@ void MacroAssembler::GetMarkBits(Register addr_reg,
ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
movp(bitmap_reg, addr_reg);
// Sign extended 32 bit immediate.
- and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
movp(rcx, addr_reg);
int shift =
Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
shrl(rcx, Immediate(shift));
- and_(rcx,
+ andp(rcx,
Immediate((Page::kPageAlignmentMask >> shift) &
~(Bitmap::kBytesPerCell - 1)));
- addq(bitmap_reg, rcx);
+ addp(bitmap_reg, rcx);
movp(rcx, addr_reg);
shrl(rcx, Immediate(kPointerSizeLog2));
- and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
+ andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
movl(mask_reg, Immediate(1));
shl_cl(mask_reg);
}
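
GetMarkBits splits an address into its page base, the byte offset of the 32-bit bitmap cell inside the page header, and a bit mask within that cell; each mark bit covers one pointer-sized word. A sketch of the same arithmetic (not V8 code) with illustrative parameters (1 MB pages, 4-byte cells, 8-byte words; assumptions, not the authoritative constants; the MemoryChunk header offset is applied at the use site):

// Sketch, not V8 code: the GetMarkBits address split.
#include <cassert>
#include <cstdint>

const uintptr_t kPageAlignmentMask = (uintptr_t{1} << 20) - 1;
const int kPointerSizeLog2 = 3;   // one mark bit per 8-byte word
const int kBitsPerCellLog2 = 5;   // 32 bits per bitmap cell
const int kBytesPerCellLog2 = 2;  // 4 bytes per bitmap cell

void GetMarkBits(uintptr_t addr, uintptr_t* cell_addr, uint32_t* mask) {
  uintptr_t bitmap = addr & ~kPageAlignmentMask;  // andp: page start
  int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
  // shrl then andp: page-relative byte offset of the cell, cell-aligned.
  uintptr_t cell_offset =
      (addr >> shift) & ((kPageAlignmentMask >> shift) & ~uintptr_t{3});
  *cell_addr = bitmap + cell_offset;              // addp(bitmap_reg, rcx)
  uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  *mask = uint32_t{1} << bit;                     // movl(mask, 1); shl_cl(mask)
}

int main() {
  uintptr_t cell;
  uint32_t mask;
  GetMarkBits(0x100040, &cell, &mask);
  assert(cell == 0x100000);    // word 8 of the page lives in cell 0...
  assert(mask == (1u << 8));   // ...at bit 8
  return 0;
}
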
@@ -4828,20 +4905,20 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there, we only need to check one bit.
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(not_zero, &done, Label::kNear);
if (emit_debug_code()) {
// Check for impossible bit pattern.
Label ok;
- push(mask_scratch);
+ Push(mask_scratch);
// shl. May overflow, making the check conservative.
- addq(mask_scratch, mask_scratch);
- testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ addp(mask_scratch, mask_scratch);
+ testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
- pop(mask_scratch);
+ Pop(mask_scratch);
}
// Value is white. We check whether it is data that doesn't need scanning.
@@ -4884,21 +4961,21 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
ASSERT(kOneByteStringTag == 0x04);
- and_(length, Immediate(kStringEncodingMask));
- xor_(length, Immediate(kStringEncodingMask));
- addq(length, Immediate(0x04));
+ andp(length, Immediate(kStringEncodingMask));
+ xorp(length, Immediate(kStringEncodingMask));
+ addp(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
- imul(length, FieldOperand(value, String::kLengthOffset));
+ imulp(length, FieldOperand(value, String::kLengthOffset));
shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
- addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
- and_(length, Immediate(~kObjectAlignmentMask));
+ addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
+ andp(length, Immediate(~kObjectAlignmentMask));
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
+ orp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
- and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
+ andp(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);
bind(&done);
@@ -4935,18 +5012,18 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
// Check that there are no elements. Register rcx contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
- cmpq(empty_fixed_array_value,
+ cmpp(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
j(equal, &no_elements);
// Second chance: the object may be using the empty slow element dictionary.
LoadRoot(kScratchRegister, Heap::kEmptySlowElementDictionaryRootIndex);
- cmpq(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
+ cmpp(kScratchRegister, FieldOperand(rcx, JSObject::kElementsOffset));
j(not_equal, call_runtime);
bind(&no_elements);
movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- cmpq(rcx, null_value);
+ cmpp(rcx, null_value);
j(not_equal, &next);
}
@@ -4959,12 +5036,12 @@ void MacroAssembler::TestJSArrayForAllocationMemento(
ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(isolate());
- lea(scratch_reg, Operand(receiver_reg,
+ leap(scratch_reg, Operand(receiver_reg,
JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
Move(kScratchRegister, new_space_start);
- cmpq(scratch_reg, kScratchRegister);
+ cmpp(scratch_reg, kScratchRegister);
j(less, no_memento_found);
- cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ cmpp(scratch_reg, ExternalOperand(new_space_allocation_top));
j(greater, no_memento_found);
CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
Heap::kAllocationMementoMapRootIndex);
@@ -4987,9 +5064,9 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
- and_(scratch1, Immediate(Map::kElementsKindMask));
+ andp(scratch1, Immediate(Map::kElementsKindMask));
shr(scratch1, Immediate(Map::kElementsKindShift));
- cmpq(scratch1, Immediate(DICTIONARY_ELEMENTS));
+ cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
@@ -4997,6 +5074,21 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
+void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
+ ASSERT(!dividend.is(rax));
+ ASSERT(!dividend.is(rdx));
+ MultiplierAndShift ms(divisor);
+ movl(rax, Immediate(ms.multiplier()));
+ imull(dividend);
+ if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
+ if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
+ if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+ movl(rax, dividend);
+ shrl(rax, Immediate(31));
+ addl(rdx, rax);
+}
+
+
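+
+The new TruncatingDiv is the classic Granlund-Montgomery division by a constant: multiply by a precomputed magic number, take the high half of the product from rdx, apply the sign fix-ups, then shift. A sketch for one concrete divisor (not V8 code); the multiplier/shift pair below is what the standard construction yields for 7, an assumption rather than a value read out of MultiplierAndShift:
+
+    // Sketch, not V8 code: the multiply-and-shift division TruncatingDiv emits,
+    // specialized to divisor 7.
+    #include <cassert>
+    #include <cstdint>
+
+    int32_t TruncatingDiv7(int32_t dividend) {
+      const int32_t kMultiplier = static_cast<int32_t>(0x92492493u);  // magic for 7
+      const int kShift = 2;
+      int64_t product = static_cast<int64_t>(kMultiplier) * dividend;
+      int32_t rdx = static_cast<int32_t>(product >> 32);  // imull: high half in rdx
+      rdx += dividend;  // divisor > 0 && multiplier < 0: addl(rdx, dividend)
+      rdx >>= kShift;   // sarl(rdx, Immediate(ms.shift()))
+      rdx += static_cast<uint32_t>(dividend) >> 31;  // shrl 31; addl: fix negatives
+      return rdx;
+    }
+
+    int main() {
+      for (int32_t v : {0, 1, 6, 7, 100, -1, -7, -100, 2147483647}) {
+        assert(TruncatingDiv7(v) == v / 7);  // truncates toward zero, like idiv
+      }
+      return 0;
+    }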
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 42245aa80..af65a6546 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -336,7 +336,7 @@ class MacroAssembler: public Assembler {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Move(kRootRegister, roots_array_start);
- addq(kRootRegister, Immediate(kRootRegisterBias));
+ addp(kRootRegister, Immediate(kRootRegisterBias));
}
// ---------------------------------------------------------------------------
@@ -802,7 +802,7 @@ class MacroAssembler: public Assembler {
// Load a register with a long value as efficiently as possible.
void Set(Register dst, int64_t x);
- void Set(const Operand& dst, int64_t x);
+ void Set(const Operand& dst, intptr_t x);
// The cvtsi2sd instruction only writes to the low 64 bits of the dst register,
// which hinders register renaming and makes dependence chains longer. So we use
@@ -837,12 +837,16 @@ class MacroAssembler: public Assembler {
void Drop(int stack_elements);
void Call(Label* target) { call(target); }
- void Push(Register src) { push(src); }
- void Pop(Register dst) { pop(dst); }
- void PushReturnAddressFrom(Register src) { push(src); }
- void PopReturnAddressTo(Register dst) { pop(dst); }
+ void Push(Register src);
+ void Push(const Operand& src);
+ void Push(Immediate value);
+ void PushImm32(int32_t imm32);
+ void Pop(Register dst);
+ void Pop(const Operand& dst);
+ void PushReturnAddressFrom(Register src) { pushq(src); }
+ void PopReturnAddressTo(Register dst) { popq(dst); }
void Move(Register dst, ExternalReference ext) {
- movp(dst, reinterpret_cast<Address>(ext.address()),
+ movp(dst, reinterpret_cast<void*>(ext.address()),
RelocInfo::EXTERNAL_REFERENCE);
}
@@ -859,16 +863,18 @@ class MacroAssembler: public Assembler {
ASSERT(!RelocInfo::IsNone(rmode));
ASSERT(value->IsHeapObject());
ASSERT(!isolate()->heap()->InNewSpace(*value));
- movp(dst, value.location(), rmode);
+ movp(dst, reinterpret_cast<void*>(value.location()), rmode);
}
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(ExternalReference ext);
+ void Jump(const Operand& op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
+ void Call(const Operand& op);
void Call(Handle<Code> code_object,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id = TypeFeedbackId::None());
@@ -1021,7 +1027,7 @@ class MacroAssembler: public Assembler {
static const int shift = Field::kShift + kSmiShift;
static const int mask = Field::kMask >> Field::kShift;
shr(reg, Immediate(shift));
- and_(reg, Immediate(mask));
+ andp(reg, Immediate(mask));
shl(reg, Immediate(kSmiShift));
}
@@ -1045,6 +1051,10 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object);
+
// Abort execution if argument is not the root value with the given index,
// enabled via --debug-code.
void AssertRootValue(Register src,
@@ -1232,15 +1242,8 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* no_map_match);
- // Load the initial map for new Arrays from a JSFunction.
- void LoadInitialArrayMap(Register function_in,
- Register scratch,
- Register map_out,
- bool can_have_holes);
-
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
- void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1368,6 +1371,10 @@ class MacroAssembler: public Assembler {
Register filler);
+ // Emit code for a truncating division by a constant. The dividend register is
+ // unchanged, the result is in rdx, and rax gets clobbered.
+ void TruncatingDiv(Register dividend, int32_t divisor);
+
// ---------------------------------------------------------------------------
// StatsCounter support
@@ -1605,9 +1612,9 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
masm->pushfq(); \
masm->Pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->Push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
- masm->pop(rax); \
+ masm->Pop(rax); \
masm->Popad(); \
masm->popfq(); \
} \
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 75e70c597..c819c71cb 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -166,7 +166,7 @@ void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
ASSERT(reg >= 0);
ASSERT(reg < num_registers_);
if (by != 0) {
- __ addq(register_location(reg), Immediate(by));
+ __ addp(register_location(reg), Immediate(by));
}
}
@@ -175,7 +175,7 @@ void RegExpMacroAssemblerX64::Backtrack() {
CheckPreemption();
// Pop Code* offset from backtrack stack, add Code* and jump to location.
Pop(rbx);
- __ addq(rbx, code_object_pointer());
+ __ addp(rbx, code_object_pointer());
__ jmp(rbx);
}
@@ -203,8 +203,8 @@ void RegExpMacroAssemblerX64::CheckAtStart(Label* on_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, &not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(equal, on_at_start);
__ bind(&not_at_start);
}
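
Editor's note: CheckAtStart relies on the register convention used throughout this file: rsi holds the address of the end of the input and rdi a non-positive byte offset from that end, so leap rax, [rsi+rdi] yields the current character address, which is then compared against kInputStart. A small C++ sketch of that representation (names invented for illustration):

#include <cassert>
#include <cstdint>

// 'end' plays the role of rsi, 'offset' the role of rdi (always <= 0).
struct Position {
  const char* input_start;
  const char* end;
  intptr_t offset;

  const char* Current() const { return end + offset; }       // leap rax, [rsi+rdi]
  bool AtStart() const { return Current() == input_start; }  // cmpp against kInputStart
};

int main() {
  const char text[] = "abc";
  Position p{text, text + 3, -3};
  assert(p.AtStart());
  p.offset += 1;  // advance one one-byte character
  assert(!p.AtStart());
}
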
@@ -215,8 +215,8 @@ void RegExpMacroAssemblerX64::CheckNotAtStart(Label* on_not_at_start) {
__ cmpl(Operand(rbp, kStartIndex), Immediate(0));
BranchOrBacktrack(not_equal, on_not_at_start);
// If we did, are we still at the start of the input?
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
- __ cmpq(rax, Operand(rbp, kInputStart));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
+ __ cmpp(rax, Operand(rbp, kInputStart));
BranchOrBacktrack(not_equal, on_not_at_start);
}
@@ -243,7 +243,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
Label fallthrough;
__ movq(rdx, register_location(start_reg)); // Offset of start of capture
__ movq(rbx, register_location(start_reg + 1)); // Offset of end of capture
- __ subq(rbx, rdx); // Length of capture.
+ __ subp(rbx, rdx); // Length of capture.
// -----------------------
// rdx = Start offset of capture.
@@ -273,9 +273,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
on_no_match = &backtrack_label_;
}
- __ lea(r9, Operand(rsi, rdx, times_1, 0));
- __ lea(r11, Operand(rsi, rdi, times_1, 0));
- __ addq(rbx, r9); // End of capture
+ __ leap(r9, Operand(rsi, rdx, times_1, 0));
+ __ leap(r11, Operand(rsi, rdi, times_1, 0));
+ __ addp(rbx, r9); // End of capture
// ---------------------
// r11 - current input character address
// r9 - current capture character address
@@ -293,8 +293,8 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Mismatch, try case-insensitive match (converting letters to lower-case).
// I.e., if or-ing with 0x20 makes values equal and in range 'a'-'z', it's
// a match.
- __ or_(rax, Immediate(0x20)); // Convert match character to lower-case.
- __ or_(rdx, Immediate(0x20)); // Convert capture character to lower-case.
+ __ orp(rax, Immediate(0x20)); // Convert match character to lower-case.
+ __ orp(rdx, Immediate(0x20)); // Convert capture character to lower-case.
__ cmpb(rax, rdx);
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
@@ -308,10 +308,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(equal, on_no_match);
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
- __ addq(r11, Immediate(1));
- __ addq(r9, Immediate(1));
+ __ addp(r11, Immediate(1));
+ __ addp(r9, Immediate(1));
// Compare to end of capture, and loop if not done.
- __ cmpq(r9, rbx);
+ __ cmpp(r9, rbx);
__ j(below, &loop);
// Compute new value of character position after the matched part.
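
Editor's note: the loop above implements the classic ASCII trick spelled out in its comments: OR-ing a byte with 0x20 folds 'A'..'Z' onto 'a'..'z', so two bytes match case-insensitively when their folded forms are equal and lie within 'a'..'z'. The range check is what rejects non-letter pairs such as '@'/'`' that collide only after the OR. In C++:

#include <cassert>

bool EqualIgnoreCaseAscii(unsigned char a, unsigned char b) {
  a |= 0x20;                 // fold to lower case
  b |= 0x20;
  if (a != b) return false;  // definitely not equal
  return static_cast<unsigned char>(a - 'a') <= 'z' - 'a';  // must be a letter
}

int main() {
  assert(EqualIgnoreCaseAscii('A', 'a'));
  assert(!EqualIgnoreCaseAscii('@', '`'));  // 0x40 vs 0x60: equal after OR, not letters
}
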
@@ -322,10 +322,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Save important/volatile registers before calling C function.
#ifndef _WIN64
// Caller save on Linux and callee save in Windows.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
- __ push(backtrack_stackpointer());
+ __ pushq(backtrack_stackpointer());
static const int num_arguments = 4;
__ PrepareCallCFunction(num_arguments);
@@ -337,18 +337,18 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate
#ifdef _WIN64
// Compute and set byte_offset1 (start of capture).
- __ lea(rcx, Operand(rsi, rdx, times_1, 0));
+ __ leap(rcx, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
- __ lea(rdx, Operand(rsi, rdi, times_1, 0));
+ __ leap(rdx, Operand(rsi, rdi, times_1, 0));
// Set byte_length.
__ movp(r8, rbx);
// Isolate.
__ LoadAddress(r9, ExternalReference::isolate_address(isolate()));
#else // AMD64 calling convention
// Compute byte_offset2 (current position = rsi+rdi).
- __ lea(rax, Operand(rsi, rdi, times_1, 0));
+ __ leap(rax, Operand(rsi, rdi, times_1, 0));
// Compute and set byte_offset1 (start of capture).
- __ lea(rdi, Operand(rsi, rdx, times_1, 0));
+ __ leap(rdi, Operand(rsi, rdx, times_1, 0));
// Set byte_offset2.
__ movp(rsi, rax);
// Set byte_length.
@@ -367,14 +367,14 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Restore original values before reacting on result value.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(backtrack_stackpointer());
+ __ popq(backtrack_stackpointer());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
// Check if function returned non-zero for success or zero for failure.
- __ testq(rax, rax);
+ __ testp(rax, rax);
BranchOrBacktrack(zero, on_no_match);
// On success, increment position by length of capture.
// Requires that rbx is callee save (true for both Win64 and AMD64 ABIs).
@@ -392,7 +392,7 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
// Find length of back-referenced capture.
__ movq(rdx, register_location(start_reg));
__ movq(rax, register_location(start_reg + 1));
- __ subq(rax, rdx); // Length to check.
+ __ subp(rax, rdx); // Length to check.
// Fail on partial or illegal capture (start of capture after end of capture).
// This must not happen (no back-reference can reference a capture that wasn't
@@ -412,9 +412,9 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
BranchOrBacktrack(greater, on_no_match);
// Compute pointers to match string and capture string
- __ lea(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
- __ addq(rdx, rsi); // Start of capture.
- __ lea(r9, Operand(rdx, rax, times_1, 0)); // End of capture
+ __ leap(rbx, Operand(rsi, rdi, times_1, 0)); // Start of match.
+ __ addp(rdx, rsi); // Start of capture.
+ __ leap(r9, Operand(rdx, rax, times_1, 0)); // End of capture
// -----------------------
// rbx - current capture character address.
@@ -433,10 +433,10 @@ void RegExpMacroAssemblerX64::CheckNotBackReference(
}
BranchOrBacktrack(not_equal, on_no_match);
// Increment pointers into capture and match string.
- __ addq(rbx, Immediate(char_size()));
- __ addq(rdx, Immediate(char_size()));
+ __ addp(rbx, Immediate(char_size()));
+ __ addp(rdx, Immediate(char_size()));
// Check if we have reached end of match area.
- __ cmpq(rdx, r9);
+ __ cmpp(rdx, r9);
__ j(below, &loop);
// Success.
@@ -462,7 +462,7 @@ void RegExpMacroAssemblerX64::CheckCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(equal, on_equal);
@@ -476,7 +476,7 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterAnd(uint32_t c,
__ testl(current_character(), Immediate(mask));
} else {
__ movl(rax, Immediate(mask));
- __ and_(rax, current_character());
+ __ andp(rax, current_character());
__ cmpl(rax, Immediate(c));
}
BranchOrBacktrack(not_equal, on_not_equal);
@@ -489,8 +489,8 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
uc16 mask,
Label* on_not_equal) {
ASSERT(minus < String::kMaxUtf16CodeUnit);
- __ lea(rax, Operand(current_character(), -minus));
- __ and_(rax, Immediate(mask));
+ __ leap(rax, Operand(current_character(), -minus));
+ __ andp(rax, Immediate(mask));
__ cmpl(rax, Immediate(c));
BranchOrBacktrack(not_equal, on_not_equal);
}
@@ -523,7 +523,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Register index = current_character();
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ movp(rbx, current_character());
- __ and_(rbx, Immediate(kTableMask));
+ __ andp(rbx, Immediate(kTableMask));
index = rbx;
}
__ cmpb(FieldOperand(rax, index, times_1, ByteArray::kHeaderSize),
@@ -536,7 +536,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check, using the sequence:
- // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // leap(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
// cmp(rax, Immediate(max - min))
switch (type) {
case 's':
@@ -547,7 +547,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success, Label::kNear);
// Check range 0x09..0x0d
- __ lea(rax, Operand(current_character(), -'\t'));
+ __ leap(rax, Operand(current_character(), -'\t'));
__ cmpl(rax, Immediate('\r' - '\t'));
__ j(below_equal, &success, Label::kNear);
// \u00a0 (NBSP).
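
Editor's note: the 0x09..0x0d test above uses the idiom described at the top of CheckSpecialCharacterClass: subtracting the lower bound and doing one unsigned comparison replaces the two-sided min <= c && c <= max test, because c - min wraps to a large unsigned value whenever c < min. In C++:

#include <cassert>

// One cmp + one branch in the emitted code instead of two of each.
bool InRange(unsigned c, unsigned min, unsigned max) {
  return c - min <= max - min;
}

int main() {
  assert(InRange('\v', '\t', '\r'));  // 0x0b inside 0x09..0x0d
  assert(!InRange(' ', '\t', '\r'));
  assert(!InRange(0, '\t', '\r'));    // underflow wraps, comparison fails
}
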
@@ -562,20 +562,20 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non-digits (anything but ASCII '0'..'9')
- __ lea(rax, Operand(current_character(), -'0'));
+ __ leap(rax, Operand(current_character(), -'0'));
__ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
@@ -593,7 +593,7 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
case 'n': {
// Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
__ movl(rax, current_character());
- __ xor_(rax, Immediate(0x01));
+ __ xorp(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ subl(rax, Immediate(0x0b));
__ cmpl(rax, Immediate(0x0c - 0x0b));
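
Editor's note: both the '.' and 'n' cases use the same XOR trick: '\n' (0x0a) and '\r' (0x0d) are not adjacent code points, but XOR-ing with 0x01 maps them to 0x0b and 0x0c, which are, so a single unsigned range check covers both. A C++ sketch of the ASCII part (the generated code additionally handles 0x2028/0x2029 for two-byte strings):

#include <cassert>

bool IsAsciiNewline(unsigned c) {
  return (c ^ 0x01) - 0x0b <= 0x0c - 0x0b;  // xor, sub, one unsigned cmp
}

int main() {
  assert(IsAsciiNewline('\n') && IsAsciiNewline('\r'));
  assert(!IsAsciiNewline('\v') && !IsAsciiNewline('\f'));
}
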
@@ -674,7 +674,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
FrameScope scope(&masm_, StackFrame::MANUAL);
// Actually emit code to start a new stack frame.
- __ push(rbp);
+ __ pushq(rbp);
__ movp(rbp, rsp);
// Save parameters and callee-save registers. Order here should correspond
// to order of kBackup_ebx etc.
@@ -686,9 +686,9 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movq(Operand(rbp, kInputStart), r8);
__ movq(Operand(rbp, kInputEnd), r9);
// Callee-save on Win64.
- __ push(rsi);
- __ push(rdi);
- __ push(rbx);
+ __ pushq(rsi);
+ __ pushq(rdi);
+ __ pushq(rbx);
#else
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
// Push register parameters on stack for reference.
@@ -698,18 +698,18 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ASSERT_EQ(kInputEnd, -4 * kPointerSize);
ASSERT_EQ(kRegisterOutput, -5 * kPointerSize);
ASSERT_EQ(kNumOutputRegisters, -6 * kPointerSize);
- __ push(rdi);
- __ push(rsi);
- __ push(rdx);
- __ push(rcx);
- __ push(r8);
- __ push(r9);
-
- __ push(rbx); // Callee-save
+ __ pushq(rdi);
+ __ pushq(rsi);
+ __ pushq(rdx);
+ __ pushq(rcx);
+ __ pushq(r8);
+ __ pushq(r9);
+
+ __ pushq(rbx); // Callee-save
#endif
- __ push(Immediate(0)); // Number of successful matches in a global regexp.
- __ push(Immediate(0)); // Make room for "input start - 1" constant.
+ __ Push(Immediate(0)); // Number of successful matches in a global regexp.
+ __ Push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
Label stack_limit_hit;
@@ -719,12 +719,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
ExternalReference::address_of_stack_limit(isolate());
__ movp(rcx, rsp);
__ Move(kScratchRegister, stack_limit);
- __ subq(rcx, Operand(kScratchRegister, 0));
+ __ subp(rcx, Operand(kScratchRegister, 0));
// Handle it if the stack pointer is already below the stack limit.
__ j(below_equal, &stack_limit_hit);
// Check if there is room for the variable number of registers above
// the stack limit.
- __ cmpq(rcx, Immediate(num_registers_ * kPointerSize));
+ __ cmpp(rcx, Immediate(num_registers_ * kPointerSize));
__ j(above_equal, &stack_ok);
// Exit with OutOfMemory exception. There is not enough space on the stack
// for our working registers.
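
Editor's note: the prologue check above computes rsp minus the stack limit into rcx; if the difference is already non-positive the limit has been hit, and otherwise there must be headroom for the num_registers_ slots about to be reserved. The same logic in plain C++ (a simplified sketch; the real slow path calls the stack guard and retries):

#include <cstddef>
#include <cstdint>

// Mirror of the subp/cmpp pair above; kPointerSize assumed to be 8.
bool HaveStackRoom(uintptr_t rsp, uintptr_t stack_limit, size_t num_registers) {
  const size_t kPointerSize = 8;
  if (rsp <= stack_limit) return false;                      // j(below_equal, &stack_limit_hit)
  return rsp - stack_limit >= num_registers * kPointerSize;  // cmpp + j(above_equal, &stack_ok)
}
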
@@ -734,28 +734,28 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
__ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
__ j(not_zero, &return_rax);
__ bind(&stack_ok);
// Allocate space on stack for registers.
- __ subq(rsp, Immediate(num_registers_ * kPointerSize));
+ __ subp(rsp, Immediate(num_registers_ * kPointerSize));
// Load string length.
__ movp(rsi, Operand(rbp, kInputEnd));
// Load input position.
__ movp(rdi, Operand(rbp, kInputStart));
// Set up rdi to be negative offset from string end.
- __ subq(rdi, rsi);
+ __ subp(rdi, rsi);
// Set rax to address of char before start of the string
// (effectively string position -1).
__ movp(rbx, Operand(rbp, kStartIndex));
- __ neg(rbx);
+ __ negq(rbx);
if (mode_ == UC16) {
- __ lea(rax, Operand(rdi, rbx, times_2, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_2, -char_size()));
} else {
- __ lea(rax, Operand(rdi, rbx, times_1, -char_size()));
+ __ leap(rax, Operand(rdi, rbx, times_1, -char_size()));
}
// Store this value in a local variable, for use when clearing
// position registers.
@@ -824,11 +824,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rdx, Operand(rbp, kStartIndex));
__ movp(rbx, Operand(rbp, kRegisterOutput));
__ movp(rcx, Operand(rbp, kInputEnd));
- __ subq(rcx, Operand(rbp, kInputStart));
+ __ subp(rcx, Operand(rbp, kInputStart));
if (mode_ == UC16) {
- __ lea(rcx, Operand(rcx, rdx, times_2, 0));
+ __ leap(rcx, Operand(rcx, rdx, times_2, 0));
} else {
- __ addq(rcx, rdx);
+ __ addp(rcx, rdx);
}
for (int i = 0; i < num_saved_registers_; i++) {
__ movq(rax, register_location(i));
@@ -836,7 +836,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Keep capture start in rdx for the zero-length check later.
__ movp(rdx, rax);
}
- __ addq(rax, rcx); // Convert to index from start, not end.
+ __ addp(rax, rcx); // Convert to index from start, not end.
if (mode_ == UC16) {
__ sar(rax, Immediate(1)); // Convert byte index to character index.
}
@@ -847,18 +847,18 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
// Increment success counter.
- __ incq(Operand(rbp, kSuccessfulCaptures));
+ __ incp(Operand(rbp, kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ movsxlq(rcx, Operand(rbp, kNumOutputRegisters));
- __ subq(rcx, Immediate(num_saved_registers_));
+ __ subp(rcx, Immediate(num_saved_registers_));
// Check whether we have enough room for another set of capture results.
- __ cmpq(rcx, Immediate(num_saved_registers_));
+ __ cmpp(rcx, Immediate(num_saved_registers_));
__ j(less, &exit_label_);
__ movp(Operand(rbp, kNumOutputRegisters), rcx);
// Advance the location for output.
- __ addq(Operand(rbp, kRegisterOutput),
+ __ addp(Operand(rbp, kRegisterOutput),
Immediate(num_saved_registers_ * kIntSize));
// Prepare rax to initialize registers with its value in the next run.
@@ -867,11 +867,11 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
// rdx: capture start index
- __ cmpq(rdi, rdx);
+ __ cmpp(rdi, rdx);
// Not a zero-length match, restart.
__ j(not_equal, &load_char_start_regexp);
// rdi (offset from the end) is zero if we already reached the end.
- __ testq(rdi, rdi);
+ __ testp(rdi, rdi);
__ j(zero, &exit_label_, Label::kNear);
// Advance current position after a zero-length match.
if (mode_ == UC16) {
@@ -896,10 +896,10 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ bind(&return_rax);
#ifdef _WIN64
// Restore callee save registers.
- __ lea(rsp, Operand(rbp, kLastCalleeSaveRegister));
- __ pop(rbx);
- __ pop(rdi);
- __ pop(rsi);
+ __ leap(rsp, Operand(rbp, kLastCalleeSaveRegister));
+ __ popq(rbx);
+ __ popq(rdi);
+ __ popq(rsi);
// Stack now at rbp.
#else
// Restore callee save register.
@@ -908,7 +908,7 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ movp(rsp, rbp);
#endif
// Exit function frame, restore previous one.
- __ pop(rbp);
+ __ popq(rbp);
__ ret(0);
// Backtrack code (branch target for conditional backtracks).
@@ -923,19 +923,19 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
if (check_preempt_label_.is_linked()) {
SafeCallTarget(&check_preempt_label_);
- __ push(backtrack_stackpointer());
- __ push(rdi);
+ __ pushq(backtrack_stackpointer());
+ __ pushq(rdi);
CallCheckStackGuardState();
- __ testq(rax, rax);
+ __ testp(rax, rax);
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &return_rax);
// Restore registers.
__ Move(code_object_pointer(), masm_.CodeObject());
- __ pop(rdi);
- __ pop(backtrack_stackpointer());
+ __ popq(rdi);
+ __ popq(backtrack_stackpointer());
// String might have moved: Reload rsi from frame.
__ movp(rsi, Operand(rbp, kInputEnd));
SafeReturn();
@@ -950,8 +950,8 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
// Save registers before calling C function
#ifndef _WIN64
// Callee-save in Microsoft 64-bit ABI, but not in AMD64 ABI.
- __ push(rsi);
- __ push(rdi);
+ __ pushq(rsi);
+ __ pushq(rdi);
#endif
// Call GrowStack(backtrack_stackpointer())
@@ -960,12 +960,12 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
#ifdef _WIN64
// Microsoft passes parameters in rcx, rdx, r8.
// First argument, backtrack stackpointer, is already in rcx.
- __ lea(rdx, Operand(rbp, kStackHighEnd)); // Second argument
+ __ leap(rdx, Operand(rbp, kStackHighEnd)); // Second argument
__ LoadAddress(r8, ExternalReference::isolate_address(isolate()));
#else
// AMD64 ABI passes parameters in rdi, rsi, rdx.
__ movp(rdi, backtrack_stackpointer()); // First argument.
- __ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
+ __ leap(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
__ LoadAddress(rdx, ExternalReference::isolate_address(isolate()));
#endif
ExternalReference grow_stack =
@@ -973,15 +973,15 @@ Handle<HeapObject> RegExpMacroAssemblerX64::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ testq(rax, rax);
+ __ testp(rax, rax);
__ j(equal, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ movp(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
__ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
- __ pop(rdi);
- __ pop(rsi);
+ __ popq(rdi);
+ __ popq(rsi);
#endif
SafeReturn();
}
@@ -1015,7 +1015,7 @@ void RegExpMacroAssemblerX64::GoTo(Label* to) {
void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(greater_equal, if_ge);
}
@@ -1023,14 +1023,14 @@ void RegExpMacroAssemblerX64::IfRegisterGE(int reg,
void RegExpMacroAssemblerX64::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ cmpq(register_location(reg), Immediate(comparand));
+ __ cmpp(register_location(reg), Immediate(comparand));
BranchOrBacktrack(less, if_lt);
}
void RegExpMacroAssemblerX64::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ cmpq(rdi, register_location(reg));
+ __ cmpp(rdi, register_location(reg));
BranchOrBacktrack(equal, if_eq);
}
@@ -1091,13 +1091,13 @@ void RegExpMacroAssemblerX64::ReadCurrentPositionFromRegister(int reg) {
void RegExpMacroAssemblerX64::ReadStackPointerFromRegister(int reg) {
__ movq(backtrack_stackpointer(), register_location(reg));
- __ addq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
+ __ addp(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
}
void RegExpMacroAssemblerX64::SetCurrentPositionFromEnd(int by) {
Label after_position;
- __ cmpq(rdi, Immediate(-by * char_size()));
+ __ cmpp(rdi, Immediate(-by * char_size()));
__ j(greater_equal, &after_position, Label::kNear);
__ movq(rdi, Immediate(-by * char_size()));
// On RegExp code entry (where this operation is used), the character before
@@ -1125,7 +1125,7 @@ void RegExpMacroAssemblerX64::WriteCurrentPositionToRegister(int reg,
if (cp_offset == 0) {
__ movp(register_location(reg), rdi);
} else {
- __ lea(rax, Operand(rdi, cp_offset * char_size()));
+ __ leap(rax, Operand(rdi, cp_offset * char_size()));
__ movp(register_location(reg), rax);
}
}
@@ -1142,7 +1142,7 @@ void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
void RegExpMacroAssemblerX64::WriteStackPointerToRegister(int reg) {
__ movp(rax, backtrack_stackpointer());
- __ subq(rax, Operand(rbp, kStackHighEnd));
+ __ subp(rax, Operand(rbp, kStackHighEnd));
__ movp(register_location(reg), rax);
}
@@ -1161,7 +1161,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(r8, rbp);
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rcx, Operand(rsp, -kPointerSize));
+ __ leap(rcx, Operand(rsp, -kPointerSize));
#else
// Third argument: RegExp code frame pointer.
__ movp(rdx, rbp);
@@ -1169,7 +1169,7 @@ void RegExpMacroAssemblerX64::CallCheckStackGuardState() {
__ movp(rsi, code_object_pointer());
// First argument: Next address on the stack (will be address of
// return address).
- __ lea(rdi, Operand(rsp, -kPointerSize));
+ __ leap(rdi, Operand(rsp, -kRegisterSize));
#endif
ExternalReference stack_check =
ExternalReference::re_check_stack_guard_state(isolate());
@@ -1323,12 +1323,12 @@ void RegExpMacroAssemblerX64::SafeCall(Label* to) {
void RegExpMacroAssemblerX64::SafeCallTarget(Label* label) {
__ bind(label);
- __ subq(Operand(rsp, 0), code_object_pointer());
+ __ subp(Operand(rsp, 0), code_object_pointer());
}
void RegExpMacroAssemblerX64::SafeReturn() {
- __ addq(Operand(rsp, 0), code_object_pointer());
+ __ addp(Operand(rsp, 0), code_object_pointer());
__ ret(0);
}
@@ -1336,14 +1336,14 @@ void RegExpMacroAssemblerX64::SafeReturn() {
void RegExpMacroAssemblerX64::Push(Register source) {
ASSERT(!source.is(backtrack_stackpointer()));
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), source);
}
void RegExpMacroAssemblerX64::Push(Immediate value) {
// Notice: This updates flags, unlike normal Push.
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), value);
}
@@ -1367,7 +1367,7 @@ void RegExpMacroAssemblerX64::FixupCodeRelativePositions() {
void RegExpMacroAssemblerX64::Push(Label* backtrack_target) {
- __ subq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ subp(backtrack_stackpointer(), Immediate(kIntSize));
__ movl(Operand(backtrack_stackpointer(), 0), backtrack_target);
MarkPositionForCodeRelativeFixup();
}
@@ -1377,12 +1377,12 @@ void RegExpMacroAssemblerX64::Pop(Register target) {
ASSERT(!target.is(backtrack_stackpointer()));
__ movsxlq(target, Operand(backtrack_stackpointer(), 0));
// Notice: This updates flags, unlike normal Pop.
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
void RegExpMacroAssemblerX64::Drop() {
- __ addq(backtrack_stackpointer(), Immediate(kIntSize));
+ __ addp(backtrack_stackpointer(), Immediate(kIntSize));
}
@@ -1392,7 +1392,7 @@ void RegExpMacroAssemblerX64::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(rsp, rax);
+ __ cmpp(rsp, rax);
__ j(above, &no_preempt);
SafeCall(&check_preempt_label_);
@@ -1406,7 +1406,7 @@ void RegExpMacroAssemblerX64::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit(isolate());
__ load_rax(stack_limit);
- __ cmpq(backtrack_stackpointer(), rax);
+ __ cmpp(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
SafeCall(&stack_overflow_label_);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index a43d709b1..13e822da2 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -49,10 +49,12 @@ static void ProbeTable(Isolate* isolate,
// The offset is scaled by 4, based on
// kHeapObjectTagSize, which is two bits
Register offset) {
- // We need to scale up the pointer by 2 because the offset is scaled by less
+ // We need to scale up the pointer by 2 when the offset is scaled by less
// than the pointer size.
- ASSERT(kPointerSizeLog2 == kHeapObjectTagSize + 1);
- ScaleFactor scale_factor = times_2;
+ ASSERT(kPointerSize == kInt64Size
+ ? kPointerSizeLog2 == kHeapObjectTagSize + 1
+ : kPointerSizeLog2 == kHeapObjectTagSize);
+ ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
// The offset register holds the entry offset times four (due to masking
@@ -62,7 +64,7 @@ static void ProbeTable(Isolate* isolate,
Label miss;
// Multiply by 3 because there are 3 fields per entry (name, code, map).
- __ lea(offset, Operand(offset, offset, times_2, 0));
+ __ leap(offset, Operand(offset, offset, times_2, 0));
__ LoadAddress(kScratchRegister, key_offset);
@@ -77,7 +79,7 @@ static void ProbeTable(Isolate* isolate,
// Use key_offset + kPointerSize * 2, rather than loading map_offset.
__ movp(kScratchRegister,
Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
- __ cmpq(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
__ j(not_equal, &miss);
// Get the code entry from the cache.
@@ -87,7 +89,7 @@ static void ProbeTable(Isolate* isolate,
// Check that the flags match what we're looking for.
__ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
__ cmpl(offset, Immediate(flags));
__ j(not_equal, &miss);
@@ -100,7 +102,7 @@ static void ProbeTable(Isolate* isolate,
#endif
// Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(kScratchRegister);
__ bind(&miss);
@@ -193,10 +195,10 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
// Use only the low 32 bits of the map pointer.
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
+ __ xorp(scratch, Immediate(flags));
// We mask out the last two bits because they are not part of the hash and
// they are always 01 for maps. Also in the two 'and' instructions below.
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
// Probe the primary table.
ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
@@ -204,11 +206,11 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
// Primary miss: Compute hash for secondary probe.
__ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
__ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ xorp(scratch, Immediate(flags));
+ __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
__ subl(scratch, name);
__ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
// Probe the secondary table.
ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
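
Editor's note: GenerateProbe above hashes the property name and receiver map into an entry offset twice: a primary hash of (name hash + low map bits) ^ flags, and on a miss a secondary hash derived from the masked primary offset. A simplified C++ sketch; the table sizes are assumptions for illustration, and the real code operates on the low 32 bits of tagged pointers:

#include <cstdint>

constexpr uint32_t kPrimaryTableSize = 2048;   // assumed for illustration
constexpr uint32_t kSecondaryTableSize = 512;  // assumed for illustration
constexpr uint32_t kHeapObjectTagSize = 2;

uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_low32, uint32_t flags) {
  uint32_t scratch = name_hash + map_low32;  // addl
  scratch ^= flags;                          // xorp
  return scratch & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);  // andp
}

uint32_t SecondaryOffset(uint32_t primary, uint32_t name_low32, uint32_t flags) {
  uint32_t scratch = primary - name_low32;   // subl
  scratch += flags;                          // addl
  return scratch & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}
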
@@ -281,54 +283,6 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
}
-// Generate code to check if an object is a string. If the object is
-// a string, the map's instance type is left in the scratch register.
-static void GenerateStringCheck(MacroAssembler* masm,
- Register receiver,
- Register scratch,
- Label* smi,
- Label* non_string_object) {
- // Check that the object isn't a smi.
- __ JumpIfSmi(receiver, smi);
-
- // Check that the object is a string.
- __ movp(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ testl(scratch, Immediate(kNotStringTag));
- __ j(not_zero, non_string_object);
-}
-
-
-void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- Label check_wrapper;
-
- // Check if the object is a string leaving the instance type in the
- // scratch register.
- GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper);
-
- // Load length directly from the string.
- __ movp(rax, FieldOperand(receiver, String::kLengthOffset));
- __ ret(0);
-
- // Check if the object is a JSValue wrapper.
- __ bind(&check_wrapper);
- __ cmpl(scratch1, Immediate(JS_VALUE_TYPE));
- __ j(not_equal, miss);
-
- // Check if the wrapped value is a string and load the length
- // directly if it is.
- __ movp(scratch2, FieldOperand(receiver, JSValue::kValueOffset));
- GenerateStringCheck(masm, scratch2, scratch1, miss, miss);
- __ movp(rax, FieldOperand(scratch2, String::kLengthOffset));
- __ ret(0);
-}
-
-
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register result,
@@ -346,7 +300,7 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
bool inobject,
int index,
Representation representation) {
- ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
+ ASSERT(!representation.IsDouble());
int offset = index * kPointerSize;
if (!inobject) {
// Calculate the offset into the properties array.
@@ -368,13 +322,13 @@ static void PushInterceptorArguments(MacroAssembler* masm,
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
- __ push(name);
+ __ Push(name);
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
__ Move(kScratchRegister, interceptor);
- __ push(kScratchRegister);
- __ push(receiver);
- __ push(holder);
+ __ Push(kScratchRegister);
+ __ Push(receiver);
+ __ Push(holder);
}
@@ -393,24 +347,25 @@ static void CompileCallLoadPropertyWithInterceptor(
// Generate call to api function.
-static void GenerateFastApiCall(MacroAssembler* masm,
- const CallOptimization& optimization,
- Handle<Map> receiver_map,
- Register receiver,
- Register scratch_in,
- int argc,
- Register* values) {
+void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
+ const CallOptimization& optimization,
+ Handle<Map> receiver_map,
+ Register receiver,
+ Register scratch_in,
+ bool is_store,
+ int argc,
+ Register* values) {
ASSERT(optimization.is_simple_api_call());
__ PopReturnAddressTo(scratch_in);
// receiver
- __ push(receiver);
+ __ Push(receiver);
// Write the arguments to stack frame.
for (int i = 0; i < argc; i++) {
Register arg = values[argc-1-i];
ASSERT(!receiver.is(arg));
ASSERT(!scratch_in.is(arg));
- __ push(arg);
+ __ Push(arg);
}
__ PushReturnAddressFrom(scratch_in);
// Stack now matches JSFunction abi.
@@ -465,7 +420,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiFunctionStub stub(true, call_data_undefined, argc);
+ CallApiFunctionStub stub(is_store, call_data_undefined, argc);
__ TailCallStub(&stub);
}
@@ -536,11 +491,11 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
__ Cmp(value_reg, constant);
__ j(not_equal, miss_label);
- } else if (FLAG_track_fields && representation.IsSmi()) {
+ } else if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
Label do_store, heap_number;
__ AllocateHeapNumber(storage_reg, scratch1, slow);
@@ -568,9 +523,9 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ PopReturnAddressTo(scratch1);
- __ push(receiver_reg);
+ __ Push(receiver_reg);
__ Push(transition);
- __ push(value_reg);
+ __ Push(value_reg);
__ PushReturnAddressFrom(scratch1);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
@@ -613,15 +568,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ movp(FieldOperand(receiver_reg, offset), storage_reg);
} else {
__ movp(FieldOperand(receiver_reg, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(
@@ -633,15 +588,15 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array (optimistically).
__ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- if (FLAG_track_double_fields && representation.IsDouble()) {
+ if (representation.IsDouble()) {
__ movp(FieldOperand(scratch1, offset), storage_reg);
} else {
__ movp(FieldOperand(scratch1, offset), value_reg);
}
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
- if (!FLAG_track_double_fields || !representation.IsDouble()) {
+ if (!representation.IsDouble()) {
__ movp(storage_reg, value_reg);
}
__ RecordWriteField(
@@ -680,11 +635,11 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
Representation representation = lookup->representation();
ASSERT(!representation.IsNone());
- if (FLAG_track_fields && representation.IsSmi()) {
+ if (representation.IsSmi()) {
__ JumpIfNotSmi(value_reg, miss_label);
- } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
+ } else if (representation.IsHeapObject()) {
__ JumpIfSmi(value_reg, miss_label);
- } else if (FLAG_track_double_fields && representation.IsDouble()) {
+ } else if (representation.IsDouble()) {
// Load the double storage.
if (index < 0) {
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -723,7 +678,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
int offset = object->map()->instance_size() + (index * kPointerSize);
__ movp(FieldOperand(receiver_reg, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movp(name_reg, value_reg);
@@ -738,7 +693,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
__ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
__ movp(FieldOperand(scratch1, offset), value_reg);
- if (!FLAG_track_fields || !representation.IsSmi()) {
+ if (!representation.IsSmi()) {
// Update the write barrier for the array address.
// Pass the value being stored in the now unused name_reg.
__ movp(name_reg, value_reg);
@@ -773,9 +728,6 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
Label* miss,
PrototypeCheckType check) {
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
- // Make sure that the type feedback oracle harvests the receiver map.
- // TODO(svenpanne) Remove this hack when all ICs are reworked.
- __ Move(scratch1, receiver_map);
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -941,7 +893,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend(
Operand(dictionary, index, times_pointer_size,
kValueOffset - kHeapObjectTag));
__ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2(), scratch3());
+ __ cmpp(scratch2(), scratch3());
__ j(not_equal, &miss);
}
@@ -970,15 +922,6 @@ void LoadStubCompiler::GenerateLoadField(Register reg,
void LoadStubCompiler::GenerateLoadCallback(
- const CallOptimization& call_optimization,
- Handle<Map> receiver_map) {
- GenerateFastApiCall(
- masm(), call_optimization, receiver_map,
- receiver(), scratch1(), 0, NULL);
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
Register reg,
Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
@@ -992,22 +935,22 @@ void LoadStubCompiler::GenerateLoadCallback(
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
- __ push(receiver()); // receiver
+ __ Push(receiver()); // receiver
if (heap()->InNewSpace(callback->data())) {
ASSERT(!scratch2().is(reg));
__ Move(scratch2(), callback);
- __ push(FieldOperand(scratch2(),
+ __ Push(FieldOperand(scratch2(),
ExecutableAccessorInfo::kDataOffset)); // data
} else {
__ Push(Handle<Object>(callback->data(), isolate()));
}
ASSERT(!kScratchRegister.is(reg));
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ push(kScratchRegister); // return value
- __ push(kScratchRegister); // return value default
+ __ Push(kScratchRegister); // return value
+ __ Push(kScratchRegister); // return value default
__ PushAddress(ExternalReference::isolate_address(isolate()));
- __ push(reg); // holder
- __ push(name()); // name
+ __ Push(reg); // holder
+ __ Push(name()); // name
// Save a pointer to where we pushed the arguments pointer. This will be
// passed as the const PropertyAccessorInfo& to the C++ callback.
@@ -1075,10 +1018,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver());
+ __ Push(receiver());
}
- __ push(holder_reg);
- __ push(this->name());
+ __ Push(holder_reg);
+ __ Push(this->name());
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
@@ -1096,10 +1039,10 @@ void LoadStubCompiler::GenerateLoadInterceptor(
__ ret(0);
__ bind(&interceptor_failed);
- __ pop(this->name());
- __ pop(holder_reg);
+ __ Pop(this->name());
+ __ Pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver());
+ __ Pop(receiver());
}
// Leave the internal frame.
@@ -1141,11 +1084,11 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(holder_reg);
+ __ Push(receiver());
+ __ Push(holder_reg);
__ Push(callback); // callback info
__ Push(name);
- __ push(value());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -1158,24 +1101,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
}
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<Name> name,
- const CallOptimization& call_optimization) {
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
- receiver(), holder, name);
-
- Register values[] = { value() };
- GenerateFastApiCall(
- masm(), call_optimization, handle(object->map()),
- receiver(), scratch1(), 1, values);
-
- // Return the generated code.
- return GetCode(kind(), Code::FAST, name);
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
@@ -1183,20 +1108,16 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
void StoreStubCompiler::GenerateStoreViaSetter(
MacroAssembler* masm,
Handle<HeapType> type,
+ Register receiver,
Handle<JSFunction> setter) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
{
FrameScope scope(masm, StackFrame::INTERNAL);
- Register receiver = rdx;
- Register value = rax;
// Save value register, so we can restore it later.
- __ push(value);
+ __ Push(value());
if (!setter.is_null()) {
// Call the JavaScript setter with receiver and value on the stack.
@@ -1205,8 +1126,8 @@ void StoreStubCompiler::GenerateStoreViaSetter(
__ movp(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ push(receiver);
- __ push(value);
+ __ Push(receiver);
+ __ Push(value());
ParameterCount actual(1);
ParameterCount expected(setter);
__ InvokeFunction(setter, expected, actual,
@@ -1218,7 +1139,7 @@ void StoreStubCompiler::GenerateStoreViaSetter(
}
// We have to return the passed value, not the return value of the setter.
- __ pop(rax);
+ __ Pop(rax);
// Restore context register.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1235,9 +1156,9 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
Handle<JSObject> object,
Handle<Name> name) {
__ PopReturnAddressTo(scratch1());
- __ push(receiver());
- __ push(this->name());
- __ push(value());
+ __ Push(receiver());
+ __ Push(this->name());
+ __ Push(value());
__ PushReturnAddressFrom(scratch1());
// Do tail-call to the runtime system.
@@ -1250,6 +1171,20 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
}
+void StoreStubCompiler::GenerateStoreArrayLength() {
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ PopReturnAddressTo(scratch1());
+ __ Push(receiver());
+ __ Push(value());
+ __ PushReturnAddressFrom(scratch1());
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
+ masm()->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+}
+
+
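
Editor's note: GenerateStoreArrayLength shows the tail-call pattern that recurs throughout this file: pop the stub's own return address into a scratch register, push the runtime call's arguments, then push the return address back on top, so the runtime function returns directly to the stub's caller. A hypothetical C++ model of the stack manipulation (types invented for illustration):

#include <cstdint>
#include <initializer_list>
#include <vector>

// Models PopReturnAddressTo / Push(...) / PushReturnAddressFrom.
// Top of stack is slots.back().
struct Stack {
  std::vector<uintptr_t> slots;

  void SpliceArgsUnderReturnAddress(std::initializer_list<uintptr_t> args) {
    uintptr_t ret = slots.back();                 // PopReturnAddressTo(scratch1())
    slots.pop_back();
    for (uintptr_t a : args) slots.push_back(a);  // Push(receiver()), Push(value())
    slots.push_back(ret);                         // PushReturnAddressFrom(scratch1())
  }
};
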
Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
@@ -1314,16 +1249,21 @@ Register* KeyedLoadStubCompiler::registers() {
}
+Register StoreStubCompiler::value() {
+ return rax;
+}
+
+
Register* StoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
Register* KeyedStoreStubCompiler::registers() {
- // receiver, name, value, scratch1, scratch2, scratch3.
- static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 };
+ // receiver, name, scratch1, scratch2, scratch3.
+ static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
return registers;
}
@@ -1351,7 +1291,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
__ movp(receiver,
FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
}
- __ push(receiver);
+ __ Push(receiver);
ParameterCount actual(0);
ParameterCount expected(getter);
__ InvokeFunction(getter, expected, actual,