path: root/deps/v8/src/x64
author    Ryan <ry@tinyclouds.org>  2009-06-08 18:34:06 +0200
committer Ryan <ry@tinyclouds.org>  2009-06-08 18:34:06 +0200
commit    696f02455792b368249bf9b013dde637b5ec31fd (patch)
tree      95b2dbd6c2537df9df52f6627aac36fcf05f6a7a /deps/v8/src/x64
parent    f6a7fe26574defaa807a13248102ebe0f23270af (diff)
download  node-new-696f02455792b368249bf9b013dde637b5ec31fd.tar.gz
Upgrade to v8 1.2.7
Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h           250
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc             1513
-rw-r--r--  deps/v8/src/x64/assembler-x64.h               688
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc                38
-rw-r--r--  deps/v8/src/x64/codegen-x64-inl.h              42
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc                309
-rw-r--r--  deps/v8/src/x64/codegen-x64.h                  25
-rw-r--r--  deps/v8/src/x64/cpu-x64.cc                     39
-rw-r--r--  deps/v8/src/x64/debug-x64.cc                   56
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc                  61
-rw-r--r--  deps/v8/src/x64/frames-x64.h                   17
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                     149
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc         89
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h          14
-rw-r--r--  deps/v8/src/x64/register-allocator-x64-inl.h   69
-rw-r--r--  deps/v8/src/x64/register-allocator-x64.h       45
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h           203
17 files changed, 3225 insertions, 382 deletions
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 0b018490db..18225681ed 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -28,12 +28,141 @@
#ifndef V8_X64_ASSEMBLER_X64_INL_H_
#define V8_X64_ASSEMBLER_X64_INL_H_
-namespace v8 { namespace internal {
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
Condition NegateCondition(Condition cc) {
return static_cast<Condition>(cc ^ 1);
}
+// -----------------------------------------------------------------------------
+
+Immediate::Immediate(Smi* value) {
+ value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+
+
+void Assembler::emitl(uint32_t x) {
+ Memory::uint32_at(pc_) = x;
+ pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
+ Memory::uint64_at(pc_) = x;
+ if (rmode != RelocInfo::NONE) {
+ RecordRelocInfo(rmode, x);
+ }
+ pc_ += sizeof(uint64_t);
+}
+
+
+void Assembler::emitw(uint16_t x) {
+ Memory::uint16_at(pc_) = x;
+ pc_ += sizeof(uint16_t);
+}
+
+
+void Assembler::emit_rex_64(Register reg, Register rm_reg) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+void Assembler::emit_rex_64(Register reg, const Operand& op) {
+ emit(0x48 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+void Assembler::emit_rex_64(Register rm_reg) {
+ ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
+ emit(0x48 | (rm_reg.code() >> 3));
+}
+
+
+void Assembler::emit_rex_64(const Operand& op) {
+ emit(0x48 | op.rex_);
+}
+
+
+void Assembler::emit_rex_32(Register reg, Register rm_reg) {
+ emit(0x40 | (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3);
+}
+
+
+void Assembler::emit_rex_32(Register reg, const Operand& op) {
+ emit(0x40 | (reg.code() & 0x8) >> 1 | op.rex_);
+}
+
+
+void Assembler::emit_rex_32(Register rm_reg) {
+ emit(0x40 | (rm_reg.code() & 0x8) >> 3);
+}
+
+
+void Assembler::emit_rex_32(const Operand& op) {
+ emit(0x40 | op.rex_);
+}
+
+
+void Assembler::emit_optional_rex_32(Register reg, Register rm_reg) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | rm_reg.code() >> 3;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(Register reg, const Operand& op) {
+ byte rex_bits = (reg.code() & 0x8) >> 1 | op.rex_;
+ if (rex_bits != 0) emit(0x40 | rex_bits);
+}
+
+
+void Assembler::emit_optional_rex_32(Register rm_reg) {
+ if ((rm_reg.code() & 0x8) != 0) emit(0x41);
+}
+
+
+void Assembler::emit_optional_rex_32(const Operand& op) {
+ if (op.rex_ != 0) emit(0x40 | op.rex_);
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(pc);
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(pc) = target;
+ CPU::FlushICache(pc, sizeof(intptr_t));
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// The modes possibly affected by apply must be in kApplyMask.
+void RelocInfo::apply(int delta) {
+ if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
+ intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
+ *p -= delta; // relocate entry
+ } else if (rmode_ == JS_RETURN && IsCallInstruction()) {
+ // Special handling of js_return when a break point is set (call
+ // instruction has been inserted).
+ intptr_t* p = reinterpret_cast<intptr_t*>(pc_ + 1);
+ *p -= delta; // relocate entry
+ } else if (IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
+ *p += delta; // relocate entry
+ }
+}
+
Address RelocInfo::target_address() {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
@@ -53,16 +182,125 @@ void RelocInfo::set_target_address(Address target) {
}
-void Assembler::set_target_address_at(byte* location, byte* value) {
- UNIMPLEMENTED();
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return *reinterpret_cast<Object**>(pc_);
}
-byte* Assembler::target_address_at(byte* location) {
- UNIMPLEMENTED();
- return NULL;
+Object** RelocInfo::target_object_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object**>(pc_);
}
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address*>(pc_);
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ *reinterpret_cast<Object**>(pc_) = target;
+}
+
+
+bool RelocInfo::IsCallInstruction() {
+ UNIMPLEMENTED(); // IA32 code below.
+ return *pc_ == 0xE8;
+}
+
+
+Address RelocInfo::call_address() {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ return Assembler::target_address_at(pc_ + 1);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ Assembler::set_target_address_at(pc_ + 1, target);
+}
+
+
+Object* RelocInfo::call_object() {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ return *call_object_address();
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ *call_object_address() = target;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ UNIMPLEMENTED(); // IA32 code below.
+ ASSERT(IsCallInstruction());
+ return reinterpret_cast<Object**>(pc_ + 1);
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp) {
+ len_ = 1;
+ if (base.is(rsp) || base.is(r12)) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(kTimes1, rsp, base);
+ }
+
+ if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+}
+
+void Operand::set_modrm(int mod, Register rm) {
+ ASSERT((mod & -4) == 0);
+ buf_[0] = mod << 6 | (rm.code() & 0x7);
+ // Set REX.B to the high bit of rm.code().
+ rex_ |= (rm.code() >> 3);
+}
+
+
+void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
+ ASSERT(len_ == 1);
+ ASSERT(is_uint2(scale));
+ // Use SIB with no index register only for base rsp or r12.
+ ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
+ buf_[1] = scale << 6 | (index.code() & 0x7) << 3 | (base.code() & 0x7);
+ rex_ |= (index.code() >> 3) << 1 | base.code() >> 3;
+ len_ = 2;
+}
+
+void Operand::set_disp8(int disp) {
+ ASSERT(is_int8(disp));
+ ASSERT(len_ == 1 || len_ == 2);
+ int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int8_t);
+}
+
+void Operand::set_disp32(int disp) {
+ ASSERT(len_ == 1 || len_ == 2);
+ int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
+ *p = disp;
+ len_ += sizeof(int32_t);
+}
+
+
} } // namespace v8::internal
#endif // V8_X64_ASSEMBLER_X64_INL_H_
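
Note (editorial, not part of this commit): the emit_rex_* helpers and Operand::set_modrm/set_sib above pack 4-bit register codes (rax = 0 through r15 = 15) into the REX prefix and the ModR/M byte. A minimal standalone C++ sketch of that bit layout, with hypothetical helper names:

#include <cstdint>
#include <cstdio>

// REX = 0100WRXB: W selects 64-bit operands, R extends the ModR/M reg
// field, X extends the SIB index, B extends the ModR/M rm (or SIB base).
// 0x48 is 0100W000; the high bit of each register code lands in R and B,
// mirroring emit_rex_64(Register, Register) above.
uint8_t rex_64(int reg, int rm_reg) {
  return 0x48 | ((reg & 0x8) >> 1) | (rm_reg >> 3);
}

// ModR/M in the mod = 11 (register-direct) form used by emit_modrm; only
// the low three bits of each register code fit here, hence the REX prefix.
uint8_t modrm_reg(int reg, int rm_reg) {
  return 0xC0 | ((reg & 0x7) << 3) | (rm_reg & 0x7);
}

int main() {
  // movq(r8, rcx) emits emit_rex_64(dst, src); emit(0x8B); emit_modrm(...).
  int dst = 8, src = 1;  // r8, rcx
  printf("%02X 8B %02X\n", rex_64(dst, src), modrm_reg(dst, src));
  // Prints "4C 8B C1", the encoding of mov r8, rcx.
  return 0;
}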
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 6e2c42a128..77bbf52405 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -28,9 +28,1520 @@
#include "v8.h"
#include "macro-assembler.h"
+#include "serialize.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of Register
+
+Register rax = { 0 };
+Register rcx = { 1 };
+Register rdx = { 2 };
+Register rbx = { 3 };
+Register rsp = { 4 };
+Register rbp = { 5 };
+Register rsi = { 6 };
+Register rdi = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register r11 = { 11 };
+Register r12 = { 12 };
+Register r13 = { 13 };
+Register r14 = { 14 };
+Register r15 = { 15 };
Register no_reg = { -1 };
+XMMRegister xmm0 = { 0 };
+XMMRegister xmm1 = { 1 };
+XMMRegister xmm2 = { 2 };
+XMMRegister xmm3 = { 3 };
+XMMRegister xmm4 = { 4 };
+XMMRegister xmm5 = { 5 };
+XMMRegister xmm6 = { 6 };
+XMMRegister xmm7 = { 7 };
+XMMRegister xmm8 = { 8 };
+XMMRegister xmm9 = { 9 };
+XMMRegister xmm10 = { 10 };
+XMMRegister xmm11 = { 11 };
+XMMRegister xmm12 = { 12 };
+XMMRegister xmm13 = { 13 };
+XMMRegister xmm14 = { 14 };
+XMMRegister xmm15 = { 15 };
+
+// Safe default is no features.
+uint64_t CpuFeatures::supported_ = 0;
+uint64_t CpuFeatures::enabled_ = 0;
+
+void CpuFeatures::Probe() {
+ ASSERT(Heap::HasBeenSetup());
+ ASSERT(supported_ == 0);
+ if (Serializer::enabled()) return; // No features if we might serialize.
+
+ Assembler assm(NULL, 0);
+ Label cpuid, done;
+#define __ assm.
+ // Save old rsp, since we are going to modify the stack.
+ __ push(rbp);
+ __ pushfq();
+ __ push(rcx);
+ __ push(rbx);
+ __ movq(rbp, rsp);
+
+ // If we can modify bit 21 of the EFLAGS register, then CPUID is supported.
+ __ pushfq();
+ __ pop(rax);
+ __ movq(rdx, rax);
+ __ xor_(rax, Immediate(0x200000)); // Flip bit 21.
+ __ push(rax);
+ __ popfq();
+ __ pushfq();
+ __ pop(rax);
+ __ xor_(rax, rdx); // Different if CPUID is supported.
+ __ j(not_zero, &cpuid);
+
+ // CPUID not supported. Clear the supported features in rax.
+ __ xor_(rax, rax);
+ __ jmp(&done);
+
+ // Invoke CPUID with 1 in eax to get feature information in
+ // ecx:edx. Temporarily enable CPUID support because we know it's
+ // safe here.
+ __ bind(&cpuid);
+ __ movq(rax, Immediate(1));
+ supported_ = (1 << CPUID);
+ { Scope fscope(CPUID);
+ __ cpuid();
+ }
+ supported_ = 0;
+
+ // Move the result from ecx:edx to rax and make sure to mark the
+ // CPUID feature as supported.
+ __ movl(rax, rdx); // Zero-extended to 64 bits.
+ __ shl(rcx, Immediate(32));
+ __ or_(rax, rcx);
+ __ or_(rax, Immediate(1 << CPUID));
+
+ // Done.
+ __ bind(&done);
+ __ movq(rsp, rbp);
+ __ pop(rbx);
+ __ pop(rcx);
+ __ popfq();
+ __ pop(rbp);
+ __ ret(0);
+#undef __
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code =
+ Heap::CreateCode(desc, NULL, Code::ComputeFlags(Code::STUB), NULL);
+ if (!code->IsCode()) return;
+ LOG(CodeCreateEvent("Builtin", Code::cast(code), "CpuFeatures::Probe"));
+ typedef uint64_t (*F0)();
+ F0 probe = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ supported_ = probe();
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
+byte* Assembler::spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Clear the buffer in debug mode unless it was provided by the
+ // caller in which case we can't be sure it's okay to overwrite
+ // existing code in it; see CodePatcher::CodePatcher(...).
+#ifdef DEBUG
+ if (own_buffer_) {
+ memset(buffer_, 0xCC, buffer_size); // int3
+ }
+#endif
+
+ // setup buffer pointers
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+
+ last_pc_ = NULL;
+ current_statement_position_ = RelocInfo::kNoPosition;
+ current_position_ = RelocInfo::kNoPosition;
+ written_statement_position_ = current_statement_position_;
+ written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+ InitCoverageLog();
+#endif
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // finalize code
+ // (at this point overflow() may be true, but the gap ensures that
+ // we are still not overlapping instructions and relocation info)
+ ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+ desc->origin = this;
+
+ Counters::reloc_info_size.Increment(desc->reloc_size);
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(!L->is_bound()); // Label may only be bound once.
+ last_pc_ = NULL;
+ ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
+ if (L->is_linked()) {
+ int current = L->pos();
+ int next = long_at(current);
+ while (next != current) {
+ // relative address, relative to point after address
+ int imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, imm32);
+ current = next;
+ next = long_at(next);
+ }
+ // Fix up last fixup on linked list.
+ int last_imm32 = pos - (current + sizeof(int32_t));
+ long_at_put(current, last_imm32);
+ }
+ L->bind_to(pos);
+}
+
+
+void Assembler::bind(Label* L) {
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::GrowBuffer() {
+ ASSERT(overflow()); // should not call this otherwise
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else {
+ desc.buffer_size = 2*buffer_size_;
+ }
+ // Some internal data structures overflow for very large buffers,
+ // so we must ensure that kMaximalBufferSize is not too large.
+ if ((desc.buffer_size > kMaximalBufferSize) ||
+ (desc.buffer_size > Heap::OldGenerationSize())) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - (reloc_info_writer.pos());
+
+ // Clear the buffer in debug mode. Use 'int3' instructions to make
+ // sure we run into problems if we ever execute uninitialized code.
+#ifdef DEBUG
+ memset(desc.buffer, 0xCC, desc.buffer_size);
+#endif
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ if (last_pc_ != NULL) {
+ last_pc_ += pc_delta;
+ }
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // relocate runtime entries
+ for (RelocIterator it(desc); !it.done(); it.next()) {
+ RelocInfo::Mode rmode = it.rinfo()->rmode();
+ if (rmode == RelocInfo::RUNTIME_ENTRY) {
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ *p -= pc_delta; // relocate entry
+ } else if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+ int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc());
+ if (*p != 0) { // 0 means uninitialized.
+ *p += pc_delta;
+ }
+ }
+ }
+
+ ASSERT(!overflow());
+}
+
+
+void Assembler::emit_operand(int rm, const Operand& adr) {
+ ASSERT_EQ(rm & 0x07, rm);
+ const unsigned length = adr.len_;
+ ASSERT(length > 0);
+
+ // Emit updated ModR/M byte containing the given register.
+ pc_[0] = (adr.buf_[0] & ~0x38) | (rm << 3);
+
+ // Emit the rest of the encoded operand.
+ for (unsigned i = 1; i < length; i++) pc_[i] = adr.buf_[i];
+ pc_ += length;
+}
+
+
+// Assembler Instruction implementations
+
+void Assembler::arithmetic_op(byte opcode, Register reg, const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(reg, op);
+ emit(opcode);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::arithmetic_op(byte opcode, Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(opcode);
+ emit_modrm(dst, src);
+}
+
+void Assembler::immediate_arithmetic_op(byte subcode,
+ Register dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_modrm(subcode, dst);
+ emit(src.value_);
+ } else if (dst.is(rax)) {
+ emit(0x05 | (subcode << 3));
+ emitl(src.value_);
+ } else {
+ emit(0x81);
+ emit_modrm(subcode, dst);
+ emitl(src.value_);
+ }
+}
+
+void Assembler::immediate_arithmetic_op(byte subcode,
+ const Operand& dst,
+ Immediate src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ if (is_int8(src.value_)) {
+ emit(0x83);
+ emit_operand(Register::toRegister(subcode), dst);
+ emit(src.value_);
+ } else {
+ emit(0x81);
+ emit_operand(Register::toRegister(subcode), dst);
+ emitl(src.value_);
+ }
+}
+
+
+void Assembler::shift(Register dst, Immediate shift_amount, int subcode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
+ if (shift_amount.value_ == 1) {
+ emit_rex_64(dst);
+ emit(0xD1);
+ emit_modrm(subcode, dst);
+ } else {
+ emit_rex_64(dst);
+ emit(0xC1);
+ emit_modrm(subcode, dst);
+ emit(shift_amount.value_);
+ }
+}
+
+
+void Assembler::shift(Register dst, int subcode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xD3);
+ emit_modrm(subcode, dst);
+}
+
+
+void Assembler::bt(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xA3);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::bts(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xAB);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::call(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // 1110 1000 #32-bit disp
+ emit(0xE8);
+ if (L->is_bound()) {
+ int offset = L->pos() - pc_offset() - sizeof(int32_t);
+ ASSERT(offset <= 0);
+ emitl(offset);
+ } else if (L->is_linked()) {
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ ASSERT(L->is_unused());
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::call(Register adr) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: FF /2 r64
+ if (adr.code() > 7) {
+ emit_rex_64(adr);
+ }
+ emit(0xFF);
+ emit_modrm(0x2, adr);
+}
+
+void Assembler::cpuid() {
+ ASSERT(CpuFeatures::IsEnabled(CpuFeatures::CPUID));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x0F);
+ emit(0xA2);
+}
+
+
+void Assembler::call(const Operand& op) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode: FF /2 m64
+ emit_rex_64(op);
+ emit(0xFF);
+ emit_operand(2, op);
+}
+
+
+void Assembler::cqo() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64();
+ emit(0x99);
+}
+
+
+void Assembler::dec(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_modrm(0x1, dst);
+}
+
+
+void Assembler::dec(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_operand(1, dst);
+}
+
+
+void Assembler::enter(Immediate size) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xC8);
+ emitw(size.value_); // 16 bit operand, always.
+ emit(0);
+}
+
+
+void Assembler::hlt() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xF4);
+}
+
+
+void Assembler::idiv(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src);
+ emit(0xF7);
+ emit_modrm(0x7, src);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0xAF);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::imul(Register dst, Register src, Immediate imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ if (is_int8(imm.value_)) {
+ emit(0x6B);
+ emit_modrm(dst, src);
+ emit(imm.value_);
+ } else {
+ emit(0x69);
+ emit_modrm(dst, src);
+ emitl(imm.value_);
+ }
+}
+
+
+void Assembler::inc(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_modrm(0x0, dst);
+}
+
+
+void Assembler::inc(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xFF);
+ emit_operand(0, dst);
+}
+
+
+void Assembler::int3() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xCC);
+}
+
+
+void Assembler::j(Condition cc, Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(0 <= cc && cc < 16);
+ if (L->is_bound()) {
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = L->pos() - pc_offset();
+ ASSERT(offs <= 0);
+ if (is_int8(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ emit(0x70 | cc);
+ emit((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emitl(offs - long_size);
+ }
+ } else if (L->is_linked()) {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit(0x0F);
+ emit(0x80 | cc);
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ ASSERT(L->is_unused());
+ emit(0x0F);
+ emit(0x80 | cc);
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::jmp(Label* L) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (L->is_bound()) {
+ int offs = L->pos() - pc_offset() - 1;
+ ASSERT(offs <= 0);
+ if (is_int8(offs - sizeof(int8_t))) {
+ // 1110 1011 #8-bit disp
+ emit(0xEB);
+ emit((offs - sizeof(int8_t)) & 0xFF);
+ } else {
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emitl(offs - sizeof(int32_t));
+ }
+ } else if (L->is_linked()) {
+ // 1110 1001 #32-bit disp
+ emit(0xE9);
+ emitl(L->pos());
+ L->link_to(pc_offset() - sizeof(int32_t));
+ } else {
+ // 1110 1001 #32-bit disp
+ ASSERT(L->is_unused());
+ emit(0xE9);
+ int32_t current = pc_offset();
+ emitl(current);
+ L->link_to(current);
+ }
+}
+
+
+void Assembler::jmp(Register target) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ // Opcode FF/4 r64
+ if (target.code() > 7) {
+ emit_rex_64(target);
+ }
+ emit(0xFF);
+ emit_modrm(0x4, target);
+}
+
+
+void Assembler::lea(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x8D);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::load_rax(void* value, RelocInfo::Mode mode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x48); // REX.W
+ emit(0xA1);
+ emitq(reinterpret_cast<uintptr_t>(value), mode);
+}
+
+
+void Assembler::load_rax(ExternalReference ref) {
+ load_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::leave() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0xC9);
+}
+
+
+void Assembler::movb(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_32(dst, src);
+ emit(0x8A);
+ emit_operand(dst, src);
+}
+
+void Assembler::movb(Register dst, Immediate imm) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_32(dst);
+ emit(0xC6);
+ emit_modrm(0x0, dst);
+ emit(imm.value_);
+}
+
+void Assembler::movb(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_32(src, dst);
+ emit(0x88);
+ emit_operand(src, dst);
+}
+
+void Assembler::movl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x8B);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::movl(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(src, dst);
+ emit(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::movl(Register dst, Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xC7);
+ emit_modrm(0x0, dst);
+ emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movq(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x8B);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x8B);
+ emit_modrm(dst, src);
+}
+
+
+void Assembler::movq(Register dst, Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xC7);
+ emit_modrm(0x0, dst);
+ emit(value); // Only 32-bit immediates are possible, not 8-bit immediates.
+}
+
+
+void Assembler::movq(const Operand& dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x89);
+ emit_operand(src, dst);
+}
+
+
+void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xB8 | (dst.code() & 0x7));
+ emitq(reinterpret_cast<uintptr_t>(value), rmode);
+}
+
+
+void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xB8 | (dst.code() & 0x7)); // Not a ModR/M byte.
+ emitq(value, rmode);
+}
+
+
+void Assembler::movq(Register dst, ExternalReference ref) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xB8 | (dst.code() & 0x7));
+ emitq(reinterpret_cast<uintptr_t>(ref.address()),
+ RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::mul(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src);
+ emit(0xF7);
+ emit_modrm(0x4, src);
+}
+
+
+void Assembler::neg(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_modrm(0x3, dst);
+}
+
+
+void Assembler::neg(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_operand(3, dst);
+}
+
+
+void Assembler::nop() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x90);
+}
+
+
+void Assembler::not_(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_modrm(0x2, dst);
+}
+
+
+void Assembler::not_(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst);
+ emit(0xF7);
+ emit_operand(2, dst);
+}
+
+
+void Assembler::nop(int n) {
+ // The recommended multi-byte sequences of NOP instructions from the Intel 64
+ // and IA-32 Architectures Software Developer's Manual.
+ //
+ // Length Assembly Byte Sequence
+ // 2 bytes 66 NOP 66 90H
+ // 3 bytes NOP DWORD ptr [EAX] 0F 1F 00H
+ // 4 bytes NOP DWORD ptr [EAX + 00H] 0F 1F 40 00H
+ // 5 bytes NOP DWORD ptr [EAX + EAX*1 + 00H] 0F 1F 44 00 00H
+ // 6 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 00H] 66 0F 1F 44 00 00H
+ // 7 bytes NOP DWORD ptr [EAX + 00000000H] 0F 1F 80 00 00 00 00H
+ // 8 bytes NOP DWORD ptr [EAX + EAX*1 + 00000000H] 0F 1F 84 00 00 00 00 00H
+ // 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
+ // 00000000H] 00H
+
+ ASSERT(1 <= n);
+ ASSERT(n <= 9);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ switch (n) {
+ case 1:
+ emit(0x90);
+ return;
+ case 2:
+ emit(0x66);
+ emit(0x90);
+ return;
+ case 3:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x00);
+ return;
+ case 4:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x40);
+ emit(0x00);
+ return;
+ case 5:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x44);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 6:
+ emit(0x66);
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x44);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 7:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x80);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 8:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x84);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 9:
+ emit(0x66);
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x84);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ }
+}
+
+
+void Assembler::pop(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (dst.code() > 7) {
+ emit_rex_64(dst);
+ }
+ emit(0x58 | (dst.code() & 0x7));
+}
+
+
+void Assembler::pop(const Operand& dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst); // Could be omitted in some cases.
+ emit(0x8F);
+ emit_operand(0, dst);
+}
+
+
+void Assembler::popfq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9D);
+}
+
+
+void Assembler::push(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (src.code() > 7) {
+ emit_rex_64(src);
+ }
+ emit(0x50 | (src.code() & 0x7));
+}
+
+
+void Assembler::push(const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src); // Could be omitted in some cases.
+ emit(0xFF);
+ emit_operand(6, src);
+}
+
+
+void Assembler::push(Immediate value) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (is_int8(value.value_)) {
+ emit(0x6A);
+ emit(value.value_); // Emit low byte of value.
+ } else {
+ emit(0x68);
+ emitl(value.value_);
+ }
+}
+
+
+void Assembler::pushfq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x9C);
+}
+
+
+void Assembler::rcl(Register dst, uint8_t imm8) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint6(imm8)); // illegal shift count
+ if (imm8 == 1) {
+ emit_rex_64(dst);
+ emit(0xD1);
+ emit_modrm(0x2, dst);
+ } else {
+ emit_rex_64(dst);
+ emit(0xC1);
+ emit_modrm(0x2, dst);
+ emit(imm8);
+ }
+}
+
+
+void Assembler::ret(int imm16) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint16(imm16));
+ if (imm16 == 0) {
+ emit(0xC3);
+ } else {
+ emit(0xC2);
+ emit(imm16 & 0xFF);
+ emit((imm16 >> 8) & 0xFF);
+ }
+}
+
+
+void Assembler::shld(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xA5);
+ emit_modrm(src, dst);
+}
+
+
+void Assembler::shrd(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0xAD);
+ emit_modrm(src, dst);
+}
+
+
+void Assembler::xchg(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (src.is(rax) || dst.is(rax)) { // Single-byte encoding
+ Register other = src.is(rax) ? dst : src;
+ emit_rex_64(other);
+ emit(0x90 | (other.code() & 0x7));
+ } else {
+ emit_rex_64(src, dst);
+ emit(0x87);
+ emit_modrm(src, dst);
+ }
+}
+
+
+void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x48); // REX.W
+ emit(0xA3);
+ emitq(reinterpret_cast<uintptr_t>(dst), mode);
+}
+
+
+void Assembler::store_rax(ExternalReference ref) {
+ store_rax(ref.address(), RelocInfo::EXTERNAL_REFERENCE);
+}
+
+
+void Assembler::testb(Register reg, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.is(rax)) {
+ emit(0xA8);
+ emit(mask);
+ } else {
+ if (reg.code() > 3) {
+ // Register is not one of al, bl, cl, dl. Its encoding needs REX.
+ emit_rex_32(reg);
+ }
+ emit(0xF6);
+ emit_modrm(0x0, reg);
+ emit(mask.value_); // Low byte emitted.
+ }
+}
+
+
+void Assembler::testb(const Operand& op, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(rax, op);
+ emit(0xF6);
+ emit_operand(rax, op); // Operation code 0
+ emit(mask.value_); // Low byte emitted.
+}
+
+
+void Assembler::testl(Register reg, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ if (reg.is(rax)) {
+ emit(0xA9);
+ emit(mask);
+ } else {
+ emit_optional_rex_32(rax, reg);
+ emit(0xF7);
+ emit_modrm(0x0, reg);
+ emit(mask);
+ }
+}
+
+
+void Assembler::testl(const Operand& op, Immediate mask) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(rax, op);
+ emit(0xF7);
+ emit_operand(rax, op); // Operation code 0
+ emit(mask);
+}
+
+
+void Assembler::testq(const Operand& op, Register reg) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(reg, op);
+ emit(0x85);
+ emit_operand(reg, op);
+}
+
+
+void Assembler::testq(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(dst, src);
+ emit(0x85);
+ emit_modrm(dst, src);
+}
+
+
+// Relocation information implementations
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ ASSERT(rmode != RelocInfo::NONE);
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !Serializer::enabled() &&
+ !FLAG_debug_code) {
+ return;
+ }
+ RelocInfo rinfo(pc_, rmode, data);
+ reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::RecordJSReturn() {
+ WriteRecordedPositions();
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ ASSERT(pos != RelocInfo::kNoPosition);
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
+ 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+} } // namespace v8::internal
+
+
+// TODO(x64): Implement and move these to their correct cc-files:
+#include "ast.h"
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "cpu.h"
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "frames-inl.h"
+#include "x64/macro-assembler-x64.h"
+#include "x64/regexp-macro-assembler-x64.h"
+#include "ic-inl.h"
+#include "log.h"
+#include "macro-assembler.h"
+#include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
+#include "register-allocator-inl.h"
+#include "register-allocator.h"
+#include "runtime.h"
+#include "scopes.h"
+#include "serialize.h"
+#include "stub-cache.h"
+#include "unicode.h"
+
+namespace v8 {
+namespace internal {
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* a) {
+ UNIMPLEMENTED();
+}
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* a) {
+ UNIMPLEMENTED();
+}
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* a) {
+ UNIMPLEMENTED();
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+}
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+ return false;
+}
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+}
+
+void CallIC::Generate(MacroAssembler* a, int b, ExternalReference const& c) {
+ UNIMPLEMENTED();
+}
+
+void CallIC::GenerateMegamorphic(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+void CallIC::GenerateNormal(MacroAssembler* a, int b) {
+ UNIMPLEMENTED();
+}
+
+Object* CallStubCompiler::CompileCallConstant(Object* a,
+ JSObject* b,
+ JSFunction* c,
+ StubCompiler::CheckType d,
+ Code::Flags flags) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* CallStubCompiler::CompileCallField(Object* a,
+ JSObject* b,
+ int c,
+ String* d,
+ Code::Flags flags) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(unsigned char* a,
+ StackFrame::State* b) {
+ // TODO(X64): UNIMPLEMENTED
+ return NONE;
+}
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+void JumpTarget::DoBind(int a) {
+ UNIMPLEMENTED();
+}
+
+void JumpTarget::DoBranch(Condition a, Hint b) {
+ UNIMPLEMENTED();
+}
+
+void JumpTarget::DoJump() {
+ UNIMPLEMENTED();
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
+ JSObject* b,
+ AccessorInfo* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* a,
+ JSObject* b,
+ Object* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* a,
+ JSObject* b,
+ int c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
+ JSObject* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+StackFrame::Type StackFrame::ComputeType(StackFrame::State* a) {
+ UNIMPLEMENTED();
+ return NONE;
+}
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
+ AccessorInfo* b,
+ String* c) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* a,
+ int b,
+ Map* c,
+ String* d) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags a) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void VirtualFrame::Drop(int a) {
+ UNIMPLEMENTED();
+}
+
+int VirtualFrame::InvalidateFrameSlotAt(int a) {
+ UNIMPLEMENTED();
+ return -1;
+}
+
+void VirtualFrame::MergeTo(VirtualFrame* a) {
+ UNIMPLEMENTED();
+}
+
+Result VirtualFrame::Pop() {
+ UNIMPLEMENTED();
+ return Result(NULL);
+}
+
+Result VirtualFrame::RawCallStub(CodeStub* a) {
+ UNIMPLEMENTED();
+ return Result(NULL);
+}
+
+void VirtualFrame::SyncElementBelowStackPointer(int a) {
+ UNIMPLEMENTED();
+}
+
+void VirtualFrame::SyncElementByPushing(int a) {
+ UNIMPLEMENTED();
+}
+
+void VirtualFrame::SyncRange(int a, int b) {
+ UNIMPLEMENTED();
+}
+
+VirtualFrame::VirtualFrame() : elements_(0) {
+ UNIMPLEMENTED();
+}
+
+byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* a) {
+ UNIMPLEMENTED();
+}
+
+void ExitFrame::Iterate(ObjectVisitor* a) const {
+ UNIMPLEMENTED();
+}
+
+byte* InternalFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+byte* JavaScriptFrame::GetCallerStackPointer() const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
} } // namespace v8::internal
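
Note (editorial, not part of this commit): Assembler::bind_to() and the Label* paths of jmp/j/call above implement backpatching by threading a chain through the code buffer itself: each unresolved 32-bit displacement slot holds the offset of the previous unresolved slot, a self-referencing slot marks the end of the chain, and binding walks the chain, rewriting every slot as a real pc-relative displacement. A simplified standalone sketch of the same scheme (names hypothetical; the bound-label fast path is omitted):

#include <cstdint>
#include <cstring>
#include <vector>

struct MiniLabel {
  int pos = -1;  // offset of the most recent unresolved slot; -1 if unused
};

struct MiniAssembler {
  std::vector<uint8_t> buf;

  // Emit "jmp rel32" against a not-yet-bound label.
  void jmp(MiniLabel* l) {
    buf.push_back(0xE9);
    int slot = static_cast<int>(buf.size());
    // First use links the slot to itself (end-of-chain marker); later uses
    // store the previous chain head, as Assembler::jmp(Label*) does above.
    int32_t link = (l->pos < 0) ? slot : l->pos;
    buf.insert(buf.end(), sizeof(int32_t), 0);
    std::memcpy(&buf[slot], &link, sizeof(link));
    l->pos = slot;  // the label now heads the chain
  }

  // Bind the label at the current end of the buffer and patch every slot.
  void bind(MiniLabel* l) {
    int pos = static_cast<int>(buf.size());
    int current = l->pos;
    while (current >= 0) {
      int32_t next;
      std::memcpy(&next, &buf[current], sizeof(next));
      int32_t disp = pos - (current + 4);  // rel32 counts from the slot end
      std::memcpy(&buf[current], &disp, sizeof(disp));
      if (next == current) break;  // self-link: end of the chain
      current = next;
    }
    l->pos = pos;
  }
};

Because the pending displacements live in the instruction stream itself, bind_to() needs no side table of fixups, which is the point of the scheme.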
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 40fcdd32bc..b4882571e2 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -37,7 +37,21 @@
#ifndef V8_X64_ASSEMBLER_X64_H_
#define V8_X64_ASSEMBLER_X64_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+
+// Utility functions
+
+// Test whether a 64-bit value is in a specific range.
+static inline bool is_uint32(int64_t x) {
+ const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+ return x == (x & kUInt32Mask);
+}
+
+static inline bool is_int32(int64_t x) {
+ const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+ return is_uint32(x - kMinIntValue);
+}
// CPU Registers.
//
@@ -60,10 +74,13 @@ namespace v8 { namespace internal {
// mode. This way we get the compile-time error checking in debug mode
// and best performance in optimized code.
//
-const int kNumRegisters = 16;
struct Register {
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ static Register toRegister(int code) {
+ Register r = {code};
+ return r;
+ }
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
bool is(Register reg) const { return code_ == reg.code_; }
// The byte-register distinction of ia32 has disappeared.
bool is_byte_register() const { return false; }
@@ -98,7 +115,6 @@ extern Register r14;
extern Register r15;
extern Register no_reg;
-
struct XMMRegister {
bool is_valid() const { return 0 <= code_ && code_ < 2; }
int code() const {
@@ -117,6 +133,14 @@ extern XMMRegister xmm4;
extern XMMRegister xmm5;
extern XMMRegister xmm6;
extern XMMRegister xmm7;
+extern XMMRegister xmm8;
+extern XMMRegister xmm9;
+extern XMMRegister xmm10;
+extern XMMRegister xmm11;
+extern XMMRegister xmm12;
+extern XMMRegister xmm13;
+extern XMMRegister xmm14;
+extern XMMRegister xmm15;
enum Condition {
// any value < 0 is considered no_condition
@@ -200,34 +224,11 @@ inline Hint NegateHint(Hint hint) {
class Immediate BASE_EMBEDDED {
public:
- inline explicit Immediate(int64_t x);
- inline explicit Immediate(const char* s);
- inline explicit Immediate(const ExternalReference& ext);
- inline explicit Immediate(Handle<Object> handle);
+ explicit Immediate(int32_t value) : value_(value) {}
inline explicit Immediate(Smi* value);
- static Immediate CodeRelativeOffset(Label* label) {
- return Immediate(label);
- }
-
- bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
- bool is_int8() const {
- return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
- }
- bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
- }
- bool is_int32() const {
- return V8_INT64_C(-2147483648) <= x_
- && x_ < V8_INT64_C(2147483648)
- && rmode_ == RelocInfo::NONE;
- }
-
private:
- inline explicit Immediate(Label* value) { UNIMPLEMENTED(); }
-
- int64_t x_;
- RelocInfo::Mode rmode_;
+ int32_t value_;
friend class Assembler;
};
@@ -237,177 +238,55 @@ class Immediate BASE_EMBEDDED {
// Machine instruction Operands
enum ScaleFactor {
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3
+ kTimes1 = 0,
+ kTimes2 = 1,
+ kTimes4 = 2,
+ kTimes8 = 3,
+ kTimesIntSize = kTimes4,
+ kTimesPointerSize = kTimes8
};
class Operand BASE_EMBEDDED {
public:
- // reg
- INLINE(explicit Operand(Register reg));
-
- // MemoryOperand
- INLINE(explicit Operand()) { UNIMPLEMENTED(); }
-
- // Returns true if this Operand is a wrapper for the specified register.
- bool is_reg(Register reg) const;
-
- // These constructors have been moved to MemOperand, and should
- // be removed from Operand as soon as all their uses use MemOperands instead.
- // [disp/r]
- INLINE(explicit Operand(intptr_t disp, RelocInfo::Mode rmode)) {
- UNIMPLEMENTED();
- }
- // disp only must always be relocated
-
// [base + disp/r]
- explicit Operand(Register base, intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ INLINE(Operand(Register base, int32_t disp));
// [base + index*scale + disp/r]
- explicit Operand(Register base,
- Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp);
// [index*scale + disp/r]
- explicit Operand(Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- static Operand StaticVariable(const ExternalReference& ext) {
- return Operand(reinterpret_cast<intptr_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static Operand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return Operand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- // End of constructors and methods that have been moved to MemOperand.
+ Operand(Register index,
+ ScaleFactor scale,
+ int32_t disp);
private:
byte rex_;
byte buf_[10];
// The number of bytes in buf_.
unsigned int len_;
- // Only valid if len_ > 4.
RelocInfo::Mode rmode_;
- // Set the ModRM byte without an encoded 'reg' register. The
+ // Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
+ // set_modrm can be called before or after set_sib and set_disp*.
inline void set_modrm(int mod, Register rm);
+ // Set the SIB byte if one is needed. Sets the length to 2 rather than 1.
inline void set_sib(ScaleFactor scale, Register index, Register base);
- inline void set_disp8(int8_t disp);
- inline void set_disp32(int32_t disp);
- inline void set_dispr(intptr_t disp, RelocInfo::Mode rmode);
-
- friend class Assembler;
-};
-
-class MemOperand : public Operand {
- public:
- // [disp/r]
- INLINE(explicit MemOperand(intptr_t disp, RelocInfo::Mode rmode)) :
- Operand() {
- UNIMPLEMENTED();
- }
- // disp only must always be relocated
-
- // [base + disp/r]
- explicit MemOperand(Register base, intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- // [base + index*scale + disp/r]
- explicit MemOperand(Register base,
- Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
- // [index*scale + disp/r]
- explicit MemOperand(Register index,
- ScaleFactor scale,
- intptr_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
-
- static MemOperand StaticVariable(const ExternalReference& ext) {
- return MemOperand(reinterpret_cast<intptr_t>(ext.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-
- static MemOperand StaticArray(Register index,
- ScaleFactor scale,
- const ExternalReference& arr) {
- return MemOperand(index, scale, reinterpret_cast<intptr_t>(arr.address()),
- RelocInfo::EXTERNAL_REFERENCE);
- }
-};
-
-// -----------------------------------------------------------------------------
-// A Displacement describes the 32bit immediate field of an instruction which
-// may be used together with a Label in order to refer to a yet unknown code
-// position. Displacements stored in the instruction stream are used to describe
-// the instruction and to chain a list of instructions using the same Label.
-// A Displacement contains 2 different fields:
-//
-// next field: position of next displacement in the chain (0 = end of list)
-// type field: instruction type
-//
-// A next value of null (0) indicates the end of a chain (note that there can
-// be no displacement at position zero, because there is always at least one
-// instruction byte before the displacement).
-//
-// Displacement _data field layout
-//
-// |31.....2|1......0|
-// [ next | type |
-
-class Displacement BASE_EMBEDDED {
- public:
- enum Type {
- UNCONDITIONAL_JUMP,
- CODE_RELATIVE,
- OTHER
- };
-
- int data() const { return data_; }
- Type type() const { return TypeField::decode(data_); }
- void next(Label* L) const {
- int n = NextField::decode(data_);
- n > 0 ? L->link_to(n) : L->Unuse();
- }
- void link_to(Label* L) { init(L, type()); }
-
- explicit Displacement(int data) { data_ = data; }
-
- Displacement(Label* L, Type type) { init(L, type); }
+ // Adds operand displacement fields (offsets added to the memory address).
+ // Needs to be called after set_sib, not before it.
+ inline void set_disp8(int disp);
+ inline void set_disp32(int disp);
- void print() {
- PrintF("%s (%x) ", (type() == UNCONDITIONAL_JUMP ? "jmp" : "[other]"),
- NextField::decode(data_));
- }
-
- private:
- int data_;
-
- class TypeField: public BitField<Type, 0, 2> {};
- class NextField: public BitField<int, 2, 32-2> {};
-
- void init(Label* L, Type type);
+ friend class Assembler;
};
-
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a Scope before use.
// Example:
@@ -428,11 +307,11 @@ class CpuFeatures : public AllStatic {
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(Feature f) {
- return (supported_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (supported_ & (V8_UINT64_C(1) << f)) != 0;
}
// Check whether a feature is currently enabled.
static bool IsEnabled(Feature f) {
- return (enabled_ & (static_cast<uint64_t>(1) << f)) != 0;
+ return (enabled_ & (V8_UINT64_C(1) << f)) != 0;
}
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
@@ -441,7 +320,7 @@ class CpuFeatures : public AllStatic {
explicit Scope(Feature f) {
ASSERT(CpuFeatures::IsSupported(f));
old_enabled_ = CpuFeatures::enabled_;
- CpuFeatures::enabled_ |= (static_cast<uint64_t>(1) << f);
+ CpuFeatures::enabled_ |= (V8_UINT64_C(1) << f);
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
private:
@@ -461,7 +340,8 @@ class Assembler : public Malloced {
private:
// The relocation writer's position is kGap bytes below the end of
// the generated instructions. This leaves enough space for the
- // longest possible ia32 instruction (17 bytes as of 9/26/06) and
+ // longest possible x64 instruction (there is a 15-byte limit on
+ // instruction length, ruling out some otherwise valid instructions) and
// allows for a single, fast space check per instruction.
static const int kGap = 32;
@@ -488,8 +368,9 @@ class Assembler : public Malloced {
void GetCode(CodeDesc* desc);
// Read/Modify the code target in the branch/call instruction at pc.
- inline static Address target_address_at(Address pc);
- inline static void set_target_address_at(Address pc, Address target);
+ // On the x64 architecture, the address is absolute, not relative.
+ static inline Address target_address_at(Address pc);
+ static inline void set_target_address_at(Address pc, Address target);
// Distance between the address of the code target in the call instruction
// and the return address
@@ -499,22 +380,20 @@ class Assembler : public Malloced {
// ---------------------------------------------------------------------------
// Code generation
//
- // - function names correspond one-to-one to ia32 instruction mnemonics
- // - unless specified otherwise, instructions operate on 32bit operands
- // - instructions on 8bit (byte) operands/registers have a trailing '_b'
- // - instructions on 16bit (word) operands/registers have a trailing '_w'
- // - naming conflicts with C++ keywords are resolved via a trailing '_'
-
- // NOTE ON INTERFACE: Currently, the interface is not very consistent
- // in the sense that some operations (e.g. mov()) can be called in more
- // the one way to generate the same instruction: The Register argument
- // can in some cases be replaced with an Operand(Register) argument.
- // This should be cleaned up and made more orthogonal. The questions
- // is: should we always use Operands instead of Registers where an
- // Operand is possible, or should we have a Register (overloaded) form
- // instead? We must be careful to make sure that the selected instruction
- // is obvious from the parameters to avoid hard-to-find code generation
- // bugs.
+ // Function names correspond one-to-one to x64 instruction mnemonics.
+ // Unless specified otherwise, instructions operate on 64-bit operands.
+ //
+ // If we need versions of an assembly instruction that operate on different
+ // width arguments, we add a single-letter suffix specifying the width.
+ // This is done for the following instructions: mov, cmp.
+ // There are no versions of these instructions without the suffix.
+ // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
+ // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
+ // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
+ // - Instructions on 64-bit (quadword) operands/registers use 'q'.
+ //
+ // Some mnemonics, such as "and", are the same as C++ keywords.
+ // Naming conflicts with C++ keywords are resolved by adding a trailing '_'.
// Insert the smallest number of nop instructions
// possible to align the pc offset to a multiple
@@ -522,13 +401,10 @@ class Assembler : public Malloced {
void Align(int m);
// Stack
- void pushad();
- void popad();
+ void pushfq();
+ void popfq();
- void pushfd();
- void popfd();
-
- void push(const Immediate& x);
+ void push(Immediate value);
void push(Register src);
void push(const Operand& src);
void push(Label* label, RelocInfo::Mode relocation_mode);
@@ -536,25 +412,42 @@ class Assembler : public Malloced {
void pop(Register dst);
void pop(const Operand& dst);
- void enter(const Immediate& size);
+ void enter(Immediate size);
void leave();
// Moves
- void mov_b(Register dst, const Operand& src);
- void mov_b(const Operand& dst, int8_t imm8);
- void mov_b(const Operand& dst, Register src);
-
- void mov_w(Register dst, const Operand& src);
- void mov_w(const Operand& dst, Register src);
-
- void mov(Register dst, int32_t imm32);
- void mov(Register dst, const Immediate& x);
- void mov(Register dst, Handle<Object> handle);
- void mov(Register dst, const Operand& src);
- void mov(Register dst, Register src);
- void mov(const Operand& dst, const Immediate& x);
- void mov(const Operand& dst, Handle<Object> handle);
- void mov(const Operand& dst, Register src);
+ void movb(Register dst, const Operand& src);
+ void movb(Register dst, Immediate imm);
+ void movb(const Operand& dst, Register src);
+
+ void movl(Register dst, Register src);
+ void movl(Register dst, const Operand& src);
+ void movl(const Operand& dst, Register src);
+ // Load a 32-bit immediate value, zero-extended to 64 bits.
+ void movl(Register dst, Immediate imm32);
+
+ void movq(Register dst, int32_t imm32);
+ void movq(Register dst, const Operand& src);
+ // Sign extends immediate 32-bit value to 64 bits.
+ void movq(Register dst, Immediate x);
+ void movq(Register dst, Register src);
+
+ // Move 64 bit register value to 64-bit memory location.
+ void movq(const Operand& dst, Register src);
+
+ // New x64 instructions to load a 64-bit immediate into a register.
+ // All 64-bit immediates must have a relocation mode.
+ void movq(Register dst, void* ptr, RelocInfo::Mode rmode);
+ void movq(Register dst, int64_t value, RelocInfo::Mode rmode);
+ void movq(Register dst, const char* s, RelocInfo::Mode rmode);
+ // Moves the address of the external reference into the register.
+ void movq(Register dst, ExternalReference ext);
+ void movq(Register dst, Handle<Object> handle, RelocInfo::Mode rmode);
+
+
+  // New x64 instruction to load rax from an immediate 64-bit address.
+ void load_rax(void* ptr, RelocInfo::Mode rmode);
+ void load_rax(ExternalReference ext);
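+
+  // Usage sketch (illustrative, with some ExternalReference ext):
+  //   movq(rbx, ext);    // rbx = the address of the reference
+  //   load_rax(ext);     // rax = the value stored at that address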
void movsx_b(Register dst, const Operand& src);
@@ -573,84 +466,208 @@ class Assembler : public Malloced {
void xchg(Register dst, Register src);
// Arithmetics
- void adc(Register dst, int32_t imm32);
- void adc(Register dst, const Operand& src);
+ void add(Register dst, Register src) {
+ arithmetic_op(0x03, dst, src);
+ }
- void add(Register dst, const Operand& src);
- void add(const Operand& dst, const Immediate& x);
+ void add(Register dst, const Operand& src) {
+ arithmetic_op(0x03, dst, src);
+ }
- void and_(Register dst, int32_t imm32);
- void and_(Register dst, const Operand& src);
- void and_(const Operand& src, Register dst);
- void and_(const Operand& dst, const Immediate& x);
+
+ void add(const Operand& dst, Register src) {
+ arithmetic_op(0x01, src, dst);
+ }
+
+ void add(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x0, dst, src);
+ }
+
+ void add(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x0, dst, src);
+ }
+
+ void cmp(Register dst, Register src) {
+ arithmetic_op(0x3B, dst, src);
+ }
+
+ void cmp(Register dst, const Operand& src) {
+ arithmetic_op(0x3B, dst, src);
+ }
+
+ void cmp(const Operand& dst, Register src) {
+ arithmetic_op(0x39, src, dst);
+ }
+
+ void cmp(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x7, dst, src);
+ }
+
+ void cmp(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x7, dst, src);
+ }
+
+ void and_(Register dst, Register src) {
+ arithmetic_op(0x23, dst, src);
+ }
+
+ void and_(Register dst, const Operand& src) {
+ arithmetic_op(0x23, dst, src);
+ }
+
+ void and_(const Operand& dst, Register src) {
+ arithmetic_op(0x21, src, dst);
+ }
+
+ void and_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x4, dst, src);
+ }
+
+ void and_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x4, dst, src);
+ }
void cmpb(const Operand& op, int8_t imm8);
void cmpb_al(const Operand& op);
void cmpw_ax(const Operand& op);
void cmpw(const Operand& op, Immediate imm16);
- void cmp(Register reg, int32_t imm32);
- void cmp(Register reg, Handle<Object> handle);
- void cmp(Register reg, const Operand& op);
- void cmp(const Operand& op, const Immediate& imm);
void dec_b(Register dst);
void dec(Register dst);
void dec(const Operand& dst);
- void cdq();
+ // Sign-extends rax into rdx:rax.
+ void cqo();
+ // Divide rdx:rax by src. Quotient in rax, remainder in rdx.
void idiv(Register src);
+ void imul(Register dst, Register src);
void imul(Register dst, const Operand& src);
- void imul(Register dst, Register src, int32_t imm32);
+ // Performs the operation dst = src * imm.
+ void imul(Register dst, Register src, Immediate imm);
void inc(Register dst);
void inc(const Operand& dst);
void lea(Register dst, const Operand& src);
+ // Multiply rax by src, put the result in rdx:rax.
void mul(Register src);
void neg(Register dst);
+ void neg(const Operand& dst);
void not_(Register dst);
+ void not_(const Operand& dst);
+
+ void or_(Register dst, Register src) {
+ arithmetic_op(0x0B, dst, src);
+ }
+
+ void or_(Register dst, const Operand& src) {
+ arithmetic_op(0x0B, dst, src);
+ }
+
+ void or_(const Operand& dst, Register src) {
+ arithmetic_op(0x09, src, dst);
+ }
+
+ void or_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x1, dst, src);
+ }
+
+ void or_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x1, dst, src);
+ }
- void or_(Register dst, int32_t imm32);
- void or_(Register dst, const Operand& src);
- void or_(const Operand& dst, Register src);
- void or_(const Operand& dst, const Immediate& x);
void rcl(Register dst, uint8_t imm8);
- void sar(Register dst, uint8_t imm8);
- void sar(Register dst);
+ // Shifts dst:src left by cl bits, affecting only dst.
+ void shld(Register dst, Register src);
- void sbb(Register dst, const Operand& src);
+ // Shifts src:dst right by cl bits, affecting only dst.
+ void shrd(Register dst, Register src);
- void shld(Register dst, const Operand& src);
+ // Shifts dst right, duplicating sign bit, by shift_amount bits.
+ // Shifting by 1 is handled efficiently.
+ void sar(Register dst, Immediate shift_amount) {
+ shift(dst, shift_amount, 0x7);
+ }
- void shl(Register dst, uint8_t imm8);
- void shl(Register dst);
+ // Shifts dst right, duplicating sign bit, by cl % 64 bits.
+ void sar(Register dst) {
+ shift(dst, 0x7);
+ }
- void shrd(Register dst, const Operand& src);
+ void shl(Register dst, Immediate shift_amount) {
+ shift(dst, shift_amount, 0x4);
+ }
- void shr(Register dst, uint8_t imm8);
- void shr(Register dst);
- void shr_cl(Register dst);
+ void shl(Register dst) {
+ shift(dst, 0x4);
+ }
- void sub(const Operand& dst, const Immediate& x);
- void sub(Register dst, const Operand& src);
- void sub(const Operand& dst, Register src);
+ void shr(Register dst, Immediate shift_amount) {
+ shift(dst, shift_amount, 0x5);
+ }
- void test(Register reg, const Immediate& imm);
- void test(Register reg, const Operand& op);
- void test(const Operand& op, const Immediate& imm);
+ void shr(Register dst) {
+ shift(dst, 0x5);
+ }
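+
+  // (Illustrative: the subcode is the ModR/M reg-field extension of the
+  // x86 shift-group opcodes, so shl(rax, Immediate(3)) should assemble
+  // to REX.W 0xC1 /4 imm8, i.e. the bytes "48 c1 e0 03".)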
+
+ void store_rax(void* dst, RelocInfo::Mode mode);
+ void store_rax(ExternalReference ref);
+
+ void sub(Register dst, Register src) {
+ arithmetic_op(0x2B, dst, src);
+ }
+
+ void sub(Register dst, const Operand& src) {
+ arithmetic_op(0x2B, dst, src);
+ }
+
+ void sub(const Operand& dst, Register src) {
+ arithmetic_op(0x29, src, dst);
+ }
+
+ void sub(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x5, dst, src);
+ }
+
+ void sub(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x5, dst, src);
+ }
+
+ void testb(Register reg, Immediate mask);
+ void testb(const Operand& op, Immediate mask);
+ void testl(Register reg, Immediate mask);
+ void testl(const Operand& op, Immediate mask);
+ void testq(const Operand& op, Register reg);
+ void testq(Register dst, Register src);
+
+ void xor_(Register dst, Register src) {
+ arithmetic_op(0x33, dst, src);
+ }
+
+ void xor_(Register dst, const Operand& src) {
+ arithmetic_op(0x33, dst, src);
+ }
+
+ void xor_(const Operand& dst, Register src) {
+ arithmetic_op(0x31, src, dst);
+ }
+
+ void xor_(Register dst, Immediate src) {
+ immediate_arithmetic_op(0x6, dst, src);
+ }
+
+ void xor_(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op(0x6, dst, src);
+ }
- void xor_(Register dst, int32_t imm32);
- void xor_(Register dst, const Operand& src);
- void xor_(const Operand& src, Register dst);
- void xor_(const Operand& dst, const Immediate& x);
// Bit operations.
void bt(const Operand& dst, Register src);
@@ -660,6 +677,7 @@ class Assembler : public Malloced {
void hlt();
void int3();
void nop();
+ void nop(int n);
void rdtsc();
void ret(int imm16);
@@ -681,21 +699,26 @@ class Assembler : public Malloced {
void bind(Label* L); // binds an unbound label L to the current code position
// Calls
+ // Call near relative 32-bit displacement, relative to next instruction.
void call(Label* L);
- void call(byte* entry, RelocInfo::Mode rmode);
- void call(const Operand& adr);
- void call(Handle<Code> code, RelocInfo::Mode rmode);
+
+ // Call near absolute indirect, address in register
+ void call(Register adr);
+
+ // Call near indirect
+ void call(const Operand& operand);
// Jumps
+ // Jump short or near relative.
void jmp(Label* L); // unconditional jump to L
- void jmp(byte* entry, RelocInfo::Mode rmode);
- void jmp(const Operand& adr);
- void jmp(Handle<Code> code, RelocInfo::Mode rmode);
+
+ // Jump near absolute indirect (r64)
+ void jmp(Register adr);
// Conditional jumps
- void j(Condition cc, Label* L, Hint hint = no_hint);
- void j(Condition cc, byte* entry, RelocInfo::Mode rmode, Hint hint = no_hint);
- void j(Condition cc, Handle<Code> code, Hint hint = no_hint);
+ void j(Condition cc, Label* L);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
+ void j(Condition cc, Handle<Code> code);
// Floating-point operations
void fld(int i);
@@ -788,9 +811,13 @@ class Assembler : public Malloced {
void RecordStatementPosition(int pos);
void WriteRecordedPositions();
- // Writes a single word of data in the code stream.
+ // Writes a doubleword of data in the code stream.
+ // Used for inline tables, e.g., jump-tables.
+ void dd(uint32_t data);
+
+ // Writes a quadword of data in the code stream.
// Used for inline tables, e.g., jump-tables.
- void dd(uint32_t data, RelocInfo::Mode reloc_info);
+ void dd(uint64_t data, RelocInfo::Mode reloc_info);
// Writes the absolute address of a bound label at the given position in
  // the generated code. That position should have the relocation mode
@@ -833,25 +860,119 @@ class Assembler : public Malloced {
// code emission
void GrowBuffer();
- inline void emit(uint32_t x);
+
+ void emit(byte x) { *pc_++ = x; }
+ inline void emitl(uint32_t x);
inline void emit(Handle<Object> handle);
- inline void emit(uint32_t x, RelocInfo::Mode rmode);
- inline void emit(const Immediate& x);
- inline void emit_w(const Immediate& x);
+ inline void emitq(uint64_t x, RelocInfo::Mode rmode);
+ inline void emitw(uint16_t x);
+ void emit(Immediate x) { emitl(x.value_); }
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of both register codes.
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is set.
+ inline void emit_rex_64(Register reg, Register rm_reg);
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of the destination, index, and base register codes.
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is set.
+ inline void emit_rex_64(Register reg, const Operand& op);
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of the register code.
+  // The high bit of the register is used for REX.B.
+ // REX.W is set and REX.R and REX.X are clear.
+ inline void emit_rex_64(Register rm_reg);
+
+ // Emits a REX prefix that encodes a 64-bit operand size and
+ // the top bit of the index and base register codes.
+ // The high bit of op's base register is used for REX.B, and the high
+ // bit of op's index register is used for REX.X.
+ // REX.W is set and REX.R clear.
+ inline void emit_rex_64(const Operand& op);
+
+ // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
+ void emit_rex_64() { emit(0x48); }
+
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is clear.
+ inline void emit_rex_32(Register reg, Register rm_reg);
+
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is cleared.
+ inline void emit_rex_32(Register reg, const Operand& op);
+
+ // High bit of rm_reg goes to REX.B.
+ // REX.W, REX.R and REX.X are clear.
+ inline void emit_rex_32(Register rm_reg);
+
+ // High bit of base goes to REX.B and high bit of index to REX.X.
+ // REX.W and REX.R are clear.
+ inline void emit_rex_32(const Operand& op);
+
+ // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
+ // REX.W is cleared. If no REX bits are set, no byte is emitted.
+ inline void emit_optional_rex_32(Register reg, Register rm_reg);
+
+ // The high bit of reg is used for REX.R, the high bit of op's base
+ // register is used for REX.B, and the high bit of op's index register
+ // is used for REX.X. REX.W is cleared. If no REX bits are set, nothing
+ // is emitted.
+ inline void emit_optional_rex_32(Register reg, const Operand& op);
+
+  // As emit_rex_32(Register), but only emits the REX byte if the
+  // register number has the high bit set.
+  inline void emit_optional_rex_32(Register rm_reg);
+
+  // As emit_rex_32(const Operand&), but only emits the REX byte if one
+  // of the operand's register numbers has the high bit set.
+ inline void emit_optional_rex_32(const Operand& op);
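+
+  // Worked example (illustrative): for a 64-bit move like movq(r9, rbx),
+  // emit_rex_64(r9, rbx) yields 0x48 | (9 & 0x8) >> 1 | (3 >> 3) = 0x4C,
+  // i.e. REX.W and REX.R set, REX.X and REX.B clear.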
+
+
+ // Emit the ModR/M byte, and optionally the SIB byte and
+ // 1- or 4-byte offset for a memory operand. Also encodes
+ // the second operand of the operation, a register or operation
+ // subcode, into the reg field of the ModR/M byte.
+ void emit_operand(Register reg, const Operand& adr) {
+ emit_operand(reg.code() & 0x07, adr);
+ }
- // Emit the code-object-relative offset of the label's position
- inline void emit_code_relative_offset(Label* label);
+ // Emit the ModR/M byte, and optionally the SIB byte and
+ // 1- or 4-byte offset for a memory operand. Also used to encode
+ // a three-bit opcode extension into the ModR/M byte.
+ void emit_operand(int rm, const Operand& adr);
- // instruction generation
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
+ // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
+ void emit_modrm(Register reg, Register rm_reg) {
+ emit(0xC0 | (reg.code() & 0x7) << 3 | (rm_reg.code() & 0x7));
+ }
- // Emit a basic arithmetic instruction (i.e. first byte of the family is 0x81)
- // with a given destination expression and an immediate operand. It attempts
- // to use the shortest encoding possible.
- // sel specifies the /n in the modrm byte (see the Intel PRM).
- void emit_arith(int sel, Operand dst, const Immediate& x);
+ // Emit a ModR/M byte with an operation subcode in the reg field and
+ // a register in the rm_reg field.
+ void emit_modrm(int code, Register rm_reg) {
+ ASSERT((code & ~0x7) == 0);
+ emit(0xC0 | (code & 0x7) << 3 | (rm_reg.code() & 0x7));
+ }
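+
+  // E.g. (sketch): add(rbx, rax) presumably emits REX.W (0x48), opcode
+  // 0x03, and emit_modrm(rbx, rax) == 0xC0 | (3 << 3) | 0 == 0xD8,
+  // giving the three-byte sequence "48 03 d8".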
- void emit_operand(Register reg, const Operand& adr);
+ // Emit the code-object-relative offset of the label's position
+ inline void emit_code_relative_offset(Label* label);
+
+ // Emit machine code for one of the operations ADD, ADC, SUB, SBC,
+ // AND, OR, XOR, or CMP. The encodings of these operations are all
+ // similar, differing just in the opcode or in the reg field of the
+ // ModR/M byte.
+ void arithmetic_op(byte opcode, Register dst, Register src);
+ void arithmetic_op(byte opcode, Register reg, const Operand& op);
+ void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
+ void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
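+  // For example, add(rax, Immediate(8)) calls immediate_arithmetic_op
+  // with subcode 0x0 and cmp(rax, Immediate(8)) with subcode 0x7; the
+  // subcode presumably selects the operation via the reg field of the
+  // ModR/M byte of the 0x81/0x83 immediate-group opcodes.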
+ // Emit machine code for a shift operation.
+ void shift(Register dst, Immediate shift_amount, int subcode);
+ // Shift dst by cl % 64 bits.
+ void shift(Register dst, int subcode);
void emit_farith(int b1, int b2, int i);
@@ -860,11 +981,6 @@ class Assembler : public Malloced {
void bind_to(Label* L, int pos);
void link_to(Label* L, Label* appendix);
- // displacements
- inline Displacement disp_at(Label* L);
- inline void disp_at_put(Label* L, Displacement disp);
- inline void emit_disp(Label* L, Displacement::Type type);
-
// record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -877,6 +993,8 @@ class Assembler : public Malloced {
int buffer_size_;
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
+ // A previously allocated buffer of kMinimalBufferSize bytes, or NULL.
+ static byte* spare_buffer_;
// code generation
byte* pc_; // the program counter; moves forward
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 209aa2d307..3f1cd9faba 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -25,3 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+#include "codegen-inl.h"
+
+namespace v8 {
+namespace internal {
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+ Builtins::CFunctionId id) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+} } // namespace v8::internal
+
+
diff --git a/deps/v8/src/x64/codegen-x64-inl.h b/deps/v8/src/x64/codegen-x64-inl.h
new file mode 100644
index 0000000000..0d5b0e21dc
--- /dev/null
+++ b/deps/v8/src/x64/codegen-x64-inl.h
@@ -0,0 +1,42 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_X64_CODEGEN_X64_INL_H_
+#define V8_X64_CODEGEN_X64_INL_H_
+
+namespace v8 {
+namespace internal {
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { UNIMPLEMENTED(); }
+void DeferredCode::Branch(Condition cc) { UNIMPLEMENTED(); }
+
+} } // namespace v8::internal
+
+#endif // V8_X64_CODEGEN_X64_INL_H_
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 209aa2d307..ca58e09a75 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -25,3 +25,312 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "macro-assembler.h"
+#include "register-allocator-inl.h"
+#include "codegen.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() { UNIMPLEMENTED(); }
+
+void DeferredCode::RestoreRegisters() { UNIMPLEMENTED(); }
+
+
+CodeGenerator::CodeGenerator(int buffer_size,
+ Handle<Script> script,
+ bool is_eval)
+ : is_eval_(is_eval),
+ script_(script),
+ deferred_(8),
+ masm_(new MacroAssembler(NULL, buffer_size)),
+ scope_(NULL),
+ frame_(NULL),
+ allocator_(NULL),
+ state_(NULL),
+ loop_nesting_(0),
+ function_return_is_shadowed_(false),
+ in_spilled_code_(false) {
+}
+
+#define __ masm->
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::GenCode(FunctionLiteral* a) {
+ masm_->int3(); // UNIMPLEMENTED
+}
+
+void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
+ int b,
+ int c,
+ Label* d,
+ Vector<Label*> e,
+ Vector<Label> f) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBlock(Block* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitDeclaration(Declaration* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitIfStatement(IfStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitForInStatement(ForInStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitTryCatch(TryCatch* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitTryFinally(TryFinally* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitConditional(Conditional* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitSlot(Slot* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitLiteral(Literal* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitAssignment(Assignment* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitThrow(Throw* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitProperty(Property* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCall(Call* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallEval(CallEval* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallNew(CallNew* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCountOperation(CountOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* a) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::VisitThisFunction(ThisFunction* a) {
+ UNIMPLEMENTED();
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+ masm->int3(); // TODO(X64): UNIMPLEMENTED.
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ Label invoke, exit;
+
+ // Setup frame.
+ __ push(rbp);
+ __ movq(rbp, rsp);
+
+ // Save callee-saved registers (X64 calling conventions).
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ // Push something that is not an arguments adaptor.
+ __ push(Immediate(ArgumentsAdaptorFrame::NON_SENTINEL));
+ __ push(Immediate(Smi::FromInt(marker))); // @ function offset
+ __ push(r12);
+ __ push(r13);
+ __ push(r14);
+ __ push(r15);
+ __ push(rdi);
+ __ push(rsi);
+ __ push(rbx);
+ // TODO(X64): Push XMM6-XMM15 (low 64 bits) as well, or make them
+ // callee-save in JS code as well.
+
+ // Save copies of the top frame descriptor on the stack.
+ ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
+ __ load_rax(c_entry_fp);
+ __ push(rax);
+
+ // Call a faked try-block that does the invoke.
+ __ call(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ store_rax(pending_exception);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ jmp(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ __ push(rax); // flush TOS
+
+ // Clear any pending exceptions.
+ __ load_rax(ExternalReference::the_hole_value_location());
+ __ store_rax(pending_exception);
+
+ // Fake a receiver (NULL).
+ __ push(Immediate(0)); // receiver
+
+ // Invoke the function by calling through JS entry trampoline
+ // builtin and pop the faked function when we return. We load the address
+ // from an external reference instead of inlining the call target address
+ // directly in the code, because the builtin stubs may not have been
+ // generated yet at the time this code is generated.
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ load_rax(construct_entry);
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ load_rax(entry);
+ }
+ __ call(FieldOperand(rax, Code::kHeaderSize));
+
+ // Unlink this frame from the handler chain.
+ __ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ __ pop(Operand(kScratchRegister, 0));
+ // Pop next_sp.
+ __ add(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+
+ // Restore the top frame descriptor from the stack.
+ __ bind(&exit);
+ __ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
+ __ pop(Operand(kScratchRegister, 0));
+
+ // Restore callee-saved registers (X64 conventions).
+ __ pop(rbx);
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(r15);
+ __ pop(r14);
+ __ pop(r13);
+ __ pop(r12);
+ __ add(rsp, Immediate(2 * kPointerSize)); // remove markers
+
+ // Restore frame pointer and return.
+ __ pop(rbp);
+ __ ret(0);
+}
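+
+// Sketch of the entry frame built by GenerateBody above (deepest first):
+// saved rbp, NON_SENTINEL marker, Smi frame-type marker, r12-r15, rdi,
+// rsi, rbx, saved c_entry_fp. The two markers are what the final
+// "add(rsp, Immediate(2 * kPointerSize))" removes.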
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 4acb0cb7ff..5f5daa422b 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -28,7 +28,8 @@
#ifndef V8_X64_CODEGEN_X64_H_
#define V8_X64_CODEGEN_X64_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// Forward declarations
class DeferredCode;
@@ -332,8 +333,7 @@ class CodeGenerator: public AstVisitor {
// Accessors
Scope* scope() const { return scope_; }
- // Clearing and generating deferred code.
- void ClearDeferred();
+ // Generating deferred code.
void ProcessDeferred();
bool is_eval() { return is_eval_; }
@@ -473,12 +473,19 @@ class CodeGenerator: public AstVisitor {
void CheckStack();
+ struct InlineRuntimeLUT {
+ void (CodeGenerator::*method)(ZoneList<Expression*>*);
+ const char* name;
+ };
+ static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
+ static bool PatchInlineRuntimeEntry(Handle<String> name,
+ const InlineRuntimeLUT& new_entry,
+ InlineRuntimeLUT* old_entry);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc);
- Handle<Code> ComputeCallInitializeInLoop(int argc);
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
// Declare global variables and functions in the given array of
// name/value pairs.
@@ -570,14 +577,14 @@ class CodeGenerator: public AstVisitor {
void CodeForSourcePosition(int pos);
#ifdef DEBUG
- // True if the registers are valid for entry to a block. There should be
- // no frame-external references to eax, ebx, ecx, edx, or edi.
+ // True if the registers are valid for entry to a block. There should
+ // be no frame-external references to (non-reserved) registers.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
- List<DeferredCode*> deferred_;
+ ZoneList<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
@@ -604,6 +611,8 @@ class CodeGenerator: public AstVisitor {
// in a spilled state.
bool in_spilled_code_;
+ static InlineRuntimeLUT kInlineRuntimeLUT[];
+
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
diff --git a/deps/v8/src/x64/cpu-x64.cc b/deps/v8/src/x64/cpu-x64.cc
index 209aa2d307..8df0ab7e97 100644
--- a/deps/v8/src/x64/cpu-x64.cc
+++ b/deps/v8/src/x64/cpu-x64.cc
@@ -25,3 +25,42 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// CPU specific code for x64 independent of OS goes here.
+
+#include "v8.h"
+
+#include "cpu.h"
+#include "macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+ CpuFeatures::Probe();
+}
+
+
+void CPU::FlushICache(void* start, size_t size) {
+  // No need to flush the instruction cache on Intel. On Intel,
+  // instruction cache flushing is only necessary when multiple cores are
+  // running the same code simultaneously. V8 (and JavaScript) is
+  // single-threaded, and when code is patched on an Intel CPU the core
+  // performing the patching updates its own instruction cache
+  // automatically.
+
+  // If flushing of the instruction cache becomes necessary, Windows has
+  // the API function FlushInstructionCache.
+}
+
+
+void CPU::DebugBreak() {
+#ifdef _MSC_VER
+ // To avoid Visual Studio runtime support the following code can be used
+ // instead
+ // __asm { int 3 }
+ __debugbreak();
+#else
+ asm("int $3");
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 209aa2d307..3b101325e6 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -25,3 +25,59 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+
+bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED
+}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 209aa2d307..767b1247a5 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -25,3 +25,64 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+#include "disasm.h"
+
+namespace disasm {
+
+Disassembler::Disassembler(NameConverter const& converter)
+ : converter_(converter) {
+ UNIMPLEMENTED();
+}
+
+
+Disassembler::~Disassembler() {
+ UNIMPLEMENTED();
+}
+
+
+const char* NameConverter::NameOfAddress(unsigned char* addr) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+int Disassembler::ConstantPoolSizeAt(unsigned char* addr) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ unsigned char* instruction) {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+const char* NameConverter::NameOfByteCPURegister(int a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+const char* NameConverter::NameOfXMMRegister(int a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+const char* NameConverter::NameOfConstant(unsigned char* a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+const char* NameConverter::NameInCode(unsigned char* a) const {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+} // namespace disasm
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index f4468f6a2f..3416f51dea 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -28,7 +28,8 @@
#ifndef V8_X64_FRAMES_X64_H_
#define V8_X64_FRAMES_X64_H_
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// TODO(x64): This is a stub, mostly just a copy of the ia32 bit version.
// This will all need to change to be correct for x64.
@@ -40,17 +41,17 @@ typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = -1 * kPointerSize;
- static const int kPPOffset = -1 * kPointerSize;
- static const int kFPOffset = -1 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kPPOffset = 1 * kPointerSize;
+ static const int kFPOffset = 2 * kPointerSize;
- static const int kCodeOffset = -1 * kPointerSize;
+ static const int kCodeOffset = 3 * kPointerSize;
- static const int kStateOffset = -1 * kPointerSize;
- static const int kPCOffset = -1 * kPointerSize;
+ static const int kStateOffset = 4 * kPointerSize;
+ static const int kPCOffset = 5 * kPointerSize;
static const int kAddressDisplacement = -1 * kPointerSize;
- static const int kSize = kPCOffset + kPointerSize;
+ static const int kSize = 6 * kPointerSize;
};
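+
+// (Illustrative) With kPointerSize == 8 this makes a stack handler six
+// words: the next-handler link at [rsp + 0], pp at [rsp + 8], fp at
+// [rsp + 16], code at [rsp + 24], state at [rsp + 32], pc at [rsp + 40];
+// kSize == 48 bytes.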
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 209aa2d307..71a3a9ab85 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -25,3 +25,152 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+ UNIMPLEMENTED();
+}
+
+void KeyedLoadIC::Generate(MacroAssembler* masm,
+ ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* object,
+ JSObject* holder,
+ Object* callback) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* object,
+ JSObject* holder,
+ int index) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+void LoadIC::ClearInlinedVersion(Address address) {
+ UNIMPLEMENTED();
+}
+
+void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ masm->int3(); // UNIMPLEMENTED.
+}
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 209aa2d307..54c299dbfa 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -25,3 +25,92 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+ : Assembler(buffer, size),
+ unresolved_(0),
+ generating_stub_(false),
+ allow_stub_calls_(true),
+ code_object_(Heap::undefined_value()) {
+}
+
+
+void MacroAssembler::TailCallRuntime(ExternalReference const& a, int b) {
+ UNIMPLEMENTED();
+}
+
+
+void MacroAssembler::Set(Register dst, int64_t x) {
+ if (is_int32(x)) {
+ movq(dst, Immediate(x));
+ } else if (is_uint32(x)) {
+ movl(dst, Immediate(x));
+ } else {
+ movq(dst, x, RelocInfo::NONE);
+ }
+}
+
+
+void MacroAssembler::Set(const Operand& dst, int64_t x) {
+ if (is_int32(x)) {
+ movq(kScratchRegister, Immediate(x));
+ } else if (is_uint32(x)) {
+ movl(kScratchRegister, Immediate(x));
+ } else {
+ movq(kScratchRegister, x, RelocInfo::NONE);
+ }
+ movq(dst, kScratchRegister);
+}
+
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+ HandlerType type) {
+ // The pc (return address) is already on TOS.
+ // This code pushes state, code, frame pointer and parameter pointer.
+  // Check that they are expected next on the stack, in that order.
+ ASSERT_EQ(StackHandlerConstants::kStateOffset,
+ StackHandlerConstants::kPCOffset - kPointerSize);
+ ASSERT_EQ(StackHandlerConstants::kCodeOffset,
+ StackHandlerConstants::kStateOffset - kPointerSize);
+ ASSERT_EQ(StackHandlerConstants::kFPOffset,
+ StackHandlerConstants::kCodeOffset - kPointerSize);
+ ASSERT_EQ(StackHandlerConstants::kPPOffset,
+ StackHandlerConstants::kFPOffset - kPointerSize);
+
+ if (try_location == IN_JAVASCRIPT) {
+ if (type == TRY_CATCH_HANDLER) {
+ push(Immediate(StackHandler::TRY_CATCH));
+ } else {
+ push(Immediate(StackHandler::TRY_FINALLY));
+ }
+ push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+ push(rbp);
+ push(rdi);
+ } else {
+ ASSERT(try_location == IN_JS_ENTRY);
+    // The parameter pointer is meaningless here and rbp does not
+    // point to a JS frame. So we save NULL for both pp and rbp. We
+    // expect the code throwing an exception to check rbp before
+    // dereferencing it to restore the context.
+ push(Immediate(StackHandler::ENTRY));
+ push(Immediate(Smi::FromInt(StackHandler::kCodeNotPresent)));
+ push(Immediate(0)); // NULL frame pointer
+ push(Immediate(0)); // NULL parameter pointer
+ }
+ movq(kScratchRegister, ExternalReference(Top::k_handler_address));
+ // Cached TOS.
+ movq(rax, Operand(kScratchRegister, 0));
+ // Link this handler.
+ movq(Operand(kScratchRegister, 0), rsp);
+}
+
+
+} } // namespace v8::internal
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 159d0c4f35..4af372a81a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -30,7 +30,13 @@
#include "assembler.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
+
+// Default scratch register used by MacroAssembler (and other code that needs
+// a spare register). The register isn't callee-saved and isn't used by
+// the function calling convention.
+static const Register kScratchRegister = r10;
// Forward declaration.
class JumpTarget;
@@ -136,8 +142,8 @@ class MacroAssembler: public Assembler {
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Expression support
- void Set(Register dst, const Immediate& x);
- void Set(const Operand& dst, const Immediate& x);
+ void Set(Register dst, int64_t x);
+ void Set(const Operand& dst, int64_t x);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
@@ -155,7 +161,7 @@ class MacroAssembler: public Assembler {
// Push a new try handler and link into try handler chain.
// The return address must be pushed before calling this helper.
- // On exit, eax contains TOS (next_sp).
+ // On exit, rax contains TOS (next_sp).
void PushTryHandler(CodeLocation try_location, HandlerType type);
diff --git a/deps/v8/src/x64/register-allocator-x64-inl.h b/deps/v8/src/x64/register-allocator-x64-inl.h
new file mode 100644
index 0000000000..f369d7d8d9
--- /dev/null
+++ b/deps/v8/src/x64/register-allocator-x64-inl.h
@@ -0,0 +1,69 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ // All registers are reserved for now.
+ return true;
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers.
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ UNIMPLEMENTED();
+ return -1;
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ UNIMPLEMENTED();
+ return no_reg;
+}
+
+
+void RegisterAllocator::Initialize() {
+ UNIMPLEMENTED();
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_REGISTER_ALLOCATOR_X64_INL_H_
diff --git a/deps/v8/src/x64/register-allocator-x64.h b/deps/v8/src/x64/register-allocator-x64.h
new file mode 100644
index 0000000000..bc08112479
--- /dev/null
+++ b/deps/v8/src/x64/register-allocator-x64.h
@@ -0,0 +1,45 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_REGISTER_ALLOCATOR_X64_H_
+#define V8_X64_REGISTER_ALLOCATOR_X64_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ // Register allocation is not yet implemented on x64, but C++
+ // forbids 0-length arrays so we use 1 as the number of registers.
+ static const int kNumRegisters = 1;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_X64_REGISTER_ALLOCATOR_X64_H_
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index f71766d033..d341a1eee4 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -29,8 +29,10 @@
#define V8_X64_VIRTUAL_FRAME_X64_H_
#include "register-allocator.h"
+#include "scopes.h"
-namespace v8 { namespace internal {
+namespace v8 {
+namespace internal {
// -------------------------------------------------------------------------
// Virtual frames
@@ -41,7 +43,7 @@ namespace v8 { namespace internal {
// as random access to the expression stack elements, locals, and
// parameters.
-class VirtualFrame : public Malloced {
+class VirtualFrame : public ZoneObject {
public:
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
@@ -50,42 +52,66 @@ class VirtualFrame : public Malloced {
// generator is being transformed.
class SpilledScope BASE_EMBEDDED {
public:
- explicit SpilledScope(CodeGenerator* cgen);
+ SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->SpillAll();
+ cgen()->set_in_spilled_code(true);
+ }
- ~SpilledScope();
+ ~SpilledScope() {
+ cgen()->set_in_spilled_code(previous_state_);
+ }
private:
- CodeGenerator* cgen_;
bool previous_state_;
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
- explicit VirtualFrame(CodeGenerator* cgen);
+ VirtualFrame();
// Construct a virtual frame as a clone of an existing one.
explicit VirtualFrame(VirtualFrame* original);
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ MacroAssembler* masm() { return cgen()->masm(); }
+
// Create a duplicate of an existing valid frame element.
FrameElement CopyElementAt(int index);
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
// The height of the virtual expression stack.
- int height() const {
- return elements_.length() - expression_base_index();
+ int height() {
+ return element_count() - expression_base_index();
+ }
+
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
}
- int register_index(Register reg) {
- return register_locations_[reg.code()];
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
}
- bool is_used(int reg_code) {
- return register_locations_[reg_code] != kIllegalIndex;
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
}
bool is_used(Register reg) {
- return is_used(reg.code());
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -98,7 +124,12 @@ class VirtualFrame : public Malloced {
// match an external frame effect (examples include a call removing
// its arguments, and exiting a try/catch removing an exception
// handler). No code will be emitted.
- void Forget(int count);
+ void Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_ -= count;
+ ForgetElements(count);
+ }
// Forget count elements from the top of the frame without adjusting
// the stack pointer downward. This is used, for example, before
@@ -109,13 +140,25 @@ class VirtualFrame : public Malloced {
void SpillAll();
// Spill all occurrences of a specific register from the frame.
- void Spill(Register reg);
+ void Spill(Register reg) {
+ if (is_used(reg)) SpillElementAt(register_location(reg));
+ }
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (ie, they all have frame-external references).
Register SpillAnyRegister();
+ // Sync the range of elements in [begin, end] with memory.
+ void SyncRange(int begin, int end);
+
+  // Make this frame mergable to an arbitrary frame of the same height.
+  // Copies and constants are removed from the topmost mergable_elements
+  // elements of the frame. A mergable_elements of
+  // JumpTarget::kAllElements indicates that constants and copies should
+  // be removed from the entire frame.
+ void MakeMergable(int mergable_elements);
+
// Prepare this virtual frame for merging to an expected frame by
// performing some state changes that do not require generating
// code. It is guaranteed that no code will be generated.
@@ -130,13 +173,23 @@ class VirtualFrame : public Malloced {
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
- void DetachFromCodeGenerator();
+ void DetachFromCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
+ }
+ }
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
- void AttachToCodeGenerator();
+ void AttachToCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
+ }
+ }
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
@@ -151,7 +204,7 @@ class VirtualFrame : public Malloced {
void PrepareForReturn();
// Allocate and initialize the frame-allocated locals.
- void AllocateStackSlots(int count);
+ void AllocateStackSlots();
// An element of the expression stack as an assembly operand.
Operand ElementAt(int index) const {
@@ -164,22 +217,22 @@ class VirtualFrame : public Malloced {
// Set a frame element to a constant. The index is frame-top relative.
void SetElementAt(int index, Handle<Object> value) {
- Result temp(value, cgen_);
+ Result temp(value);
SetElementAt(index, &temp);
}
void PushElementAt(int index) {
- PushFrameSlotAt(elements_.length() - index - 1);
+ PushFrameSlotAt(element_count() - index - 1);
}
void StoreToElementAt(int index) {
- StoreToFrameSlotAt(elements_.length() - index - 1);
+ StoreToFrameSlotAt(element_count() - index - 1);
}
// A frame-allocated local as an assembly operand.
- Operand LocalAt(int index) const {
+ Operand LocalAt(int index) {
ASSERT(0 <= index);
- ASSERT(index < local_count_);
+ ASSERT(index < local_count());
return Operand(rbp, kLocal0Offset - index * kPointerSize);
}
@@ -215,10 +268,10 @@ class VirtualFrame : public Malloced {
void RestoreContextRegister();
// A parameter as an assembly operand.
- Operand ParameterAt(int index) const {
+ Operand ParameterAt(int index) {
ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index < parameter_count_);
- return Operand(rbp, (1 + parameter_count_ - index) * kPointerSize);
+ ASSERT(index < parameter_count());
+ return Operand(rbp, (1 + parameter_count() - index) * kPointerSize);
}
// Push a copy of the value of a parameter frame slot on top of the frame.
@@ -240,14 +293,17 @@ class VirtualFrame : public Malloced {
}
// The receiver frame slot.
- Operand Receiver() const { return ParameterAt(-1); }
+ Operand Receiver() { return ParameterAt(-1); }
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- Result CallStub(CodeStub* stub, int arg_count);
+ Result CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ return RawCallStub(stub);
+ }
  // Call stub that takes a single argument passed in rax. The
  // argument is given as a result which does not have to be rax or
@@ -307,7 +363,7 @@ class VirtualFrame : public Malloced {
void Drop() { Drop(1); }
// Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(elements_.length() - 1); }
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
// Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register.
@@ -331,7 +387,15 @@ class VirtualFrame : public Malloced {
// Pushing a result invalidates it (its contents become owned by the
// frame).
- void Push(Result* result);
+ void Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg(), result->static_type());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+ }
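+  // For instance, after
+  //
+  //   Result value = frame->Pop();
+  //   frame->Push(&value);
+  //
+  // the frame owns the register or constant again and value.is_valid()
+  // is false; using the result afterwards would be an error.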
// Nip removes zero or more elements from immediately below the top
// of the frame, leaving the previous top-of-frame value on top of
@@ -346,70 +410,69 @@ class VirtualFrame : public Malloced {
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
- CodeGenerator* cgen_;
- MacroAssembler* masm_;
-
- List<FrameElement> elements_;
-
- // The number of frame-allocated locals and parameters respectively.
- int parameter_count_;
- int local_count_;
+ ZoneList<FrameElement> elements_;
// The index of the element that is at the processor's stack pointer
// (the rsp register).
int stack_pointer_;
- // The index of the element that is at the processor's frame pointer
- // (the ebp register).
- int frame_pointer_;
-
// The index of the register frame element using each register, or
// kIllegalIndex if a register is not on the frame.
- int register_locations_[kNumRegisters];
+ int register_locations_[RegisterAllocator::kNumRegisters];
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count() { return cgen()->scope()->num_parameters(); }
+ int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+ // The index of the element that is at the processor's frame pointer
+ // (the rbp register). The parameters, receiver, and return address
+ // are below the frame pointer.
+ int frame_pointer() { return parameter_count() + 2; }
// The index of the first parameter. The receiver lies below the first
// parameter.
- int param0_index() const { return 1; }
+ int param0_index() { return 1; }
- // The index of the context slot in the frame.
- int context_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 1;
- }
+ // The index of the context slot in the frame. It is immediately
+ // above the frame pointer.
+ int context_index() { return frame_pointer() + 1; }
- // The index of the function slot in the frame. It lies above the context
- // slot.
- int function_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 2;
- }
+ // The index of the function slot in the frame. It is above the frame
+ // pointer and the context slot.
+ int function_index() { return frame_pointer() + 2; }
- // The index of the first local. Between the parameters and the locals
- // lie the return address, the saved frame pointer, the context, and the
- // function.
- int local0_index() const {
- ASSERT(frame_pointer_ != kIllegalIndex);
- return frame_pointer_ + 3;
- }
+ // The index of the first local. Between the frame pointer and the
+ // locals lie the context and the function.
+ int local0_index() { return frame_pointer() + 3; }
// The index of the base of the expression stack.
- int expression_base_index() const { return local0_index() + local_count_; }
+ int expression_base_index() { return local0_index() + local_count(); }
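+  // Taken together, the fixed part of the frame is laid out like this
+  // (indices grow toward the stack top; example with two parameters):
+  //
+  //   index 0        receiver
+  //   index 1 .. 2   parameters        (param0_index() == 1)
+  //   index 3        return address
+  //   index 4        saved rbp         <- frame_pointer()
+  //   index 5        context           <- context_index()
+  //   index 6        function          <- function_index()
+  //   index 7 ..     locals, then the expression stack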
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
- int fp_relative(int index) const {
- return (frame_pointer_ - index) * kPointerSize;
+ int fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
}
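+  // Continuing the two-parameter example, frame_pointer() == 4, so
+  //   fp_relative(0) == (4 - 0) * 8 ==  32   // the receiver, [rbp + 32]
+  //   fp_relative(5) == (4 - 5) * 8 ==  -8   // the context,  [rbp - 8]
+  // which matches the operands produced by ParameterAt() above.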
// Record an occurrence of a register in the virtual frame. This has the
// effect of incrementing the register's external reference count and
// of updating the index of the register's location in the frame.
- void Use(Register reg, int index);
+ void Use(Register reg, int index) {
+ ASSERT(!is_used(reg));
+ set_register_location(reg, index);
+ cgen()->allocator()->Use(reg);
+ }
// Record that a register reference has been dropped from the frame. This
// decrements the register's external reference count and invalidates the
// index of the register's location in the frame.
- void Unuse(Register reg);
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
+ cgen()->allocator()->Unuse(reg);
+ }
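+  // Sketch of the intended pairing inside a typical frame mutation
+  // (details elided; the element constructor arguments vary):
+  //
+  //   Use(rax, element_count());     // the new element will own rax
+  //   elements_.Add(FrameElement::RegisterElement(rax, ...));
+  //   ...
+  //   Unuse(rax);                    // when the element is removed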
// Spill the element at a particular index---write it to memory if
// necessary, free any associated register, and forget its value if
@@ -421,9 +484,6 @@ class VirtualFrame : public Malloced {
// Keep the element type as register or constant, and clear the dirty bit.
void SyncElementAt(int index);
- // Sync the range of elements in [begin, end).
- void SyncRange(int begin, int end);
-
// Sync a single unsynced element that lies beneath or at the stack pointer.
void SyncElementBelowStackPointer(int index);
@@ -485,9 +545,12 @@ class VirtualFrame : public Malloced {
bool Equals(VirtualFrame* other);
+ // Classes that need raw access to the elements_ array.
+ friend class DeferredCode;
friend class JumpTarget;
};
+
} } // namespace v8::internal
#endif // V8_X64_VIRTUAL_FRAME_X64_H_