Diffstat (limited to 'deps/v8/src/x64')
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc         211
-rw-r--r--  deps/v8/src/x64/assembler-x64.h           72
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc           795
-rw-r--r--  deps/v8/src/x64/codegen-x64.h             68
-rw-r--r--  deps/v8/src/x64/debug-x64.cc               6
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc           1402
-rw-r--r--  deps/v8/src/x64/frames-x64.h               7
-rw-r--r--  deps/v8/src/x64/ic-x64.cc                  6
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc   158
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h     18
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc         35
11 files changed, 2209 insertions(+), 569 deletions(-)
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 7da6a8f66..c4ee45483 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -73,45 +73,8 @@ XMMRegister xmm14 = { 14 };
XMMRegister xmm15 = { 15 };
-Operand::Operand(Register base, int32_t disp): rex_(0) {
- len_ = 1;
- if (base.is(rsp) || base.is(r12)) {
- // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
- set_sib(times_1, rsp, base);
- }
-
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- set_modrm(0, base);
- } else if (is_int8(disp)) {
- set_modrm(1, base);
- set_disp8(disp);
- } else {
- set_modrm(2, base);
- set_disp32(disp);
- }
-}
-
-
-Operand::Operand(Register base,
- Register index,
- ScaleFactor scale,
- int32_t disp): rex_(0) {
- ASSERT(!index.is(rsp));
- len_ = 1;
- set_sib(scale, index, base);
- if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
- // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
- // possibly set by set_sib.
- set_modrm(0, rsp);
- } else if (is_int8(disp)) {
- set_modrm(1, rsp);
- set_disp8(disp);
- } else {
- set_modrm(2, rsp);
- set_disp32(disp);
- }
-}
-
+// -----------------------------------------------------------------------------
+// Implementation of CpuFeatures
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
@@ -193,6 +156,71 @@ void CpuFeatures::Probe() {
ASSERT(IsSupported(CMOV));
}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Call instruction takes up 13 bytes and int3 takes up one byte.
+ Address patch_site = pc_;
+ Memory::uint16_at(patch_site) = 0xBA49u; // movq r10, imm64
+ // Write "0x00, call r10" starting at last byte of address. We overwrite
+ // the 0x00 later, and this lets us write a uint32.
+ Memory::uint32_at(patch_site + 9) = 0xD2FF4900u; // 0x00, call r10
+ Memory::Address_at(patch_site + 2) = target;
+
+ // Add the requested number of int3 instructions after the call.
+ for (int i = 0; i < guard_bytes; i++) {
+ *(patch_site + 13 + i) = 0xCC; // int3
+ }
+}
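
The patched sequence is 13 bytes: "movq r10, imm64" (bytes 49 BA plus an 8-byte immediate) followed by "call r10" (here bytes 49 FF D2). A minimal standalone sketch (not V8 code) of the resulting byte layout, assuming little-endian stores and mimicking the same write order into a plain buffer:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main() {
      uint8_t buf[16] = {0};
      uint64_t target = 0x1122334455667788ULL;  // hypothetical call target
      uint16_t movq_r10 = 0xBA49u;              // bytes 49 BA: movq r10, imm64
      uint32_t call_r10 = 0xD2FF4900u;          // bytes 00 49 FF D2: placeholder, then call r10
      std::memcpy(buf + 0, &movq_r10, 2);       // bytes 0..1
      std::memcpy(buf + 9, &call_r10, 4);       // bytes 9..12 (byte 9 is the 0x00 placeholder)
      std::memcpy(buf + 2, &target, 8);         // bytes 2..9: imm64 overwrites the placeholder
      for (int i = 0; i < 13; i++) std::printf("%02x ", buf[i]);  // the 13-byte call sequence
      std::printf("\n");
    }
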
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand
+
+Operand::Operand(Register base, int32_t disp): rex_(0) {
+ len_ = 1;
+ if (base.is(rsp) || base.is(r12)) {
+ // SIB byte is needed to encode (rsp + offset) or (r12 + offset).
+ set_sib(times_1, rsp, base);
+ }
+
+ if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ set_modrm(0, base);
+ } else if (is_int8(disp)) {
+ set_modrm(1, base);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, base);
+ set_disp32(disp);
+ }
+}
+
+
+Operand::Operand(Register base,
+ Register index,
+ ScaleFactor scale,
+ int32_t disp): rex_(0) {
+ ASSERT(!index.is(rsp));
+ len_ = 1;
+ set_sib(scale, index, base);
+ if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
+ // This call to set_modrm doesn't overwrite the REX.B (or REX.X) bits
+ // possibly set by set_sib.
+ set_modrm(0, rsp);
+ } else if (is_int8(disp)) {
+ set_modrm(1, rsp);
+ set_disp8(disp);
+ } else {
+ set_modrm(2, rsp);
+ set_disp32(disp);
+ }
+}
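
The mode field of the ModR/M byte selects between no displacement, an 8-bit displacement, and a 32-bit displacement, which is the choice both constructors above make. A rough standalone sketch of that choice (registers are identified only by their low three code bits here; the rbp/r13 special case is noted but not modelled):

    #include <cstdint>
    #include <cstdio>

    // ModR/M byte for a memory operand: mode 0 = no disp (not usable with rbp/r13 as base),
    // mode 1 = disp8, mode 2 = disp32; reg is the register field, base_low3 the rm field.
    uint8_t modrm(int mode, int reg, int base_low3) {
      return static_cast<uint8_t>((mode << 6) | ((reg & 7) << 3) | (base_low3 & 7));
    }

    int main() {
      const int rbx = 3, rbp = 5;
      int32_t disp = 8;
      int mode = (disp == 0) ? 0 : (disp >= -128 && disp <= 127) ? 1 : 2;
      std::printf("[rbx+8], reg=rax: %02x\n", modrm(mode, 0, rbx));  // expect 43
      std::printf("[rbp+8], reg=rax: %02x\n", modrm(mode, 0, rbp));  // expect 45
    }
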
+
+
// -----------------------------------------------------------------------------
// Implementation of Assembler
@@ -273,6 +301,7 @@ void Assembler::GetCode(CodeDesc* desc) {
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
+ ASSERT(desc->instr_size > 0); // Zero-size code objects upset the system.
desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
desc->origin = this;
@@ -426,6 +455,17 @@ void Assembler::arithmetic_op_32(byte opcode, Register dst, Register src) {
}
+void Assembler::arithmetic_op_32(byte opcode,
+ const Operand& dst,
+ Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(src, dst);
+ emit(opcode);
+ emit_operand(src, dst);
+}
+
+
void Assembler::immediate_arithmetic_op(byte subcode,
Register dst,
Immediate src) {
@@ -470,8 +510,8 @@ void Assembler::immediate_arithmetic_op_32(byte subcode,
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(dst);
- emit(0x83);
if (is_int8(src.value_)) {
+ emit(0x83);
emit_modrm(subcode, dst);
emit(src.value_);
} else if (dst.is(rax)) {
@@ -567,6 +607,23 @@ void Assembler::shift_32(Register dst, int subcode) {
}
+void Assembler::shift_32(Register dst, Immediate shift_amount, int subcode) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(is_uint6(shift_amount.value_)); // illegal shift count
+ if (shift_amount.value_ == 1) {
+ emit_optional_rex_32(dst);
+ emit(0xD1);
+ emit_modrm(subcode, dst);
+ } else {
+ emit_optional_rex_32(dst);
+ emit(0xC1);
+ emit_modrm(subcode, dst);
+ emit(shift_amount.value_);
+ }
+}
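
As a quick illustration of the two encodings shift_32 chooses between (a sketch, not V8 output; subcode 7 is SAR and eax supplies the rm field of a register-form ModR/M byte):

    #include <cstdio>

    int main() {
      const int subcode = 7, rm = 0;  // sar, eax
      unsigned modrm = 0xC0u | (subcode << 3) | rm;
      std::printf("sarl eax, 1 : D1 %02X\n", modrm);     // shift-by-one form, one byte shorter
      std::printf("sarl eax, 4 : C1 %02X 04\n", modrm);  // general form with an imm8 count
    }
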
+
+
void Assembler::bt(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -750,6 +807,15 @@ void Assembler::idiv(Register src) {
}
+void Assembler::imul(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_rex_64(src);
+ emit(0xF7);
+ emit_modrm(0x5, src);
+}
+
+
void Assembler::imul(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1058,6 +1124,19 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
+ // Non-relocatable values might not need a 64-bit representation.
+ if (rmode == RelocInfo::NONE) {
+ // Sadly, there is no zero or sign extending move for 8-bit immediates.
+ if (is_int32(value)) {
+ movq(dst, Immediate(static_cast<int32_t>(value)));
+ return;
+ } else if (is_uint32(value)) {
+ movl(dst, Immediate(static_cast<int32_t>(value)));
+ return;
+ }
+ // Value cannot be represented by 32 bits, so do a full 64 bit immediate
+ // value.
+ }
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
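
The point of the check above is that a value which sign-extends from 32 bits can use the shorter movq-with-imm32 form, while one that zero-extends can use movl, which clears the upper half of the destination. A hypothetical helper pair mirroring is_int32/is_uint32 (illustrative names, not the V8 ones):

    #include <cstdint>
    #include <cstdio>

    bool fits_int32(int64_t v)  { return v == static_cast<int32_t>(v); }
    bool fits_uint32(int64_t v) { return v == static_cast<int64_t>(static_cast<uint32_t>(v)); }

    int main() {
      const int64_t vals[] = {-1, 0x7FFFFFFFLL, 0x80000000LL, 0x100000000LL};
      for (int64_t v : vals)
        std::printf("%lld: movq-imm32=%d movl=%d\n",
                    static_cast<long long>(v), fits_int32(v), fits_uint32(v));
    }
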
@@ -1087,16 +1166,24 @@ void Assembler::movq(const Operand& dst, Immediate value) {
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
- EnsureSpace ensure_space(this);
- last_pc_ = pc_;
- ASSERT(!Heap::InNewSpace(*value));
- emit_rex_64(dst);
- emit(0xB8 | dst.low_bits());
- if (value->IsHeapObject()) {
- emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
+ // If there is no relocation info, emit the value of the handle efficiently
+ // (possibly using less than 8 bytes for the value).
+ if (mode == RelocInfo::NONE) {
+ // There is no possible reason to store a heap pointer without relocation
+ // info, so it must be a smi.
+ ASSERT(value->IsSmi());
+ // Smis never have more than 32 significant bits, but they might
+ // have garbage in the high bits.
+ movq(dst,
+ Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(*value))));
} else {
- ASSERT_EQ(RelocInfo::NONE, mode);
- emitq(reinterpret_cast<uintptr_t>(*value), RelocInfo::NONE);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ ASSERT(value->IsHeapObject());
+ ASSERT(!Heap::InNewSpace(*value));
+ emit_rex_64(dst);
+ emit(0xB8 | dst.low_bits());
+ emitq(reinterpret_cast<uintptr_t>(value.location()), mode);
}
}
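
The smi case works because, with kSmiTag == 0 and kSmiTagSize == 1, a smi is the integer value shifted left by one and therefore always fits in the low 32 bits of the word. A small illustration of that representation (an assumption about the tagging scheme, not code from the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t value = 12345;
      int32_t smi = value << 1;   // tag bit 0 stays clear, so this is a valid smi
      std::printf("smi=0x%08x untagged=%d\n", static_cast<unsigned>(smi), smi >> 1);
    }
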
@@ -1439,7 +1526,7 @@ void Assembler::testb(Register reg, Immediate mask) {
last_pc_ = pc_;
if (reg.is(rax)) {
emit(0xA8);
- emit(mask);
+ emit(mask.value_); // Low byte emitted.
} else {
if (reg.code() > 3) {
// Register is not one of al, bl, cl, dl. Its encoding needs REX.
@@ -1463,6 +1550,15 @@ void Assembler::testb(const Operand& op, Immediate mask) {
}
+void Assembler::testl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x85);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::testl(Register reg, Immediate mask) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1551,6 +1647,7 @@ void Assembler::fldz() {
void Assembler::fld_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(0, adr);
}
@@ -1559,6 +1656,7 @@ void Assembler::fld_s(const Operand& adr) {
void Assembler::fld_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(0, adr);
}
@@ -1567,6 +1665,7 @@ void Assembler::fld_d(const Operand& adr) {
void Assembler::fstp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xD9);
emit_operand(3, adr);
}
@@ -1575,6 +1674,7 @@ void Assembler::fstp_s(const Operand& adr) {
void Assembler::fstp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDD);
emit_operand(3, adr);
}
@@ -1583,6 +1683,7 @@ void Assembler::fstp_d(const Operand& adr) {
void Assembler::fild_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(0, adr);
}
@@ -1591,6 +1692,7 @@ void Assembler::fild_s(const Operand& adr) {
void Assembler::fild_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(5, adr);
}
@@ -1599,6 +1701,7 @@ void Assembler::fild_d(const Operand& adr) {
void Assembler::fistp_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(3, adr);
}
@@ -1608,6 +1711,7 @@ void Assembler::fisttp_s(const Operand& adr) {
ASSERT(CpuFeatures::IsEnabled(CpuFeatures::SSE3));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(1, adr);
}
@@ -1616,6 +1720,7 @@ void Assembler::fisttp_s(const Operand& adr) {
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDB);
emit_operand(2, adr);
}
@@ -1624,6 +1729,7 @@ void Assembler::fist_s(const Operand& adr) {
void Assembler::fistp_d(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDF);
emit_operand(8, adr);
}
@@ -1678,6 +1784,7 @@ void Assembler::fsub(int i) {
void Assembler::fisub_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
+ emit_optional_rex_32(adr);
emit(0xDA);
emit_operand(4, adr);
}
@@ -2001,11 +2108,11 @@ void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- emit(0xC0 | (dst.code() << 3) | src.code());
+ emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
- emit(0xC0 | (dst.code() << 3) | src.code());
+ emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
}
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 0d98e5fe4..e8953329b 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -160,6 +160,17 @@ struct XMMRegister {
return code_;
}
+ // Return the high bit of the register code as a 0 or 1. Used often
+ // when constructing the REX prefix byte.
+ int high_bit() const {
+ return code_ >> 3;
+ }
+ // Return the 3 low bits of the register code. Used when encoding registers
+ // in modR/M, SIB, and opcode bytes.
+ int low_bits() const {
+ return code_ & 0x7;
+ }
+
int code_;
};
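
high_bit() feeds the R/X/B bits of the REX prefix while low_bits() goes into the ModR/M, SIB, or opcode byte. A standalone sketch of how those pieces combine (illustrative only):

    #include <cstdint>
    #include <cstdio>

    // REX = 0100 W R X B; R, X and B extend the reg, index and base/rm fields with the
    // high bit of registers r8..r15 (codes 8..15).
    uint8_t rex(int w, int reg_code, int index_code, int base_code) {
      return static_cast<uint8_t>(0x40 | (w << 3) | ((reg_code >> 3) << 2) |
                                  ((index_code >> 3) << 1) | (base_code >> 3));
    }

    int main() {
      std::printf("%02x\n", rex(1, 10 /* r10 */, 0, 3 /* rbx */));  // 4c: REX.W + REX.R
    }
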
@@ -522,6 +533,10 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x0, dst, src);
}
+ void addl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x0, dst, src);
+ }
+
void addq(Register dst, const Operand& src) {
arithmetic_op(0x03, dst, src);
}
@@ -539,10 +554,6 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x0, dst, src);
}
- void addl(const Operand& dst, Immediate src) {
- immediate_arithmetic_op_32(0x0, dst, src);
- }
-
void cmpb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x7, dst, src);
}
@@ -551,6 +562,26 @@ class Assembler : public Malloced {
immediate_arithmetic_op_8(0x7, dst, src);
}
+ void cmpl(Register dst, Register src) {
+ arithmetic_op_32(0x3B, dst, src);
+ }
+
+ void cmpl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x3B, src, dst);
+ }
+
+ void cmpl(const Operand& dst, Register src) {
+ arithmetic_op_32(0x39, dst, src);
+ }
+
+ void cmpl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+
+ void cmpl(const Operand& dst, Immediate src) {
+ immediate_arithmetic_op_32(0x7, dst, src);
+ }
+
void cmpq(Register dst, Register src) {
arithmetic_op(0x3B, dst, src);
}
@@ -567,10 +598,6 @@ class Assembler : public Malloced {
immediate_arithmetic_op(0x7, dst, src);
}
- void cmpl(Register dst, Immediate src) {
- immediate_arithmetic_op_32(0x7, dst, src);
- }
-
void cmpq(const Operand& dst, Immediate src) {
immediate_arithmetic_op(0x7, dst, src);
}
@@ -605,12 +632,13 @@ class Assembler : public Malloced {
// Divide rdx:rax by src. Quotient in rax, remainder in rdx.
void idiv(Register src);
- void imul(Register dst, Register src);
- void imul(Register dst, const Operand& src);
- // Performs the operation dst = src * imm.
- void imul(Register dst, Register src, Immediate imm);
+ // Signed multiply instructions.
+ void imul(Register src); // rdx:rax = rax * src.
+ void imul(Register dst, Register src); // dst = dst * src.
+ void imul(Register dst, const Operand& src); // dst = dst * src.
+ void imul(Register dst, Register src, Immediate imm); // dst = src * imm.
// Multiply 32 bit registers
- void imull(Register dst, Register src);
+ void imull(Register dst, Register src); // dst = dst * src.
void incq(Register dst);
void incq(const Operand& dst);
@@ -662,11 +690,22 @@ class Assembler : public Malloced {
shift(dst, shift_amount, 0x7);
}
+ // Shifts dst right, duplicating sign bit, by shift_amount bits.
+ // Shifting by 1 is handled efficiently.
+ void sarl(Register dst, Immediate shift_amount) {
+ shift_32(dst, shift_amount, 0x7);
+ }
+
// Shifts dst right, duplicating sign bit, by cl % 64 bits.
void sar(Register dst) {
shift(dst, 0x7);
}
+ // Shifts dst right, duplicating sign bit, by cl % 64 bits.
+ void sarl(Register dst) {
+ shift_32(dst, 0x7);
+ }
+
void shl(Register dst, Immediate shift_amount) {
shift(dst, shift_amount, 0x4);
}
@@ -722,8 +761,13 @@ class Assembler : public Malloced {
immediate_arithmetic_op_32(0x5, dst, src);
}
+ void subl(Register dst, Immediate src) {
+ immediate_arithmetic_op_32(0x5, dst, src);
+ }
+
void testb(Register reg, Immediate mask);
void testb(const Operand& op, Immediate mask);
+ void testl(Register dst, Register src);
void testl(Register reg, Immediate mask);
void testl(const Operand& op, Immediate mask);
void testq(const Operand& op, Register reg);
@@ -1070,6 +1114,7 @@ class Assembler : public Malloced {
// ModR/M byte.
void arithmetic_op(byte opcode, Register dst, Register src);
void arithmetic_op_32(byte opcode, Register dst, Register src);
+ void arithmetic_op_32(byte opcode, const Operand& dst, Register src);
void arithmetic_op(byte opcode, Register reg, const Operand& op);
void immediate_arithmetic_op(byte subcode, Register dst, Immediate src);
void immediate_arithmetic_op(byte subcode, const Operand& dst, Immediate src);
@@ -1089,6 +1134,7 @@ class Assembler : public Malloced {
Immediate src);
// Emit machine code for a shift operation.
void shift(Register dst, Immediate shift_amount, int subcode);
+ void shift_32(Register dst, Immediate shift_amount, int subcode);
// Shift dst by cl % 64 bits.
void shift(Register dst, int subcode);
void shift_32(Register dst, int subcode);
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index fc196ce79..e3e32e694 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// TODO(X64): Remove stdio.h when compiler test is removed.
-#include <stdio.h>
-
#include "v8.h"
#include "bootstrapper.h"
@@ -38,9 +35,6 @@
#include "register-allocator-inl.h"
#include "scopes.h"
-// TODO(X64): Remove compiler.h when compiler test is removed.
-#include "compiler.h"
-
namespace v8 {
namespace internal {
@@ -138,81 +132,6 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void CodeGenerator::TestCodeGenerator() {
- // Compile a function from a string, and run it.
-
- // Set flags appropriately for this stage of implementation.
- // TODO(X64): Make ic work, and stop disabling them.
- // These settings stick - remove them when we don't want them anymore.
-#ifdef DEBUG
- FLAG_print_builtin_source = true;
- FLAG_print_builtin_ast = true;
-#endif
- FLAG_use_ic = false;
-
- // Read the file "test.js" from the current directory, compile, and run it.
- // If the file is not there, use a simple script embedded here instead.
- Handle<String> test_script;
- FILE* file = fopen("test.js", "rb");
- if (file == NULL) {
- test_script = Factory::NewStringFromAscii(CStrVector(
- "// Put all code in anonymous function to avoid global scope.\n"
- "(function(){"
- " var x = true ? 47 : 32;"
- " return x;"
- "})()"));
- } else {
- fseek(file, 0, SEEK_END);
- int size = ftell(file);
- rewind(file);
-
- char* chars = new char[size + 1];
- chars[size] = '\0';
- for (int i = 0; i < size;) {
- int read = fread(&chars[i], 1, size - i, file);
- i += read;
- }
- fclose(file);
- test_script = Factory::NewStringFromAscii(CStrVector(chars));
- delete[] chars;
- }
-
- Handle<JSFunction> test_function = Compiler::Compile(
- test_script,
- Factory::NewStringFromAscii(CStrVector("CodeGeneratorTestScript")),
- 0,
- 0,
- NULL,
- NULL);
-
- Code* code_object = test_function->code(); // Local for debugging ease.
- USE(code_object);
-
- // Create a dummy function and context.
- Handle<JSFunction> bridge =
- Factory::NewFunction(Factory::empty_symbol(), Factory::undefined_value());
- Handle<Context> context =
- Factory::NewFunctionContext(Context::MIN_CONTEXT_SLOTS, bridge);
-
- test_function = Factory::NewFunctionFromBoilerplate(
- test_function,
- context);
-
- bool pending_exceptions;
- Handle<Object> result =
- Execution::Call(test_function,
- Handle<Object>::cast(test_function),
- 0,
- NULL,
- &pending_exceptions);
- // Function compiles and runs, but returns a JSFunction object.
-#ifdef DEBUG
- PrintF("Result of test function: ");
- result->Print();
-#endif
-}
-
-
void CodeGenerator::GenCode(FunctionLiteral* function) {
// Record the position for debugging purposes.
CodeForFunctionPosition(function);
@@ -235,8 +154,7 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- // fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- false) {
+ function->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
frame_->SpillAll();
__ int3();
}
@@ -437,26 +355,22 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// receiver.
frame_->Exit();
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
+ // Add padding that will be overwritten by a debugger breakpoint.
+ // frame_->Exit() generates "movq rsp, rbp; pop rbp" length 5.
+ // "ret k" has length 2.
+ const int kPadding = Debug::kX64JSReturnSequenceLength - 5 - 2;
+ for (int i = 0; i < kPadding; ++i) {
+ masm_->int3();
+ }
DeleteFrame();
- // TODO(x64): introduce kX64JSReturnSequenceLength and enable assert.
-
// Check that the size of the code used for returning matches what is
// expected by the debugger.
- // ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
- // masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
+ ASSERT_EQ(Debug::kX64JSReturnSequenceLength,
+ masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
}
-void CodeGenerator::GenerateFastCaseSwitchJumpTable(SwitchStatement* a,
- int b,
- int c,
- Label* d,
- Vector<Label*> e,
- Vector<Label> f) {
- UNIMPLEMENTED();
-}
-
#ifdef DEBUG
bool CodeGenerator::HasValidEntryRegisters() {
return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
@@ -1358,7 +1272,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 3
frame_->EmitPush(rdx); // <- slot 2
- __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
+ __ movsxlq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
__ shl(rax, Immediate(kSmiTagSize));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -1370,7 +1284,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
- __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
+ __ movsxlq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
__ shl(rax, Immediate(kSmiTagSize));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -1383,15 +1297,14 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
__ movq(rax, frame_->ElementAt(0)); // load the current count
- __ cmpq(rax, frame_->ElementAt(1)); // compare to the array length
+ __ cmpl(rax, frame_->ElementAt(1)); // compare to the array length
node->break_target()->Branch(above_equal);
// Get the i'th entry of the array.
__ movq(rdx, frame_->ElementAt(2));
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
// Multiplier is times_4 since rax is already a Smi.
- __ movq(rbx, Operand(rdx, rax, times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ __ movq(rbx, FieldOperand(rdx, rax, times_4, FixedArray::kHeaderSize));
// Get the expected map from the stack or a zero map in the
// permanent slow case rax: current iteration count rbx: i'th entry
@@ -1895,7 +1808,7 @@ void CodeGenerator::VisitConditional(Conditional* node) {
void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
+ LoadFromSlotCheckForArguments(node, typeof_state());
}
@@ -2227,12 +2140,12 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Result elements = frame_->Pop();
elements.ToRegister();
frame_->Spill(elements.reg());
- // Get the elements array.
+ // Get the elements FixedArray.
__ movq(elements.reg(),
FieldOperand(elements.reg(), JSObject::kElementsOffset));
// Write to the indexed properties array.
- int offset = i * kPointerSize + Array::kHeaderSize;
+ int offset = i * kPointerSize + FixedArray::kHeaderSize;
__ movq(FieldOperand(elements.reg(), offset), prop_value.reg());
// Update the write barrier for the array address.
@@ -2300,7 +2213,7 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// or the right hand side is a different variable. TakeValue invalidates
// the target, with an implicit promise that it will be written to again
// before it is read.
- // TODO(X64): Implement TakeValue optimization.
+ // TODO(X64): Implement TakeValue optimization. Check issue 150016.
if (false) {
// if (literal != NULL || (right_var != NULL && right_var != var)) {
// target.TakeValue(NOT_INSIDE_TYPEOF);
@@ -2410,9 +2323,6 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->SetElementAt(0, &result);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
- // TODO(X64): Enable calls of non-global functions.
- UNIMPLEMENTED();
- /*
// ----------------------------------
// JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
// ----------------------------------
@@ -2420,8 +2330,8 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the function from the context. Sync the frame so we can
// push the arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
- frame_->EmitPush(esi);
- frame_->EmitPush(Immediate(var->name()));
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(var->name());
frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
// The runtime call returns a pair of values in rax and rdx. The
// looked-up function is in rax and the receiver is in rdx. These
@@ -2437,7 +2347,6 @@ void CodeGenerator::VisitCall(Call* node) {
// Call the function.
CallWithArguments(args, node->position());
- */
} else if (property != NULL) {
// Check if the key is a literal string.
Literal* literal = property->key()->AsLiteral();
@@ -2545,13 +2454,13 @@ void CodeGenerator::VisitCallEval(CallEval* node) {
// receiver. Use a scratch register to avoid destroying the result.
Result scratch = allocator_->Allocate();
ASSERT(scratch.is_valid());
- __ movl(scratch.reg(),
+ __ movq(scratch.reg(),
FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
frame_->SetElementAt(arg_count + 1, &scratch);
// We can reuse the result register now.
frame_->Spill(result.reg());
- __ movl(result.reg(),
+ __ movq(result.reg(),
FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
frame_->SetElementAt(arg_count, &result);
@@ -2820,12 +2729,6 @@ class DeferredPrefixCountOperation: public DeferredCode {
void DeferredPrefixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ subq(dst_, Immediate(Smi::FromInt(1)));
- } else {
- __ addq(dst_, Immediate(Smi::FromInt(1)));
- }
__ push(dst_);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ push(rax);
@@ -2861,12 +2764,6 @@ class DeferredPostfixCountOperation: public DeferredCode {
void DeferredPostfixCountOperation::Generate() {
- // Undo the optimistic smi operation.
- if (is_increment_) {
- __ subq(dst_, Immediate(Smi::FromInt(1)));
- } else {
- __ addq(dst_, Immediate(Smi::FromInt(1)));
- }
__ push(dst_);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
@@ -2923,19 +2820,6 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
// Ensure the new value is writable.
frame_->Spill(new_value.reg());
- // In order to combine the overflow and the smi tag check, we need
- // to be able to allocate a byte register. We attempt to do so
- // without spilling. If we fail, we will generate separate overflow
- // and smi tag checks.
- //
- // We allocate and clear the temporary register before
- // performing the count operation since clearing the register using
- // xor will clear the overflow flag.
- Result tmp = allocator_->AllocateWithoutSpilling();
-
- // Clear scratch register to prepare it for setcc after the operation below.
- __ xor_(kScratchRegister, kScratchRegister);
-
DeferredCode* deferred = NULL;
if (is_postfix) {
deferred = new DeferredPostfixCountOperation(new_value.reg(),
@@ -2946,25 +2830,26 @@ void CodeGenerator::VisitCountOperation(CountOperation* node) {
is_increment);
}
+ Result tmp = allocator_->AllocateWithoutSpilling();
+ ASSERT(kSmiTagMask == 1 && kSmiTag == 0);
+ __ movl(tmp.reg(), Immediate(kSmiTagMask));
+ // Smi test.
+ __ movq(kScratchRegister, new_value.reg());
if (is_increment) {
- __ addq(new_value.reg(), Immediate(Smi::FromInt(1)));
+ __ addl(kScratchRegister, Immediate(Smi::FromInt(1)));
} else {
- __ subq(new_value.reg(), Immediate(Smi::FromInt(1)));
+ __ subl(kScratchRegister, Immediate(Smi::FromInt(1)));
}
-
- // If the count operation didn't overflow and the result is a valid
- // smi, we're done. Otherwise, we jump to the deferred slow-case
- // code.
-
- // We combine the overflow and the smi tag check.
- __ setcc(overflow, kScratchRegister);
- __ or_(kScratchRegister, new_value.reg());
- __ testl(kScratchRegister, Immediate(kSmiTagMask));
+ // deferred->Branch(overflow);
+ __ cmovl(overflow, kScratchRegister, tmp.reg());
+ __ testl(kScratchRegister, tmp.reg());
tmp.Unuse();
deferred->Branch(not_zero);
+ __ movq(new_value.reg(), kScratchRegister);
deferred->BindExit();
+
// Postfix: store the old value in the allocated slot under the
// reference.
if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
@@ -3227,13 +3112,12 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
// It can be an undetectable object.
__ movq(kScratchRegister,
FieldOperand(answer.reg(), HeapObject::kMapOffset));
- __ movb(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kBitFieldOffset));
- __ testb(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
destination()->false_target()->Branch(not_zero);
- __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
destination()->false_target()->Branch(below);
- __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
answer.Unuse();
destination()->Split(below_equal);
} else {
@@ -3330,6 +3214,29 @@ void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+
+ // Get the frame pointer for the calling frame.
+ Result fp = allocator()->Allocate();
+ __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+ // Skip the arguments adaptor frame if it exists.
+ Label check_frame_marker;
+ __ cmpq(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
+ Immediate(ArgumentsAdaptorFrame::SENTINEL));
+ __ j(not_equal, &check_frame_marker);
+ __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
+
+ // Check the marker in the calling frame.
+ __ bind(&check_frame_marker);
+ __ cmpq(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
+ Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
+ fp.Unuse();
+ destination()->Split(equal);
+}
+
+
void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// ArgumentsAccessStub takes the parameter count as an input argument
@@ -3412,13 +3319,107 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ frame_->SpillAll();
-void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* a) {
- UNIMPLEMENTED();
+ // Make sure the frame is aligned like the OS expects.
+ static const int kFrameAlignment = OS::ActivationFrameAlignment();
+ if (kFrameAlignment > 0) {
+ ASSERT(IsPowerOf2(kFrameAlignment));
+ __ movq(rbx, rsp); // Save in AMD-64 abi callee-saved register.
+ __ and_(rsp, Immediate(-kFrameAlignment));
+ }
+
+ // Call V8::RandomPositiveSmi().
+ __ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
+
+ // Restore stack pointer from callee-saved register rbx.
+ if (kFrameAlignment > 0) {
+ __ movq(rsp, rbx);
+ }
+
+ Result result = allocator_->Allocate(rax);
+ frame_->Push(&result);
}
+
void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- UNIMPLEMENTED();
+ // TODO(X64): Use inline floating point in the fast case.
+ ASSERT(args->length() == 1);
+
+ // Load number.
+ Load(args->at(0));
+ Result answer;
+ switch (op) {
+ case SIN:
+ answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
+ break;
+ case COS:
+ answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
+ break;
+ }
+ frame_->Push(&answer);
+}
+
+
+void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+ JumpTarget leave, null, function, non_function_constructor;
+ Load(args->at(0)); // Load the object.
+ Result obj = frame_->Pop();
+ obj.ToRegister();
+ frame_->Spill(obj.reg());
+
+ // If the object is a smi, we return null.
+ __ testl(obj.reg(), Immediate(kSmiTagMask));
+ null.Branch(zero);
+
+ // Check that the object is a JS object but take special care of JS
+ // functions to make sure they have 'Function' as their class.
+
+ __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
+ null.Branch(less);
+
+ // As long as JS_FUNCTION_TYPE is the last instance type and it is
+ // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
+ // LAST_JS_OBJECT_TYPE.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
+ function.Branch(equal);
+
+ // Check if the constructor in the map is a function.
+ __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
+ __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
+ non_function_constructor.Branch(not_equal);
+
+ // The obj register now contains the constructor function. Grab the
+ // instance class name from there.
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
+ __ movq(obj.reg(),
+ FieldOperand(obj.reg(),
+ SharedFunctionInfo::kInstanceClassNameOffset));
+ frame_->Push(&obj);
+ leave.Jump();
+
+ // Functions have class 'Function'.
+ function.Bind();
+ frame_->Push(Factory::function_class_symbol());
+ leave.Jump();
+
+ // Objects with a non-function constructor have class 'Object'.
+ non_function_constructor.Bind();
+ frame_->Push(Factory::Object_symbol());
+ leave.Jump();
+
+ // Non-JS objects have class null.
+ null.Bind();
+ frame_->Push(Factory::null_value());
+
+ // All done.
+ leave.Bind();
}
@@ -3795,8 +3796,28 @@ Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
Result tmp,
JumpTarget* slow) {
- UNIMPLEMENTED();
- return Operand(rsp, 0);
+ ASSERT(slot->type() == Slot::CONTEXT);
+ ASSERT(tmp.is_register());
+ Register context = rsi;
+
+ for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ }
+ // Check that last extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp.reg(), slot->index());
}
@@ -3906,6 +3927,44 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ LoadFromSlot(slot, state);
+
+ // Bail out quickly if we're not using lazy arguments allocation.
+ if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
+
+ // ... or if the slot isn't a non-parameter arguments slot.
+ if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
+
+ // Pop the loaded value from the stack.
+ Result value = frame_->Pop();
+
+ // If the loaded value is a constant, we know if the arguments
+ // object has been lazily loaded yet.
+ if (value.is_constant()) {
+ if (value.handle()->IsTheHole()) {
+ Result arguments = StoreArgumentsObject(false);
+ frame_->Push(&arguments);
+ } else {
+ frame_->Push(&value);
+ }
+ return;
+ }
+
+ // The loaded value is in a register. If it is the sentinel that
+ // indicates that we haven't loaded the arguments object yet, we
+ // need to do it now.
+ JumpTarget exit;
+ __ Cmp(value.reg(), Factory::the_hole_value());
+ frame_->Push(&value);
+ exit.Branch(not_equal);
+ Result arguments = StoreArgumentsObject(false);
+ frame_->SetElementAt(0, &arguments);
+ exit.Bind();
+}
+
+
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// TODO(X64): Enable more types of slot.
@@ -4009,8 +4068,72 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
Slot* slot,
TypeofState typeof_state,
JumpTarget* slow) {
- UNIMPLEMENTED();
- return Result(rax);
+ // Check that no extension objects have been created by calls to
+ // eval from the current scope to the global scope.
+ Register context = rsi;
+ Result tmp = allocator_->Allocate();
+ ASSERT(tmp.is_valid()); // All non-reserved registers were available.
+
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
+ Immediate(0));
+ slow->Branch(not_equal, not_taken);
+ }
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ context = tmp.reg();
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions. If we have reached an eval scope, we check
+ // all extensions from this point.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ // Loop up the context chain. There is no frame effect so it is
+ // safe to use raw labels here.
+ Label next, fast;
+ if (!context.is(tmp.reg())) {
+ __ movq(tmp.reg(), context);
+ }
+ // Load map for comparison into register, outside loop.
+ __ Move(kScratchRegister, Factory::global_context_map());
+ __ bind(&next);
+ // Terminate at global context.
+ __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
+ __ j(equal, &fast);
+ // Check that extension is NULL.
+ __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
+ slow->Branch(not_equal);
+ // Load next context in chain.
+ __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
+ __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
+ __ jmp(&next);
+ __ bind(&fast);
+ }
+ tmp.Unuse();
+
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ LoadGlobal();
+ frame_->Push(slot->var()->name());
+ RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame_->CallLoadIC(mode);
+ // A test rax instruction following the call signals that the inobject
+ // property case was inlined. Ensure that there is not a test eax
+ // instruction here.
+ __ nop();
+ // Discard the global object. The result is in answer.
+ frame_->Drop();
+ return answer;
}
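
A rough model of the extension-object check above, using a plain linked list in place of V8 context objects (the structure and field names here are hypothetical):

    #include <cstdio>

    struct Context { const Context* previous; bool has_extension; };

    // Walk the context chain; any context with an extension forces the slow path,
    // which corresponds to slow->Branch(not_equal) in the generated code.
    bool all_extensions_null(const Context* c) {
      for (; c != nullptr; c = c->previous)
        if (c->has_extension) return false;
      return true;  // safe to use the fast global load IC
    }

    int main() {
      Context global{nullptr, false}, inner{&global, false}, with_ext{&inner, true};
      std::printf("%d %d\n", all_extensions_null(&inner), all_extensions_null(&with_ext));  // 1 0
    }
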
@@ -4120,39 +4243,6 @@ void CodeGenerator::LoadTypeofExpression(Expression* x) {
}
-class CompareStub: public CodeStub {
- public:
- CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Condition cc_;
- bool strict_;
-
- Major MajorKey() { return Compare; }
-
- int MinorKey() {
- // Encode the three parameters in a unique 16 bit value.
- ASSERT(static_cast<int>(cc_) < (1 << 15));
- return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
- }
-
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object);
-
-#ifdef DEBUG
- void Print() {
- PrintF("CompareStub (cc %d), (strict %s)\n",
- static_cast<int>(cc_),
- strict_ ? "true" : "false");
- }
-#endif
-};
-
-
void CodeGenerator::Comparison(Condition cc,
bool strict,
ControlDestination* dest) {
@@ -4239,12 +4329,8 @@ void CodeGenerator::Comparison(Condition cc,
left_side = Result(left_reg);
right_side = Result(right_val);
// Test smi equality and comparison by signed int comparison.
- if (IsUnsafeSmi(right_side.handle())) {
- right_side.ToRegister();
- __ cmpq(left_side.reg(), right_side.reg());
- } else {
- __ Cmp(left_side.reg(), right_side.handle());
- }
+ // Both sides are smis, so we can use an Immediate.
+ __ cmpl(left_side.reg(), Immediate(Smi::cast(*right_side.handle())));
left_side.Unuse();
right_side.Unuse();
dest->Split(cc);
@@ -4296,7 +4382,8 @@ void CodeGenerator::Comparison(Condition cc,
// When non-smi, call out to the compare stub.
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- __ testq(answer.reg(), answer.reg()); // Both zero and sign flag right.
+ // The result is a Smi, which is negative, zero, or positive.
+ __ testl(answer.reg(), answer.reg()); // Both zero and sign flag right.
answer.Unuse();
dest->Split(cc);
} else {
@@ -4309,18 +4396,14 @@ void CodeGenerator::Comparison(Condition cc,
Register left_reg = left_side.reg();
Register right_reg = right_side.reg();
- __ movq(kScratchRegister, left_side.reg());
- __ or_(kScratchRegister, right_side.reg());
+ __ movq(kScratchRegister, left_reg);
+ __ or_(kScratchRegister, right_reg);
__ testl(kScratchRegister, Immediate(kSmiTagMask));
is_smi.Branch(zero, taken);
// When non-smi, call out to the compare stub.
CompareStub stub(cc, strict);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
- if (cc == equal) {
- __ testq(answer.reg(), answer.reg());
- } else {
- __ cmpq(answer.reg(), Immediate(0));
- }
+ __ testl(answer.reg(), answer.reg()); // Sets both zero and sign flags.
answer.Unuse();
dest->true_target()->Branch(cc);
dest->false_target()->Jump();
@@ -4328,7 +4411,7 @@ void CodeGenerator::Comparison(Condition cc,
is_smi.Bind();
left_side = Result(left_reg);
right_side = Result(right_reg);
- __ cmpq(left_side.reg(), right_side.reg());
+ __ cmpl(left_side.reg(), right_side.reg());
right_side.Unuse();
left_side.Unuse();
dest->Split(cc);
@@ -4649,8 +4732,6 @@ class DeferredInlineSmiAdd: public DeferredCode {
void DeferredInlineSmiAdd::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ subq(dst_, Immediate(value_));
__ push(dst_);
__ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
@@ -4681,8 +4762,6 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
void DeferredInlineSmiAddReversed::Generate() {
- // Undo the optimistic add operation and call the shared stub.
- __ subq(dst_, Immediate(value_));
__ push(Immediate(value_));
__ push(dst_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
@@ -4714,8 +4793,6 @@ class DeferredInlineSmiSub: public DeferredCode {
void DeferredInlineSmiSub::Generate() {
- // Undo the optimistic sub operation and call the shared stub.
- __ addq(dst_, Immediate(value_));
__ push(dst_);
__ push(Immediate(value_));
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
@@ -4757,9 +4834,6 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::ADD: {
operand->ToRegister();
frame_->Spill(operand->reg());
-
- // Optimistically add. Call the specialized add stub if the
- // result is not a smi or overflows.
DeferredCode* deferred = NULL;
if (reversed) {
deferred = new DeferredInlineSmiAddReversed(operand->reg(),
@@ -4770,11 +4844,17 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
overwrite_mode);
}
- __ movq(kScratchRegister, value, RelocInfo::NONE);
- __ addl(operand->reg(), kScratchRegister);
- deferred->Branch(overflow);
__ testl(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
+ // A smi currently fits in a 32-bit Immediate.
+ __ addl(operand->reg(), Immediate(smi_value));
+ Label add_success;
+ __ j(no_overflow, &add_success);
+ __ subl(operand->reg(), Immediate(smi_value));
+ __ movsxlq(operand->reg(), operand->reg());
+ deferred->Jump();
+ __ bind(&add_success);
+ __ movsxlq(operand->reg(), operand->reg());
deferred->BindExit();
frame_->Push(operand);
break;
@@ -4982,12 +5062,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Perform the operation.
switch (op) {
case Token::SAR:
- __ sar(answer.reg());
+ __ sarl(answer.reg());
// No checks of result necessary
break;
case Token::SHR: {
Label result_ok;
- __ shr(answer.reg());
+ __ shrl(answer.reg());
// Check that the *unsigned* result fits in a smi. Neither of
// the two high-order bits can be set:
// * 0x80000000: high bit would be lost when smi tagging.
@@ -5010,7 +5090,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
Label result_ok;
__ shl(answer.reg());
// Check that the *signed* result fits in a smi.
- __ cmpq(answer.reg(), Immediate(0xc0000000));
+ __ cmpl(answer.reg(), Immediate(0xc0000000));
__ j(positive, &result_ok);
ASSERT(kSmiTag == 0);
__ shl(rcx, Immediate(kSmiTagSize));
@@ -5060,12 +5140,12 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
__ movq(answer.reg(), left->reg());
switch (op) {
case Token::ADD:
- __ addl(answer.reg(), right->reg()); // Add optimistically.
+ __ addl(answer.reg(), right->reg());
deferred->Branch(overflow);
break;
case Token::SUB:
- __ subl(answer.reg(), right->reg()); // Subtract optimistically.
+ __ subl(answer.reg(), right->reg());
deferred->Branch(overflow);
break;
@@ -5148,7 +5228,7 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
- cgen_->LoadFromSlot(slot, typeof_state);
+ cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
break;
}
@@ -5347,6 +5427,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ j(equal, &false_result);
// Get the map and type of the heap object.
+ // We don't use CmpObjectType because we manipulate the type field.
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
__ movzxbq(rcx, FieldOperand(rdx, Map::kInstanceTypeOffset));
@@ -5372,6 +5453,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ bind(&not_string);
// HeapNumber => false iff +0, -0, or NaN.
+ // These three cases set C3 when compared to zero in the FPU.
__ Cmp(rdx, Factory::heap_number_map());
__ j(not_equal, &true_result);
// TODO(x64): Don't use fp stack, use MMX registers?
@@ -5381,9 +5463,9 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ fucompp(); // Compare and pop both values.
__ movq(kScratchRegister, rax);
__ fnstsw_ax(); // Store fp status word in ax, no checking for exceptions.
- __ testb(rax, Immediate(0x08)); // Test FP condition flag C3.
+ __ testl(rax, Immediate(0x4000)); // Test FP condition flag C3, bit 14.
__ movq(rax, kScratchRegister);
- __ j(zero, &false_result);
+ __ j(not_zero, &false_result);
// Fall through to |true_result|.
// Return 1/0 for true/false in rax.
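
The mask tested after fnstsw_ax comes from the x87 status word layout: C0 is bit 8, C1 bit 9, C2 bit 10 and C3 bit 14, so C3 corresponds to 0x4000 (architectural background, not taken from the patch itself):

    #include <cstdio>

    int main() {
      const unsigned kC0 = 1u << 8, kC1 = 1u << 9, kC2 = 1u << 10, kC3 = 1u << 14;
      std::printf("C0=0x%04x C1=0x%04x C2=0x%04x C3=0x%04x\n", kC0, kC1, kC2, kC3);  // C3=0x4000
    }
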
@@ -5481,12 +5563,32 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
}
-
-
// End of CodeGenerator implementation.
void UnarySubStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED();
+ Label slow;
+ Label done;
+
+ // Check whether the value is a smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ // TODO(X64): Add inline code that handles floats, as on ia32 platform.
+ __ j(not_zero, &slow);
+ // Enter runtime system if the value of the smi is zero
+ // to make sure that we switch between 0 and -0.
+ // Also enter it if the value of the smi is Smi::kMinValue
+ __ testl(rax, Immediate(0x7FFFFFFE));
+ __ j(zero, &slow);
+ __ neg(rax);
+ __ jmp(&done);
+ // Enter runtime system.
+ __ bind(&slow);
+ __ pop(rcx); // pop return address
+ __ push(rax);
+ __ push(rcx); // push return address
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+
+ __ bind(&done);
+ __ StubReturn(1);
}
@@ -5523,7 +5625,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read double representation into rax.
- __ movq(rbx, 0x7ff0000000000000, RelocInfo::NONE);
+ __ movq(rbx, V8_UINT64_C(0x7ff0000000000000), RelocInfo::NONE);
__ movq(rax, FieldOperand(rdx, HeapNumber::kValueOffset));
// Test that exponent bits are all set.
__ or_(rbx, rax);
@@ -5533,7 +5635,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ shl(rax, Immediate(12));
// If all bits in the mantissa are zero the number is Infinity, and
// we return zero. Otherwise it is a NaN, and we return non-zero.
- // So just return rax.
+ // We cannot just return rax because only eax is tested on return.
+ __ setcc(not_zero, rax);
__ ret(0);
__ bind(&not_identical);
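
For reference, the NaN test the stub performs on the raw bits can be written directly in C++ (a standalone illustration, assuming IEEE-754 doubles): a double is NaN iff all exponent bits (52..62) are set and the mantissa (bits 0..51) is non-zero.

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    bool is_nan_bits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return (bits & 0x7FF0000000000000ULL) == 0x7FF0000000000000ULL &&
             (bits & 0x000FFFFFFFFFFFFFULL) != 0;
    }

    int main() {
      double zero = 0.0, one = 1.0;
      std::printf("%d %d %d\n", is_nan_bits(zero / zero), is_nan_bits(one / zero),
                  is_nan_bits(1.5));  // 1 0 0
    }
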
@@ -5571,7 +5674,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
Factory::heap_number_map());
// If heap number, handle it in the slow case.
__ j(equal, &slow);
- // Return non-equal (ebx is not zero)
+ // Return non-equal. ebx (the lower half of rbx) is not zero.
__ movq(rax, rbx);
__ ret(0);
@@ -5587,7 +5690,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
Label first_non_object;
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
__ j(below, &first_non_object);
- // Return non-zero (rax is not zero)
+ // Return non-zero (eax (not rax) is not zero)
Label return_not_equal;
ASSERT(kHeapObjectTag != 0);
__ bind(&return_not_equal);
@@ -5647,11 +5750,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Fast negative check for symbol-to-symbol equality.
__ bind(&check_for_symbols);
if (cc_ == equal) {
- BranchIfNonSymbol(masm, &call_builtin, rax);
- BranchIfNonSymbol(masm, &call_builtin, rdx);
+ BranchIfNonSymbol(masm, &call_builtin, rax, kScratchRegister);
+ BranchIfNonSymbol(masm, &call_builtin, rdx, kScratchRegister);
// We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register rax already holds a
+ // are symbols they aren't equal. Register eax (not rax) already holds a
// non-zero value, which indicates not equal, so just return.
__ ret(2 * kPointerSize);
}
@@ -5691,14 +5794,15 @@ void CompareStub::Generate(MacroAssembler* masm) {
void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
- Register object) {
+ Register object,
+ Register scratch) {
__ testl(object, Immediate(kSmiTagMask));
__ j(zero, label);
- __ movq(kScratchRegister, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ and_(kScratchRegister, Immediate(kIsSymbolMask | kIsNotStringMask));
- __ cmpb(kScratchRegister, Immediate(kSymbolTag | kStringTag));
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
+ __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
__ j(not_equal, label);
}
@@ -5728,6 +5832,62 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Implements "value instanceof function" operator.
+ // Expected input state:
+ // rsp[0] : return address
+ // rsp[1] : function pointer
+ // rsp[2] : value
+
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+
+ // Check that the left hand is a JS object. Leave its map in rax.
+ __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
+ __ j(below, &slow);
+ __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Get the prototype of the function.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+ __ TryGetFunctionPrototype(rdx, rbx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
+ __ j(below, &slow);
+ __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
+ __ j(above, &slow);
+
+ // Register mapping: rax is object map and rbx is function prototype.
+ __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ Move(kScratchRegister, Factory::null_value());
+ __ bind(&loop);
+ __ cmpq(rcx, rbx);
+ __ j(equal, &is_instance);
+ __ cmpq(rcx, kScratchRegister);
+ __ j(equal, &is_not_instance);
+ __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
+ __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ xor_(rax, rax);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ __ movq(rax, Immediate(Smi::FromInt(1)));
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
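
The loop above is the usual prototype-chain walk. A hypothetical model of it with a plain linked structure instead of heap objects (note the stub returns 0 in rax for "is an instance" and a non-zero smi otherwise):

    #include <cstdio>

    struct Obj { const Obj* prototype; };

    int instance_of(const Obj* obj, const Obj* proto) {
      for (const Obj* p = obj->prototype; p != nullptr; p = p->prototype)
        if (p == proto) return 0;   // like rax == 0 at is_instance
      return 1;                     // like Smi::FromInt(1) at is_not_instance
    }

    int main() {
      Obj base{nullptr}, mid{&base}, leaf{&mid};
      std::printf("%d %d\n", instance_of(&leaf, &base), instance_of(&base, &mid));  // 0 1
    }
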
@@ -5878,13 +6038,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
// rsp: stack pointer (restored after C call).
- // rdi: number of arguments including receiver.
+ // r14: number of arguments including receiver (C callee-saved).
// r15: pointer to the first argument (C callee-saved).
// This pointer is reused in LeaveExitFrame(), so it is stored in a
// callee-saved register.
if (do_gc) {
- __ movq(Operand(rsp, 0), rax); // Result.
+ // Pass failure code returned from last attempt as first argument to GC.
+#ifdef __MSVC__
+ __ movq(rcx, rax); // Failure code is the first argument.
+#else // ! defined(__MSVC__)
+ __ movq(rdi, rax); // Failure code is the first argument.
+#endif
__ movq(kScratchRegister,
FUNCTION_ADDR(Runtime::PerformGC),
RelocInfo::RUNTIME_ENTRY);
@@ -5901,11 +6066,11 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Call C function.
#ifdef __MSVC__
// MSVC passes arguments in rcx, rdx, r8, r9
- __ movq(rcx, rdi); // argc.
+ __ movq(rcx, r14); // argc.
__ movq(rdx, r15); // argv.
#else // ! defined(__MSVC__)
// GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
- // First argument is already in rdi.
+ __ movq(rdi, r14); // argc.
__ movq(rsi, r15); // argv.
#endif
__ call(rbx);
@@ -6047,10 +6212,9 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// rax: number of arguments including receiver
// rbx: pointer to C function (C callee-saved)
- // rbp: frame pointer (restored after C call)
+ // rbp: frame pointer of calling JS frame (restored after C call)
// rsp: stack pointer (restored after C call)
- // rsi: current context (C callee-saved)
- // rdi: caller's parameter pointer pp (C callee-saved)
+ // rsi: current context (restored)
// NOTE: Invocations of builtins may return failure objects
// instead of a proper result. The builtin entry handles
@@ -6064,16 +6228,16 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
// Enter the exit frame that transitions from JavaScript to C++.
__ EnterExitFrame(frame_type);
- // rax: result parameter for PerformGC, if any (setup below).
- // Holds the result of a previous call to GenerateCore that
- // returned a failure. On next call, it's used as parameter
- // to Runtime::PerformGC.
+ // rax: Holds the context at this point, but should not be used.
+ // On entry to code generated by GenerateCore, it must hold
+ // a failure result if the collect_garbage argument to GenerateCore
+ // is true. This failure result can be the result of code
+ // generated by a previous call to GenerateCore. The value
+ // of rax is then passed to Runtime::PerformGC.
// rbx: pointer to builtin function (C callee-saved).
- // rbp: frame pointer (restored after C call).
- // rsp: stack pointer (restored after C call).
- // rdi: number of arguments including receiver (destroyed by C call).
- // The rdi register is not callee-save in Unix 64-bit ABI, so
- // we must treat it as volatile.
+ // rbp: frame pointer of exit frame (restored after C call).
+ // rsp: stack pointer (restored after C call).
+ // r14: number of arguments including receiver (C callee-saved).
// r15: argv pointer (C callee-saved).
Label throw_out_of_memory_exception;
@@ -6338,17 +6502,18 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
__ bind(&load_smi_lhs);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiTag == 0);
- __ lea(kScratchRegister, Operand(lhs, lhs, times_1, 0));
+ __ movsxlq(kScratchRegister, lhs);
+ __ sar(kScratchRegister, Immediate(kSmiTagSize));
__ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
+ __ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ jmp(&done_load_lhs);
__ bind(&load_smi_rhs);
- __ movq(kScratchRegister, rhs);
+ __ movsxlq(kScratchRegister, rhs);
__ sar(kScratchRegister, Immediate(kSmiTagSize));
__ push(kScratchRegister);
- __ fild_s(Operand(rsp, 0));
+ __ fild_d(Operand(rsp, 0));
__ pop(kScratchRegister);
__ bind(&done);
@@ -6357,24 +6522,18 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float) {
Label test_other, done;
- // Test if both operands are floats or smi -> scratch=k_is_float;
- // Otherwise scratch = k_not_float.
+ // Test if both operands are numbers (heap_numbers or smis).
+ // If not, jump to label non_float.
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &test_other); // argument in rdx is OK
- __ movq(kScratchRegister,
- Factory::heap_number_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(kScratchRegister, FieldOperand(rdx, HeapObject::kMapOffset));
- __ j(not_equal, non_float); // argument in rdx is not a number -> NaN
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, non_float); // The argument in rdx is not a number.
__ bind(&test_other);
__ testl(rax, Immediate(kSmiTagMask));
__ j(zero, &done); // argument in rax is OK
- __ movq(kScratchRegister,
- Factory::heap_number_map(),
- RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(kScratchRegister, FieldOperand(rax, HeapObject::kMapOffset));
- __ j(not_equal, non_float); // argument in rax is not a number -> NaN
+ __ Cmp(FieldOperand(rax, HeapObject::kMapOffset), Factory::heap_number_map());
+ __ j(not_equal, non_float); // The argument in rax is not a number.
// Fall-through: Both operands are numbers.
__ bind(&done);
@@ -6401,49 +6560,26 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
- // Prepare the smi check of both operands by or'ing them together
- // before checking against the smi mask.
+ // Smi check both operands.
__ movq(rcx, rbx);
__ or_(rcx, rax);
+ __ testl(rcx, Immediate(kSmiTagMask));
+ __ j(not_zero, slow);
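
The single testl covers both operands because a smi is marked by a cleared tag bit, so or-ing the two words preserves any set tag bit. A minimal sketch of the predicate, assuming kSmiTag == 0 and a one-bit tag as asserted elsewhere in this file:

#include <cassert>
#include <cstdint>

const uint32_t kSmiTagMask = 1;  // Assumed mask; the smi tag value is 0.

// True iff both words have a clear tag bit, i.e. both are smis.
bool BothSmis(uint32_t a, uint32_t b) {
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  assert(BothSmis(4, 6));   // 2 and 3, tagged.
  assert(!BothSmis(4, 7));  // 7 has the tag bit set.
  assert(!BothSmis(5, 7));
  return 0;
}
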
switch (op_) {
- case Token::ADD:
- __ addl(rax, rbx); // add optimistically
- __ j(overflow, slow);
+ case Token::ADD: {
+ __ addl(rax, rbx);
+ __ j(overflow, slow); // The slow case rereads operands from the stack.
__ movsxlq(rax, rax); // Sign extend eax into rax.
break;
+ }
- case Token::SUB:
- __ subl(rax, rbx); // subtract optimistically
- __ j(overflow, slow);
+ case Token::SUB: {
+ __ subl(rax, rbx);
+ __ j(overflow, slow); // The slow case rereads operands from the stack.
__ movsxlq(rax, rax); // Sign extend eax into rax.
break;
-
- case Token::DIV:
- case Token::MOD:
- // Sign extend rax into rdx:rax
- // (also sign extends eax into edx if eax is Smi).
- __ cqo();
- // Check for 0 divisor.
- __ testq(rbx, rbx);
- __ j(zero, slow);
- break;
-
- default:
- // Fall-through to smi check.
- break;
- }
-
- // Perform the actual smi check.
- ASSERT(kSmiTag == 0); // adjust zero check if not the case
- __ testl(rcx, Immediate(kSmiTagMask));
- __ j(not_zero, slow);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- // Do nothing here.
- break;
+ }
case Token::MUL:
// If the smi tag is 0 we can just leave the tag on one operand.
@@ -6460,6 +6596,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
break;
case Token::DIV:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
// Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
__ idiv(rbx);
// Check that the remainder is zero.
@@ -6481,6 +6623,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
break;
case Token::MOD:
+ // Sign extend rax into rdx:rax
+ // (also sign extends eax into edx if eax is Smi).
+ __ cqo();
+ // Check for 0 divisor.
+ __ testq(rbx, rbx);
+ __ j(zero, slow);
// Divide rdx:rax by rbx.
__ idiv(rbx);
// Check for negative zero result.
@@ -6508,12 +6656,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Move the second operand into register ecx.
__ movq(rcx, rbx);
// Remove tags from operands (but keep sign).
- __ sar(rax, Immediate(kSmiTagSize));
- __ sar(rcx, Immediate(kSmiTagSize));
+ __ sarl(rax, Immediate(kSmiTagSize));
+ __ sarl(rcx, Immediate(kSmiTagSize));
// Perform the operation.
switch (op_) {
case Token::SAR:
- __ sar(rax);
+ __ sarl(rax);
// No checks of result necessary
break;
case Token::SHR:
@@ -6524,19 +6672,17 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// - 0x40000000: this number would convert to negative when
        //   Smi tagging. These two cases can only happen with shifts
// by 0 or 1 when handed a valid smi.
- __ testq(rax, Immediate(0xc0000000));
+ __ testl(rax, Immediate(0xc0000000));
__ j(not_zero, slow);
break;
case Token::SHL:
__ shll(rax);
- // TODO(Smi): Significant change if Smi changes.
// Check that the *signed* result fits in a smi.
// It does, if the 30th and 31st bits are equal, since then
// shifting the SmiTag in at the bottom doesn't change the sign.
ASSERT(kSmiTagSize == 1);
__ cmpl(rax, Immediate(0xc0000000));
__ j(sign, slow);
- __ movsxlq(rax, rax); // Extend new sign of eax into rax.
break;
default:
UNREACHABLE();
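
The SHL guard above (cmpl against 0xc0000000 followed by j(sign, slow)) is equivalent to requiring bits 30 and 31 of the untagged result to agree. A small standalone check of that equivalence, with FitsInSmi and FitsInSmiViaCmp as illustrative names:

#include <cassert>
#include <cstdint>

// The result fits in a smi iff bits 30 and 31 agree, so tagging (one more
// left shift) cannot change the sign.
bool FitsInSmi(uint32_t x) {
  return ((x >> 30) & 1) == ((x >> 31) & 1);
}

// cmpl rax, 0xc0000000 followed by j(sign, ...) tests whether the 32-bit
// result of x - 0xc0000000 is negative; that is the same predicate.
bool FitsInSmiViaCmp(uint32_t x) {
  return static_cast<int32_t>(x - 0xc0000000u) >= 0;
}

int main() {
  const uint32_t samples[] = { 0u, 0x3fffffffu, 0x40000000u,
                               0xbfffffffu, 0xc0000000u, 0xffffffffu };
  for (uint32_t x : samples) {
    assert(FitsInSmi(x) == FitsInSmiViaCmp(x));
  }
  return 0;
}
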
@@ -6674,9 +6820,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_OR: __ or_(rax, rcx); break;
case Token::BIT_AND: __ and_(rax, rcx); break;
case Token::BIT_XOR: __ xor_(rax, rcx); break;
- case Token::SAR: __ sar(rax); break;
- case Token::SHL: __ shl(rax); break;
- case Token::SHR: __ shr(rax); break;
+ case Token::SAR: __ sarl(rax); break;
+ case Token::SHL: __ shll(rax); break;
+ case Token::SHR: __ shrl(rax); break;
default: UNREACHABLE();
}
if (op_ == Token::SHR) {
@@ -6752,8 +6898,6 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// If all else fails, use the runtime system to get the correct
// result.
__ bind(&call_runtime);
- // Disable builtin-calls until JS builtins can compile and run.
- __ Abort("Disabled until builtins compile and run.");
switch (op_) {
case Token::ADD:
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
@@ -6794,6 +6938,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
+int CompareStub::MinorKey() {
+ // Encode the two parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
+ return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+}
+
+
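
MinorKey() packs the condition code and the strict flag into one small integer. A standalone sketch of the same packing and its inverse; EncodeKey and DecodeKey are illustrative names, not V8 functions:

#include <cassert>

// Condition code in the upper bits, strict flag in bit 0.
int EncodeKey(unsigned cc, bool strict) {
  assert(cc < (1u << 15));  // Leaves room for the flag within 16 bits.
  return static_cast<int>((cc << 1) | (strict ? 1 : 0));
}

void DecodeKey(int key, unsigned* cc, bool* strict) {
  *strict = (key & 1) != 0;
  *cc = static_cast<unsigned>(key) >> 1;
}

int main() {
  unsigned cc = 0;
  bool strict = false;
  DecodeKey(EncodeKey(7, true), &cc, &strict);
  assert(cc == 7 && strict);
  return 0;
}
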
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index af82de8ff..bb4b53880 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -294,15 +294,6 @@ class CodeGenerator: public AstVisitor {
Handle<Script> script,
bool is_eval);
- // During implementation of CodeGenerator, this call creates a
- // CodeGenerator instance, and calls GenCode on it with a null
- // function literal. CodeGenerator will then construct and return
- // a simple dummy function. Call this during bootstrapping before
- // trying to compile any real functions, to get CodeGenerator up
- // and running.
- // TODO(X64): Remove once we can get through the bootstrapping process.
- static void TestCodeGenerator();
-
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@@ -432,6 +423,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
@@ -522,11 +514,15 @@ class CodeGenerator: public AstVisitor {
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
+ // Support for construct call checks.
+ void GenerateIsConstructCall(ZoneList<Expression*>* args);
+
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
- // Support for accessing the value field of an object (used by Date).
+ // Support for accessing the class and value fields of an object.
+ void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
@@ -547,58 +543,6 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args);
- // Methods and constants for fast case switch statement support.
- //
- // Only allow fast-case switch if the range of labels is at most
- // this factor times the number of case labels.
- // Value is derived from comparing the size of code generated by the normal
- // switch code for Smi-labels to the size of a single pointer. If code
- // quality increases this number should be decreased to match.
- static const int kFastSwitchMaxOverheadFactor = 5;
-
- // Minimal number of switch cases required before we allow jump-table
- // optimization.
- static const int kFastSwitchMinCaseCount = 5;
-
- // The limit of the range of a fast-case switch, as a factor of the number
- // of cases of the switch. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMaxOverheadFactor();
-
- // The minimal number of cases in a switch before the fast-case switch
- // optimization is enabled. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- int FastCaseSwitchMinCaseCount();
-
- // Allocate a jump table and create code to jump through it.
- // Should call GenerateFastCaseSwitchCases to generate the code for
- // all the cases at the appropriate point.
- void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
- int min_index,
- int range,
- Label* fail_label,
- Vector<Label*> case_targets,
- Vector<Label> case_labels);
-
- // Generate the code for cases for the fast case switch.
- // Called by GenerateFastCaseSwitchJumpTable.
- void GenerateFastCaseSwitchCases(SwitchStatement* node,
- Vector<Label> case_labels,
- VirtualFrame* start_frame);
-
- // Fast support for constant-Smi switches.
- void GenerateFastCaseSwitchStatement(SwitchStatement* node,
- int min_index,
- int range,
- int default_index);
-
- // Fast support for constant-Smi switches. Tests whether switch statement
- // permits optimization and calls GenerateFastCaseSwitch if it does.
- // Returns true if the fast-case switch was generated, and false if not.
- bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
-
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 3b101325e..e94e781d4 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -38,8 +38,10 @@ namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
- UNIMPLEMENTED();
- return false;
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ // 11th byte of patch is 0x49, 11th byte of JS return is 0xCC (int3).
+ ASSERT(*(rinfo->pc() + 10) == 0x49 || *(rinfo->pc() + 10) == 0xCC);
+ return (*(rinfo->pc() + 10) == 0x49);
}
void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 767b1247a..f962c0193 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -25,64 +25,1408 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+
#include "v8.h"
#include "disasm.h"
namespace disasm {
-Disassembler::Disassembler(NameConverter const& converter)
- : converter_(converter) {
- UNIMPLEMENTED();
+enum OperandOrder {
+ UNSET_OP_ORDER = 0, REG_OPER_OP_ORDER, OPER_REG_OP_ORDER
+};
+
+//------------------------------------------------------------------
+// Tables
+//------------------------------------------------------------------
+struct ByteMnemonic {
+ int b; // -1 terminates, otherwise must be in range (0..255)
+ OperandOrder op_order_;
+ const char* mnem;
+};
+
+
+static ByteMnemonic two_operands_instr[] = {
+ { 0x03, REG_OPER_OP_ORDER, "add" },
+ { 0x21, OPER_REG_OP_ORDER, "and" },
+ { 0x23, REG_OPER_OP_ORDER, "and" },
+ { 0x3B, REG_OPER_OP_ORDER, "cmp" },
+ { 0x8D, REG_OPER_OP_ORDER, "lea" },
+ { 0x09, OPER_REG_OP_ORDER, "or" },
+ { 0x0B, REG_OPER_OP_ORDER, "or" },
+ { 0x1B, REG_OPER_OP_ORDER, "sbb" },
+ { 0x29, OPER_REG_OP_ORDER, "sub" },
+ { 0x2B, REG_OPER_OP_ORDER, "sub" },
+ { 0x85, REG_OPER_OP_ORDER, "test" },
+ { 0x31, OPER_REG_OP_ORDER, "xor" },
+ { 0x33, REG_OPER_OP_ORDER, "xor" },
+ { 0x87, REG_OPER_OP_ORDER, "xchg" },
+ { 0x8A, REG_OPER_OP_ORDER, "movb" },
+ { 0x8B, REG_OPER_OP_ORDER, "mov" },
+ { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static ByteMnemonic zero_operands_instr[] = {
+ { 0xC3, UNSET_OP_ORDER, "ret" },
+ { 0xC9, UNSET_OP_ORDER, "leave" },
+ { 0x90, UNSET_OP_ORDER, "nop" },
+ { 0xF4, UNSET_OP_ORDER, "hlt" },
+ { 0xCC, UNSET_OP_ORDER, "int3" },
+ { 0x60, UNSET_OP_ORDER, "pushad" },
+ { 0x61, UNSET_OP_ORDER, "popad" },
+ { 0x9C, UNSET_OP_ORDER, "pushfd" },
+ { 0x9D, UNSET_OP_ORDER, "popfd" },
+ { 0x9E, UNSET_OP_ORDER, "sahf" },
+ { 0x99, UNSET_OP_ORDER, "cdq" },
+ { 0x9B, UNSET_OP_ORDER, "fwait" },
+ { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static ByteMnemonic call_jump_instr[] = {
+ { 0xE8, UNSET_OP_ORDER, "call" },
+ { 0xE9, UNSET_OP_ORDER, "jmp" },
+ { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static ByteMnemonic short_immediate_instr[] = {
+ { 0x05, UNSET_OP_ORDER, "add" },
+ { 0x0D, UNSET_OP_ORDER, "or" },
+ { 0x15, UNSET_OP_ORDER, "adc" },
+ { 0x25, UNSET_OP_ORDER, "and" },
+ { 0x2D, UNSET_OP_ORDER, "sub" },
+ { 0x35, UNSET_OP_ORDER, "xor" },
+ { 0x3D, UNSET_OP_ORDER, "cmp" },
+ { -1, UNSET_OP_ORDER, "" }
+};
+
+
+static const char* conditional_code_suffix[] = {
+ "o", "no", "c", "nc", "z", "nz", "a", "na",
+ "s", "ns", "pe", "po", "l", "ge", "le", "g"
+};
+
+
+enum InstructionType {
+ NO_INSTR,
+ ZERO_OPERANDS_INSTR,
+ TWO_OPERANDS_INSTR,
+ JUMP_CONDITIONAL_SHORT_INSTR,
+ REGISTER_INSTR,
+ PUSHPOP_INSTR, // Has implicit 64-bit operand size.
+ MOVE_REG_INSTR,
+ CALL_JUMP_INSTR,
+ SHORT_IMMEDIATE_INSTR
+};
+
+
+struct InstructionDesc {
+ const char* mnem;
+ InstructionType type;
+ OperandOrder op_order_;
+};
+
+
+class InstructionTable {
+ public:
+ InstructionTable();
+ const InstructionDesc& Get(byte x) const {
+ return instructions_[x];
+ }
+
+ private:
+ InstructionDesc instructions_[256];
+ void Clear();
+ void Init();
+ void CopyTable(ByteMnemonic bm[], InstructionType type);
+ void SetTableRange(InstructionType type, byte start, byte end,
+ const char* mnem);
+ void AddJumpConditionalShort();
+};
+
+
+InstructionTable::InstructionTable() {
+ Clear();
+ Init();
+}
+
+
+void InstructionTable::Clear() {
+ for (int i = 0; i < 256; i++) {
+ instructions_[i].mnem = "";
+ instructions_[i].type = NO_INSTR;
+ instructions_[i].op_order_ = UNSET_OP_ORDER;
+ }
+}
+
+
+void InstructionTable::Init() {
+ CopyTable(two_operands_instr, TWO_OPERANDS_INSTR);
+ CopyTable(zero_operands_instr, ZERO_OPERANDS_INSTR);
+ CopyTable(call_jump_instr, CALL_JUMP_INSTR);
+ CopyTable(short_immediate_instr, SHORT_IMMEDIATE_INSTR);
+ AddJumpConditionalShort();
+ SetTableRange(PUSHPOP_INSTR, 0x50, 0x57, "push");
+ SetTableRange(PUSHPOP_INSTR, 0x58, 0x5F, "pop");
+ SetTableRange(MOVE_REG_INSTR, 0xB8, 0xBF, "mov");
+}
+
+
+void InstructionTable::CopyTable(ByteMnemonic bm[], InstructionType type) {
+ for (int i = 0; bm[i].b >= 0; i++) {
+ InstructionDesc* id = &instructions_[bm[i].b];
+ id->mnem = bm[i].mnem;
+ id->op_order_ = bm[i].op_order_;
+ assert(id->type == NO_INSTR); // Information already entered
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::SetTableRange(InstructionType type, byte start,
+ byte end, const char* mnem) {
+ for (byte b = start; b <= end; b++) {
+ InstructionDesc* id = &instructions_[b];
+ assert(id->type == NO_INSTR); // Information already entered
+ id->mnem = mnem;
+ id->type = type;
+ }
+}
+
+
+void InstructionTable::AddJumpConditionalShort() {
+ for (byte b = 0x70; b <= 0x7F; b++) {
+ InstructionDesc* id = &instructions_[b];
+ assert(id->type == NO_INSTR); // Information already entered
+ id->mnem = NULL; // Computed depending on condition code.
+ id->type = JUMP_CONDITIONAL_SHORT_INSTR;
+ }
+}
+
+
+static InstructionTable instruction_table;
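
Dispatch in InstructionDecode then reduces to a single lookup in this table. A usage sketch against the table as initialized above; the expected values follow from the SetTableRange call for 0x50..0x57:

// Hypothetical lookup: 0x53 falls in the range registered as "push" above.
const InstructionDesc& desc = instruction_table.Get(0x53);
// desc.type == PUSHPOP_INSTR and desc.mnem is "push"; InstructionDecode then
// appends the register named by the low three bits (widened by REX.B).
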
+
+
+// The X64 disassembler implementation.
+enum UnimplementedOpcodeAction {
+ CONTINUE_ON_UNIMPLEMENTED_OPCODE,
+ ABORT_ON_UNIMPLEMENTED_OPCODE
+};
+
+
+class DisassemblerX64 {
+ public:
+ DisassemblerX64(const NameConverter& converter,
+ UnimplementedOpcodeAction unimplemented_action =
+ ABORT_ON_UNIMPLEMENTED_OPCODE)
+ : converter_(converter),
+ tmp_buffer_pos_(0),
+ abort_on_unimplemented_(
+ unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
+ rex_(0),
+ operand_size_(0) {
+ tmp_buffer_[0] = '\0';
+ }
+
+ virtual ~DisassemblerX64() {
+ }
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(v8::internal::Vector<char> buffer, byte* instruction);
+
+ private:
+
+ const NameConverter& converter_;
+ v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
+ unsigned int tmp_buffer_pos_;
+ bool abort_on_unimplemented_;
+ // Prefixes parsed
+ byte rex_;
+ byte operand_size_;
+
+ void setOperandSizePrefix(byte prefix) {
+ ASSERT_EQ(0x66, prefix);
+ operand_size_ = prefix;
+ }
+
+ void setRex(byte rex) {
+ ASSERT_EQ(0x40, rex & 0xF0);
+ rex_ = rex;
+ }
+
+ bool rex() { return rex_ != 0; }
+
+ bool rex_b() { return (rex_ & 0x01) != 0; }
+
+ // Actual number of base register given the low bits and the rex.b state.
+ int base_reg(int low_bits) { return low_bits | ((rex_ & 0x01) << 3); }
+
+ bool rex_x() { return (rex_ & 0x02) != 0; }
+
+ bool rex_r() { return (rex_ & 0x04) != 0; }
+
+ bool rex_w() { return (rex_ & 0x08) != 0; }
+
+ int operand_size() {
+ return rex_w() ? 64 : (operand_size_ != 0) ? 16 : 32;
+ }
+
+ char operand_size_code() {
+ return rex_w() ? 'q' : (operand_size_ != 0) ? 'w' : 'l';
+ }
+
+ const char* NameOfCPURegister(int reg) const {
+ return converter_.NameOfCPURegister(reg);
+ }
+
+ const char* NameOfByteCPURegister(int reg) const {
+ return converter_.NameOfByteCPURegister(reg);
+ }
+
+ const char* NameOfXMMRegister(int reg) const {
+ return converter_.NameOfXMMRegister(reg);
+ }
+
+ const char* NameOfAddress(byte* addr) const {
+ return converter_.NameOfAddress(addr);
+ }
+
+ // Disassembler helper functions.
+ void get_modrm(byte data,
+ int* mod,
+ int* regop,
+ int* rm) {
+ *mod = (data >> 6) & 3;
+ *regop = ((data & 0x38) >> 3) | (rex_r() ? 8 : 0);
+ *rm = (data & 7) | (rex_b() ? 8 : 0);
+ }
+
+ void get_sib(byte data,
+ int* scale,
+ int* index,
+ int* base) {
+ *scale = (data >> 6) & 3;
+ *index = ((data >> 3) & 7) | (rex_x() ? 8 : 0);
+    *base = (data & 7) | (rex_b() ? 8 : 0);
+ }
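
get_modrm and get_sib split a ModRM byte into mod (bits 7-6), reg (bits 5-3) and r/m (bits 2-0), widening the register fields with the REX.R and REX.B bits. A self-contained sketch of the same split, with the REX bits passed explicitly:

#include <cassert>

// Standalone restatement of the ModRM decoding above.
void SplitModRM(unsigned char data, bool rex_r, bool rex_b,
                int* mod, int* regop, int* rm) {
  *mod = (data >> 6) & 3;
  *regop = ((data & 0x38) >> 3) | (rex_r ? 8 : 0);
  *rm = (data & 7) | (rex_b ? 8 : 0);
}

int main() {
  int mod = 0, regop = 0, rm = 0;
  // 0xD8 = 11 011 000: register-direct form, reg field 3, r/m field 0.
  SplitModRM(0xD8, /*rex_r=*/false, /*rex_b=*/true, &mod, &regop, &rm);
  assert(mod == 3);
  assert(regop == 3);  // Register 3 (rbx) without REX.R.
  assert(rm == 8);     // r/m 0 widened to register 8 (r8) by REX.B.
  return 0;
}
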
+
+ typedef const char* (DisassemblerX64::*RegisterNameMapping)(int reg) const;
+
+ int PrintRightOperandHelper(byte* modrmp,
+ RegisterNameMapping register_name);
+ int PrintRightOperand(byte* modrmp);
+ int PrintRightByteOperand(byte* modrmp);
+ int PrintOperands(const char* mnem,
+ OperandOrder op_order,
+ byte* data);
+ int PrintImmediateOp(byte* data);
+ int F7Instruction(byte* data);
+ int D1D3C1Instruction(byte* data);
+ int JumpShort(byte* data);
+ int JumpConditional(byte* data);
+ int JumpConditionalShort(byte* data);
+ int SetCC(byte* data);
+ int FPUInstruction(byte* data);
+ void AppendToBuffer(const char* format, ...);
+
+ void UnimplementedInstruction() {
+ if (abort_on_unimplemented_) {
+ UNIMPLEMENTED();
+ } else {
+ AppendToBuffer("'Unimplemented Instruction'");
+ }
+ }
+};
+
+
+void DisassemblerX64::AppendToBuffer(const char* format, ...) {
+ v8::internal::Vector<char> buf = tmp_buffer_ + tmp_buffer_pos_;
+ va_list args;
+ va_start(args, format);
+ int result = v8::internal::OS::VSNPrintF(buf, format, args);
+ va_end(args);
+ tmp_buffer_pos_ += result;
+}
+
+
+int DisassemblerX64::PrintRightOperandHelper(
+ byte* modrmp,
+ RegisterNameMapping register_name) {
+ int mod, regop, rm;
+ get_modrm(*modrmp, &mod, &regop, &rm);
+ switch (mod) {
+ case 0:
+ if ((rm & 7) == 5) {
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 1);
+ AppendToBuffer("[0x%x]", disp);
+ return 5;
+ } else if ((rm & 7) == 4) {
+ // Codes for SIB byte.
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
+ // index == rsp means no index. Only use sib byte with no index for
+ // rsp and r12 base.
+ AppendToBuffer("[%s]", (this->*register_name)(base));
+ return 2;
+ } else if (base == 5) {
+ // base == rbp means no base register (when mod == 0).
+ int32_t disp = *reinterpret_cast<int32_t*>(modrmp + 2);
+ AppendToBuffer("[%s*%d+0x%x]",
+ (this->*register_name)(index),
+ 1 << scale, disp);
+ return 6;
+ } else if (index != 4 && base != 5) {
+ // [base+index*scale]
+ AppendToBuffer("[%s+%s*%d]",
+ (this->*register_name)(base),
+ (this->*register_name)(index),
+ 1 << scale);
+ return 2;
+ } else {
+ UnimplementedInstruction();
+ return 1;
+ }
+ } else {
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 1;
+ }
+ break;
+ case 1: // fall through
+ case 2:
+ if ((rm & 7) == 4) {
+ byte sib = *(modrmp + 1);
+ int scale, index, base;
+ get_sib(sib, &scale, &index, &base);
+ int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 2)
+ : *reinterpret_cast<char*>(modrmp + 2);
+ if (index == 4 && (base & 7) == 4 && scale == 0 /*times_1*/) {
+ if (-disp > 0) {
+ AppendToBuffer("[%s-0x%x]", (this->*register_name)(base), -disp);
+ } else {
+ AppendToBuffer("[%s+0x%x]", (this->*register_name)(base), disp);
+ }
+ } else {
+ if (-disp > 0) {
+ AppendToBuffer("[%s+%s*%d-0x%x]",
+ (this->*register_name)(base),
+ (this->*register_name)(index),
+ 1 << scale,
+ -disp);
+ } else {
+ AppendToBuffer("[%s+%s*%d+0x%x]",
+ (this->*register_name)(base),
+ (this->*register_name)(index),
+ 1 << scale,
+ disp);
+ }
+ }
+ return mod == 2 ? 6 : 3;
+ } else {
+ // No sib.
+ int disp = (mod == 2) ? *reinterpret_cast<int32_t*>(modrmp + 1)
+ : *reinterpret_cast<char*>(modrmp + 1);
+ if (-disp > 0) {
+ AppendToBuffer("[%s-0x%x]", (this->*register_name)(rm), -disp);
+ } else {
+ AppendToBuffer("[%s+0x%x]", (this->*register_name)(rm), disp);
+ }
+ return (mod == 2) ? 5 : 2;
+ }
+ break;
+ case 3:
+ AppendToBuffer("%s", (this->*register_name)(rm));
+ return 1;
+ default:
+ UnimplementedInstruction();
+ return 1;
+ }
+ UNREACHABLE();
+}
+
+
+int DisassemblerX64::PrintRightOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX64::NameOfCPURegister);
+}
+
+
+int DisassemblerX64::PrintRightByteOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp,
+ &DisassemblerX64::NameOfByteCPURegister);
}
-Disassembler::~Disassembler() {
- UNIMPLEMENTED();
+// Returns number of bytes used including the current *data.
+// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
+int DisassemblerX64::PrintOperands(const char* mnem,
+ OperandOrder op_order,
+ byte* data) {
+ byte modrm = *data;
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ int advance = 0;
+ switch (op_order) {
+ case REG_OPER_OP_ORDER: {
+ AppendToBuffer("%s%c %s,",
+ mnem,
+ operand_size_code(),
+ NameOfCPURegister(regop));
+ advance = PrintRightOperand(data);
+ break;
+ }
+ case OPER_REG_OP_ORDER: {
+ AppendToBuffer("%s%c ", mnem, operand_size_code());
+ advance = PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ return advance;
}
-const char* NameConverter::NameOfAddress(unsigned char* addr) const {
- UNIMPLEMENTED();
- return NULL;
+// Returns number of bytes used by machine instruction, including *data byte.
+// Writes immediate instructions to 'tmp_buffer_'.
+int DisassemblerX64::PrintImmediateOp(byte* data) {
+ bool sign_extension_bit = (*data & 0x02) != 0;
+ byte modrm = *(data + 1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ const char* mnem = "Imm???";
+ switch (regop) {
+ case 0:
+ mnem = "add";
+ break;
+ case 1:
+ mnem = "or";
+ break;
+ case 2:
+ mnem = "adc";
+ break;
+ case 4:
+ mnem = "and";
+ break;
+ case 5:
+ mnem = "sub";
+ break;
+ case 6:
+ mnem = "xor";
+ break;
+ case 7:
+ mnem = "cmp";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ if (sign_extension_bit) {
+ AppendToBuffer(",0x%x", *(data + 1 + count));
+ return 1 + count + 1 /*int8*/;
+ } else {
+ AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + 1 + count));
+ return 1 + count + 4 /*int32_t*/;
+ }
+}
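
sign_extension_bit reads bit 1 of the group-1 opcode: 0x83 carries a sign-extended 8-bit immediate, 0x81 a full 32-bit one, which is also why the two opcodes are decoded together further down. A tiny sketch of that distinction (ImmediateSize is an illustrative name):

#include <cassert>

// Bit 1 of the opcode selects the immediate width for the 0x81/0x83 group.
int ImmediateSize(unsigned char opcode) {
  assert(opcode == 0x81 || opcode == 0x83);
  return (opcode & 0x02) != 0 ? 1 : 4;
}

int main() {
  assert(ImmediateSize(0x81) == 4);  // 32-bit immediate.
  assert(ImmediateSize(0x83) == 1);  // Sign-extended 8-bit immediate.
  return 0;
}
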
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::F7Instruction(byte* data) {
+ assert(*data == 0xF7);
+ byte modrm = *(data + 1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ if (mod == 3 && regop != 0) {
+ const char* mnem = NULL;
+ switch (regop) {
+ case 2:
+ mnem = "not";
+ break;
+ case 3:
+ mnem = "neg";
+ break;
+ case 4:
+ mnem = "mul";
+ break;
+ case 7:
+ mnem = "idiv";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s%c %s",
+ mnem,
+ operand_size_code(),
+ NameOfCPURegister(rm));
+ return 2;
+ } else if (mod == 3 && regop == 0) {
+ int32_t imm = *reinterpret_cast<int32_t*>(data + 2);
+ AppendToBuffer("test%c %s,0x%x",
+ operand_size_code(),
+ NameOfCPURegister(rm),
+ imm);
+ return 6;
+ } else if (regop == 0) {
+ AppendToBuffer("test%c ", operand_size_code());
+ int count = PrintRightOperand(data + 1);
+ int32_t imm = *reinterpret_cast<int32_t*>(data + 1 + count);
+ AppendToBuffer(",0x%x", imm);
+ return 1 + count + 4 /*int32_t*/;
+ } else {
+ UnimplementedInstruction();
+ return 2;
+ }
+}
+
+
+int DisassemblerX64::D1D3C1Instruction(byte* data) {
+ byte op = *data;
+ assert(op == 0xD1 || op == 0xD3 || op == 0xC1);
+ byte modrm = *(data + 1);
+ int mod, regop, rm;
+ get_modrm(modrm, &mod, &regop, &rm);
+ ASSERT(regop < 8);
+ int imm8 = -1;
+ int num_bytes = 2;
+ if (mod == 3) {
+ const char* mnem = NULL;
+ if (op == 0xD1) {
+ imm8 = 1;
+ switch (regop) {
+ case 2:
+ mnem = "rcl";
+ break;
+ case 7:
+ mnem = "sar";
+ break;
+ case 4:
+ mnem = "shl";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ } else if (op == 0xC1) {
+ imm8 = *(data + 2);
+ num_bytes = 3;
+ switch (regop) {
+ case 2:
+ mnem = "rcl";
+ break;
+ case 4:
+ mnem = "shl";
+ break;
+ case 5:
+ mnem = "shr";
+ break;
+ case 7:
+ mnem = "sar";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ } else if (op == 0xD3) {
+ switch (regop) {
+ case 4:
+ mnem = "shl";
+ break;
+ case 5:
+ mnem = "shr";
+ break;
+ case 7:
+ mnem = "sar";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ }
+ assert(mnem != NULL);
+ AppendToBuffer("%s%c %s,",
+ mnem,
+ operand_size_code(),
+ NameOfCPURegister(rm));
+ if (imm8 > 0) {
+ AppendToBuffer("%d", imm8);
+ } else {
+ AppendToBuffer("cl");
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ return num_bytes;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::JumpShort(byte* data) {
+ assert(*data == 0xEB);
+ byte b = *(data + 1);
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ AppendToBuffer("jmp %s", NameOfAddress(dest));
+ return 2;
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::JumpConditional(byte* data) {
+ assert(*data == 0x0F);
+ byte cond = *(data + 1) & 0x0F;
+ byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
+ const char* mnem = conditional_code_suffix[cond];
+ AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
+ return 6; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::JumpConditionalShort(byte* data) {
+ byte cond = *data & 0x0F;
+ byte b = *(data + 1);
+ byte* dest = data + static_cast<int8_t>(b) + 2;
+ const char* mnem = conditional_code_suffix[cond];
+ AppendToBuffer("j%s %s", mnem, NameOfAddress(dest));
+ return 2;
+}
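
The displacement of a short jump is a signed byte relative to the end of the two-byte instruction, which is what the + 2 accounts for. A standalone sketch of the target arithmetic:

#include <cassert>
#include <cstdint>

// Matches JumpShort/JumpConditionalShort above: target = start + 2 + disp8.
const unsigned char* ShortJumpTarget(const unsigned char* instr) {
  int8_t disp = static_cast<int8_t>(instr[1]);
  return instr + 2 + disp;
}

int main() {
  // 0x74 0x05: jz forward over five bytes; 0x75 0xFE: jnz back onto itself.
  unsigned char code[] = { 0x74, 0x05, 0x75, 0xFE };
  assert(ShortJumpTarget(code) == code + 7);
  assert(ShortJumpTarget(code + 2) == code + 2);
  return 0;
}
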
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::SetCC(byte* data) {
+ assert(*data == 0x0F);
+ byte cond = *(data + 1) & 0x0F;
+ const char* mnem = conditional_code_suffix[cond];
+ AppendToBuffer("set%s%c ", mnem, operand_size_code());
+ PrintRightByteOperand(data + 2);
+ return 3; // includes 0x0F
+}
+
+
+// Returns number of bytes used, including *data.
+int DisassemblerX64::FPUInstruction(byte* data) {
+ byte b1 = *data;
+ byte b2 = *(data + 1);
+ if (b1 == 0xD9) {
+ const char* mnem = NULL;
+ switch (b2) {
+ case 0xE8:
+ mnem = "fld1";
+ break;
+ case 0xEE:
+ mnem = "fldz";
+ break;
+ case 0xE1:
+ mnem = "fabs";
+ break;
+ case 0xE0:
+ mnem = "fchs";
+ break;
+ case 0xF8:
+ mnem = "fprem";
+ break;
+ case 0xF5:
+ mnem = "fprem1";
+ break;
+ case 0xF7:
+ mnem = "fincstp";
+ break;
+ case 0xE4:
+ mnem = "ftst";
+ break;
+ }
+ if (mnem != NULL) {
+ AppendToBuffer("%s", mnem);
+ return 2;
+ } else if ((b2 & 0xF8) == 0xC8) {
+ AppendToBuffer("fxch st%d", b2 & 0x7);
+ return 2;
+ } else {
+ int mod, regop, rm;
+ get_modrm(*(data + 1), &mod, &regop, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case 0:
+ mnem = "fld_s";
+ break;
+ case 3:
+ mnem = "fstp_s";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ }
+ } else if (b1 == 0xDD) {
+ if ((b2 & 0xF8) == 0xC0) {
+ AppendToBuffer("ffree st%d", b2 & 0x7);
+ return 2;
+ } else {
+ int mod, regop, rm;
+ get_modrm(*(data + 1), &mod, &regop, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case 0:
+ mnem = "fld_d";
+ break;
+ case 3:
+ mnem = "fstp_d";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ }
+ } else if (b1 == 0xDB) {
+ int mod, regop, rm;
+ get_modrm(*(data + 1), &mod, &regop, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case 0:
+ mnem = "fild_s";
+ break;
+ case 2:
+ mnem = "fist_s";
+ break;
+ case 3:
+ mnem = "fistp_s";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ } else if (b1 == 0xDF) {
+ if (b2 == 0xE0) {
+ AppendToBuffer("fnstsw_ax");
+ return 2;
+ }
+ int mod, regop, rm;
+ get_modrm(*(data + 1), &mod, &regop, &rm);
+ const char* mnem = "?";
+ switch (regop) {
+ case 5:
+ mnem = "fild_d";
+ break;
+ case 7:
+ mnem = "fistp_d";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
+ int count = PrintRightOperand(data + 1);
+ return count + 1;
+ } else if (b1 == 0xDC || b1 == 0xDE) {
+ bool is_pop = (b1 == 0xDE);
+ if (is_pop && b2 == 0xD9) {
+ AppendToBuffer("fcompp");
+ return 2;
+ }
+ const char* mnem = "FP0xDC";
+ switch (b2 & 0xF8) {
+ case 0xC0:
+ mnem = "fadd";
+ break;
+ case 0xE8:
+ mnem = "fsub";
+ break;
+ case 0xC8:
+ mnem = "fmul";
+ break;
+ case 0xF8:
+ mnem = "fdiv";
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ AppendToBuffer("%s%s st%d", mnem, is_pop ? "p" : "", b2 & 0x7);
+ return 2;
+ } else if (b1 == 0xDA && b2 == 0xE9) {
+ const char* mnem = "fucompp";
+ AppendToBuffer("%s", mnem);
+ return 2;
+ }
+ AppendToBuffer("Unknown FP instruction");
+ return 2;
+}
+
+// Mnemonics for instructions whose first byte is 0x0F, keyed by second byte.
+// Returns NULL if the instruction is not handled here.
+static const char* F0Mnem(byte f0byte) {
+ switch (f0byte) {
+ case 0x1F:
+ return "nop";
+ case 0x31:
+ return "rdtsc";
+ case 0xA2:
+ return "cpuid";
+ case 0xBE:
+ return "movsxb";
+ case 0xBF:
+ return "movsxw";
+ case 0xB6:
+ return "movzxb";
+ case 0xB7:
+ return "movzxw";
+ case 0xAF:
+ return "imul";
+ case 0xA5:
+ return "shld";
+ case 0xAD:
+ return "shrd";
+ case 0xAB:
+ return "bts";
+ default:
+ return NULL;
+ }
+}
+
+// Disassembles the instruction at '*instr' and writes it into 'out_buffer'.
+int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
+ byte* instr) {
+  tmp_buffer_pos_ = 0;  // Start writing at position 0.
+ byte* data = instr;
+ bool processed = true; // Will be set to false if the current instruction
+                          // is not in the instruction table.
+ byte current;
+
+ // Scan for prefixes.
+ while (true) {
+ current = *data;
+ if (current == 0x66) {
+ setOperandSizePrefix(current);
+ data++;
+ } else if ((current & 0xF0) == 0x40) {
+ setRex(current);
+ if (rex_w()) AppendToBuffer("REX.W ");
+ data++;
+ } else {
+ break;
+ }
+ }
+
+ const InstructionDesc& idesc = instruction_table.Get(current);
+ switch (idesc.type) {
+ case ZERO_OPERANDS_INSTR:
+ AppendToBuffer(idesc.mnem);
+ data++;
+ break;
+
+ case TWO_OPERANDS_INSTR:
+ data++;
+ data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+ break;
+
+ case JUMP_CONDITIONAL_SHORT_INSTR:
+ data += JumpConditionalShort(data);
+ break;
+
+ case REGISTER_INSTR:
+ AppendToBuffer("%s%c %s",
+ idesc.mnem,
+ operand_size_code(),
+ NameOfCPURegister(base_reg(current & 0x07)));
+ data++;
+ break;
+ case PUSHPOP_INSTR:
+ AppendToBuffer("%s %s",
+ idesc.mnem,
+ NameOfCPURegister(base_reg(current & 0x07)));
+ data++;
+ break;
+ case MOVE_REG_INSTR: {
+ byte* addr = NULL;
+ switch (operand_size()) {
+ case 16:
+ addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
+ data += 3;
+ break;
+ case 32:
+ addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ data += 5;
+ break;
+ case 64:
+ addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
+ data += 9;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ AppendToBuffer("mov%c %s,%s",
+ operand_size_code(),
+ NameOfCPURegister(base_reg(current & 0x07)),
+ NameOfAddress(addr));
+ break;
+ }
+
+ case CALL_JUMP_INSTR: {
+ byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+ AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case SHORT_IMMEDIATE_INSTR: {
+ byte* addr =
+ reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+ AppendToBuffer("%s rax, %s", idesc.mnem, NameOfAddress(addr));
+ data += 5;
+ break;
+ }
+
+ case NO_INSTR:
+ processed = false;
+ break;
+
+ default:
+ UNIMPLEMENTED(); // This type is not implemented.
+ }
+
+ // The first byte didn't match any of the simple opcodes, so we
+ // need to do special processing on it.
+ if (!processed) {
+ switch (*data) {
+ case 0xC2:
+ AppendToBuffer("ret 0x%x", *reinterpret_cast<uint16_t*>(data + 1));
+ data += 3;
+ break;
+
+ case 0x69: // fall through
+ case 0x6B: {
+ int mod, regop, rm;
+ get_modrm(*(data + 1), &mod, &regop, &rm);
+ int32_t imm = *data == 0x6B ? *(data + 2)
+ : *reinterpret_cast<int32_t*>(data + 2);
+ AppendToBuffer("imul %s,%s,0x%x", NameOfCPURegister(regop),
+ NameOfCPURegister(rm), imm);
+ data += 2 + (*data == 0x6B ? 1 : 4);
+ }
+ break;
+
+ case 0xF6: {
+ int mod, regop, rm;
+ get_modrm(*(data + 1), &mod, &regop, &rm);
+ if (mod == 3 && regop == 0) {
+ AppendToBuffer("testb %s,%d", NameOfCPURegister(rm), *(data + 2));
+ } else {
+ UnimplementedInstruction();
+ }
+ data += 3;
+ }
+ break;
+
+ case 0x81: // fall through
+ case 0x83: // 0x81 with sign extension bit set
+ data += PrintImmediateOp(data);
+ break;
+
+ case 0x0F: {
+ byte f0byte = *(data + 1);
+ const char* f0mnem = F0Mnem(f0byte);
+ if (f0byte == 0x1F) {
+ data += 1;
+ byte modrm = *data;
+ data += 1;
+ if (((modrm >> 3) & 7) == 4) {
+ // SIB byte present.
+ data += 1;
+ }
+ int mod = modrm >> 6;
+ if (mod == 1) {
+ // Byte displacement.
+ data += 1;
+ } else if (mod == 2) {
+ // 32-bit displacement.
+ data += 4;
+ }
+ AppendToBuffer("nop");
+ } else if (f0byte == 0xA2 || f0byte == 0x31) {
+ AppendToBuffer("%s", f0mnem);
+ data += 2;
+ } else if ((f0byte & 0xF0) == 0x80) {
+ data += JumpConditional(data);
+ } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || f0byte
+ == 0xB7 || f0byte == 0xAF) {
+ data += 2;
+ data += PrintOperands(f0mnem, REG_OPER_OP_ORDER, data);
+ } else if ((f0byte & 0xF0) == 0x90) {
+ data += SetCC(data);
+ } else {
+ data += 2;
+ if (f0byte == 0xAB || f0byte == 0xA5 || f0byte == 0xAD) {
+ // shrd, shld, bts
+ AppendToBuffer("%s ", f0mnem);
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ if (f0byte == 0xAB) {
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ } else {
+ AppendToBuffer(",%s,cl", NameOfCPURegister(regop));
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ }
+ }
+ break;
+
+ case 0x8F: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (regop == 0) {
+ AppendToBuffer("pop ");
+ data += PrintRightOperand(data);
+ }
+ }
+ break;
+
+ case 0xFF: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case 0:
+ mnem = "inc";
+ break;
+ case 1:
+ mnem = "dec";
+ break;
+ case 2:
+ mnem = "call";
+ break;
+ case 4:
+ mnem = "jmp";
+ break;
+ case 6:
+ mnem = "push";
+ break;
+ default:
+ mnem = "???";
+ }
+ AppendToBuffer(((regop <= 1) ? "%s%c " : "%s "),
+ mnem,
+ operand_size_code());
+ data += PrintRightOperand(data);
+ }
+ break;
+
+ case 0xC7: // imm32, fall through
+ case 0xC6: // imm8
+ {
+ bool is_byte = *data == 0xC6;
+ data++;
+
+ AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
+ data += PrintRightOperand(data);
+ int32_t imm = is_byte ? *data : *reinterpret_cast<int32_t*>(data);
+ AppendToBuffer(",0x%x", imm);
+ data += is_byte ? 1 : 4;
+ }
+ break;
+
+ case 0x80: {
+ data++;
+ AppendToBuffer("cmpb ");
+ data += PrintRightOperand(data);
+ int32_t imm = *data;
+ AppendToBuffer(",0x%x", imm);
+ data++;
+ }
+ break;
+
+ case 0x88: // 8bit, fall through
+ case 0x89: // 32bit
+ {
+ bool is_byte = *data == 0x88;
+ int mod, regop, rm;
+ data++;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("mov%c ", is_byte ? 'b' : operand_size_code());
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfCPURegister(regop));
+ }
+ break;
+
+ case 0x90:
+ case 0x91:
+ case 0x92:
+ case 0x93:
+ case 0x94:
+ case 0x95:
+ case 0x96:
+ case 0x97: {
+        int reg = (current & 0x7) | (rex_b() ? 8 : 0);
+ if (reg == 0) {
+ AppendToBuffer("nop"); // Common name for xchg rax,rax.
+ } else {
+ AppendToBuffer("xchg%c rax, %s",
+ operand_size_code(),
+                       NameOfCPURegister(reg));
+ }
+        data++;
+        break;
+      }
+
+
+ case 0xFE: {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (mod == 3 && regop == 1) {
+ AppendToBuffer("decb %s", NameOfCPURegister(rm));
+ } else {
+ UnimplementedInstruction();
+ }
+ data++;
+ }
+ break;
+
+ case 0x68:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int32_t*>(data + 1));
+ data += 5;
+ break;
+
+ case 0x6A:
+ AppendToBuffer("push 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ data += 2;
+ break;
+
+ case 0xA8:
+ AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
+ data += 2;
+ break;
+
+ case 0xA9:
+ AppendToBuffer("test%c rax,0x%x", // CHECKME!
+ operand_size_code(),
+ *reinterpret_cast<int32_t*>(data + 1));
+ data += 5;
+ break;
+
+ case 0xD1: // fall through
+ case 0xD3: // fall through
+ case 0xC1:
+ data += D1D3C1Instruction(data);
+ break;
+
+ case 0xD9: // fall through
+ case 0xDA: // fall through
+ case 0xDB: // fall through
+ case 0xDC: // fall through
+ case 0xDD: // fall through
+ case 0xDE: // fall through
+ case 0xDF:
+ data += FPUInstruction(data);
+ break;
+
+ case 0xEB:
+ data += JumpShort(data);
+ break;
+
+ case 0xF2:
+ if (*(data + 1) == 0x0F) {
+ byte b2 = *(data + 2);
+ if (b2 == 0x11) {
+ AppendToBuffer("movsd ");
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else if (b2 == 0x10) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movsd %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ const char* mnem = "?";
+ switch (b2) {
+ case 0x2A:
+ mnem = "cvtsi2sd";
+ break;
+ case 0x58:
+ mnem = "addsd";
+ break;
+ case 0x59:
+ mnem = "mulsd";
+ break;
+ case 0x5C:
+ mnem = "subsd";
+ break;
+ case 0x5E:
+ mnem = "divsd";
+ break;
+ }
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ if (b2 == 0x2A) {
+ AppendToBuffer("%s %s,", mnem, NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else {
+ AppendToBuffer("%s %s,%s", mnem, NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ }
+ }
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF3:
+ if (*(data + 1) == 0x0F && *(data + 2) == 0x2C) {
+ data += 3;
+ data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ } else {
+ UnimplementedInstruction();
+ }
+ break;
+
+ case 0xF7:
+ data += F7Instruction(data);
+ break;
+
+ default:
+ UnimplementedInstruction();
+ }
+ } // !processed
+
+ if (tmp_buffer_pos_ < sizeof tmp_buffer_) {
+ tmp_buffer_[tmp_buffer_pos_] = '\0';
+ }
+
+ int instr_len = data - instr;
+ ASSERT(instr_len > 0); // Ensure progress.
+
+ int outp = 0;
+ // Instruction bytes.
+ for (byte* bp = instr; bp < data; bp++) {
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp, "%02x", *bp);
+ }
+ for (int i = 6 - instr_len; i >= 0; i--) {
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp, " ");
+ }
+
+ outp += v8::internal::OS::SNPrintF(out_buffer + outp, " %s",
+ tmp_buffer_.start());
+ return instr_len;
+}
+
+//------------------------------------------------------------------------------
+
+
+static const char* cpu_regs[16] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+
+
+static const char* byte_cpu_regs[16] = {
+ "al", "cl", "dl", "bl", "spl", "bpl", "sil", "dil",
+ "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l"
+};
+
+
+static const char* xmm_regs[16] = {
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
+};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+ v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+ return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
}
const char* NameConverter::NameOfCPURegister(int reg) const {
- UNIMPLEMENTED();
- return NULL;
+ if (0 <= reg && reg < 16)
+ return cpu_regs[reg];
+ return "noreg";
}
-int Disassembler::ConstantPoolSizeAt(unsigned char* addr) {
- UNIMPLEMENTED();
- return 0;
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ if (0 <= reg && reg < 16)
+ return byte_cpu_regs[reg];
+ return "noreg";
}
-int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
- unsigned char* instruction) {
- UNIMPLEMENTED();
- return 0;
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ if (0 <= reg && reg < 16)
+ return xmm_regs[reg];
+ return "noxmmreg";
}
-const char* NameConverter::NameOfByteCPURegister(int a) const {
- UNIMPLEMENTED();
- return NULL;
+
+const char* NameConverter::NameInCode(byte* addr) const {
+ // X64 does not embed debug strings at the moment.
+ UNREACHABLE();
+ return "";
}
-const char* NameConverter::NameOfXMMRegister(int a) const {
- UNIMPLEMENTED();
- return NULL;
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) { }
+
+Disassembler::~Disassembler() { }
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ DisassemblerX64 d(converter_, CONTINUE_ON_UNIMPLEMENTED_OPCODE);
+ return d.InstructionDecode(buffer, instruction);
}
-const char* NameConverter::NameOfConstant(unsigned char* a) const {
- UNIMPLEMENTED();
- return NULL;
+
+// The X64 assembler does not use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+ return -1;
}
-const char* NameConverter::NameInCode(unsigned char* a) const {
- UNIMPLEMENTED();
- return NULL;
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(f, "%p", prev_pc);
+ fprintf(f, " ");
+
+ for (byte* bp = prev_pc; bp < pc; bp++) {
+ fprintf(f, "%02x", *bp);
+ }
+ for (int i = 6 - (pc - prev_pc); i >= 0; i--) {
+ fprintf(f, " ");
+ }
+ fprintf(f, " %s\n", buffer.start());
+ }
}
} // namespace disasm
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index d4ab2c62e..24c78da99 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -59,12 +59,7 @@ class StackHandlerConstants : public AllStatic {
class EntryFrameConstants : public AllStatic {
public:
- static const int kCallerFPOffset = -6 * kPointerSize;
-
- static const int kFunctionArgOffset = +3 * kPointerSize;
- static const int kReceiverArgOffset = +4 * kPointerSize;
- static const int kArgcOffset = +5 * kPointerSize;
- static const int kArgvOffset = +6 * kPointerSize;
+ static const int kCallerFPOffset = -10 * kPointerSize;
};
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index abaffb338..7b8699f6d 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -212,11 +212,9 @@ void CallIC::Generate(MacroAssembler* masm,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); // receiver
__ testl(rdx, Immediate(kSmiTagMask));
__ j(zero, &invoke);
- __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ movzxbq(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
- __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_GLOBAL_OBJECT_TYPE)));
+ __ CmpObjectType(rdx, JS_GLOBAL_OBJECT_TYPE, rcx);
__ j(equal, &global);
- __ cmpq(rcx, Immediate(static_cast<int8_t>(JS_BUILTINS_OBJECT_TYPE)));
+ __ CmpInstanceType(rcx, JS_BUILTINS_OBJECT_TYPE);
__ j(not_equal, &invoke);
// Patch the receiver on the stack.
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index f6a5ffecc..099a46103 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -79,51 +79,6 @@ void MacroAssembler::NegativeZeroTest(Register result,
}
-void MacroAssembler::ConstructAndTestJSFunction() {
- const int initial_buffer_size = 4 * KB;
- char* buffer = new char[initial_buffer_size];
- MacroAssembler masm(buffer, initial_buffer_size);
-
- const uint64_t secret = V8_INT64_C(0xdeadbeefcafebabe);
- Handle<String> constant =
- Factory::NewStringFromAscii(Vector<const char>("451", 3), TENURED);
-#define __ ACCESS_MASM((&masm))
- // Construct a simple JSfunction here, using Assembler and MacroAssembler
- // commands.
- __ movq(rax, constant, RelocInfo::EMBEDDED_OBJECT);
- __ push(rax);
- __ CallRuntime(Runtime::kStringParseFloat, 1);
- __ movq(kScratchRegister, secret, RelocInfo::NONE);
- __ addq(rax, kScratchRegister);
- __ ret(0);
-#undef __
- CodeDesc desc;
- masm.GetCode(&desc);
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
- Object* code = Heap::CreateCode(desc, NULL, flags, Handle<Object>::null());
- if (!code->IsFailure()) {
- Handle<Code> code_handle(Code::cast(code));
- Handle<String> name =
- Factory::NewStringFromAscii(Vector<const char>("foo", 3), NOT_TENURED);
- Handle<JSFunction> function =
- Factory::NewFunction(name,
- JS_FUNCTION_TYPE,
- JSObject::kHeaderSize,
- code_handle,
- true);
- bool pending_exceptions;
- Handle<Object> result =
- Execution::Call(function,
- Handle<Object>::cast(function),
- 0,
- NULL,
- &pending_exceptions);
- CHECK(result->IsSmi());
- CHECK(secret + (451 << kSmiTagSize) == reinterpret_cast<uint64_t>(*result));
- }
-}
-
-
void MacroAssembler::Abort(const char* msg) {
// We want to pass the msg string like a smi to avoid GC
// problems, however msg is not guaranteed to be aligned
@@ -221,7 +176,7 @@ void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
const char* name = Builtins::GetName(id);
int argc = Builtins::GetArgumentsCount(id);
- movq(target, code, RelocInfo::EXTERNAL_REFERENCE); // Is external reference?
+ movq(target, code, RelocInfo::EMBEDDED_OBJECT);
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
@@ -253,7 +208,9 @@ Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
void MacroAssembler::Set(Register dst, int64_t x) {
- if (is_int32(x)) {
+ if (x == 0) {
+ xor_(dst, dst);
+ } else if (is_int32(x)) {
movq(dst, Immediate(x));
} else if (is_uint32(x)) {
movl(dst, Immediate(x));
@@ -264,14 +221,17 @@ void MacroAssembler::Set(Register dst, int64_t x) {
void MacroAssembler::Set(const Operand& dst, int64_t x) {
- if (is_int32(x)) {
- movq(kScratchRegister, Immediate(x));
+ if (x == 0) {
+ xor_(kScratchRegister, kScratchRegister);
+ movq(dst, kScratchRegister);
+ } else if (is_int32(x)) {
+ movq(dst, Immediate(x));
} else if (is_uint32(x)) {
- movl(kScratchRegister, Immediate(x));
+ movl(dst, Immediate(x));
} else {
movq(kScratchRegister, x, RelocInfo::NONE);
+ movq(dst, kScratchRegister);
}
- movq(dst, kScratchRegister);
}
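
Both Set() overloads pick the cheapest encoding for the constant: xor for zero, a 32-bit immediate when the value fits as a signed or unsigned 32-bit quantity, and a full 64-bit load otherwise. A standalone sketch of that classification; EncodingFor is an illustrative name, and is_int32/is_uint32 mirror the V8 helpers of the same names:

#include <cassert>
#include <cstdint>

bool is_int32(int64_t x) { return x == static_cast<int32_t>(x); }
bool is_uint32(int64_t x) { return x == static_cast<uint32_t>(x); }

// 0 = xor, 1 = sign-extended 32-bit immediate, 2 = zero-extending movl,
// 3 = full 64-bit immediate load.
int EncodingFor(int64_t x) {
  if (x == 0) return 0;
  if (is_int32(x)) return 1;
  if (is_uint32(x)) return 2;
  return 3;
}

int main() {
  assert(EncodingFor(0) == 0);
  assert(EncodingFor(-1) == 1);
  assert(EncodingFor(0x80000000LL) == 2);   // Needs zero extension.
  assert(EncodingFor(0x123456789LL) == 3);  // Needs all 64 bits.
  return 0;
}
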
@@ -285,11 +245,13 @@ void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
void MacroAssembler::Move(Register dst, Handle<Object> source) {
+ ASSERT(!source->IsFailure());
if (source->IsSmi()) {
if (IsUnsafeSmi(source)) {
LoadUnsafeSmi(dst, source);
} else {
- movq(dst, source, RelocInfo::NONE);
+ int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+ movq(dst, Immediate(smi));
}
} else {
movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
@@ -298,8 +260,13 @@ void MacroAssembler::Move(Register dst, Handle<Object> source) {
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
- Move(kScratchRegister, source);
- movq(dst, kScratchRegister);
+ if (source->IsSmi()) {
+ int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+ movq(dst, Immediate(smi));
+ } else {
+ movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ movq(dst, kScratchRegister);
+ }
}
@@ -310,14 +277,37 @@ void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
- Move(kScratchRegister, source);
- cmpq(dst, kScratchRegister);
+ if (source->IsSmi()) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(kScratchRegister, source);
+ cmpl(dst, kScratchRegister);
+ } else {
+ // For smi-comparison, it suffices to compare the low 32 bits.
+ int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+ cmpl(dst, Immediate(smi));
+ }
+ } else {
+ ASSERT(source->IsHeapObject());
+ movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ cmpq(dst, kScratchRegister);
+ }
}
void MacroAssembler::Push(Handle<Object> source) {
- Move(kScratchRegister, source);
- push(kScratchRegister);
+ if (source->IsSmi()) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(kScratchRegister, source);
+ push(kScratchRegister);
+ } else {
+ int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
+ push(Immediate(smi));
+ }
+ } else {
+ ASSERT(source->IsHeapObject());
+ movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
+ push(kScratchRegister);
+ }
}
@@ -444,6 +434,51 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ testl(function, Immediate(kSmiTagMask));
+ j(zero, miss);
+
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance);
+
+ // Get the prototype or initial map from the function.
+ movq(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ Cmp(result, Factory::the_hole_value());
+ j(equal, miss);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ CmpObjectType(result, MAP_TYPE, kScratchRegister);
+ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ movq(result, FieldOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ movq(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
@@ -589,7 +624,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
if (!resolved) {
uint32_t flags =
Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
- Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
Bootstrapper::FixupFlagsUseCodeObject::encode(false);
Unresolved entry =
{ pc_offset() - kTargetAddrToReturnAddrDist, flags, name };
@@ -749,6 +784,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
// Setup the frame structure on the stack.
+ // All constants are relative to the frame pointer of the exit frame.
ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
@@ -763,7 +799,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Save the frame pointer and the context in top.
ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
ExternalReference context_address(Top::k_context_address);
- movq(rdi, rax); // Backup rax before we use it.
+ movq(r14, rax); // Backup rax before we use it.
movq(rax, rbp);
store_rax(c_entry_fp_address);
@@ -773,7 +809,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// Setup argv in callee-saved register r15. It is reused in LeaveExitFrame,
// so it must be retained across the C-call.
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
- lea(r15, Operand(rbp, rdi, times_pointer_size, offset));
+ lea(r15, Operand(rbp, r14, times_pointer_size, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 3ae78ba63..f13a7adae 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -67,16 +67,6 @@ class MacroAssembler: public Assembler {
MacroAssembler(void* buffer, int size);
// ---------------------------------------------------------------------------
- // x64 Implementation Support
-
- // Test the MacroAssembler by constructing and calling a simple JSFunction.
- // Cannot be done using API because this must be done in the middle of the
- // bootstrapping process.
- // TODO(X64): Remove once we can get through the bootstrapping process.
-
- static void ConstructAndTestJSFunction();
-
- // ---------------------------------------------------------------------------
// GC Support
// Set the remembered set bit for [object+offset].
@@ -186,6 +176,7 @@ class MacroAssembler: public Assembler {
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
+ // They may be the same register, and may be kScratchRegister.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
// Compare instance type for map.
@@ -247,11 +238,10 @@ class MacroAssembler: public Assembler {
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
- // function register will be untouched; the other registers may be
+ // function register will be untouched; the other register may be
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
- Register scratch,
Label* miss);
// Generates code for reporting that an illegal operation has
@@ -394,12 +384,12 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) { \
- byte* x64_coverage_function = \
+ byte* x64_coverage_function = \
reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
masm->pushfd(); \
masm->pushad(); \
masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
+ masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
masm->pop(rax); \
masm->popad(); \
masm->popfd(); \
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index fdfa67fcc..c5776159d 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -42,8 +42,8 @@ namespace internal {
Object* CallStubCompiler::CompileCallConstant(Object* a,
JSObject* b,
JSFunction* c,
- StubCompiler::CheckType d,
- Code::Flags flags) {
+ String* d,
+ StubCompiler::CheckType e) {
UNIMPLEMENTED();
return NULL;
}
@@ -51,8 +51,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* a,
Object* CallStubCompiler::CompileCallField(Object* a,
JSObject* b,
int c,
- String* d,
- Code::Flags flags) {
+ String* d) {
UNIMPLEMENTED();
return NULL;
}
@@ -67,6 +66,16 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* a,
+Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
JSObject* b,
AccessorInfo* c,
@@ -102,6 +111,16 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
}
+Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
AccessorInfo* b,
String* c) {
@@ -125,6 +144,14 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
}
+Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
// TODO(1241006): Avoid having lazy compile stubs specialized by the
// number of arguments. It is not needed anymore.
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {