Diffstat (limited to 'deps/v8/src/ia32/assembler-ia32.cc')
-rw-r--r--   deps/v8/src/ia32/assembler-ia32.cc   80
1 file changed, 59 insertions, 21 deletions
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 99f52031ed..38508c7632 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -88,7 +88,7 @@ V8_INLINE uint64_t _xgetbv(unsigned int xcr) {
   // directly because older assemblers do not include support for xgetbv and
   // there is no easy way to conditionally compile based on the assembler
   // used.
-  __asm__ volatile(".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(xcr));
+  __asm__ volatile(".byte 0x0F, 0x01, 0xD0" : "=a"(eax), "=d"(edx) : "c"(xcr));
   return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32);
 }
 
@@ -398,7 +398,7 @@ bool Assembler::IsNop(Address addr) {
   Address a = addr;
   while (*a == 0x66) a++;
   if (*a == 0x90) return true;
-  if (a[0] == 0xf && a[1] == 0x1f) return true;
+  if (a[0] == 0xF && a[1] == 0x1F) return true;
   return false;
 }
 
@@ -415,28 +415,28 @@ void Assembler::Nop(int bytes) {
         EMIT(0x90);
         return;
       case 3:
-        EMIT(0xf);
-        EMIT(0x1f);
+        EMIT(0xF);
+        EMIT(0x1F);
         EMIT(0);
         return;
       case 4:
-        EMIT(0xf);
-        EMIT(0x1f);
+        EMIT(0xF);
+        EMIT(0x1F);
         EMIT(0x40);
         EMIT(0);
         return;
       case 6:
         EMIT(0x66);
       case 5:
-        EMIT(0xf);
-        EMIT(0x1f);
+        EMIT(0xF);
+        EMIT(0x1F);
         EMIT(0x44);
         EMIT(0);
         EMIT(0);
         return;
       case 7:
-        EMIT(0xf);
-        EMIT(0x1f);
+        EMIT(0xF);
+        EMIT(0x1F);
         EMIT(0x80);
         EMIT(0);
         EMIT(0);
@@ -454,8 +454,8 @@ void Assembler::Nop(int bytes) {
         EMIT(0x66);
         bytes--;
       case 8:
-        EMIT(0xf);
-        EMIT(0x1f);
+        EMIT(0xF);
+        EMIT(0x1F);
         EMIT(0x84);
         EMIT(0);
         EMIT(0);
@@ -507,7 +507,7 @@ void Assembler::popfd() {
 void Assembler::push(const Immediate& x) {
   EnsureSpace ensure_space(this);
   if (x.is_int8()) {
-    EMIT(0x6a);
+    EMIT(0x6A);
     EMIT(x.immediate());
   } else {
     EMIT(0x68);
@@ -609,7 +609,7 @@ void Assembler::mov_w(const Operand& dst, const Immediate& src) {
   EMIT(0x66);
   EMIT(0xC7);
   emit_operand(eax, dst);
-  EMIT(static_cast<int8_t>(src.immediate() & 0xff));
+  EMIT(static_cast<int8_t>(src.immediate() & 0xFF));
   EMIT(static_cast<int8_t>(src.immediate() >> 8));
 }
 
@@ -796,6 +796,13 @@ void Assembler::cmpxchg_w(const Operand& dst, Register src) {
   emit_operand(src, dst);
 }
 
+void Assembler::lfence() {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0F);
+  EMIT(0xAE);
+  EMIT(0xE8);
+}
+
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(2, Operand(dst), Immediate(imm32));
 }
@@ -1378,7 +1385,7 @@ void Assembler::test_w(Register reg, Immediate imm16) {
   } else {
     EMIT(0x66);
     EMIT(0xF7);
-    EMIT(0xc0 | reg.code());
+    EMIT(0xC0 | reg.code());
     emit_w(imm16);
   }
 }
@@ -2426,6 +2433,13 @@ void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
   emit_sse_operand(dst, src);
 }
 
+void Assembler::haddps(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x7C);
+  emit_sse_operand(dst, src);
+}
 
 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
@@ -2828,6 +2842,17 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
   EMIT(offset);
 }
 
+void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) {
+  DCHECK(IsEnabled(SSE4_1));
+  EnsureSpace ensure_space(this);
+  EMIT(0x66);
+  EMIT(0x0F);
+  EMIT(0x3A);
+  EMIT(0x21);
+  emit_sse_operand(dst, src);
+  EMIT(offset);
+}
+
 void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) {
   DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
@@ -2908,8 +2933,8 @@ void Assembler::sqrtss(XMMRegister dst, const Operand& src) {
 
 void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
-  EMIT(0x0f);
-  EMIT(0x2e);
+  EMIT(0x0F);
+  EMIT(0x2E);
   emit_sse_operand(dst, src);
 }
 
@@ -2982,6 +3007,13 @@ void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
   EMIT(cmp);
 }
 
+void Assembler::vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+                        byte imm8) {
+  DCHECK(is_uint8(imm8));
+  vps(0xC6, dst, src1, src2);
+  EMIT(imm8);
+}
+
 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
   XMMRegister iop = XMMRegister::from_code(6);
   vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
@@ -3043,6 +3075,12 @@ void Assembler::vpextrd(const Operand& dst, XMMRegister src, int8_t offset) {
   EMIT(offset);
 }
 
+void Assembler::vinsertps(XMMRegister dst, XMMRegister src1,
+                          const Operand& src2, int8_t offset) {
+  vinstr(0x21, dst, src1, src2, k66, k0F3A, kWIG);
+  EMIT(offset);
+}
+
 void Assembler::vpinsrb(XMMRegister dst, XMMRegister src1, const Operand& src2,
                         int8_t offset) {
   vinstr(0x20, dst, src1, src2, k66, k0F3A, kWIG);
   EMIT(offset);
 }
@@ -3186,12 +3224,12 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
 void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
                                 LeadingOpcode mm, VexW w) {
   if (mm != k0F || w != kW0) {
-    EMIT(0xc4);
+    EMIT(0xC4);
     // Change RXB from "110" to "111" to align with gdb disassembler.
-    EMIT(0xe0 | mm);
-    EMIT(w | ((~vreg.code() & 0xf) << 3) | l | pp);
+    EMIT(0xE0 | mm);
+    EMIT(w | ((~vreg.code() & 0xF) << 3) | l | pp);
   } else {
-    EMIT(0xc5);
+    EMIT(0xC5);
     EMIT(((~vreg.code()) << 3) | l | pp);
   }
 }
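For reference, the new Assembler::lfence() helper in the hunk above emits the byte sequence 0F AE E8, which is Intel's LFENCE encoding 0F AE /5 with a register-form ModRM byte. The standalone sketch below is not part of the patch and assumes nothing beyond the C++ standard library; it simply decodes that ModRM byte to show where the /5 opcode extension sits.

#include <cstdint>
#include <cstdio>

int main() {
  // ModRM byte emitted by Assembler::lfence() above: 0xE8 = 0b11'101'000.
  const std::uint8_t modrm = 0xE8;
  const unsigned mod = (modrm >> 6) & 0x3;  // 3 -> register-direct form
  const unsigned reg = (modrm >> 3) & 0x7;  // 5 -> opcode extension /5 (LFENCE)
  const unsigned rm = modrm & 0x7;          // 0
  std::printf("mod=%u reg=/%u rm=%u\n", mod, reg, rm);
  return 0;
}

The other new emitters follow the same EMIT() pattern visible in their hunks: F2 0F 7C for haddps, 66 0F 3A 21 /r ib for the SSE4.1 insertps, while the VEX-encoded vshufps and vinsertps route through the existing vps()/vinstr() helpers.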