author | Lorry Tar Creator <lorry-tar-importer@lorry> | 2017-06-27 06:07:23 +0000
committer | Lorry Tar Creator <lorry-tar-importer@lorry> | 2017-06-27 06:07:23 +0000
commit | 1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree | 46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/assembler
parent | 32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/assembler')
40 files changed, 11481 insertions, 7635 deletions
diff --git a/Source/JavaScriptCore/assembler/ARM64Assembler.h b/Source/JavaScriptCore/assembler/ARM64Assembler.h index cfbd8cec5..7421460e9 100644 --- a/Source/JavaScriptCore/assembler/ARM64Assembler.h +++ b/Source/JavaScriptCore/assembler/ARM64Assembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014, 2017 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,12 +23,13 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef ARM64Assembler_h -#define ARM64Assembler_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(ARM64) #include "AssemblerBuffer.h" +#include "AssemblerCommon.h" +#include <limits.h> #include <wtf/Assertions.h> #include <wtf/Vector.h> #include <stdint.h> @@ -37,30 +38,28 @@ #define DATASIZE_OF(datasize) ((datasize == 64) ? Datasize_64 : Datasize_32) #define MEMOPSIZE_OF(datasize) ((datasize == 8 || datasize == 128) ? MemOpSize_8_or_128 : (datasize == 16) ? MemOpSize_16 : (datasize == 32) ? MemOpSize_32 : MemOpSize_64) #define CHECK_DATASIZE() CHECK_DATASIZE_OF(datasize) +#define CHECK_VECTOR_DATASIZE() ASSERT(datasize == 64 || datasize == 128) #define DATASIZE DATASIZE_OF(datasize) #define MEMOPSIZE MEMOPSIZE_OF(datasize) #define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128) +#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32) +#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32) namespace JSC { -ALWAYS_INLINE bool isInt9(int32_t value) +ALWAYS_INLINE bool isInt7(int32_t value) { - return value == ((value << 23) >> 23); + return value == ((value << 25) >> 25); } -ALWAYS_INLINE bool isUInt5(int32_t value) +ALWAYS_INLINE bool isInt11(int32_t value) { - return !(value & ~0x1f); + return value == ((value << 21) >> 21); } -ALWAYS_INLINE bool isUInt12(int32_t value) -{ - return !(value & ~0xfff); -} - -ALWAYS_INLINE bool isUInt12(intptr_t value) +ALWAYS_INLINE bool isUInt5(int32_t value) { - return !(value & ~0xfffL); + return !(value & ~0x1f); } class UInt5 { @@ -119,333 +118,174 @@ private: int m_value; }; -class LogicalImmediate { +class PairPostIndex { public: - static LogicalImmediate create32(uint32_t value) - { - // Check for 0, -1 - these cannot be encoded. - if (!value || !~value) - return InvalidLogicalImmediate; - - // First look for a 32-bit pattern, then for repeating 16-bit - // patterns, 8-bit, 4-bit, and finally 2-bit. 
- - unsigned hsb, lsb; - bool inverted; - if (findBitRange<32>(value, hsb, lsb, inverted)) - return encodeLogicalImmediate<32>(hsb, lsb, inverted); - - if ((value & 0xffff) != (value >> 16)) - return InvalidLogicalImmediate; - value &= 0xffff; - - if (findBitRange<16>(value, hsb, lsb, inverted)) - return encodeLogicalImmediate<16>(hsb, lsb, inverted); - - if ((value & 0xff) != (value >> 8)) - return InvalidLogicalImmediate; - value &= 0xff; - - if (findBitRange<8>(value, hsb, lsb, inverted)) - return encodeLogicalImmediate<8>(hsb, lsb, inverted); - - if ((value & 0xf) != (value >> 4)) - return InvalidLogicalImmediate; - value &= 0xf; - - if (findBitRange<4>(value, hsb, lsb, inverted)) - return encodeLogicalImmediate<4>(hsb, lsb, inverted); - - if ((value & 0x3) != (value >> 2)) - return InvalidLogicalImmediate; - value &= 0x3; - - if (findBitRange<2>(value, hsb, lsb, inverted)) - return encodeLogicalImmediate<2>(hsb, lsb, inverted); - - return InvalidLogicalImmediate; - } - - static LogicalImmediate create64(uint64_t value) - { - // Check for 0, -1 - these cannot be encoded. - if (!value || !~value) - return InvalidLogicalImmediate; - - // Look for a contiguous bit range. - unsigned hsb, lsb; - bool inverted; - if (findBitRange<64>(value, hsb, lsb, inverted)) - return encodeLogicalImmediate<64>(hsb, lsb, inverted); - - // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern. - if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32)) - return create32(static_cast<uint32_t>(value)); - return InvalidLogicalImmediate; - } - - int value() const - { - ASSERT(isValid()); - return m_value; - } - - bool isValid() const + explicit PairPostIndex(int value) + : m_value(value) { - return m_value != InvalidLogicalImmediate; + ASSERT(isInt11(value)); } - bool is64bit() const - { - return m_value & (1 << 12); - } + operator int() { return m_value; } private: - LogicalImmediate(int value) - : m_value(value) - { - } - - // Generate a mask with bits in the range hsb..0 set, for example: - // hsb:63 = 0xffffffffffffffff - // hsb:42 = 0x000007ffffffffff - // hsb: 0 = 0x0000000000000001 - static uint64_t mask(unsigned hsb) - { - ASSERT(hsb < 64); - return 0xffffffffffffffffull >> (63 - hsb); - } + int m_value; +}; - template<unsigned N> - static void partialHSB(uint64_t& value, unsigned&result) +class PairPreIndex { +public: + explicit PairPreIndex(int value) + : m_value(value) { - if (value & (0xffffffffffffffffull << N)) { - result += N; - value >>= N; - } + ASSERT(isInt11(value)); } - // Find the bit number of the highest bit set in a non-zero value, for example: - // 0x8080808080808080 = hsb:63 - // 0x0000000000000001 = hsb: 0 - // 0x000007ffffe00000 = hsb:42 - static unsigned highestSetBit(uint64_t value) - { - ASSERT(value); - unsigned hsb = 0; - partialHSB<32>(value, hsb); - partialHSB<16>(value, hsb); - partialHSB<8>(value, hsb); - partialHSB<4>(value, hsb); - partialHSB<2>(value, hsb); - partialHSB<1>(value, hsb); - return hsb; - } - - // This function takes a value and a bit width, where value obeys the following constraints: - // * bits outside of the width of the value must be zero. - // * bits within the width of value must neither be all clear or all set. - // The input is inspected to detect values that consist of either two or three contiguous - // ranges of bits. The output range hsb..lsb will describe the second range of the value. - // if the range is set, inverted will be false, and if the range is clear, inverted will - // be true. 
For example (with width 8): - // 00001111 = hsb:3, lsb:0, inverted:false - // 11110000 = hsb:3, lsb:0, inverted:true - // 00111100 = hsb:5, lsb:2, inverted:false - // 11000011 = hsb:5, lsb:2, inverted:true - template<unsigned width> - static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted) - { - ASSERT(value & mask(width - 1)); - ASSERT(value != mask(width - 1)); - ASSERT(!(value & ~mask(width - 1))); - - // Detect cases where the top bit is set; if so, flip all the bits & set invert. - // This halves the number of patterns we need to look for. - const uint64_t msb = 1ull << (width - 1); - if ((inverted = (value & msb))) - value ^= mask(width - 1); - - // Find the highest set bit in value, generate a corresponding mask & flip all - // bits under it. - hsb = highestSetBit(value); - value ^= mask(hsb); - if (!value) { - // If this cleared the value, then the range hsb..0 was all set. - lsb = 0; - return true; - } - - // Try making one more mask, and flipping the bits! - lsb = highestSetBit(value); - value ^= mask(lsb); - if (!value) { - // Success - but lsb actually points to the hsb of a third range - add one - // to get to the lsb of the mid range. - ++lsb; - return true; - } - - return false; - } - - // Encodes the set of immN:immr:imms fields found in a logical immediate. - template<unsigned width> - static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted) - { - // Check width is a power of 2! - ASSERT(!(width & (width -1))); - ASSERT(width <= 64 && width >= 2); - ASSERT(hsb >= lsb); - ASSERT(hsb < width); - - int immN = 0; - int imms = 0; - int immr = 0; - - // For 64-bit values this is easy - just set immN to true, and imms just - // contains the bit number of the highest set bit of the set range. For - // values with narrower widths, these are encoded by a leading set of - // one bits, followed by a zero bit, followed by the remaining set of bits - // being the high bit of the range. For a 32-bit immediate there are no - // leading one bits, just a zero followed by a five bit number. For a - // 16-bit immediate there is one one bit, a zero bit, and then a four bit - // bit-position, etc. - if (width == 64) - immN = 1; - else - imms = 63 & ~(width + width - 1); - - if (inverted) { - // if width is 64 & hsb is 62, then we have a value something like: - // 0x80000000ffffffff (in this case with lsb 32). - // The ror should be by 1, imms (effectively set width minus 1) is - // 32. Set width is full width minus cleared width. - immr = (width - 1) - hsb; - imms |= (width - ((hsb - lsb) + 1)) - 1; - } else { - // if width is 64 & hsb is 62, then we have a value something like: - // 0x7fffffff00000000 (in this case with lsb 32). - // The value is effectively rol'ed by lsb, which is equivalent to - // a ror by width - lsb (or 0, in the case where lsb is 0). imms - // is hsb - lsb. 
- immr = (width - lsb) & (width - 1); - imms |= hsb - lsb; - } - - return immN << 12 | immr << 6 | imms; - } - - static const int InvalidLogicalImmediate = -1; + operator int() { return m_value; } +private: int m_value; }; +typedef ARM64LogicalImmediate LogicalImmediate; + inline uint16_t getHalfword(uint64_t value, int which) { return value >> (which << 4); } namespace ARM64Registers { - typedef enum { - // Parameter/result registers - x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - // Indirect result location register - x8, - // Temporary registers - x9, - x10, - x11, - x12, - x13, - x14, - x15, - // Intra-procedure-call scratch registers (temporary) - x16, ip0 = x16, - x17, ip1 = x17, - // Platform Register (temporary) - x18, - // Callee-saved - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - // Special - x29, fp = x29, - x30, lr = x30, - sp, - zr = 0x3f, - } RegisterID; - typedef enum { - // Parameter/result registers - q0, - q1, - q2, - q3, - q4, - q5, - q6, - q7, - // Callee-saved (up to 64-bits only!) - q8, - q9, - q10, - q11, - q12, - q13, - q14, - q15, - // Temporary registers - q16, - q17, - q18, - q19, - q20, - q21, - q22, - q23, - q24, - q25, - q26, - q27, - q28, - q29, - q30, - q31, - } FPRegisterID; - - static bool isSp(RegisterID reg) { return reg == sp; } - static bool isZr(RegisterID reg) { return reg == zr; } -} +#define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + +// The following are defined as pairs of the following value: +// 1. type of the storage needed to save the register value by the JIT probe. +// 2. name of the register. +#define FOR_EACH_CPU_GPREGISTER(V) \ + /* Parameter/result registers */ \ + V(void*, x0) \ + V(void*, x1) \ + V(void*, x2) \ + V(void*, x3) \ + V(void*, x4) \ + V(void*, x5) \ + V(void*, x6) \ + V(void*, x7) \ + /* Indirect result location register */ \ + V(void*, x8) \ + /* Temporary registers */ \ + V(void*, x9) \ + V(void*, x10) \ + V(void*, x11) \ + V(void*, x12) \ + V(void*, x13) \ + V(void*, x14) \ + V(void*, x15) \ + /* Intra-procedure-call scratch registers (temporary) */ \ + V(void*, x16) \ + V(void*, x17) \ + /* Platform Register (temporary) */ \ + V(void*, x18) \ + /* Callee-saved */ \ + V(void*, x19) \ + V(void*, x20) \ + V(void*, x21) \ + V(void*, x22) \ + V(void*, x23) \ + V(void*, x24) \ + V(void*, x25) \ + V(void*, x26) \ + V(void*, x27) \ + V(void*, x28) \ + /* Special */ \ + V(void*, fp) \ + V(void*, lr) \ + V(void*, sp) + +#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, pc) \ + V(void*, nzcv) \ + V(void*, fpsr) \ + +// ARM64 always has 32 FPU registers 128-bits each. See http://llvm.org/devmtg/2012-11/Northover-AArch64.pdf +// and Section 5.1.2 in http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf. +// However, we only use them for 64-bit doubles. +#define FOR_EACH_CPU_FPREGISTER(V) \ + /* Parameter/result registers */ \ + V(double, q0) \ + V(double, q1) \ + V(double, q2) \ + V(double, q3) \ + V(double, q4) \ + V(double, q5) \ + V(double, q6) \ + V(double, q7) \ + /* Callee-saved (up to 64-bits only!) 
*/ \ + V(double, q8) \ + V(double, q9) \ + V(double, q10) \ + V(double, q11) \ + V(double, q12) \ + V(double, q13) \ + V(double, q14) \ + V(double, q15) \ + /* Temporary registers */ \ + V(double, q16) \ + V(double, q17) \ + V(double, q18) \ + V(double, q19) \ + V(double, q20) \ + V(double, q21) \ + V(double, q22) \ + V(double, q23) \ + V(double, q24) \ + V(double, q25) \ + V(double, q26) \ + V(double, q27) \ + V(double, q28) \ + V(double, q29) \ + V(double, q30) \ + V(double, q31) + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + ip0 = x16, + ip1 = x17, + x29 = fp, + x30 = lr, + zr = 0x3f, +} RegisterID; + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER +} FPRegisterID; + +static constexpr bool isSp(RegisterID reg) { return reg == sp; } +static constexpr bool isZr(RegisterID reg) { return reg == zr; } + +} // namespace ARM64Registers class ARM64Assembler { public: typedef ARM64Registers::RegisterID RegisterID; typedef ARM64Registers::FPRegisterID FPRegisterID; - static RegisterID firstRegister() { return ARM64Registers::x0; } - static RegisterID lastRegister() { return ARM64Registers::x28; } + static constexpr RegisterID firstRegister() { return ARM64Registers::x0; } + static constexpr RegisterID lastRegister() { return ARM64Registers::sp; } - static FPRegisterID firstFPRegister() { return ARM64Registers::q0; } - static FPRegisterID lastFPRegister() { return ARM64Registers::q31; } + static constexpr FPRegisterID firstFPRegister() { return ARM64Registers::q0; } + static constexpr FPRegisterID lastFPRegister() { return ARM64Registers::q31; } private: - static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); } - static bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); } + static constexpr bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); } + static constexpr bool isZr(RegisterID reg) { return ARM64Registers::isZr(reg); } public: ARM64Assembler() @@ -583,9 +423,9 @@ public: JumpType m_type : 8; JumpLinkType m_linkType : 8; Condition m_condition : 4; - bool m_is64Bit : 1; unsigned m_bitNumber : 6; - RegisterID m_compareRegister : 5; + RegisterID m_compareRegister : 6; + bool m_is64Bit : 1; } realTypes; struct CopyTypes { uint64_t content[3]; @@ -642,19 +482,12 @@ public: template<int datasize> static bool canEncodePImmOffset(int32_t offset) { - int32_t maxPImm = 4095 * (datasize / 8); - if (offset < 0) - return false; - if (offset > maxPImm) - return false; - if (offset & ((datasize / 8 ) - 1)) - return false; - return true; + return isValidScaledUImm12<datasize>(offset); } static bool canEncodeSImmOffset(int32_t offset) { - return isInt9(offset); + return isValidSignedImm9(offset); } private: @@ -787,6 +620,22 @@ private: FPDataOp_FNMUL }; + enum SIMD3Same { + SIMD_LogicalOp = 0x03 + }; + + enum SIMD3SameLogical { + // This includes both the U bit and the "size" / opc for convience. 
+ SIMD_LogicalOp_AND = 0x00, + SIMD_LogicalOp_BIC = 0x01, + SIMD_LogicalOp_ORR = 0x02, + SIMD_LogicalOp_ORN = 0x03, + SIMD_LogacalOp_EOR = 0x80, + SIMD_LogicalOp_BSL = 0x81, + SIMD_LogicalOp_BIT = 0x82, + SIMD_LogicalOp_BIF = 0x83, + }; + enum FPIntConvOp { FPIntConvOp_FCVTNS = 0x00, FPIntConvOp_FCVTNU = 0x01, @@ -823,6 +672,16 @@ private: MemOp_LOAD_signed32 = 3 // size may be 0 or 1 }; + enum MemPairOpSize { + MemPairOp_32 = 0, + MemPairOp_LoadSigned_32 = 1, + MemPairOp_64 = 2, + + MemPairOp_V32 = MemPairOp_32, + MemPairOp_V64 = 1, + MemPairOp_V128 = 2 + }; + enum MoveWideOp { MoveWideOp_N = 0, MoveWideOp_Z = 2, @@ -836,6 +695,14 @@ private: LdrLiteralOp_128BIT = 2 }; + static unsigned memPairOffsetShift(bool V, MemPairOpSize size) + { + // return the log2 of the size in bytes, e.g. 64 bit size returns 3 + if (V) + return size + 2; + return (size >> 1) + 2; + } + public: // Integer Instructions: @@ -871,8 +738,9 @@ public: ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) { CHECK_DATASIZE(); - if (isSp(rn)) { + if (isSp(rd) || isSp(rn)) { ASSERT(shift == LSL); + ASSERT(!isSp(rm)); add<datasize, setFlags>(rd, rn, rm, UXTX, amount); } else insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd)); @@ -887,6 +755,7 @@ public: { ASSERT(!(offset & 0xfff)); insn(pcRelative(true, offset >> 12, rd)); + nopCortexA53Fix843419(); } template<int datasize, SetFlags setFlags = DontSetFlags> @@ -1215,6 +1084,40 @@ public: insn(excepnGeneration(ExcepnOp_HALT, imm, 0)); } + // Only used for testing purposes. + void illegalInstruction() + { + insn(0x0); + } + + template<int datasize> + ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void ldnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, pimm, rn, rt, rt2)); + } + template<int datasize> ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm) { @@ -1494,6 +1397,7 @@ public: ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) { CHECK_DATASIZE(); + nopCortexA53Fix835769<datasize>(); insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd)); } @@ -1546,6 +1450,7 @@ public: ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) { CHECK_DATASIZE(); + nopCortexA53Fix835769<datasize>(); insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd)); } @@ -1596,9 +1501,27 @@ public: insn(nopPseudo()); } - ALWAYS_INLINE void dmbSY() + static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory) + { + RELEASE_ASSERT(!(size % sizeof(int32_t))); + size_t n = size / sizeof(int32_t); + for (int32_t* ptr = static_cast<int32_t*>(base); n--;) { + int insn = 
nopPseudo(); + if (isCopyingToExecutableMemory) + performJITMemcpy(ptr++, &insn, sizeof(int)); + else + memcpy(ptr++, &insn, sizeof(int)); + } + } + + ALWAYS_INLINE void dmbISH() + { + insn(0xd5033bbf); + } + + ALWAYS_INLINE void dmbISHST() { - insn(0xd5033fbf); + insn(0xd5033abf); } template<int datasize> @@ -1724,6 +1647,7 @@ public: ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) { + nopCortexA53Fix835769<64>(); insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd)); } @@ -1734,6 +1658,7 @@ public: ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) { + nopCortexA53Fix835769<64>(); insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd)); } @@ -1748,6 +1673,34 @@ public: } template<int datasize> + ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairOffset(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2)); + } + + template<int datasize> + ALWAYS_INLINE void stnp(RegisterID rt, RegisterID rt2, RegisterID rn, unsigned pimm = 0) + { + CHECK_DATASIZE(); + insn(loadStoreRegisterPairNonTemporal(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, pimm, rn, rt, rt2)); + } + + template<int datasize> ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm) { str<datasize>(rt, rn, rm, UXTX, 0); @@ -1862,7 +1815,13 @@ public: template<int datasize, SetFlags setFlags = DontSetFlags> ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm) { - sub<datasize, setFlags>(rd, rn, rm, LSL, 0); + ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd, it uses XZR for the register 31. 
SUBS with extended register support SP for Xd, but only if SetFlag is not used, otherwise register 31 is Xd."); + ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand."); + + if (isSp(rd) || isSp(rn)) + sub<datasize, setFlags>(rd, rn, rm, UXTX, 0); + else + sub<datasize, setFlags>(rd, rn, rm, LSL, 0); } template<int datasize, SetFlags setFlags = DontSetFlags> @@ -1876,11 +1835,8 @@ public: ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount) { CHECK_DATASIZE(); - if (isSp(rn)) { - ASSERT(shift == LSL); - sub<datasize, setFlags>(rd, rn, rm, UXTX, amount); - } else - insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd)); + ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm)); + insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd)); } template<int datasize> @@ -1960,6 +1916,7 @@ public: ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) { + nopCortexA53Fix835769<64>(); insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd)); } @@ -1970,6 +1927,7 @@ public: ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra) { + nopCortexA53Fix835769<64>(); insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd)); } @@ -2279,6 +2237,20 @@ public: } template<int datasize> + ALWAYS_INLINE void vand(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_VECTOR_DATASIZE(); + insn(vectorDataProcessingLogical(SIMD_LogicalOp_AND, vm, vn, vd)); + } + + template<int datasize> + ALWAYS_INLINE void vorr(FPRegisterID vd, FPRegisterID vn, FPRegisterID vm) + { + CHECK_VECTOR_DATASIZE(); + insn(vectorDataProcessingLogical(SIMD_LogicalOp_ORR, vm, vn, vd)); + } + + template<int datasize> ALWAYS_INLINE void frinta(FPRegisterID vd, FPRegisterID vn) { CHECK_DATASIZE(); @@ -2494,13 +2466,6 @@ public: return b.m_offset - a.m_offset; } - int executableOffsetFor(int location) - { - if (!location) - return 0; - return static_cast<int32_t*>(m_buffer.data())[location / sizeof(int32_t) - 1]; - } - void* unlinkedCode() { return m_buffer.data(); } size_t codeSize() const { return m_buffer.codeSize(); } @@ -2539,23 +2504,23 @@ public: m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition, bitNumber, compareRegister)); } - void linkJump(AssemblerLabel from, AssemblerLabel to) + void linkJump(AssemblerLabel from, void* executableCode, AssemblerLabel to) { ASSERT(from.isSet()); ASSERT(to.isSet()); - relinkJumpOrCall<false>(addressOf(from), addressOf(to)); + relinkJumpOrCall<false>(addressOf(from), addressOf(executableCode, from), addressOf(to)); } static void linkJump(void* code, AssemblerLabel from, void* to) { ASSERT(from.isSet()); - relinkJumpOrCall<false>(addressOf(code, from), to); + relinkJumpOrCall<false>(addressOf(code, from), addressOf(code, from), to); } static void linkCall(void* code, AssemblerLabel from, void* to) { ASSERT(from.isSet()); - linkJumpOrCall<true>(addressOf(code, from) - 1, to); + linkJumpOrCall<true>(addressOf(code, from) - 1, addressOf(code, from) - 1, to); } static void linkPointer(void* code, AssemblerLabel where, void* valuePtr) @@ -2567,7 +2532,8 @@ public: { intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(where)) >> 2; ASSERT(static_cast<int>(offset) == offset); - *static_cast<int*>(where) = unconditionalBranchImmediate(false, static_cast<int>(offset)); + int insn = unconditionalBranchImmediate(false, 
static_cast<int>(offset)); + performJITMemcpy(where, &insn, sizeof(int)); cacheFlush(where, sizeof(int)); } @@ -2575,6 +2541,11 @@ public: { return 4; } + + static constexpr ptrdiff_t patchableJumpSize() + { + return 4; + } static void replaceWithLoad(void* where) { @@ -2591,7 +2562,8 @@ public: ASSERT(!S); ASSERT(!shift); ASSERT(!(imm12 & ~0xff8)); - *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd); + int insn = loadStoreRegisterUnsignedImmediate(MemOpSize_64, false, MemOp_LOAD, encodePositiveImmediate<64>(imm12), rn, rd); + performJITMemcpy(where, &insn, sizeof(int)); cacheFlush(where, sizeof(int)); } #if !ASSERT_DISABLED @@ -2624,7 +2596,8 @@ public: ASSERT(!V); ASSERT(opc == MemOp_LOAD); ASSERT(!(imm12 & ~0x1ff)); - *static_cast<int*>(where) = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt); + int insn = addSubtractImmediate(Datasize_64, AddOp_ADD, DontSetFlags, 0, imm12 * sizeof(void*), rn, rt); + performJITMemcpy(where, &insn, sizeof(int)); cacheFlush(where, sizeof(int)); } #if !ASSERT_DISABLED @@ -2654,9 +2627,11 @@ public: static void setPointer(int* address, void* valuePtr, RegisterID rd, bool flush) { uintptr_t value = reinterpret_cast<uintptr_t>(valuePtr); - address[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd); - address[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd); - address[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd); + int buffer[3]; + buffer[0] = moveWideImediate(Datasize_64, MoveWideOp_Z, 0, getHalfword(value, 0), rd); + buffer[1] = moveWideImediate(Datasize_64, MoveWideOp_K, 1, getHalfword(value, 1), rd); + buffer[2] = moveWideImediate(Datasize_64, MoveWideOp_K, 2, getHalfword(value, 2), rd); + performJITMemcpy(address, buffer, sizeof(int) * 3); if (flush) cacheFlush(address, sizeof(int) * 3); @@ -2675,13 +2650,15 @@ public: ASSERT_UNUSED(expected, expected && !sf && (opc == MoveWideOp_Z || opc == MoveWideOp_N) && !hw); ASSERT(checkMovk<Datasize_32>(address[1], 1, rd)); + int buffer[2]; if (value >= 0) { - address[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd); - address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); + buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_Z, 0, getHalfword(value, 0), rd); + buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); } else { - address[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd); - address[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); + buffer[0] = moveWideImediate(Datasize_32, MoveWideOp_N, 0, ~getHalfword(value, 0), rd); + buffer[1] = moveWideImediate(Datasize_32, MoveWideOp_K, 1, getHalfword(value, 1), rd); } + performJITMemcpy(where, &buffer, sizeof(int) * 2); cacheFlush(where, sizeof(int) * 2); } @@ -2716,15 +2693,25 @@ public: return readPointer(reinterpret_cast<int*>(from) - 4); } + // The static relink, repatch, and replace methods can use can + // use |from| for both the write and executable address for call + // and jump patching as they're modifying existing (linked) code, + // so the address being provided is correct for relative address + // computation. 
static void relinkJump(void* from, void* to) { - relinkJumpOrCall<false>(reinterpret_cast<int*>(from), to); + relinkJumpOrCall<false>(reinterpret_cast<int*>(from), reinterpret_cast<const int*>(from), to); cacheFlush(from, sizeof(int)); } + static void relinkJumpToNop(void* from) + { + relinkJump(from, static_cast<char*>(from) + 4); + } + static void relinkCall(void* from, void* to) { - relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, to); + relinkJumpOrCall<true>(reinterpret_cast<int*>(from) - 1, reinterpret_cast<const int*>(from) - 1, to); cacheFlush(reinterpret_cast<int*>(from) - 1, sizeof(int)); } @@ -2745,17 +2732,42 @@ public: imm12 = encodePositiveImmediate<32>(value); else imm12 = encodePositiveImmediate<64>(value); - *static_cast<int*>(where) = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt); + int insn = loadStoreRegisterUnsignedImmediate(size, V, opc, imm12, rn, rt); + performJITMemcpy(where, &insn, sizeof(int)); cacheFlush(where, sizeof(int)); } unsigned debugOffset() { return m_buffer.debugOffset(); } +#if OS(LINUX) && COMPILER(GCC_OR_CLANG) + static inline void linuxPageFlush(uintptr_t begin, uintptr_t end) + { + __builtin___clear_cache(reinterpret_cast<char*>(begin), reinterpret_cast<char*>(end)); + } +#endif + static void cacheFlush(void* code, size_t size) { #if OS(IOS) sys_cache_control(kCacheFunctionPrepareForExecution, code, size); +#elif OS(LINUX) + size_t page = pageSize(); + uintptr_t current = reinterpret_cast<uintptr_t>(code); + uintptr_t end = current + size; + uintptr_t firstPageEnd = (current & ~(page - 1)) + page; + + if (end <= firstPageEnd) { + linuxPageFlush(current, end); + return; + } + + linuxPageFlush(current, firstPageEnd); + + for (current = firstPageEnd; current + page < end; current += page) + linuxPageFlush(current, current + page); + + linuxPageFlush(current, end); #else #error "The cacheFlush support is missing on this platform." 
#endif @@ -2763,20 +2775,20 @@ public: // Assembler admin methods: - int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) { return a.from() < b.from(); } - bool canCompact(JumpType jumpType) + static bool canCompact(JumpType jumpType) { // Fixed jumps cannot be compacted return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit); } - JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { switch (jumpType) { case JumpFixed: @@ -2828,51 +2840,43 @@ public: return LinkJumpNoCondition; } - JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { JumpLinkType linkType = computeJumpType(record.type(), from, to); record.setLinkType(linkType); return linkType; } - void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) - { - int32_t ptr = regionStart / sizeof(int32_t); - const int32_t end = regionEnd / sizeof(int32_t); - int32_t* offsets = static_cast<int32_t*>(m_buffer.data()); - while (ptr < end) - offsets[ptr++] = offset; - } - Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator); return m_jumpsToLink; } - void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to) + static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to) { + const int* fromInstruction = reinterpret_cast<const int*>(fromInstruction8); switch (record.linkType()) { case LinkJumpNoCondition: - linkJumpOrCall<false>(reinterpret_cast<int*>(from), to); + linkJumpOrCall<false>(reinterpret_cast<int*>(from), fromInstruction, to); break; case LinkJumpConditionDirect: - linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), to); + linkConditionalBranch<true>(record.condition(), reinterpret_cast<int*>(from), fromInstruction, to); break; case LinkJumpCondition: - linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, to); + linkConditionalBranch<false>(record.condition(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to); break; case LinkJumpCompareAndBranchDirect: - linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), to); + linkCompareAndBranch<true>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to); break; case LinkJumpCompareAndBranch: - linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to); + linkCompareAndBranch<false>(record.condition(), record.is64Bit(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to); break; case LinkJumpTestBitDirect: - linkTestAndBranch<true>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), to); + linkTestAndBranch<true>(record.condition(), 
record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from), fromInstruction, to); break; case LinkJumpTestBit: - linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, to); + linkTestAndBranch<false>(record.condition(), record.bitNumber(), record.compareRegister(), reinterpret_cast<int*>(from) - 1, fromInstruction - 1, to); break; default: ASSERT_NOT_REACHED(); @@ -2914,7 +2918,7 @@ private: } template<bool isCall> - static void linkJumpOrCall(int* from, void* to) + static void linkJumpOrCall(int* from, const int* fromInstruction, void* to) { bool link; int imm26; @@ -2924,60 +2928,69 @@ private: ASSERT_UNUSED(isCall, (link == isCall) || disassembleNop(from)); ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); - intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2; ASSERT(static_cast<int>(offset) == offset); - *from = unconditionalBranchImmediate(isCall, static_cast<int>(offset)); + int insn = unconditionalBranchImmediate(isCall, static_cast<int>(offset)); + performJITMemcpy(from, &insn, sizeof(int)); } template<bool isDirect> - static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, void* to) + static void linkCompareAndBranch(Condition condition, bool is64Bit, RegisterID rt, int* from, const int* fromInstruction, void* to) { ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); - intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2; ASSERT(((offset << 38) >> 38) == offset); bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits ASSERT(!isDirect || useDirect); if (useDirect || isDirect) { - *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt); - if (!isDirect) - *(from + 1) = nopPseudo(); + int insn = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, condition == ConditionNE, static_cast<int>(offset), rt); + performJITMemcpy(from, &insn, sizeof(int)); + if (!isDirect) { + insn = nopPseudo(); + performJITMemcpy(from + 1, &insn, sizeof(int)); + } } else { - *from = compareAndBranchImmediate(is64Bit ? Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt); - linkJumpOrCall<false>(from + 1, to); + int insn = compareAndBranchImmediate(is64Bit ? 
Datasize_64 : Datasize_32, invert(condition) == ConditionNE, 2, rt); + performJITMemcpy(from, &insn, sizeof(int)); + linkJumpOrCall<false>(from + 1, fromInstruction + 1, to); } } template<bool isDirect> - static void linkConditionalBranch(Condition condition, int* from, void* to) + static void linkConditionalBranch(Condition condition, int* from, const int* fromInstruction, void* to) { ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); - intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2; ASSERT(((offset << 38) >> 38) == offset); bool useDirect = ((offset << 45) >> 45) == offset; // Fits in 19 bits ASSERT(!isDirect || useDirect); if (useDirect || isDirect) { - *from = conditionalBranchImmediate(static_cast<int>(offset), condition); - if (!isDirect) - *(from + 1) = nopPseudo(); + int insn = conditionalBranchImmediate(static_cast<int>(offset), condition); + performJITMemcpy(from, &insn, sizeof(int)); + if (!isDirect) { + insn = nopPseudo(); + performJITMemcpy(from + 1, &insn, sizeof(int)); + } } else { - *from = conditionalBranchImmediate(2, invert(condition)); - linkJumpOrCall<false>(from + 1, to); + int insn = conditionalBranchImmediate(2, invert(condition)); + performJITMemcpy(from, &insn, sizeof(int)); + linkJumpOrCall<false>(from + 1, fromInstruction + 1, to); } } template<bool isDirect> - static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, void* to) + static void linkTestAndBranch(Condition condition, unsigned bitNumber, RegisterID rt, int* from, const int* fromInstruction, void* to) { ASSERT(!(reinterpret_cast<intptr_t>(from) & 3)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 3)); - intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from)) >> 2; + intptr_t offset = (reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(fromInstruction)) >> 2; ASSERT(static_cast<int>(offset) == offset); ASSERT(((offset << 38) >> 38) == offset); @@ -2985,17 +2998,21 @@ private: ASSERT(!isDirect || useDirect); if (useDirect || isDirect) { - *from = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt); - if (!isDirect) - *(from + 1) = nopPseudo(); + int insn = testAndBranchImmediate(condition == ConditionNE, static_cast<int>(bitNumber), static_cast<int>(offset), rt); + performJITMemcpy(from, &insn, sizeof(int)); + if (!isDirect) { + insn = nopPseudo(); + performJITMemcpy(from + 1, &insn, sizeof(int)); + } } else { - *from = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt); - linkJumpOrCall<false>(from + 1, to); + int insn = testAndBranchImmediate(invert(condition) == ConditionNE, static_cast<int>(bitNumber), 2, rt); + performJITMemcpy(from, &insn, sizeof(int)); + linkJumpOrCall<false>(from + 1, fromInstruction + 1, to); } } template<bool isCall> - static void relinkJumpOrCall(int* from, void* to) + static void relinkJumpOrCall(int* from, const int* fromInstruction, void* to) { if (!isCall && disassembleNop(from)) { unsigned op01; @@ -3010,7 +3027,7 @@ private: if (imm19 == 8) condition = invert(condition); - linkConditionalBranch<false>(condition, from - 1, to); + linkConditionalBranch<false>(condition, from - 1, fromInstruction - 1, to); return; } @@ -3023,7 +3040,7 @@ private: if (imm19 == 8) op = !op; - linkCompareAndBranch<false>(op ? 
ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, to); + linkCompareAndBranch<false>(op ? ConditionNE : ConditionEQ, opSize == Datasize_64, rt, from - 1, fromInstruction - 1, to); return; } @@ -3035,12 +3052,12 @@ private: if (imm14 == 8) op = !op; - linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, to); + linkTestAndBranch<false>(op ? ConditionNE : ConditionEQ, bitNumber, rt, from - 1, fromInstruction - 1, to); return; } } - linkJumpOrCall<isCall>(from, to); + linkJumpOrCall<isCall>(from, fromInstruction, to); } static int* addressOf(void* code, AssemblerLabel label) @@ -3124,7 +3141,7 @@ private: int insn = *static_cast<int*>(address); op = (insn >> 24) & 0x1; imm14 = (insn << 13) >> 18; - bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn > 19) & 0x1f)); + bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f)); rt = static_cast<RegisterID>(insn & 0x1f); return (insn & 0x7e000000) == 0x36000000; @@ -3138,8 +3155,18 @@ private: return (insn & 0x7c000000) == 0x14000000; } - static int xOrSp(RegisterID reg) { ASSERT(!isZr(reg)); return reg; } - static int xOrZr(RegisterID reg) { ASSERT(!isSp(reg)); return reg & 31; } + static int xOrSp(RegisterID reg) + { + ASSERT(!isZr(reg)); + ASSERT(!isIOS() || reg != ARM64Registers::x18); + return reg; + } + static int xOrZr(RegisterID reg) + { + ASSERT(!isSp(reg)); + ASSERT(!isIOS() || reg != ARM64Registers::x18); + return reg & 31; + } static FPRegisterID xOrZrAsFPR(RegisterID reg) { return static_cast<FPRegisterID>(xOrZr(reg)); } static int xOrZrOrSp(bool useZr, RegisterID reg) { return useZr ? xOrZr(reg) : xOrSp(reg); } @@ -3326,6 +3353,12 @@ private: return (0x1e200800 | M << 31 | S << 29 | type << 22 | rm << 16 | opcode << 12 | rn << 5 | rd); } + ALWAYS_INLINE static int vectorDataProcessingLogical(SIMD3SameLogical uAndSize, FPRegisterID vm, FPRegisterID vn, FPRegisterID vd) + { + const int Q = 0; + return (0xe200400 | Q << 30 | uAndSize << 22 | vm << 16 | SIMD_LogicalOp << 11 | vn << 5 | vd); + } + // 'o1' means negate ALWAYS_INLINE static int floatingPointDataProcessing3Source(Datasize type, bool o1, FPRegisterID rm, AddOp o2, FPRegisterID ra, FPRegisterID rn, FPRegisterID rd) { @@ -3361,6 +3394,23 @@ private: } // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. + unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt) { ASSERT(!(size && V && (opc & 2))); // Maximum vector size is 128 bits. 
@@ -3375,6 +3425,57 @@ private: } // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. + unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairOffset(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. + unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x29000000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairOffset(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairOffset(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector + ALWAYS_INLINE static int loadStoreRegisterPairNonTemporal(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2) + { + ASSERT(size < 3); + ASSERT(opc == (opc & 1)); // Only load or store, load signed 64 is handled via size. + ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There isn't an integer store signed. + unsigned immedShiftAmount = memPairOffsetShift(V, size); + int imm7 = immediate >> immedShiftAmount; + ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7)); + return (0x28000000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt); + } + + ALWAYS_INLINE static int loadStoreRegisterPairNonTemporal(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2) + { + return loadStoreRegisterPairNonTemporal(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2)); + } + + // 'V' means vector // 'S' means shift rm ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt) { @@ -3488,6 +3589,37 @@ private: return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4); } + // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the + // last instruction in the buffer is a load, store or prefetch. Needed + // before 64-bit multiply-accumulate instructions. 
+ template<int datasize> + ALWAYS_INLINE void nopCortexA53Fix835769() + { +#if CPU(ARM64_CORTEXA53) + CHECK_DATASIZE(); + if (datasize == 64) { + if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) { + // From ARMv8 Reference Manual, Section C4.1: the encoding of the + // instructions in the Loads and stores instruction group is: + // ---- 1-0- ---- ---- ---- ---- ---- ---- + if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000)) + nop(); + } + } +#endif + } + + // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid + // wrong address access after ADRP instruction. + ALWAYS_INLINE void nopCortexA53Fix843419() + { +#if CPU(ARM64_CORTEXA53) + nop(); + nop(); + nop(); +#endif + } + AssemblerBuffer m_buffer; Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink; int m_indexOfLastWatchpoint; @@ -3505,5 +3637,3 @@ private: #undef CHECK_FP_MEMOP_DATASIZE #endif // ENABLE(ASSEMBLER) && CPU(ARM64) - -#endif // ARM64Assembler_h diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.cpp b/Source/JavaScriptCore/assembler/ARMAssembler.cpp index f9100d4c9..552f37f68 100644 --- a/Source/JavaScriptCore/assembler/ARMAssembler.cpp +++ b/Source/JavaScriptCore/assembler/ARMAssembler.cpp @@ -395,8 +395,6 @@ void ARMAssembler::prepareExecutableCopy(void* to) { // 64-bit alignment is required for next constant pool and JIT code as well m_buffer.flushWithoutBarrier(true); - if (!m_buffer.isAligned(8)) - bkpt(0); char* data = reinterpret_cast<char*>(m_buffer.data()); ptrdiff_t delta = reinterpret_cast<char*>(to) - data; diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.h b/Source/JavaScriptCore/assembler/ARMAssembler.h index 087d31c14..6fba9ed18 100644 --- a/Source/JavaScriptCore/assembler/ARMAssembler.h +++ b/Source/JavaScriptCore/assembler/ARMAssembler.h @@ -24,8 +24,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef ARMAssembler_h -#define ARMAssembler_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) @@ -36,62 +35,6 @@ namespace JSC { typedef uint32_t ARMWord; - namespace ARMRegisters { - typedef enum { - r0 = 0, - r1, - r2, - r3, - r4, - r5, - r6, S0 = r6, - r7, - r8, - r9, - r10, - r11, fp = r11, // frame pointer - r12, ip = r12, S1 = r12, - r13, sp = r13, - r14, lr = r14, - r15, pc = r15 - } RegisterID; - - typedef enum { - d0, - d1, - d2, - d3, - d4, - d5, - d6, - d7, SD0 = d7, /* Same as thumb assembler. 
*/ - d8, - d9, - d10, - d11, - d12, - d13, - d14, - d15, - d16, - d17, - d18, - d19, - d20, - d21, - d22, - d23, - d24, - d25, - d26, - d27, - d28, - d29, - d30, - d31 - } FPRegisterID; - -#if USE(MASM_PROBE) #define FOR_EACH_CPU_REGISTER(V) \ FOR_EACH_CPU_GPREGISTER(V) \ FOR_EACH_CPU_SPECIAL_REGISTER(V) \ @@ -109,11 +52,11 @@ namespace JSC { V(void*, r8) \ V(void*, r9) \ V(void*, r10) \ - V(void*, r11) \ + V(void*, fp) \ V(void*, ip) \ V(void*, sp) \ V(void*, lr) \ - V(void*, pc) + V(void*, pc) \ #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ V(void*, apsr) \ @@ -135,8 +78,49 @@ namespace JSC { V(double, d12) \ V(double, d13) \ V(double, d14) \ - V(double, d15) -#endif // USE(MASM_PROBE) + V(double, d15) \ + V(double, d16) \ + V(double, d17) \ + V(double, d18) \ + V(double, d19) \ + V(double, d20) \ + V(double, d21) \ + V(double, d22) \ + V(double, d23) \ + V(double, d24) \ + V(double, d25) \ + V(double, d26) \ + V(double, d27) \ + V(double, d28) \ + V(double, d29) \ + V(double, d30) \ + V(double, d31) \ + + namespace ARMRegisters { + + typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + // Pseudonyms for some of the registers. + S0 = r6, + r11 = fp, // frame pointer + r12 = ip, S1 = ip, + r13 = sp, + r14 = lr, + r15 = pc + } RegisterID; + + typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + // Pseudonyms for some of the registers. + SD0 = d7, /* Same as thumb assembler. */ + } FPRegisterID; + } // namespace ARMRegisters class ARMAssembler { @@ -153,11 +137,11 @@ namespace JSC { ARMBuffer& buffer() { return m_buffer; } - static RegisterID firstRegister() { return ARMRegisters::r0; } - static RegisterID lastRegister() { return ARMRegisters::r15; } + static constexpr RegisterID firstRegister() { return ARMRegisters::r0; } + static constexpr RegisterID lastRegister() { return ARMRegisters::r15; } - static FPRegisterID firstFPRegister() { return ARMRegisters::d0; } - static FPRegisterID lastFPRegister() { return ARMRegisters::d31; } + static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; } + static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; } // ARM conditional constants typedef enum { @@ -231,6 +215,11 @@ namespace JSC { #endif NOP = 0xe1a00000, DMB_SY = 0xf57ff05f, + DMB_ISHST = 0xf57ff05a, +#if HAVE(ARM_IDIV_INSTRUCTIONS) + SDIV = 0x0710f010, + UDIV = 0x0730f010, +#endif }; enum { @@ -492,6 +481,26 @@ namespace JSC { m_buffer.putInt(toARMWord(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm)); } +#if HAVE(ARM_IDIV_INSTRUCTIONS) + template<int datasize> + void sdiv(int rd, int rn, int rm, Condition cc = AL) + { + static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s"); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(rm != ARMRegisters::pc); + m_buffer.putInt(toARMWord(cc) | SDIV | RN(rd) | RM(rn) | RS(rm)); + } + + void udiv(int rd, int rn, int rm, Condition cc = AL) + { + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(rm != ARMRegisters::pc); + m_buffer.putInt(toARMWord(cc) | UDIV | RN(rd) | RM(rn) | RS(rm)); + } +#endif + void vmov_f64(int dd, int dm, Condition cc = AL) { emitDoublePrecisionInstruction(toARMWord(cc) | VMOV_F64, dd, 0, dm); @@ -697,11 +706,28 @@ namespace JSC { m_buffer.putInt(NOP); } + static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory) + { + 
UNUSED_PARAM(isCopyingToExecutableMemory); + RELEASE_ASSERT(!(size % sizeof(int32_t))); + + int32_t* ptr = static_cast<int32_t*>(base); + const size_t num32s = size / sizeof(int32_t); + const int32_t insn = NOP; + for (size_t i = 0; i < num32s; i++) + *ptr++ = insn; + } + void dmbSY() { m_buffer.putInt(DMB_SY); } + void dmbISHST() + { + m_buffer.putInt(DMB_ISHST); + } + void bx(int rm, Condition cc = AL) { emitInstruction(toARMWord(cc) | BX, 0, 0, RM(rm)); @@ -940,6 +966,11 @@ namespace JSC { patchPointerInternal(getAbsoluteJumpAddress(from), to); } + static void relinkJumpToNop(void* from) + { + relinkJump(from, from); + } + static void linkCall(void* code, AssemblerLabel from, void* to) { patchPointerInternal(getAbsoluteJumpAddress(code, from.m_offset), to); @@ -981,6 +1012,11 @@ namespace JSC { return sizeof(ARMWord) * 2; } + static constexpr ptrdiff_t patchableJumpSize() + { + return sizeof(ARMWord) * 3; + } + static void replaceWithLoad(void* instructionStart) { ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart); @@ -1082,7 +1118,7 @@ namespace JSC { return AL | B | (offset & BranchOffsetMask); } -#if OS(LINUX) && COMPILER(GCC) +#if OS(LINUX) && COMPILER(GCC_OR_CLANG) static inline void linuxPageFlush(uintptr_t begin, uintptr_t end) { asm volatile( @@ -1102,7 +1138,7 @@ namespace JSC { static void cacheFlush(void* code, size_t size) { -#if OS(LINUX) && COMPILER(GCC) +#if OS(LINUX) && COMPILER(GCC_OR_CLANG) size_t page = pageSize(); uintptr_t current = reinterpret_cast<uintptr_t>(code); uintptr_t end = current + size; @@ -1119,8 +1155,6 @@ namespace JSC { linuxPageFlush(current, current + page); linuxPageFlush(current, end); -#elif OS(WINCE) - CacheRangeFlush(code, size, CACHE_SYNC_ALL); #else #error "The cacheFlush support is missing on this platform." #endif @@ -1176,5 +1210,3 @@ namespace JSC { } // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) - -#endif // ARMAssembler_h diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp b/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp deleted file mode 100644 index faca66421..000000000 --- a/Source/JavaScriptCore/assembler/ARMv7Assembler.cpp +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright (C) 2010 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" - -#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) - -#include "ARMv7Assembler.h" - -namespace JSC { - -} - -#endif diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h index 5257f32a8..86218ea72 100644 --- a/Source/JavaScriptCore/assembler/ARMv7Assembler.h +++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved. * Copyright (C) 2010 University of Szeged * * Redistribution and use in source and binary forms, with or without @@ -24,12 +24,12 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef ARMAssembler_h -#define ARMAssembler_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) #include "AssemblerBuffer.h" +#include "AssemblerCommon.h" #include <limits.h> #include <wtf/Assertions.h> #include <wtf/Vector.h> @@ -38,23 +38,83 @@ namespace JSC { namespace ARMRegisters { + + #define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + + // The following are defined as pairs of the following value: + // 1. type of the storage needed to save the register value by the JIT probe. + // 2. name of the register. 
+ #define FOR_EACH_CPU_GPREGISTER(V) \ + V(void*, r0) \ + V(void*, r1) \ + V(void*, r2) \ + V(void*, r3) \ + V(void*, r4) \ + V(void*, r5) \ + V(void*, r6) \ + V(void*, r7) \ + V(void*, r8) \ + V(void*, r9) \ + V(void*, r10) \ + V(void*, r11) \ + V(void*, ip) \ + V(void*, sp) \ + V(void*, lr) \ + V(void*, pc) + + #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, apsr) \ + V(void*, fpscr) \ + + #define FOR_EACH_CPU_FPREGISTER(V) \ + V(double, d0) \ + V(double, d1) \ + V(double, d2) \ + V(double, d3) \ + V(double, d4) \ + V(double, d5) \ + V(double, d6) \ + V(double, d7) \ + V(double, d8) \ + V(double, d9) \ + V(double, d10) \ + V(double, d11) \ + V(double, d12) \ + V(double, d13) \ + V(double, d14) \ + V(double, d15) \ + V(double, d16) \ + V(double, d17) \ + V(double, d18) \ + V(double, d19) \ + V(double, d20) \ + V(double, d21) \ + V(double, d22) \ + V(double, d23) \ + V(double, d24) \ + V(double, d25) \ + V(double, d26) \ + V(double, d27) \ + V(double, d28) \ + V(double, d29) \ + V(double, d30) \ + V(double, d31) + typedef enum { - r0, - r1, - r2, - r3, - r4, - r5, - r6, - r7, fp = r7, // frame pointer - r8, - r9, sb = r9, // static base - r10, sl = r10, // stack limit - r11, - r12, ip = r12, - r13, sp = r13, - r14, lr = r14, - r15, pc = r15, + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + fp = r7, // frame pointer + sb = r9, // static base + sl = r10, // stack limit + r12 = ip, + r13 = sp, + r14 = lr, + r15 = pc } RegisterID; typedef enum { @@ -93,38 +153,9 @@ namespace ARMRegisters { } FPSingleRegisterID; typedef enum { - d0, - d1, - d2, - d3, - d4, - d5, - d6, - d7, - d8, - d9, - d10, - d11, - d12, - d13, - d14, - d15, - d16, - d17, - d18, - d19, - d20, - d21, - d22, - d23, - d24, - d25, - d26, - d27, - d28, - d29, - d30, - d31, + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER } FPDoubleRegisterID; typedef enum { @@ -174,77 +205,7 @@ namespace ARMRegisters { return (FPDoubleRegisterID)(reg >> 1); } -#if USE(MASM_PROBE) - #define FOR_EACH_CPU_REGISTER(V) \ - FOR_EACH_CPU_GPREGISTER(V) \ - FOR_EACH_CPU_SPECIAL_REGISTER(V) \ - FOR_EACH_CPU_FPREGISTER(V) - - #define FOR_EACH_CPU_GPREGISTER(V) \ - V(void*, r0) \ - V(void*, r1) \ - V(void*, r2) \ - V(void*, r3) \ - V(void*, r4) \ - V(void*, r5) \ - V(void*, r6) \ - V(void*, r7) \ - V(void*, r8) \ - V(void*, r9) \ - V(void*, r10) \ - V(void*, r11) \ - V(void*, ip) \ - V(void*, sp) \ - V(void*, lr) \ - V(void*, pc) - - #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ - V(void*, apsr) \ - V(void*, fpscr) \ - - #define FOR_EACH_CPU_FPREGISTER(V) \ - V(double, d0) \ - V(double, d1) \ - V(double, d2) \ - V(double, d3) \ - V(double, d4) \ - V(double, d5) \ - V(double, d6) \ - V(double, d7) \ - V(double, d8) \ - V(double, d9) \ - V(double, d10) \ - V(double, d11) \ - V(double, d12) \ - V(double, d13) \ - V(double, d14) \ - V(double, d15) \ - FOR_EACH_CPU_FPREGISTER_EXTENSION(V) - -#if CPU(APPLE_ARMV7S) - #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \ - V(double, d16) \ - V(double, d17) \ - V(double, d18) \ - V(double, d19) \ - V(double, d20) \ - V(double, d21) \ - V(double, d22) \ - V(double, d23) \ - V(double, d24) \ - V(double, d25) \ - V(double, d26) \ - V(double, d27) \ - V(double, d28) \ - V(double, d29) \ - V(double, d30) \ - V(double, d31) -#else - #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add. 
-#endif // CPU(APPLE_ARMV7S) - -#endif // USE(MASM_PROBE) -} +} // namespace ARMRegisters class ARMv7Assembler; class ARMThumbImmediate { @@ -492,11 +453,11 @@ public: typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID; typedef FPDoubleRegisterID FPRegisterID; - static RegisterID firstRegister() { return ARMRegisters::r0; } - static RegisterID lastRegister() { return ARMRegisters::r13; } - - static FPRegisterID firstFPRegister() { return ARMRegisters::d0; } - static FPRegisterID lastFPRegister() { return ARMRegisters::d31; } + static constexpr RegisterID firstRegister() { return ARMRegisters::r0; } + static constexpr RegisterID lastRegister() { return ARMRegisters::r13; } + + static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; } + static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; } // (HS, LO, HI, LS) -> (AE, B, A, BE) // (VS, VC) -> (O, NO) @@ -583,6 +544,8 @@ public: { } + AssemblerBuffer& buffer() { return m_formatter.m_buffer; } + private: // ARMv7, Appx-A.6.3 @@ -646,6 +609,8 @@ private: OP_ADD_SP_imm_T1 = 0xA800, OP_ADD_SP_imm_T2 = 0xB000, OP_SUB_SP_imm_T1 = 0xB080, + OP_PUSH_T1 = 0xB400, + OP_POP_T1 = 0xBC00, OP_BKPT = 0xBE00, OP_IT = 0xBF00, OP_NOP_T1 = 0xBF00, @@ -654,6 +619,8 @@ private: typedef enum { OP_B_T1 = 0xD000, OP_B_T2 = 0xE000, + OP_POP_T2 = 0xE8BD, + OP_PUSH_T2 = 0xE92D, OP_AND_reg_T2 = 0xEA00, OP_TST_reg_T2 = 0xEA10, OP_ORR_reg_T2 = 0xEA40, @@ -714,7 +681,7 @@ private: OP_MOVT = 0xF2C0, OP_UBFX_T1 = 0xF3C0, OP_NOP_T2a = 0xF3AF, - OP_DMB_SY_T2a = 0xF3BF, + OP_DMB_T1a = 0xF3BF, OP_STRB_imm_T3 = 0xF800, OP_STRB_reg_T2 = 0xF800, OP_LDRB_imm_T3 = 0xF810, @@ -741,39 +708,40 @@ private: OP_ROR_reg_T2 = 0xFA60, OP_CLZ = 0xFAB0, OP_SMULL_T1 = 0xFB80, -#if CPU(APPLE_ARMV7S) +#if HAVE(ARM_IDIV_INSTRUCTIONS) OP_SDIV_T1 = 0xFB90, OP_UDIV_T1 = 0xFBB0, #endif } OpcodeID1; typedef enum { - OP_VADD_T2b = 0x0A00, - OP_VDIVb = 0x0A00, - OP_FLDSb = 0x0A00, - OP_VLDRb = 0x0A00, - OP_VMOV_IMM_T2b = 0x0A00, - OP_VMOV_T2b = 0x0A40, - OP_VMUL_T2b = 0x0A00, - OP_FSTSb = 0x0A00, - OP_VSTRb = 0x0A00, - OP_VMOV_StoCb = 0x0A10, - OP_VMOV_CtoSb = 0x0A10, - OP_VMOV_DtoCb = 0x0A10, - OP_VMOV_CtoDb = 0x0A10, - OP_VMRSb = 0x0A10, - OP_VABS_T2b = 0x0A40, - OP_VCMPb = 0x0A40, - OP_VCVT_FPIVFPb = 0x0A40, - OP_VNEG_T2b = 0x0A40, - OP_VSUB_T2b = 0x0A40, - OP_VSQRT_T1b = 0x0A40, - OP_VCVTSD_T1b = 0x0A40, - OP_VCVTDS_T1b = 0x0A40, - OP_NOP_T2b = 0x8000, - OP_DMB_SY_T2b = 0x8F5F, - OP_B_T3b = 0x8000, - OP_B_T4b = 0x9000, + OP_VADD_T2b = 0x0A00, + OP_VDIVb = 0x0A00, + OP_FLDSb = 0x0A00, + OP_VLDRb = 0x0A00, + OP_VMOV_IMM_T2b = 0x0A00, + OP_VMOV_T2b = 0x0A40, + OP_VMUL_T2b = 0x0A00, + OP_FSTSb = 0x0A00, + OP_VSTRb = 0x0A00, + OP_VMOV_StoCb = 0x0A10, + OP_VMOV_CtoSb = 0x0A10, + OP_VMOV_DtoCb = 0x0A10, + OP_VMOV_CtoDb = 0x0A10, + OP_VMRSb = 0x0A10, + OP_VABS_T2b = 0x0A40, + OP_VCMPb = 0x0A40, + OP_VCVT_FPIVFPb = 0x0A40, + OP_VNEG_T2b = 0x0A40, + OP_VSUB_T2b = 0x0A40, + OP_VSQRT_T1b = 0x0A40, + OP_VCVTSD_T1b = 0x0A40, + OP_VCVTDS_T1b = 0x0A40, + OP_NOP_T2b = 0x8000, + OP_DMB_SY_T1b = 0x8F5F, + OP_DMB_ISHST_T1b = 0x8F5A, + OP_B_T3b = 0x8000, + OP_B_T4b = 0x9000, } OpcodeID2; struct FourFours { @@ -799,11 +767,11 @@ private: class ARMInstructionFormatter; // false means else! - bool ifThenElseConditionBit(Condition condition, bool isIf) + static bool ifThenElseConditionBit(Condition condition, bool isIf) { return isIf ? 
(condition & 1) : !(condition & 1); } - uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) + static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | (ifThenElseConditionBit(condition, inst3if) << 2) @@ -812,7 +780,7 @@ private: ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) + static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | (ifThenElseConditionBit(condition, inst3if) << 2) @@ -820,7 +788,7 @@ private: ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition, bool inst2if) + static uint8_t ifThenElse(Condition condition, bool inst2if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | 4; @@ -828,7 +796,7 @@ private: return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition) + static uint8_t ifThenElse(Condition condition) { int mask = 8; return (condition << 4) | mask; @@ -855,7 +823,7 @@ public: ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isValid()); - if (rn == ARMRegisters::sp) { + if (rn == ARMRegisters::sp && imm.isUInt16()) { ASSERT(!(imm.getUInt16() & 3)); if (!(rd & 8) && imm.isUInt10()) { m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2)); @@ -894,6 +862,11 @@ public: // NOTE: In an IT block, add doesn't modify the flags register. ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm) { + if (rd == ARMRegisters::sp) { + mov(rd, rn); + rn = rd; + } + if (rd == rn) m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd); else if (rd == rm) @@ -1183,9 +1156,10 @@ public: { ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(imm.isUInt12()); + ASSERT(!(imm.getUInt12() & 1)); if (!((rt | rn) & 8) && imm.isUInt6()) - m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt); + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12()); } @@ -1353,11 +1327,14 @@ public: uint16_t* address = static_cast<uint16_t*>(instructionStart); ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm)); ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16)); - address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); - address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16); - address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); - address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16); - address[4] = OP_CMP_reg_T2 | left; + uint16_t instruction[] = { + twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16), + twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16), + twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16), + twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16), + static_cast<uint16_t>(OP_CMP_reg_T2 | left) + }; + performJITMemcpy(address, instruction, sizeof(uint16_t) * 5); cacheFlush(address, sizeof(uint16_t) * 5); } #else @@ -1368,8 +1345,11 @@ public: ASSERT(!BadReg(rd)); uint16_t* address = static_cast<uint16_t*>(instructionStart); - address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm); - address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm); + uint16_t 
instruction[] = { + twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm), + twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm) + }; + performJITMemcpy(address, instruction, sizeof(uint16_t) * 2); cacheFlush(address, sizeof(uint16_t) * 2); } #endif @@ -1488,9 +1468,49 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); } -#if CPU(APPLE_ARMV7S) + ALWAYS_INLINE void pop(RegisterID dest) + { + if (dest < ARMRegisters::r8) + m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest); + else { + // Load postindexed with writeback. + ldr(dest, ARMRegisters::sp, sizeof(void*), false, true); + } + } + + ALWAYS_INLINE void pop(uint32_t registerList) + { + ASSERT(WTF::bitCount(registerList) > 1); + ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList)); + ASSERT(!((1 << ARMRegisters::sp) & registerList)); + m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList); + } + + ALWAYS_INLINE void push(RegisterID src) + { + if (src < ARMRegisters::r8) + m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src); + else if (src == ARMRegisters::lr) + m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100); + else { + // Store preindexed with writeback. + str(src, ARMRegisters::sp, -sizeof(void*), true, true); + } + } + + ALWAYS_INLINE void push(uint32_t registerList) + { + ASSERT(WTF::bitCount(registerList) > 1); + ASSERT(!((1 << ARMRegisters::pc) & registerList)); + ASSERT(!((1 << ARMRegisters::sp) & registerList)); + m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList); + } + +#if HAVE(ARM_IDIV_INSTRUCTIONS) + template<int datasize> ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm) { + static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s"); ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); ASSERT(!BadReg(rm)); @@ -1635,8 +1655,8 @@ public: ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isUInt12()); - if (!((rt | rn) & 8) && imm.isUInt7()) - m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt); + if (!((rt | rn) & 8) && imm.isUInt6()) + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12()); } @@ -1834,7 +1854,7 @@ public: m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f); } -#if CPU(APPLE_ARMV7S) +#if HAVE(ARM_IDIV_INSTRUCTIONS) ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm) { ASSERT(!BadReg(rd)); @@ -1984,9 +2004,51 @@ public: m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b); } + static constexpr int16_t nopPseudo16() + { + return OP_NOP_T1; + } + + static constexpr int32_t nopPseudo32() + { + return OP_NOP_T2a | (OP_NOP_T2b << 16); + } + + static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory) + { + RELEASE_ASSERT(!(size % sizeof(int16_t))); + + char* ptr = static_cast<char*>(base); + const size_t num32s = size / sizeof(int32_t); + for (size_t i = 0; i < num32s; i++) { + const int32_t insn = nopPseudo32(); + if (isCopyingToExecutableMemory) + performJITMemcpy(ptr, &insn, sizeof(int32_t)); + else + memcpy(ptr, &insn, sizeof(int32_t)); + ptr += sizeof(int32_t); + } + + const size_t num16s = (size % sizeof(int32_t)) / sizeof(int16_t); + ASSERT(num16s == 0 || num16s == 1); + ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size); + if (num16s) { + const int16_t insn = nopPseudo16(); + if (isCopyingToExecutableMemory) + performJITMemcpy(ptr, &insn, sizeof(int16_t)); + 
else + memcpy(ptr, &insn, sizeof(int16_t)); + } + } + void dmbSY() { - m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b); + m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_SY_T1b); + } + + void dmbISHST() + { + m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_ISHST_T1b); } AssemblerLabel labelIgnoringWatchpoints() @@ -2036,14 +2098,7 @@ public: return b.m_offset - a.m_offset; } - int executableOffsetFor(int location) - { - if (!location) - return 0; - return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1]; - } - - int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } // Assembler admin methods: @@ -2052,7 +2107,7 @@ public: return a.from() < b.from(); } - bool canCompact(JumpType jumpType) + static bool canCompact(JumpType jumpType) { // The following cannot be compacted: // JumpFixed: represents custom jump sequence @@ -2061,7 +2116,7 @@ public: return (jumpType == JumpNoCondition) || (jumpType == JumpCondition); } - JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { if (jumpType == JumpFixed) return LinkInvalid; @@ -2105,51 +2160,43 @@ public: return LinkConditionalBX; } - JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { JumpLinkType linkType = computeJumpType(record.type(), from, to); record.setLinkType(linkType); return linkType; } - void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) - { - int32_t ptr = regionStart / sizeof(int32_t); - const int32_t end = regionEnd / sizeof(int32_t); - int32_t* offsets = static_cast<int32_t*>(m_formatter.data()); - while (ptr < end) - offsets[ptr++] = offset; - } - Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator); return m_jumpsToLink; } - void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to) + static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to) { + const uint16_t* fromInstruction = reinterpret_cast_ptr<const uint16_t*>(fromInstruction8); switch (record.linkType()) { case LinkJumpT1: - linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to); + linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to); break; case LinkJumpT2: - linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to); + linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to); break; case LinkJumpT3: - linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to); + linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to); break; case LinkJumpT4: - linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to); + linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to); break; case LinkConditionalJumpT4: - linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to); + linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to); break; case LinkConditionalBX: - linkConditionalBX(record.condition(), 
reinterpret_cast_ptr<uint16_t*>(from), to); + linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to); break; case LinkBX: - linkBX(reinterpret_cast_ptr<uint16_t*>(from), to); + linkBX(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to); break; default: RELEASE_ASSERT_NOT_REACHED(); @@ -2186,7 +2233,7 @@ public: ASSERT(from.isSet()); uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset); - linkJumpAbsolute(location, to); + linkJumpAbsolute(location, location, to); } static void linkCall(void* code, AssemblerLabel from, void* to) @@ -2202,15 +2249,24 @@ public: setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false); } + // The static relink and replace methods can use can use |from| for both + // the write and executable address for call and jump patching + // as they're modifying existing (linked) code, so the address being + // provided is correct for relative address computation. static void relinkJump(void* from, void* to) { ASSERT(!(reinterpret_cast<intptr_t>(from) & 1)); ASSERT(!(reinterpret_cast<intptr_t>(to) & 1)); - linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to); + linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), reinterpret_cast<uint16_t*>(from), to); cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t)); } + + static void relinkJumpToNop(void* from) + { + relinkJump(from, from); + } static void relinkCall(void* from, void* to) { @@ -2246,8 +2302,9 @@ public: offset |= (1 << 11); uint16_t* location = reinterpret_cast<uint16_t*>(where); - location[1] &= ~((1 << 12) - 1); - location[1] |= offset; + uint16_t instruction = location[1] & ~((1 << 12) - 1); + instruction |= offset; + performJITMemcpy(location + 1, &instruction, sizeof(uint16_t)); cacheFlush(location, sizeof(uint16_t) * 2); } @@ -2271,16 +2328,16 @@ public: #if OS(LINUX) if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) { uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2; - linkJumpT4(ptr, to); + linkJumpT4(ptr, ptr, to); cacheFlush(ptr - 2, sizeof(uint16_t) * 2); } else { uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5; - linkBX(ptr, to); + linkBX(ptr, ptr, to); cacheFlush(ptr - 5, sizeof(uint16_t) * 5); } #else uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2; - linkJumpT4(ptr, to); + linkJumpT4(ptr, ptr, to); cacheFlush(ptr - 2, sizeof(uint16_t) * 2); #endif } @@ -2293,6 +2350,11 @@ public: return 4; #endif } + + static constexpr ptrdiff_t patchableJumpSize() + { + return 10; + } static void replaceWithLoad(void* instructionStart) { @@ -2301,14 +2363,17 @@ public: switch (ptr[0] & 0xFFF0) { case OP_LDR_imm_T3: break; - case OP_ADD_imm_T3: + case OP_ADD_imm_T3: { ASSERT(!(ptr[1] & 0xF000)); - ptr[0] &= 0x000F; - ptr[0] |= OP_LDR_imm_T3; - ptr[1] |= (ptr[1] & 0x0F00) << 4; - ptr[1] &= 0xF0FF; + uint16_t instructions[2]; + instructions[0] = ptr[0] & 0x000F; + instructions[0] |= OP_LDR_imm_T3; + instructions[1] = ptr[1] | (ptr[1] & 0x0F00) << 4; + instructions[1] &= 0xF0FF; + performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2); cacheFlush(ptr, sizeof(uint16_t) * 2); break; + } default: RELEASE_ASSERT_NOT_REACHED(); } @@ -2319,14 +2384,17 @@ public: ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1)); uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart); switch (ptr[0] & 0xFFF0) { - case OP_LDR_imm_T3: + case OP_LDR_imm_T3: { ASSERT(!(ptr[1] & 0x0F00)); - ptr[0] &= 0x000F; - ptr[0] |= 
OP_ADD_imm_T3; - ptr[1] |= (ptr[1] & 0xF000) >> 4; - ptr[1] &= 0x0FFF; + uint16_t instructions[2]; + instructions[0] = ptr[0] & 0x000F; + instructions[0] |= OP_ADD_imm_T3; + instructions[1] = ptr[1] | (ptr[1] & 0xF000) >> 4; + instructions[1] &= 0x0FFF; + performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2); cacheFlush(ptr, sizeof(uint16_t) * 2); break; + } case OP_ADD_imm_T3: break; default: @@ -2375,8 +2443,6 @@ public: linuxPageFlush(current, current + page); linuxPageFlush(current, end); -#elif OS(WINCE) - CacheRangeFlush(code, size, CACHE_SYNC_ALL); #else #error "The cacheFlush support is missing on this platform." #endif @@ -2456,11 +2522,13 @@ private: ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value)); ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16)); - location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); - location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16); - location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); - location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16); + uint16_t instructions[4]; + instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); + instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16); + instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); + instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16); + performJITMemcpy(location - 4, instructions, 4 * sizeof(uint16_t)); if (flush) cacheFlush(location - 4, 4 * sizeof(uint16_t)); } @@ -2488,8 +2556,10 @@ private: ASSERT(imm.isValid()); ASSERT(imm.isUInt7()); uint16_t* location = reinterpret_cast<uint16_t*>(code); - location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6); - location[0] |= (imm.getUInt7() >> 2) << 6; + uint16_t instruction; + instruction = location[0] & ~((static_cast<uint16_t>(0x7f) >> 2) << 6); + instruction |= (imm.getUInt7() >> 2) << 6; + performJITMemcpy(location, &instruction, sizeof(uint16_t)); cacheFlush(location, sizeof(uint16_t)); } @@ -2498,39 +2568,39 @@ private: setInt32(code, reinterpret_cast<uint32_t>(value), flush); } - static bool isB(void* address) + static bool isB(const void* address) { - uint16_t* instruction = static_cast<uint16_t*>(address); + const uint16_t* instruction = static_cast<const uint16_t*>(address); return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b); } - static bool isBX(void* address) + static bool isBX(const void* address) { - uint16_t* instruction = static_cast<uint16_t*>(address); + const uint16_t* instruction = static_cast<const uint16_t*>(address); return (instruction[0] & 0xff87) == OP_BX; } - static bool isMOV_imm_T3(void* address) + static bool isMOV_imm_T3(const void* address) { - uint16_t* instruction = static_cast<uint16_t*>(address); + const uint16_t* instruction = static_cast<const uint16_t*>(address); return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0); } - static bool isMOVT(void* address) + static bool isMOVT(const void* address) { - uint16_t* instruction = static_cast<uint16_t*>(address); + const uint16_t* instruction = static_cast<const uint16_t*>(address); return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0); } - static bool isNOP_T1(void* address) + static bool isNOP_T1(const void* address) { - uint16_t* instruction = static_cast<uint16_t*>(address); + const 
uint16_t* instruction = static_cast<const uint16_t*>(address); return instruction[0] == OP_NOP_T1; } - static bool isNOP_T2(void* address) + static bool isNOP_T2(const void* address) { - uint16_t* instruction = static_cast<uint16_t*>(address); + const uint16_t* instruction = static_cast<const uint16_t*>(address); return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b); } @@ -2578,7 +2648,7 @@ private: return ((relative << 7) >> 7) == relative; } - void linkJumpT1(Condition cond, uint16_t* instruction, void* target) + static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2593,10 +2663,11 @@ private: // All branch offsets should be an even distance. ASSERT(!(relative & 1)); - instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1); + uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1); + performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t)); } - static void linkJumpT2(uint16_t* instruction, void* target) + static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2611,10 +2682,11 @@ private: // All branch offsets should be an even distance. ASSERT(!(relative & 1)); - instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1); + uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1); + performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t)); } - void linkJumpT3(Condition cond, uint16_t* instruction, void* target) + static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2625,11 +2697,13 @@ private: // All branch offsets should be an even distance. ASSERT(!(relative & 1)); - instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12); - instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1); + uint16_t instructions[2]; + instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12); + instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1); + performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t)); } - static void linkJumpT4(uint16_t* instruction, void* target) + static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2643,47 +2717,55 @@ private: // All branch offsets should be an even distance. 
ASSERT(!(relative & 1)); - instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12); - instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1); + uint16_t instructions[2]; + instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12); + instructions[1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1); + performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t)); } - void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target) + static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); ASSERT(!(reinterpret_cast<intptr_t>(target) & 1)); - instruction[-3] = ifThenElse(cond) | OP_IT; - linkJumpT4(instruction, target); + uint16_t newInstruction = ifThenElse(cond) | OP_IT; + performJITMemcpy(writeTarget - 3, &newInstruction, sizeof(uint16_t)); + linkJumpT4(writeTarget, instruction, target); } - static void linkBX(uint16_t* instruction, void* target) + static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( - ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); + ASSERT_UNUSED(instruction, !(reinterpret_cast<intptr_t>(instruction) & 1)); + ASSERT(!(reinterpret_cast<intptr_t>(writeTarget) & 1)); ASSERT(!(reinterpret_cast<intptr_t>(target) & 1)); const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip; ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1)); ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16)); - instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); - instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16); - instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); - instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16); - instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); + uint16_t instructions[5]; + instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); + instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16); + instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); + instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16); + instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); + + performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t)); } - void linkConditionalBX(Condition cond, uint16_t* instruction, void* target) + static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. 
:-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); ASSERT(!(reinterpret_cast<intptr_t>(target) & 1)); - linkBX(instruction, target); - instruction[-6] = ifThenElse(cond, true, true) | OP_IT; + linkBX(writeTarget, instruction, target); + uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT; + performJITMemcpy(writeTarget - 6, &newInstruction, sizeof(uint16_t)); } - static void linkJumpAbsolute(uint16_t* instruction, void* target) + static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1)); @@ -2691,26 +2773,31 @@ private: ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1)) || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2))); - + if (canBeJumpT4(instruction, target)) { // There may be a better way to fix this, but right now put the NOPs first, since in the // case of an conditional branch this will be coming after an ITTT predicating *three* // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to // variable wdith encoding - the previous instruction might *look* like an ITTT but // actually be the second half of a 2-word op. - instruction[-5] = OP_NOP_T1; - instruction[-4] = OP_NOP_T2a; - instruction[-3] = OP_NOP_T2b; - linkJumpT4(instruction, target); + uint16_t instructions[3]; + instructions[0] = OP_NOP_T1; + instructions[1] = OP_NOP_T2a; + instructions[2] = OP_NOP_T2b; + performJITMemcpy(writeTarget - 5, instructions, 3 * sizeof(uint16_t)); + linkJumpT4(writeTarget, instruction, target); } else { const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip; ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1)); ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16)); - instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); - instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16); - instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); - instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16); - instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); + + uint16_t instructions[5]; + instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); + instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16); + instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); + instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16); + instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); + performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t)); } } @@ -2753,6 +2840,11 @@ private: m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3); } + ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm) + { + m_buffer.putShort(op | imm); + } + ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm) { m_buffer.putShort(op | imm); @@ -2791,6 +2883,12 @@ private: m_buffer.putShort(op2); } + ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm) + { + m_buffer.putShort(op1); + m_buffer.putShort(imm); + } + ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm) { ARMThumbImmediate newImm = imm; @@ -2851,7 +2949,6 @@ private: unsigned debugOffset() { 
return m_buffer.debugOffset(); } - private: AssemblerBuffer m_buffer; } m_formatter; @@ -2863,5 +2960,3 @@ private: } // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) - -#endif // ARMAssembler_h diff --git a/Source/JavaScriptCore/assembler/AbortReason.h b/Source/JavaScriptCore/assembler/AbortReason.h new file mode 100644 index 000000000..32ae0867a --- /dev/null +++ b/Source/JavaScriptCore/assembler/AbortReason.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2014-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +namespace JSC { + +// It's important to not change the values of existing abort reasons unless we really +// have to. For this reason there is a BASIC-style numbering that should allow us to +// sneak new reasons in without changing the numbering of existing reasons - at least +// for a while. 
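To make the spacing scheme concrete (an illustrative sketch only; the real enumeration follows immediately below): because existing reasons sit on widely spaced values, a new reason can take an unused value in between and nothing already compiled or logged needs renumbering.

// Hypothetical illustration of the convention; AHIsNotCellExample is not a real reason.
enum AbortReasonSketch {
    AHIsNotCell = 40,          // existing value, unchanged
    AHIsNotCellExample = 45,   // new reason slotted into the gap
    AHIsNotInt32 = 50,         // existing value, unchanged
};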
+enum AbortReason { + AHCallFrameMisaligned = 10, + AHIndexingTypeIsValid = 20, + AHInsaneArgumentCount = 30, + AHIsNotCell = 40, + AHIsNotInt32 = 50, + AHIsNotJSDouble = 60, + AHIsNotJSInt32 = 70, + AHIsNotJSNumber = 80, + AHIsNotNull = 90, + AHStackPointerMisaligned = 100, + AHStructureIDIsValid = 110, + AHTagMaskNotInPlace = 120, + AHTagTypeNumberNotInPlace = 130, + AHTypeInfoInlineTypeFlagsAreValid = 140, + AHTypeInfoIsValid = 150, + B3Oops = 155, + DFGBailedAtTopOfBlock = 161, + DFGBailedAtEndOfNode = 162, + DFGBasicStorageAllocatorZeroSize = 170, + DFGIsNotCell = 180, + DFGIneffectiveWatchpoint = 190, + DFGNegativeStringLength = 200, + DFGSlowPathGeneratorFellThrough = 210, + DFGUnreachableBasicBlock = 220, + DFGUnreachableNode = 225, + DFGUnreasonableOSREntryJumpDestination = 230, + DFGVarargsThrowingPathDidNotThrow = 235, + FTLCrash = 236, + JITDidReturnFromTailCall = 237, + JITDivOperandsAreNotNumbers = 240, + JITGetByValResultIsNotEmpty = 250, + JITNotSupported = 260, + JITOffsetIsNotOutOfLine = 270, + JITUncoughtExceptionAfterCall = 275, + JITUnexpectedCallFrameSize = 277, + JITUnreasonableLoopHintJumpTarget = 280, + RPWUnreasonableJumpTarget = 290, + RepatchIneffectiveWatchpoint = 300, + RepatchInsaneArgumentCount = 310, + TGInvalidPointer = 320, + TGNotSupported = 330, + YARRNoInputConsumed = 340, +}; + +} // namespace JSC diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h index 28537201b..b791e5cb1 100644 --- a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h +++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,74 +23,37 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef AbstractMacroAssembler_h -#define AbstractMacroAssembler_h +#pragma once +#include "AbortReason.h" #include "AssemblerBuffer.h" +#include "AssemblerCommon.h" +#include "CPU.h" #include "CodeLocation.h" #include "MacroAssemblerCodeRef.h" +#include "MacroAssemblerHelpers.h" #include "Options.h" -#include "WeakRandom.h" #include <wtf/CryptographicallyRandomNumber.h> #include <wtf/Noncopyable.h> - -#if ENABLE(ASSEMBLER) +#include <wtf/SharedTask.h> +#include <wtf/WeakRandom.h> namespace JSC { -inline bool isARMv7s() -{ -#if CPU(APPLE_ARMV7S) - return true; -#else - return false; -#endif -} - -inline bool isARM64() -{ -#if CPU(ARM64) - return true; -#else - return false; -#endif -} - -inline bool isX86() -{ -#if CPU(X86_64) || CPU(X86) - return true; -#else - return false; -#endif -} - -inline bool optimizeForARMv7s() -{ - return isARMv7s() && Options::enableArchitectureSpecificOptimizations(); -} - -inline bool optimizeForARM64() -{ - return isARM64() && Options::enableArchitectureSpecificOptimizations(); -} - -inline bool optimizeForX86() -{ - return isX86() && Options::enableArchitectureSpecificOptimizations(); -} +#if ENABLE(ASSEMBLER) +class AllowMacroScratchRegisterUsage; +class DisallowMacroScratchRegisterUsage; class LinkBuffer; -class RepatchBuffer; class Watchpoint; namespace DFG { struct OSRExit; } -template <class AssemblerType> +template <class AssemblerType, class MacroAssemblerType> class AbstractMacroAssembler { public: - friend class JITWriteBarrierBase; + typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType; typedef AssemblerType AssemblerType_T; typedef MacroAssemblerCodePtr CodePtr; @@ -101,11 +64,11 @@ public: typedef typename AssemblerType::RegisterID RegisterID; typedef typename AssemblerType::FPRegisterID FPRegisterID; - static RegisterID firstRegister() { return AssemblerType::firstRegister(); } - static RegisterID lastRegister() { return AssemblerType::lastRegister(); } + static constexpr RegisterID firstRegister() { return AssemblerType::firstRegister(); } + static constexpr RegisterID lastRegister() { return AssemblerType::lastRegister(); } - static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); } - static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); } + static constexpr FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); } + static constexpr FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); } // Section 1: MacroAssembler operand types // @@ -125,7 +88,9 @@ public: return TimesFour; return TimesEight; } - + + struct BaseIndex; + // Address: // // Describes a simple base-offset address. @@ -140,7 +105,9 @@ public: { return Address(base, offset + additionalOffset); } - + + BaseIndex indexedBy(RegisterID index, Scale) const; + RegisterID base; int32_t offset; }; @@ -198,11 +165,16 @@ public: , offset(offset) { } - + RegisterID base; RegisterID index; Scale scale; int32_t offset; + + BaseIndex withOffset(int32_t additionalOffset) + { + return BaseIndex(base, index, scale, offset + additionalOffset); + } }; // AbsoluteAddress: @@ -354,7 +326,7 @@ public: // A Label records a point in the generated instruction stream, typically such that // it may be used as a destination for a jump. 
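Before the Label class below, a short sketch of how the operand types defined above compose, including the indexedBy() and withOffset() helpers touched by this patch (illustrative only; it assumes a concrete MacroAssembler and borrows the ARM register names from earlier in the patch):

// [r0 + 16]: simple base-plus-offset operand.
MacroAssembler::Address slot(ARMRegisters::r0, 16);
// Same base, bumped by four bytes: [r0 + 20].
MacroAssembler::Address next = slot.withOffset(4);
// Add a scaled index register: [r0 + (r1 << 2) + 16].
MacroAssembler::BaseIndex element = slot.indexedBy(ARMRegisters::r1, MacroAssembler::TimesFour);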
class Label { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend struct DFG::OSRExit; friend class Jump; @@ -367,12 +339,14 @@ public: { } - Label(AbstractMacroAssembler<AssemblerType>* masm) + Label(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { masm->invalidateAllTempRegisters(); } + bool operator==(const Label& other) const { return m_label == other.m_label; } + bool isSet() const { return m_label.isSet(); } private: AssemblerLabel m_label; @@ -389,7 +363,7 @@ public: // // addPtr(TrustedImmPtr(i), a, b) class ConvertibleLoadLabel { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; @@ -398,7 +372,7 @@ public: { } - ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm) + ConvertibleLoadLabel(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.labelIgnoringWatchpoints()) { } @@ -413,7 +387,7 @@ public: // A DataLabelPtr is used to refer to a location in the code containing a pointer to be // patched after the code has been generated. class DataLabelPtr { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; public: @@ -421,7 +395,7 @@ public: { } - DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm) + DataLabelPtr(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { } @@ -434,10 +408,10 @@ public: // DataLabel32: // - // A DataLabelPtr is used to refer to a location in the code containing a pointer to be + // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be // patched after the code has been generated. class DataLabel32 { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; public: @@ -445,7 +419,7 @@ public: { } - DataLabel32(AbstractMacroAssembler<AssemblerType>* masm) + DataLabel32(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { } @@ -461,7 +435,7 @@ public: // A DataLabelCompact is used to refer to a location in the code containing a // compact immediate to be patched after the code has been generated. class DataLabelCompact { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class LinkBuffer; public: @@ -469,7 +443,7 @@ public: { } - DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm) + DataLabelCompact(AbstractMacroAssemblerType* masm) : m_label(masm->m_assembler.label()) { } @@ -492,7 +466,7 @@ public: // relative offset such that when executed it will call to the desired // destination. class Call { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; public: @@ -500,7 +474,9 @@ public: None = 0x0, Linkable = 0x1, Near = 0x2, + Tail = 0x4, LinkableNear = 0x3, + LinkableNearTail = 0x7, }; Call() @@ -536,7 +512,7 @@ public: // relative offset such that when executed it will jump to the desired // destination. 
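A minimal sketch of the usual flow tying labels and jumps together (the branch-emitting jump() call is assumed to come from the concrete MacroAssembler; everything else uses the interfaces shown here):

// Emit a forward branch whose destination is not yet known.
MacroAssembler::Jump skip = masm.jump();       // assumed MacroAssembler entry point
// ... emit the instructions to be skipped ...
MacroAssembler::Label target(&masm);           // record the current position
skip.linkTo(target, &masm);                    // patch the branch to point at the label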
class Jump { - template<class TemplateAssemblerType> + template<class TemplateAssemblerType, class TemplateMacroAssemblerType> friend class AbstractMacroAssembler; friend class Call; friend struct DFG::OSRExit; @@ -581,12 +557,6 @@ public: { ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize)); } -#elif CPU(SH4) - Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar) - : m_label(jmp) - , m_type(type) - { - } #else Jump(AssemblerLabel jmp) : m_label(jmp) @@ -601,7 +571,7 @@ public: return result; } - void link(AbstractMacroAssembler<AssemblerType>* masm) const + void link(AbstractMacroAssemblerType* masm) const { masm->invalidateAllTempRegisters(); @@ -618,14 +588,12 @@ public: masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister); else masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition); -#elif CPU(SH4) - masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type); #else masm->m_assembler.linkJump(m_label, masm->m_assembler.label()); #endif } - void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const + void linkTo(Label label, AbstractMacroAssemblerType* masm) const { #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset); @@ -659,9 +627,6 @@ public: unsigned m_bitNumber; ARM64Assembler::RegisterID m_compareRegister; #endif -#if CPU(SH4) - SH4Assembler::JumpType m_type; -#endif }; struct PatchableJump { @@ -684,8 +649,6 @@ public: // A JumpList is a set of Jump objects. // All jumps in the set will be linked to the same destination. class JumpList { - friend class LinkBuffer; - public: typedef Vector<Jump, 2> JumpVector; @@ -693,23 +656,22 @@ public: JumpList(Jump jump) { - append(jump); + if (jump.isSet()) + append(jump); } - void link(AbstractMacroAssembler<AssemblerType>* masm) + void link(AbstractMacroAssemblerType* masm) const { size_t size = m_jumps.size(); for (size_t i = 0; i < size; ++i) m_jumps[i].link(masm); - m_jumps.clear(); } - void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) + void linkTo(Label label, AbstractMacroAssemblerType* masm) const { size_t size = m_jumps.size(); for (size_t i = 0; i < size; ++i) m_jumps[i].linkTo(label, masm); - m_jumps.clear(); } void append(Jump jump) @@ -834,19 +796,215 @@ public: AssemblerType::cacheFlush(code, size); } +#if ENABLE(MASM_PROBE) + + struct CPUState { + #define DECLARE_REGISTER(_type, _regName) \ + _type _regName; + FOR_EACH_CPU_REGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + static const char* gprName(RegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case RegisterID::_regName: \ + return #_regName; + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + static const char* fprName(FPRegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case FPRegisterID::_regName: \ + return #_regName; + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + void*& gpr(RegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case RegisterID::_regName: \ + return _regName; + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + double& 
fpr(FPRegisterID regID) + { + switch (regID) { + #define DECLARE_REGISTER(_type, _regName) \ + case FPRegisterID::_regName: \ + return _regName; + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + }; + + struct ProbeContext; + typedef void (*ProbeFunction)(struct ProbeContext*); + + struct ProbeContext { + ProbeFunction probeFunction; + void* arg1; + void* arg2; + CPUState cpu; + + // Convenience methods: + void*& gpr(RegisterID regID) { return cpu.gpr(regID); } + double& fpr(FPRegisterID regID) { return cpu.fpr(regID); } + const char* gprName(RegisterID regID) { return cpu.gprName(regID); } + const char* fprName(FPRegisterID regID) { return cpu.fprName(regID); } + }; + + // This function emits code to preserve the CPUState (e.g. registers), + // call a user supplied probe function, and restore the CPUState before + // continuing with other JIT generated code. + // + // The user supplied probe function will be called with a single pointer to + // a ProbeContext struct (defined above) which contains, among other things, + // the preserved CPUState. This allows the user probe function to inspect + // the CPUState at that point in the JIT generated code. + // + // If the user probe function alters the register values in the ProbeContext, + // the altered values will be loaded into the CPU registers when the probe + // returns. + // + // The ProbeContext is stack allocated and is only valid for the duration + // of the call to the user probe function. + // + // Note: probe() should be implemented by the target specific MacroAssembler. + // This prototype is only provided here to document the interface. + + void probe(ProbeFunction, void* arg1, void* arg2); + +#endif // ENABLE(MASM_PROBE) + AssemblerType m_assembler; + static void linkJump(void* code, Jump jump, CodeLocationLabel target) + { + AssemblerType::linkJump(code, jump.m_label, target.dataLocation()); + } + + static void linkPointer(void* code, AssemblerLabel label, void* value) + { + AssemblerType::linkPointer(code, label, value); + } + + static void* getLinkerAddress(void* code, AssemblerLabel label) + { + return AssemblerType::getRelocatedAddress(code, label); + } + + static unsigned getLinkerCallReturnOffset(Call call) + { + return AssemblerType::getCallReturnOffset(call.m_label); + } + + static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination) + { + AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation()); + } + + static void repatchJumpToNop(CodeLocationJump jump) + { + AssemblerType::relinkJumpToNop(jump.dataLocation()); + } + + static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination) + { + switch (nearCall.callMode()) { + case NearCallMode::Tail: + AssemblerType::relinkJump(nearCall.dataLocation(), destination.dataLocation()); + return; + case NearCallMode::Regular: + AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress()); + return; + } + RELEASE_ASSERT_NOT_REACHED(); + } + + static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value) + { + AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value); + } + + static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value) + { + AssemblerType::repatchInt32(dataLabel32.dataLocation(), value); + } + + static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value) + { + AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value); + } + + 
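As a sketch of the probe() interface documented above (only compiled when MASM_PROBE is enabled; the probe function below, what it logs, and the use of an ARM register name are illustrative, and probe() itself is supplied by the target-specific MacroAssembler):

// Hypothetical probe: log the value of r0 at this point in the generated code.
// Assumes <cstdio> for printf.
static void logR0(MacroAssembler::ProbeContext* context)
{
    printf("%s = %p\n", context->gprName(ARMRegisters::r0), context->gpr(ARMRegisters::r0));
}

// ... while emitting code ...
masm.probe(logR0, nullptr, nullptr);   // arg1/arg2 are passed through in the ProbeContext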
static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr) + { + return AssemblerType::readPointer(dataLabelPtr.dataLocation()); + } + + static void replaceWithLoad(CodeLocationConvertibleLoad label) + { + AssemblerType::replaceWithLoad(label.dataLocation()); + } + + static void replaceWithAddressComputation(CodeLocationConvertibleLoad label) + { + AssemblerType::replaceWithAddressComputation(label.dataLocation()); + } + + template<typename Functor> + void addLinkTask(const Functor& functor) + { + m_linkTasks.append(createSharedTask<void(LinkBuffer&)>(functor)); + } + + void emitNops(size_t memoryToFillWithNopsInBytes) + { + AssemblerBuffer& buffer = m_assembler.buffer(); + size_t startCodeSize = buffer.codeSize(); + size_t targetCodeSize = startCodeSize + memoryToFillWithNopsInBytes; + buffer.ensureSpace(memoryToFillWithNopsInBytes); + bool isCopyingToExecutableMemory = false; + AssemblerType::fillNops(static_cast<char*>(buffer.data()) + startCodeSize, memoryToFillWithNopsInBytes, isCopyingToExecutableMemory); + buffer.setCodeSize(targetCodeSize); + } + protected: AbstractMacroAssembler() - : m_randomSource(cryptographicallyRandomNumber()) + : m_randomSource(0) { + invalidateAllTempRegisters(); } uint32_t random() { + if (!m_randomSourceIsInitialized) { + m_randomSourceIsInitialized = true; + m_randomSource.setSeed(cryptographicallyRandomNumber()); + } return m_randomSource.getUint32(); } + bool m_randomSourceIsInitialized { false }; WeakRandom m_randomSource; #if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) @@ -874,7 +1032,7 @@ protected: friend class Label; public: - CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID) + CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID) : m_masm(masm) , m_registerID(registerID) , m_value(0) @@ -902,7 +1060,7 @@ protected: ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); } private: - AbstractMacroAssembler<AssemblerType>* m_masm; + AbstractMacroAssemblerType* m_masm; RegisterID m_registerID; intptr_t m_value; unsigned m_validBit; @@ -928,74 +1086,25 @@ protected: m_tempRegistersValidBits |= registerMask; } + friend class AllowMacroScratchRegisterUsage; + friend class DisallowMacroScratchRegisterUsage; unsigned m_tempRegistersValidBits; + bool m_allowScratchRegister { true }; - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkJump(void* code, Jump jump, CodeLocationLabel target) - { - AssemblerType::linkJump(code, jump.m_label, target.dataLocation()); - } - - static void linkPointer(void* code, AssemblerLabel label, void* value) - { - AssemblerType::linkPointer(code, label, value); - } - - static void* getLinkerAddress(void* code, AssemblerLabel label) - { - return AssemblerType::getRelocatedAddress(code, label); - } - - static unsigned getLinkerCallReturnOffset(Call call) - { - return AssemblerType::getCallReturnOffset(call.m_label); - } - - static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination) - { - AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation()); - } - - static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination) - { - AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress()); - } - - static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value) - { - AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value); - } - - static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value) 
- { - AssemblerType::repatchInt32(dataLabel32.dataLocation(), value); - } + Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks; - static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value) - { - AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value); - } - - static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr) - { - return AssemblerType::readPointer(dataLabelPtr.dataLocation()); - } - - static void replaceWithLoad(CodeLocationConvertibleLoad label) - { - AssemblerType::replaceWithLoad(label.dataLocation()); - } - - static void replaceWithAddressComputation(CodeLocationConvertibleLoad label) - { - AssemblerType::replaceWithAddressComputation(label.dataLocation()); - } -}; + friend class LinkBuffer; +}; // class AbstractMacroAssembler -} // namespace JSC +template <class AssemblerType, class MacroAssemblerType> +inline typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::BaseIndex +AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Address::indexedBy( + typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::RegisterID index, + typename AbstractMacroAssembler<AssemblerType, MacroAssemblerType>::Scale scale) const +{ + return BaseIndex(base, index, scale, offset); +} #endif // ENABLE(ASSEMBLER) -#endif // AbstractMacroAssembler_h +} // namespace JSC diff --git a/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h b/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h new file mode 100644 index 000000000..ed7806ced --- /dev/null +++ b/Source/JavaScriptCore/assembler/AllowMacroScratchRegisterUsage.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#pragma once + +#if ENABLE(ASSEMBLER) + +#include "MacroAssembler.h" + +namespace JSC { + +class AllowMacroScratchRegisterUsage { +public: + AllowMacroScratchRegisterUsage(MacroAssembler& masm) + : m_masm(masm) + , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister) + { + masm.m_allowScratchRegister = true; + } + + ~AllowMacroScratchRegisterUsage() + { + m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister; + } + +private: + MacroAssembler& m_masm; + bool m_oldValueOfAllowScratchRegister; +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/assembler/AssemblerBuffer.h b/Source/JavaScriptCore/assembler/AssemblerBuffer.h index 120868d63..7340952d5 100644 --- a/Source/JavaScriptCore/assembler/AssemblerBuffer.h +++ b/Source/JavaScriptCore/assembler/AssemblerBuffer.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,8 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef AssemblerBuffer_h -#define AssemblerBuffer_h +#pragma once #if ENABLE(ASSEMBLER) @@ -56,53 +55,113 @@ namespace JSC { return AssemblerLabel(m_offset + offset); } + bool operator==(const AssemblerLabel& other) const { return m_offset == other.m_offset; } + uint32_t m_offset; }; - class AssemblerBuffer { - static const int inlineCapacity = 128; + class AssemblerData { + WTF_MAKE_NONCOPYABLE(AssemblerData); + static const size_t InlineCapacity = 128; public: - AssemblerBuffer() - : m_storage(inlineCapacity) - , m_buffer(m_storage.begin()) - , m_capacity(inlineCapacity) - , m_index(0) + AssemblerData() + : m_buffer(m_inlineBuffer) + , m_capacity(InlineCapacity) { } - ~AssemblerBuffer() + AssemblerData(size_t initialCapacity) { + if (initialCapacity <= InlineCapacity) { + m_capacity = InlineCapacity; + m_buffer = m_inlineBuffer; + } else { + m_capacity = initialCapacity; + m_buffer = static_cast<char*>(fastMalloc(m_capacity)); + } } - bool isAvailable(int space) + AssemblerData(AssemblerData&& other) { - return m_index + space <= m_capacity; + if (other.isInlineBuffer()) { + ASSERT(other.m_capacity == InlineCapacity); + memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity); + m_buffer = m_inlineBuffer; + } else + m_buffer = other.m_buffer; + m_capacity = other.m_capacity; + + other.m_buffer = nullptr; + other.m_capacity = 0; } - void ensureSpace(int space) + AssemblerData& operator=(AssemblerData&& other) { - if (!isAvailable(space)) - grow(); + if (m_buffer && !isInlineBuffer()) + fastFree(m_buffer); + + if (other.isInlineBuffer()) { + ASSERT(other.m_capacity == InlineCapacity); + memcpy(m_inlineBuffer, other.m_inlineBuffer, InlineCapacity); + m_buffer = m_inlineBuffer; + } else + m_buffer = other.m_buffer; + m_capacity = other.m_capacity; + + other.m_buffer = nullptr; + other.m_capacity = 0; + return *this; } - bool isAligned(int alignment) const + ~AssemblerData() { - return !(m_index & (alignment - 1)); + if (m_buffer && !isInlineBuffer()) + fastFree(m_buffer); } - template<typename IntegralType> - void putIntegral(IntegralType value) + char* buffer() const { return m_buffer; } + + unsigned capacity() const { return m_capacity; } + + void grow(unsigned extraCapacity = 0) { - ensureSpace(sizeof(IntegralType)); - putIntegralUnchecked(value); + m_capacity = m_capacity + m_capacity 
/ 2 + extraCapacity; + if (isInlineBuffer()) { + m_buffer = static_cast<char*>(fastMalloc(m_capacity)); + memcpy(m_buffer, m_inlineBuffer, InlineCapacity); + } else + m_buffer = static_cast<char*>(fastRealloc(m_buffer, m_capacity)); } - template<typename IntegralType> - void putIntegralUnchecked(IntegralType value) + private: + bool isInlineBuffer() const { return m_buffer == m_inlineBuffer; } + char* m_buffer; + char m_inlineBuffer[InlineCapacity]; + unsigned m_capacity; + }; + + class AssemblerBuffer { + public: + AssemblerBuffer() + : m_storage() + , m_index(0) { - ASSERT(isAvailable(sizeof(IntegralType))); - *reinterpret_cast_ptr<IntegralType*>(m_buffer + m_index) = value; - m_index += sizeof(IntegralType); + } + + bool isAvailable(unsigned space) + { + return m_index + space <= m_storage.capacity(); + } + + void ensureSpace(unsigned space) + { + while (!isAvailable(space)) + outOfLineGrow(); + } + + bool isAligned(int alignment) const + { + return !(m_index & (alignment - 1)); } void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); } @@ -116,7 +175,7 @@ namespace JSC { void* data() const { - return m_buffer; + return m_storage.buffer(); } size_t codeSize() const @@ -124,6 +183,15 @@ namespace JSC { return m_index; } + void setCodeSize(size_t index) + { + // Warning: Only use this if you know exactly what you are doing. + // For example, say you want 40 bytes of nops, it's ok to grow + // and then fill 40 bytes of nops using bigger instructions. + m_index = index; + ASSERT(m_index <= m_storage.capacity()); + } + AssemblerLabel label() const { return AssemblerLabel(m_index); @@ -131,33 +199,104 @@ namespace JSC { unsigned debugOffset() { return m_index; } + AssemblerData&& releaseAssemblerData() { return WTFMove(m_storage); } + + // LocalWriter is a trick to keep the storage buffer and the index + // in memory while issuing multiple Stores. + // It is created in a block scope and its attribute can stay live + // between writes. + // + // LocalWriter *CANNOT* be mixed with other types of access to AssemblerBuffer. + // AssemblerBuffer cannot be used until its LocalWriter goes out of scope. 
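    // Editorial sketch (not part of the upstream patch), assuming an emitter that
    // knows its worst-case byte count up front; "firstWord" and "secondWord" are
    // hypothetical 32-bit instruction encodings:
    //
    //     {
    //         AssemblerBuffer::LocalWriter writer(buffer, 2 * sizeof(uint32_t));
    //         writer.putIntUnchecked(firstWord);
    //         writer.putIntUnchecked(secondWord);
    //     } // the destructor publishes the advanced index back to the AssemblerBuffer
    //
    // ensureSpace() runs once in the constructor, so the unchecked puts never grow the
    // storage or re-read the buffer pointer between writes.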
+ class LocalWriter { + public: + LocalWriter(AssemblerBuffer& buffer, unsigned requiredSpace) + : m_buffer(buffer) + { + buffer.ensureSpace(requiredSpace); + m_storageBuffer = buffer.m_storage.buffer(); + m_index = buffer.m_index; +#if !defined(NDEBUG) + m_initialIndex = m_index; + m_requiredSpace = requiredSpace; +#endif + } + + ~LocalWriter() + { + ASSERT(m_index - m_initialIndex <= m_requiredSpace); + ASSERT(m_buffer.m_index == m_initialIndex); + ASSERT(m_storageBuffer == m_buffer.m_storage.buffer()); + m_buffer.m_index = m_index; + } + + void putByteUnchecked(int8_t value) { putIntegralUnchecked(value); } + void putShortUnchecked(int16_t value) { putIntegralUnchecked(value); } + void putIntUnchecked(int32_t value) { putIntegralUnchecked(value); } + void putInt64Unchecked(int64_t value) { putIntegralUnchecked(value); } + private: + template<typename IntegralType> + void putIntegralUnchecked(IntegralType value) + { + ASSERT(m_index + sizeof(IntegralType) <= m_buffer.m_storage.capacity()); + *reinterpret_cast_ptr<IntegralType*>(m_storageBuffer + m_index) = value; + m_index += sizeof(IntegralType); + } + AssemblerBuffer& m_buffer; + char* m_storageBuffer; + unsigned m_index; +#if !defined(NDEBUG) + unsigned m_initialIndex; + unsigned m_requiredSpace; +#endif + }; + protected: + template<typename IntegralType> + void putIntegral(IntegralType value) + { + unsigned nextIndex = m_index + sizeof(IntegralType); + if (UNLIKELY(nextIndex > m_storage.capacity())) + outOfLineGrow(); + ASSERT(isAvailable(sizeof(IntegralType))); + *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value; + m_index = nextIndex; + } + + template<typename IntegralType> + void putIntegralUnchecked(IntegralType value) + { + ASSERT(isAvailable(sizeof(IntegralType))); + *reinterpret_cast_ptr<IntegralType*>(m_storage.buffer() + m_index) = value; + m_index += sizeof(IntegralType); + } + void append(const char* data, int size) { if (!isAvailable(size)) grow(size); - memcpy(m_buffer + m_index, data, size); + memcpy(m_storage.buffer() + m_index, data, size); m_index += size; } void grow(int extraCapacity = 0) { - m_capacity += m_capacity / 2 + extraCapacity; - - m_storage.grow(m_capacity); - m_buffer = m_storage.begin(); + m_storage.grow(extraCapacity); } private: - Vector<char, inlineCapacity, UnsafeVectorOverflow> m_storage; - char* m_buffer; - int m_capacity; - int m_index; + NEVER_INLINE void outOfLineGrow() + { + m_storage.grow(); + } + + friend LocalWriter; + + AssemblerData m_storage; + unsigned m_index; }; } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // AssemblerBuffer_h diff --git a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h index 053884b01..3b6328864 100644 --- a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h +++ b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h @@ -24,8 +24,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef AssemblerBufferWithConstantPool_h -#define AssemblerBufferWithConstantPool_h +#pragma once #if ENABLE(ASSEMBLER) @@ -332,5 +331,3 @@ private: } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // AssemblerBufferWithConstantPool_h diff --git a/Source/JavaScriptCore/assembler/AssemblerCommon.h b/Source/JavaScriptCore/assembler/AssemblerCommon.h new file mode 100644 index 000000000..2c6cb35f3 --- /dev/null +++ b/Source/JavaScriptCore/assembler/AssemblerCommon.h @@ -0,0 +1,290 @@ +/* + * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +namespace JSC { + +ALWAYS_INLINE bool isIOS() +{ +#if PLATFORM(IOS) + return true; +#else + return false; +#endif +} + +ALWAYS_INLINE bool isInt9(int32_t value) +{ + return value == ((value << 23) >> 23); +} + +template<typename Type> +ALWAYS_INLINE bool isUInt12(Type value) +{ + return !(value & ~static_cast<Type>(0xfff)); +} + +template<int datasize> +ALWAYS_INLINE bool isValidScaledUImm12(int32_t offset) +{ + int32_t maxPImm = 4095 * (datasize / 8); + if (offset < 0) + return false; + if (offset > maxPImm) + return false; + if (offset & ((datasize / 8) - 1)) + return false; + return true; +} + +ALWAYS_INLINE bool isValidSignedImm9(int32_t value) +{ + return isInt9(value); +} + +class ARM64LogicalImmediate { +public: + static ARM64LogicalImmediate create32(uint32_t value) + { + // Check for 0, -1 - these cannot be encoded. + if (!value || !~value) + return InvalidLogicalImmediate; + + // First look for a 32-bit pattern, then for repeating 16-bit + // patterns, 8-bit, 4-bit, and finally 2-bit. 
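        // Editorial worked example (not part of the upstream patch): 0x0f0f0f0f is not a
        // single contiguous bit range at width 32, but its 16-bit halves match, and the
        // 8-bit halves of those match again, so the search reduces to the byte pattern
        // 00001111; findBitRange<8> reports hsb:3, lsb:0, not inverted, and
        // encodeLogicalImmediate<8> then packs that result into the immN:immr:imms form.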
+ + unsigned hsb, lsb; + bool inverted; + if (findBitRange<32>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<32>(hsb, lsb, inverted); + + if ((value & 0xffff) != (value >> 16)) + return InvalidLogicalImmediate; + value &= 0xffff; + + if (findBitRange<16>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<16>(hsb, lsb, inverted); + + if ((value & 0xff) != (value >> 8)) + return InvalidLogicalImmediate; + value &= 0xff; + + if (findBitRange<8>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<8>(hsb, lsb, inverted); + + if ((value & 0xf) != (value >> 4)) + return InvalidLogicalImmediate; + value &= 0xf; + + if (findBitRange<4>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<4>(hsb, lsb, inverted); + + if ((value & 0x3) != (value >> 2)) + return InvalidLogicalImmediate; + value &= 0x3; + + if (findBitRange<2>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<2>(hsb, lsb, inverted); + + return InvalidLogicalImmediate; + } + + static ARM64LogicalImmediate create64(uint64_t value) + { + // Check for 0, -1 - these cannot be encoded. + if (!value || !~value) + return InvalidLogicalImmediate; + + // Look for a contiguous bit range. + unsigned hsb, lsb; + bool inverted; + if (findBitRange<64>(value, hsb, lsb, inverted)) + return encodeLogicalImmediate<64>(hsb, lsb, inverted); + + // If the high & low 32 bits are equal, we can try for a 32-bit (or narrower) pattern. + if (static_cast<uint32_t>(value) == static_cast<uint32_t>(value >> 32)) + return create32(static_cast<uint32_t>(value)); + return InvalidLogicalImmediate; + } + + int value() const + { + ASSERT(isValid()); + return m_value; + } + + bool isValid() const + { + return m_value != InvalidLogicalImmediate; + } + + bool is64bit() const + { + return m_value & (1 << 12); + } + +private: + ARM64LogicalImmediate(int value) + : m_value(value) + { + } + + // Generate a mask with bits in the range hsb..0 set, for example: + // hsb:63 = 0xffffffffffffffff + // hsb:42 = 0x000007ffffffffff + // hsb: 0 = 0x0000000000000001 + static uint64_t mask(unsigned hsb) + { + ASSERT(hsb < 64); + return 0xffffffffffffffffull >> (63 - hsb); + } + + template<unsigned N> + static void partialHSB(uint64_t& value, unsigned&result) + { + if (value & (0xffffffffffffffffull << N)) { + result += N; + value >>= N; + } + } + + // Find the bit number of the highest bit set in a non-zero value, for example: + // 0x8080808080808080 = hsb:63 + // 0x0000000000000001 = hsb: 0 + // 0x000007ffffe00000 = hsb:42 + static unsigned highestSetBit(uint64_t value) + { + ASSERT(value); + unsigned hsb = 0; + partialHSB<32>(value, hsb); + partialHSB<16>(value, hsb); + partialHSB<8>(value, hsb); + partialHSB<4>(value, hsb); + partialHSB<2>(value, hsb); + partialHSB<1>(value, hsb); + return hsb; + } + + // This function takes a value and a bit width, where value obeys the following constraints: + // * bits outside of the width of the value must be zero. + // * bits within the width of value must neither be all clear or all set. + // The input is inspected to detect values that consist of either two or three contiguous + // ranges of bits. The output range hsb..lsb will describe the second range of the value. + // if the range is set, inverted will be false, and if the range is clear, inverted will + // be true. 
For example (with width 8): + // 00001111 = hsb:3, lsb:0, inverted:false + // 11110000 = hsb:3, lsb:0, inverted:true + // 00111100 = hsb:5, lsb:2, inverted:false + // 11000011 = hsb:5, lsb:2, inverted:true + template<unsigned width> + static bool findBitRange(uint64_t value, unsigned& hsb, unsigned& lsb, bool& inverted) + { + ASSERT(value & mask(width - 1)); + ASSERT(value != mask(width - 1)); + ASSERT(!(value & ~mask(width - 1))); + + // Detect cases where the top bit is set; if so, flip all the bits & set invert. + // This halves the number of patterns we need to look for. + const uint64_t msb = 1ull << (width - 1); + if ((inverted = (value & msb))) + value ^= mask(width - 1); + + // Find the highest set bit in value, generate a corresponding mask & flip all + // bits under it. + hsb = highestSetBit(value); + value ^= mask(hsb); + if (!value) { + // If this cleared the value, then the range hsb..0 was all set. + lsb = 0; + return true; + } + + // Try making one more mask, and flipping the bits! + lsb = highestSetBit(value); + value ^= mask(lsb); + if (!value) { + // Success - but lsb actually points to the hsb of a third range - add one + // to get to the lsb of the mid range. + ++lsb; + return true; + } + + return false; + } + + // Encodes the set of immN:immr:imms fields found in a logical immediate. + template<unsigned width> + static int encodeLogicalImmediate(unsigned hsb, unsigned lsb, bool inverted) + { + // Check width is a power of 2! + ASSERT(!(width & (width -1))); + ASSERT(width <= 64 && width >= 2); + ASSERT(hsb >= lsb); + ASSERT(hsb < width); + + int immN = 0; + int imms = 0; + int immr = 0; + + // For 64-bit values this is easy - just set immN to true, and imms just + // contains the bit number of the highest set bit of the set range. For + // values with narrower widths, these are encoded by a leading set of + // one bits, followed by a zero bit, followed by the remaining set of bits + // being the high bit of the range. For a 32-bit immediate there are no + // leading one bits, just a zero followed by a five bit number. For a + // 16-bit immediate there is one one bit, a zero bit, and then a four bit + // bit-position, etc. + if (width == 64) + immN = 1; + else + imms = 63 & ~(width + width - 1); + + if (inverted) { + // if width is 64 & hsb is 62, then we have a value something like: + // 0x80000000ffffffff (in this case with lsb 32). + // The ror should be by 1, imms (effectively set width minus 1) is + // 32. Set width is full width minus cleared width. + immr = (width - 1) - hsb; + imms |= (width - ((hsb - lsb) + 1)) - 1; + } else { + // if width is 64 & hsb is 62, then we have a value something like: + // 0x7fffffff00000000 (in this case with lsb 32). + // The value is effectively rol'ed by lsb, which is equivalent to + // a ror by width - lsb (or 0, in the case where lsb is 0). imms + // is hsb - lsb. + immr = (width - lsb) & (width - 1); + imms |= hsb - lsb; + } + + return immN << 12 | immr << 6 | imms; + } + + static const int InvalidLogicalImmediate = -1; + + int m_value; +}; + +} // namespace JSC. diff --git a/Source/JavaScriptCore/assembler/CPU.h b/Source/JavaScriptCore/assembler/CPU.h new file mode 100644 index 000000000..8e8c82f9b --- /dev/null +++ b/Source/JavaScriptCore/assembler/CPU.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "Options.h" + +namespace JSC { + +inline bool isARMv7IDIVSupported() +{ +#if HAVE(ARM_IDIV_INSTRUCTIONS) + return true; +#else + return false; +#endif +} + +inline bool isARM64() +{ +#if CPU(ARM64) + return true; +#else + return false; +#endif +} + +inline bool isX86() +{ +#if CPU(X86_64) || CPU(X86) + return true; +#else + return false; +#endif +} + +inline bool isX86_64() +{ +#if CPU(X86_64) + return true; +#else + return false; +#endif +} + +inline bool optimizeForARMv7IDIVSupported() +{ + return isARMv7IDIVSupported() && Options::useArchitectureSpecificOptimizations(); +} + +inline bool optimizeForARM64() +{ + return isARM64() && Options::useArchitectureSpecificOptimizations(); +} + +inline bool optimizeForX86() +{ + return isX86() && Options::useArchitectureSpecificOptimizations(); +} + +inline bool optimizeForX86_64() +{ + return isX86_64() && Options::useArchitectureSpecificOptimizations(); +} + +inline bool hasSensibleDoubleToInt() +{ + return optimizeForX86(); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/assembler/CodeLocation.h b/Source/JavaScriptCore/assembler/CodeLocation.h index 86d1f2b75..a115ec3d6 100644 --- a/Source/JavaScriptCore/assembler/CodeLocation.h +++ b/Source/JavaScriptCore/assembler/CodeLocation.h @@ -23,8 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef CodeLocation_h -#define CodeLocation_h +#pragma once #include "MacroAssemblerCodeRef.h" @@ -32,6 +31,8 @@ namespace JSC { +enum NearCallMode { Regular, Tail }; + class CodeLocationInstruction; class CodeLocationLabel; class CodeLocationJump; @@ -59,7 +60,7 @@ public: CodeLocationLabel labelAtOffset(int offset); CodeLocationJump jumpAtOffset(int offset); CodeLocationCall callAtOffset(int offset); - CodeLocationNearCall nearCallAtOffset(int offset); + CodeLocationNearCall nearCallAtOffset(int offset, NearCallMode); CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset); CodeLocationDataLabel32 dataLabel32AtOffset(int offset); CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset); @@ -115,10 +116,13 @@ public: class CodeLocationNearCall : public CodeLocationCommon { public: CodeLocationNearCall() {} - explicit CodeLocationNearCall(MacroAssemblerCodePtr location) - : CodeLocationCommon(location) {} - explicit CodeLocationNearCall(void* location) - : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} + explicit CodeLocationNearCall(MacroAssemblerCodePtr location, NearCallMode callMode) + : CodeLocationCommon(location), m_callMode(callMode) { } + explicit CodeLocationNearCall(void* location, NearCallMode callMode) + : CodeLocationCommon(MacroAssemblerCodePtr(location)), m_callMode(callMode) { } + NearCallMode callMode() { return m_callMode; } +private: + NearCallMode m_callMode = NearCallMode::Regular; }; class CodeLocationDataLabel32 : public CodeLocationCommon { @@ -181,10 +185,10 @@ inline CodeLocationCall CodeLocationCommon::callAtOffset(int offset) return CodeLocationCall(reinterpret_cast<char*>(dataLocation()) + offset); } -inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset) +inline CodeLocationNearCall CodeLocationCommon::nearCallAtOffset(int offset, NearCallMode callMode) { ASSERT_VALID_CODE_OFFSET(offset); - return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset); + return CodeLocationNearCall(reinterpret_cast<char*>(dataLocation()) + offset, callMode); } inline CodeLocationDataLabelPtr CodeLocationCommon::dataLabelPtrAtOffset(int offset) @@ -214,5 +218,3 @@ inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(i } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // CodeLocation_h diff --git a/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h b/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h new file mode 100644 index 000000000..91f038942 --- /dev/null +++ b/Source/JavaScriptCore/assembler/DisallowMacroScratchRegisterUsage.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#if ENABLE(ASSEMBLER) + +#include "MacroAssembler.h" + +namespace JSC { + +class DisallowMacroScratchRegisterUsage { +public: + DisallowMacroScratchRegisterUsage(MacroAssembler& masm) + : m_masm(masm) + , m_oldValueOfAllowScratchRegister(masm.m_allowScratchRegister) + { + masm.m_allowScratchRegister = false; + } + + ~DisallowMacroScratchRegisterUsage() + { + m_masm.m_allowScratchRegister = m_oldValueOfAllowScratchRegister; + } + +private: + MacroAssembler& m_masm; + bool m_oldValueOfAllowScratchRegister; +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp index a7f469da8..0309d585d 100644 --- a/Source/JavaScriptCore/assembler/LinkBuffer.cpp +++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,111 +28,164 @@ #if ENABLE(ASSEMBLER) +#include "CodeBlock.h" +#include "JITCode.h" +#include "JSCInlines.h" #include "Options.h" #include "VM.h" #include <wtf/CompilationThread.h> namespace JSC { +bool shouldDumpDisassemblyFor(CodeBlock* codeBlock) +{ + if (codeBlock && JITCode::isOptimizingJIT(codeBlock->jitType()) && Options::dumpDFGDisassembly()) + return true; + return Options::dumpDisassembly(); +} + LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly() { performFinalization(); ASSERT(m_didAllocate); if (m_executableMemory) - return CodeRef(m_executableMemory); + return CodeRef(*m_executableMemory); return CodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(m_code)); } LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...) 
{ - ASSERT(Options::showDisassembly() || Options::showDFGDisassembly()); - CodeRef result = finalizeCodeWithoutDisassembly(); -#if ENABLE(DISASSEMBLER) - dataLogF("Generated JIT code for "); + if (m_alreadyDisassembled) + return result; + + StringPrintStream out; + out.printf("Generated JIT code for "); va_list argList; va_start(argList, format); - WTF::dataLogFV(format, argList); + out.vprintf(format, argList); va_end(argList); - dataLogF(":\n"); + out.printf(":\n"); + + out.printf(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size()); + + CString header = out.toCString(); + + if (Options::asyncDisassembly()) { + disassembleAsynchronously(header, result, m_size, " "); + return result; + } - dataLogF(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size()); + dataLog(header); disassemble(result.code(), m_size, " ", WTF::dataFile()); -#else - UNUSED_PARAM(format); -#endif // ENABLE(DISASSEMBLER) return result; } #if ENABLE(BRANCH_COMPACTION) +static ALWAYS_INLINE void recordLinkOffsets(AssemblerData& assemblerData, int32_t regionStart, int32_t regionEnd, int32_t offset) +{ + int32_t ptr = regionStart / sizeof(int32_t); + const int32_t end = regionEnd / sizeof(int32_t); + int32_t* offsets = reinterpret_cast_ptr<int32_t*>(assemblerData.buffer()); + while (ptr < end) + offsets[ptr++] = offset; +} + template <typename InstructionType> -void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort effort) +void LinkBuffer::copyCompactAndLinkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) { - m_initialSize = m_assembler->m_assembler.codeSize(); - allocate(m_initialSize, ownerUID, effort); - uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode(); - uint8_t* outData = reinterpret_cast<uint8_t*>(m_code); + allocate(macroAssembler, ownerUID, effort); + const size_t initialSize = macroAssembler.m_assembler.codeSize(); + if (didFailToAllocate()) + return; + + Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = macroAssembler.jumpsToLink(); + m_assemblerStorage = macroAssembler.m_assembler.buffer().releaseAssemblerData(); + uint8_t* inData = reinterpret_cast<uint8_t*>(m_assemblerStorage.buffer()); + + AssemblerData outBuffer(m_size); + + uint8_t* outData = reinterpret_cast<uint8_t*>(outBuffer.buffer()); + uint8_t* codeOutData = reinterpret_cast<uint8_t*>(m_code); + int readPtr = 0; int writePtr = 0; - Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink = m_assembler->jumpsToLink(); unsigned jumpCount = jumpsToLink.size(); - for (unsigned i = 0; i < jumpCount; ++i) { - int offset = readPtr - writePtr; - ASSERT(!(offset & 1)); - - // Copy the instructions from the last jump to the current one. 
- size_t regionSize = jumpsToLink[i].from() - readPtr; - InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr); - InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize); - InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr); - ASSERT(!(regionSize % 2)); - ASSERT(!(readPtr % 2)); - ASSERT(!(writePtr % 2)); - while (copySource != copyEnd) - *copyDst++ = *copySource++; - m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset); - readPtr += regionSize; - writePtr += regionSize; - - // Calculate absolute address of the jump target, in the case of backwards - // branches we need to be precise, forward branches we are pessimistic - const uint8_t* target; - if (jumpsToLink[i].to() >= jumpsToLink[i].from()) - target = outData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far - else - target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to()); - - JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], outData + writePtr, target); - // Compact branch if we can... - if (m_assembler->canCompact(jumpsToLink[i].type())) { - // Step back in the write stream - int32_t delta = m_assembler->jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType); - if (delta) { - writePtr -= delta; - m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr); + if (m_shouldPerformBranchCompaction) { + for (unsigned i = 0; i < jumpCount; ++i) { + int offset = readPtr - writePtr; + ASSERT(!(offset & 1)); + + // Copy the instructions from the last jump to the current one. + size_t regionSize = jumpsToLink[i].from() - readPtr; + InstructionType* copySource = reinterpret_cast_ptr<InstructionType*>(inData + readPtr); + InstructionType* copyEnd = reinterpret_cast_ptr<InstructionType*>(inData + readPtr + regionSize); + InstructionType* copyDst = reinterpret_cast_ptr<InstructionType*>(outData + writePtr); + ASSERT(!(regionSize % 2)); + ASSERT(!(readPtr % 2)); + ASSERT(!(writePtr % 2)); + while (copySource != copyEnd) + *copyDst++ = *copySource++; + recordLinkOffsets(m_assemblerStorage, readPtr, jumpsToLink[i].from(), offset); + readPtr += regionSize; + writePtr += regionSize; + + // Calculate absolute address of the jump target, in the case of backwards + // branches we need to be precise, forward branches we are pessimistic + const uint8_t* target; + if (jumpsToLink[i].to() >= jumpsToLink[i].from()) + target = codeOutData + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far + else + target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to()); + + JumpLinkType jumpLinkType = MacroAssembler::computeJumpType(jumpsToLink[i], codeOutData + writePtr, target); + // Compact branch if we can... 
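                // Editorial note (not part of the upstream patch): when a shorter encoding
                // exists, the branch below steps writePtr back by the saved delta and records
                // the cumulative readPtr - writePtr shift with recordLinkOffsets(), so offsets
                // taken against the original instruction stream can later be translated to the
                // compacted stream through executableOffsetFor().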
+ if (MacroAssembler::canCompact(jumpsToLink[i].type())) { + // Step back in the write stream + int32_t delta = MacroAssembler::jumpSizeDelta(jumpsToLink[i].type(), jumpLinkType); + if (delta) { + writePtr -= delta; + recordLinkOffsets(m_assemblerStorage, jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr); + } } + jumpsToLink[i].setFrom(writePtr); + } + } else { + if (!ASSERT_DISABLED) { + for (unsigned i = 0; i < jumpCount; ++i) + ASSERT(!MacroAssembler::canCompact(jumpsToLink[i].type())); } - jumpsToLink[i].setFrom(writePtr); } // Copy everything after the last jump - memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr); - m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr); + memcpy(outData + writePtr, inData + readPtr, initialSize - readPtr); + recordLinkOffsets(m_assemblerStorage, readPtr, initialSize, readPtr - writePtr); for (unsigned i = 0; i < jumpCount; ++i) { - uint8_t* location = outData + jumpsToLink[i].from(); - uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to()); - m_assembler->link(jumpsToLink[i], location, target); + uint8_t* location = codeOutData + jumpsToLink[i].from(); + uint8_t* target = codeOutData + jumpsToLink[i].to() - executableOffsetFor(jumpsToLink[i].to()); + MacroAssembler::link(jumpsToLink[i], outData + jumpsToLink[i].from(), location, target); } jumpsToLink.clear(); - shrink(writePtr + m_initialSize - readPtr); + + size_t compactSize = writePtr + initialSize - readPtr; + if (m_executableMemory) { + m_size = compactSize; + m_executableMemory->shrink(m_size); + } else { + size_t nopSizeInBytes = initialSize - compactSize; + bool isCopyingToExecutableMemory = false; + MacroAssembler::AssemblerType_T::fillNops(outData + compactSize, nopSizeInBytes, isCopyingToExecutableMemory); + } + + performJITMemcpy(m_code, outData, m_size); #if DUMP_LINK_STATISTICS - dumpLinkStatistics(m_code, m_initialSize, m_size); + dumpLinkStatistics(m_code, initialSize, m_size); #endif #if DUMP_CODE dumpCode(m_code, m_size); @@ -141,59 +194,63 @@ void LinkBuffer::copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort eff #endif -void LinkBuffer::linkCode(void* ownerUID, JITCompilationEffort effort) +void LinkBuffer::linkCode(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) { + // Ensure that the end of the last invalidation point does not extend beyond the end of the buffer. 
+ macroAssembler.label(); + #if !ENABLE(BRANCH_COMPACTION) #if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL - m_assembler->m_assembler.buffer().flushConstantPool(false); + macroAssembler.m_assembler.buffer().flushConstantPool(false); #endif - AssemblerBuffer& buffer = m_assembler->m_assembler.buffer(); - allocate(buffer.codeSize(), ownerUID, effort); + allocate(macroAssembler, ownerUID, effort); if (!m_didAllocate) return; ASSERT(m_code); + AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer(); #if CPU(ARM_TRADITIONAL) - m_assembler->m_assembler.prepareExecutableCopy(m_code); + macroAssembler.m_assembler.prepareExecutableCopy(m_code); #endif - memcpy(m_code, buffer.data(), buffer.codeSize()); + performJITMemcpy(m_code, buffer.data(), buffer.codeSize()); #if CPU(MIPS) - m_assembler->m_assembler.relocateJumps(buffer.data(), m_code); + macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code); #endif #elif CPU(ARM_THUMB2) - copyCompactAndLinkCode<uint16_t>(ownerUID, effort); + copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort); #elif CPU(ARM64) - copyCompactAndLinkCode<uint32_t>(ownerUID, effort); -#endif + copyCompactAndLinkCode<uint32_t>(macroAssembler, ownerUID, effort); +#endif // !ENABLE(BRANCH_COMPACTION) + + m_linkTasks = WTFMove(macroAssembler.m_linkTasks); } -void LinkBuffer::allocate(size_t initialSize, void* ownerUID, JITCompilationEffort effort) +void LinkBuffer::allocate(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort) { + size_t initialSize = macroAssembler.m_assembler.codeSize(); if (m_code) { if (initialSize > m_size) return; + size_t nopsToFillInBytes = m_size - initialSize; + macroAssembler.emitNops(nopsToFillInBytes); m_didAllocate = true; - m_size = initialSize; return; } + ASSERT(m_vm != nullptr); m_executableMemory = m_vm->executableAllocator.allocate(*m_vm, initialSize, ownerUID, effort); if (!m_executableMemory) return; - ExecutableAllocator::makeWritable(m_executableMemory->start(), m_executableMemory->sizeInBytes()); m_code = m_executableMemory->start(); m_size = initialSize; m_didAllocate = true; } -void LinkBuffer::shrink(size_t newSize) -{ - m_size = newSize; - m_executableMemory->shrink(m_size); -} - void LinkBuffer::performFinalization() { + for (auto& task : m_linkTasks) + task->run(*this); + #ifndef NDEBUG ASSERT(!isCompilationThread()); ASSERT(!m_completed); @@ -201,11 +258,6 @@ void LinkBuffer::performFinalization() m_completed = true; #endif -#if ENABLE(BRANCH_COMPACTION) - ExecutableAllocator::makeExecutable(code(), m_initialSize); -#else - ExecutableAllocator::makeExecutable(code(), m_size); -#endif MacroAssembler::cacheFlush(code(), m_size); } diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h index 8d4ce521f..efb26f9ce 100644 --- a/Source/JavaScriptCore/assembler/LinkBuffer.h +++ b/Source/JavaScriptCore/assembler/LinkBuffer.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2010, 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,8 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef LinkBuffer_h -#define LinkBuffer_h +#pragma once #if ENABLE(ASSEMBLER) @@ -43,6 +42,7 @@ namespace JSC { +class CodeBlock; class VM; // LinkBuffer: @@ -79,36 +79,33 @@ class LinkBuffer { #endif public: - LinkBuffer(VM& vm, MacroAssembler* masm, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed) + LinkBuffer(VM& vm, MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed) : m_size(0) -#if ENABLE(BRANCH_COMPACTION) - , m_initialSize(0) -#endif , m_didAllocate(false) , m_code(0) - , m_assembler(masm) , m_vm(&vm) #ifndef NDEBUG , m_completed(false) #endif { - linkCode(ownerUID, effort); + linkCode(macroAssembler, ownerUID, effort); } - LinkBuffer(VM& vm, MacroAssembler* masm, void* code, size_t size) + LinkBuffer(MacroAssembler& macroAssembler, void* code, size_t size, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true) : m_size(size) -#if ENABLE(BRANCH_COMPACTION) - , m_initialSize(0) -#endif , m_didAllocate(false) , m_code(code) - , m_assembler(masm) - , m_vm(&vm) + , m_vm(0) #ifndef NDEBUG , m_completed(false) #endif { - linkCode(0, JITCompilationCanFail); +#if ENABLE(BRANCH_COMPACTION) + m_shouldPerformBranchCompaction = shouldPerformBranchCompaction; +#else + UNUSED_PARAM(shouldPerformBranchCompaction); +#endif + linkCode(macroAssembler, 0, effort); } ~LinkBuffer() @@ -145,10 +142,10 @@ public: MacroAssembler::linkJump(code(), jump, label); } - void link(JumpList list, CodeLocationLabel label) + void link(const JumpList& list, CodeLocationLabel label) { - for (unsigned i = 0; i < list.m_jumps.size(); ++i) - link(list.m_jumps[i], label); + for (const Jump& jump : list.jumps()) + link(jump, label); } void patch(DataLabelPtr label, void* value) @@ -164,6 +161,11 @@ public: } // These methods are used to obtain handles to allow the code to be relinked / repatched later. + + CodeLocationLabel entrypoint() + { + return CodeLocationLabel(code()); + } CodeLocationCall locationOf(Call call) { @@ -176,7 +178,8 @@ public: { ASSERT(call.isFlagSet(Call::Linkable)); ASSERT(call.isFlagSet(Call::Near)); - return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label))); + return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_label)), + call.isFlagSet(Call::Tail) ? NearCallMode::Tail : NearCallMode::Regular); } CodeLocationLabel locationOf(PatchableJump jump) @@ -244,34 +247,44 @@ public: { return m_code; } + + size_t size() const { return m_size; } - size_t size() - { - return m_size; - } + bool wasAlreadyDisassembled() const { return m_alreadyDisassembled; } + void didAlreadyDisassemble() { m_alreadyDisassembled = true; } + + VM& vm() { return *m_vm; } private: +#if ENABLE(BRANCH_COMPACTION) + int executableOffsetFor(int location) + { + if (!location) + return 0; + return bitwise_cast<int32_t*>(m_assemblerStorage.buffer())[location / sizeof(int32_t) - 1]; + } +#endif + template <typename T> T applyOffset(T src) { #if ENABLE(BRANCH_COMPACTION) - src.m_offset -= m_assembler->executableOffsetFor(src.m_offset); + src.m_offset -= executableOffsetFor(src.m_offset); #endif return src; } - + // Keep this private! - the underlying code should only be obtained externally via finalizeCode(). 
void* code() { return m_code; } - void allocate(size_t initialSize, void* ownerUID, JITCompilationEffort); - void shrink(size_t newSize); + void allocate(MacroAssembler&, void* ownerUID, JITCompilationEffort); - JS_EXPORT_PRIVATE void linkCode(void* ownerUID, JITCompilationEffort); + JS_EXPORT_PRIVATE void linkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort); #if ENABLE(BRANCH_COMPACTION) template <typename InstructionType> - void copyCompactAndLinkCode(void* ownerUID, JITCompilationEffort); + void copyCompactAndLinkCode(MacroAssembler&, void* ownerUID, JITCompilationEffort); #endif void performFinalization(); @@ -287,15 +300,17 @@ private: RefPtr<ExecutableMemoryHandle> m_executableMemory; size_t m_size; #if ENABLE(BRANCH_COMPACTION) - size_t m_initialSize; + AssemblerData m_assemblerStorage; + bool m_shouldPerformBranchCompaction { true }; #endif bool m_didAllocate; void* m_code; - MacroAssembler* m_assembler; VM* m_vm; #ifndef NDEBUG bool m_completed; #endif + bool m_alreadyDisassembled { false }; + Vector<RefPtr<SharedTask<void(LinkBuffer&)>>> m_linkTasks; }; #define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogFArgumentsForHeading) \ @@ -303,6 +318,11 @@ private: ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogFArgumentsForHeading) \ : (linkBufferReference).finalizeCodeWithoutDisassembly()) +bool shouldDumpDisassemblyFor(CodeBlock*); + +#define FINALIZE_CODE_FOR(codeBlock, linkBufferReference, dataLogFArgumentsForHeading) \ + FINALIZE_CODE_IF(shouldDumpDisassemblyFor(codeBlock) || Options::asyncDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) + // Use this to finalize code, like so: // // CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number)); @@ -316,17 +336,15 @@ private: // // ... and so on. // -// Note that the dataLogFArgumentsForHeading are only evaluated when showDisassembly +// Note that the dataLogFArgumentsForHeading are only evaluated when dumpDisassembly // is true, so you can hide expensive disassembly-only computations inside there. #define FINALIZE_CODE(linkBufferReference, dataLogFArgumentsForHeading) \ - FINALIZE_CODE_IF(JSC::Options::showDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) + FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) #define FINALIZE_DFG_CODE(linkBufferReference, dataLogFArgumentsForHeading) \ - FINALIZE_CODE_IF((JSC::Options::showDisassembly() || Options::showDFGDisassembly()), linkBufferReference, dataLogFArgumentsForHeading) + FINALIZE_CODE_IF(JSC::Options::asyncDisassembly() || JSC::Options::dumpDisassembly() || Options::dumpDFGDisassembly(), linkBufferReference, dataLogFArgumentsForHeading) } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // LinkBuffer_h diff --git a/Source/JavaScriptCore/assembler/MIPSAssembler.h b/Source/JavaScriptCore/assembler/MIPSAssembler.h index b75b4d0af..b1c42326c 100644 --- a/Source/JavaScriptCore/assembler/MIPSAssembler.h +++ b/Source/JavaScriptCore/assembler/MIPSAssembler.h @@ -26,8 +26,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef MIPSAssembler_h -#define MIPSAssembler_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(MIPS) @@ -151,11 +150,11 @@ public: typedef MIPSRegisters::FPRegisterID FPRegisterID; typedef SegmentedVector<AssemblerLabel, 64> Jumps; - static RegisterID firstRegister() { return MIPSRegisters::r0; } - static RegisterID lastRegister() { return MIPSRegisters::r31; } + static constexpr RegisterID firstRegister() { return MIPSRegisters::r0; } + static constexpr RegisterID lastRegister() { return MIPSRegisters::r31; } - static FPRegisterID firstFPRegister() { return MIPSRegisters::f0; } - static FPRegisterID lastFPRegister() { return MIPSRegisters::f31; } + static constexpr FPRegisterID firstFPRegister() { return MIPSRegisters::f0; } + static constexpr FPRegisterID lastFPRegister() { return MIPSRegisters::f31; } MIPSAssembler() : m_indexOfLastWatchpoint(INT_MIN) @@ -240,6 +239,11 @@ public: emitInst(0x3c000000 | (rt << OP_SH_RT) | (imm & 0xffff)); } + void clz(RegisterID rd, RegisterID rs) + { + emitInst(0x70000020 | (rd << OP_SH_RD) | (rs << OP_SH_RS) | (rd << OP_SH_RT)); + } + void addiu(RegisterID rt, RegisterID rs, int imm) { emitInst(0x24000000 | (rt << OP_SH_RT) | (rs << OP_SH_RS) | (imm & 0xffff)); @@ -542,6 +546,11 @@ public: emitInst(0x46200004 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); } + void absd(FPRegisterID fd, FPRegisterID fs) + { + emitInst(0x46200005 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); + } + void movd(FPRegisterID fd, FPRegisterID fs) { emitInst(0x46200006 | (fd << OP_SH_FD) | (fs << OP_SH_FS)); @@ -870,28 +879,8 @@ public: static void cacheFlush(void* code, size_t size) { -#if GCC_VERSION_AT_LEAST(4, 3, 0) -#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4, 4, 3) - int lineSize; - asm("rdhwr %0, $1" : "=r" (lineSize)); - // - // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in - // mips_expand_synci_loop that may execute synci one more time. - // "start" points to the fisrt byte of the cache line. - // "end" points to the last byte of the line before the last cache line. - // Because size is always a multiple of 4, this is safe to set - // "end" to the last byte. 
- // - intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize); - intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1; - __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end)); -#else intptr_t end = reinterpret_cast<intptr_t>(code) + size; __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end)); -#endif -#else - _flush_cache(reinterpret_cast<char*>(code), size, BCACHE); -#endif } static ptrdiff_t maxJumpReplacementSize() @@ -1106,5 +1095,3 @@ private: } // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(MIPS) - -#endif // MIPSAssembler_h diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.cpp b/Source/JavaScriptCore/assembler/MacroAssembler.cpp index 2cff056d2..0cd5bcfb0 100644 --- a/Source/JavaScriptCore/assembler/MacroAssembler.cpp +++ b/Source/JavaScriptCore/assembler/MacroAssembler.cpp @@ -28,11 +28,135 @@ #if ENABLE(ASSEMBLER) +#include <wtf/PrintStream.h> + namespace JSC { const double MacroAssembler::twoToThe32 = (double)0x100000000ull; +#if ENABLE(MASM_PROBE) +static void stdFunctionCallback(MacroAssembler::ProbeContext* context) +{ + auto func = static_cast<const std::function<void (MacroAssembler::ProbeContext*)>*>(context->arg1); + (*func)(context); +} + +void MacroAssembler::probe(std::function<void (MacroAssembler::ProbeContext*)> func) +{ + probe(stdFunctionCallback, new std::function<void (MacroAssembler::ProbeContext*)>(func), 0); +} +#endif // ENABLE(MASM_PROBE) + } // namespace JSC +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, MacroAssembler::RelationalCondition cond) +{ + switch (cond) { + case MacroAssembler::Equal: + out.print("Equal"); + return; + case MacroAssembler::NotEqual: + out.print("NotEqual"); + return; + case MacroAssembler::Above: + out.print("Above"); + return; + case MacroAssembler::AboveOrEqual: + out.print("AboveOrEqual"); + return; + case MacroAssembler::Below: + out.print("Below"); + return; + case MacroAssembler::BelowOrEqual: + out.print("BelowOrEqual"); + return; + case MacroAssembler::GreaterThan: + out.print("GreaterThan"); + return; + case MacroAssembler::GreaterThanOrEqual: + out.print("GreaterThanOrEqual"); + return; + case MacroAssembler::LessThan: + out.print("LessThan"); + return; + case MacroAssembler::LessThanOrEqual: + out.print("LessThanOrEqual"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void printInternal(PrintStream& out, MacroAssembler::ResultCondition cond) +{ + switch (cond) { + case MacroAssembler::Overflow: + out.print("Overflow"); + return; + case MacroAssembler::Signed: + out.print("Signed"); + return; + case MacroAssembler::PositiveOrZero: + out.print("PositiveOrZero"); + return; + case MacroAssembler::Zero: + out.print("Zero"); + return; + case MacroAssembler::NonZero: + out.print("NonZero"); + return; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void printInternal(PrintStream& out, MacroAssembler::DoubleCondition cond) +{ + switch (cond) { + case MacroAssembler::DoubleEqual: + out.print("DoubleEqual"); + return; + case MacroAssembler::DoubleNotEqual: + out.print("DoubleNotEqual"); + return; + case MacroAssembler::DoubleGreaterThan: + out.print("DoubleGreaterThan"); + return; + case MacroAssembler::DoubleGreaterThanOrEqual: + out.print("DoubleGreaterThanOrEqual"); + return; + case MacroAssembler::DoubleLessThan: + out.print("DoubleLessThan"); + return; + case MacroAssembler::DoubleLessThanOrEqual: + out.print("DoubleLessThanOrEqual"); + return; + case 
MacroAssembler::DoubleEqualOrUnordered: + out.print("DoubleEqualOrUnordered"); + return; + case MacroAssembler::DoubleNotEqualOrUnordered: + out.print("DoubleNotEqualOrUnordered"); + return; + case MacroAssembler::DoubleGreaterThanOrUnordered: + out.print("DoubleGreaterThanOrUnordered"); + return; + case MacroAssembler::DoubleGreaterThanOrEqualOrUnordered: + out.print("DoubleGreaterThanOrEqualOrUnordered"); + return; + case MacroAssembler::DoubleLessThanOrUnordered: + out.print("DoubleLessThanOrUnordered"); + return; + case MacroAssembler::DoubleLessThanOrEqualOrUnordered: + out.print("DoubleLessThanOrEqualOrUnordered"); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + #endif // ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h index 4a43eb625..b6aba874d 100644 --- a/Source/JavaScriptCore/assembler/MacroAssembler.h +++ b/Source/JavaScriptCore/assembler/MacroAssembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,13 +23,12 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef MacroAssembler_h -#define MacroAssembler_h - -#include <wtf/Platform.h> +#pragma once #if ENABLE(ASSEMBLER) +#include "JSCJSValue.h" + #if CPU(ARM_THUMB2) #include "MacroAssemblerARMv7.h" namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; }; @@ -56,78 +55,53 @@ namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }; #include "MacroAssemblerX86_64.h" namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }; -#elif CPU(SH4) -#include "MacroAssemblerSH4.h" -namespace JSC { -typedef MacroAssemblerSH4 MacroAssemblerBase; -}; - #else #error "The MacroAssembler is not supported on this platform." 
#endif +#include "MacroAssemblerHelpers.h" + namespace JSC { class MacroAssembler : public MacroAssemblerBase { public: - static bool isStackRelated(RegisterID reg) - { - return reg == stackPointerRegister || reg == framePointerRegister; - } - - static RegisterID firstRealRegister() - { - RegisterID firstRegister = MacroAssembler::firstRegister(); - while (MacroAssembler::isStackRelated(firstRegister)) - firstRegister = static_cast<RegisterID>(firstRegister + 1); - return firstRegister; - } - - static RegisterID nextRegister(RegisterID reg) - { - RegisterID result = static_cast<RegisterID>(reg + 1); - while (MacroAssembler::isStackRelated(result)) - result = static_cast<RegisterID>(result + 1); - return result; - } - - static RegisterID secondRealRegister() + static constexpr RegisterID nextRegister(RegisterID reg) { - return nextRegister(firstRealRegister()); + return static_cast<RegisterID>(reg + 1); } - static FPRegisterID nextFPRegister(FPRegisterID reg) + static constexpr FPRegisterID nextFPRegister(FPRegisterID reg) { return static_cast<FPRegisterID>(reg + 1); } - static unsigned numberOfRegisters() + static constexpr unsigned numberOfRegisters() { return lastRegister() - firstRegister() + 1; } - static unsigned registerIndex(RegisterID reg) + static constexpr unsigned registerIndex(RegisterID reg) { return reg - firstRegister(); } - static unsigned numberOfFPRegisters() + static constexpr unsigned numberOfFPRegisters() { return lastFPRegister() - firstFPRegister() + 1; } - static unsigned fpRegisterIndex(FPRegisterID reg) + static constexpr unsigned fpRegisterIndex(FPRegisterID reg) { return reg - firstFPRegister(); } - static unsigned registerIndex(FPRegisterID reg) + static constexpr unsigned registerIndex(FPRegisterID reg) { return fpRegisterIndex(reg) + numberOfRegisters(); } - static unsigned totalNumberOfRegisters() + static constexpr unsigned totalNumberOfRegisters() { return numberOfRegisters() + numberOfFPRegisters(); } @@ -135,14 +109,16 @@ public: using MacroAssemblerBase::pop; using MacroAssemblerBase::jump; using MacroAssemblerBase::branch32; + using MacroAssemblerBase::compare32; using MacroAssemblerBase::move; using MacroAssemblerBase::add32; + using MacroAssemblerBase::mul32; using MacroAssemblerBase::and32; using MacroAssemblerBase::branchAdd32; using MacroAssemblerBase::branchMul32; -#if CPU(X86_64) +#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(X86_64) using MacroAssemblerBase::branchPtr; -#endif // CPU(X86_64) +#endif using MacroAssemblerBase::branchSub32; using MacroAssemblerBase::lshift32; using MacroAssemblerBase::or32; @@ -160,7 +136,6 @@ public: static const double twoToThe32; // This is super useful for some double code. // Utilities used by the DFG JIT. 
-#if ENABLE(DFG_JIT) using MacroAssemblerBase::invert; static DoubleCondition invert(DoubleCondition cond) @@ -190,10 +165,9 @@ public: return DoubleGreaterThanOrEqual; case DoubleLessThanOrEqualOrUnordered: return DoubleGreaterThan; - default: - RELEASE_ASSERT_NOT_REACHED(); - return DoubleEqual; // make compiler happy } + RELEASE_ASSERT_NOT_REACHED(); + return DoubleEqual; // make compiler happy } static bool isInvertible(ResultCondition cond) @@ -201,6 +175,8 @@ public: switch (cond) { case Zero: case NonZero: + case Signed: + case PositiveOrZero: return true; default: return false; @@ -214,14 +190,65 @@ public: return NonZero; case NonZero: return Zero; + case Signed: + return PositiveOrZero; + case PositiveOrZero: + return Signed; default: RELEASE_ASSERT_NOT_REACHED(); return Zero; // Make compiler happy for release builds. } } -#endif - // Platform agnostic onvenience functions, + static RelationalCondition flip(RelationalCondition cond) + { + switch (cond) { + case Equal: + case NotEqual: + return cond; + case Above: + return Below; + case AboveOrEqual: + return BelowOrEqual; + case Below: + return Above; + case BelowOrEqual: + return AboveOrEqual; + case GreaterThan: + return LessThan; + case GreaterThanOrEqual: + return LessThanOrEqual; + case LessThan: + return GreaterThan; + case LessThanOrEqual: + return GreaterThanOrEqual; + } + + RELEASE_ASSERT_NOT_REACHED(); + return Equal; + } + + static bool isSigned(RelationalCondition cond) + { + return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond); + } + + static bool isUnsigned(RelationalCondition cond) + { + return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond); + } + + static bool isSigned(ResultCondition cond) + { + return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond); + } + + static bool isUnsigned(ResultCondition cond) + { + return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond); + } + + // Platform agnostic convenience functions, // described in terms of other macro assembly methods. 
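    // Editorial illustration (not part of the upstream patch): the pointer-width helpers
    // further down forward to the matching 32-bit or 64-bit primitives for the target,
    // for example addPtr(src, dest) lowers to add32(src, dest) on 32-bit targets and to
    // add64(src, dest) on 64-bit ones, so clients can write pointer-width code once
    // against this single interface.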
void pop() { @@ -258,6 +285,10 @@ public: { push(src); } + void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm) + { + push(imm); + } void popToRestore(RegisterID dest) { pop(dest); @@ -272,6 +303,8 @@ public: loadDouble(stackPointerRegister, dest); addPtr(TrustedImm32(sizeof(double)), stackPointerRegister); } + + static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); } #endif // !CPU(ARM64) #if CPU(X86_64) || CPU(ARM64) @@ -316,6 +349,11 @@ public: branchPtr(cond, op1, imm).linkTo(target, this); } + Jump branch32(RelationalCondition cond, RegisterID left, AbsoluteAddress right) + { + return branch32(flip(cond), right, left); + } + void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target) { branch32(cond, op1, op2).linkTo(target, this); @@ -346,6 +384,11 @@ public: return branch32(commute(cond), right, left); } + void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest) + { + compare32(commute(cond), right, left, dest); + } + void branchTestPtr(ResultCondition cond, RegisterID reg, Label target) { branchTestPtr(cond, reg).linkTo(target, this); @@ -362,6 +405,11 @@ public: return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue)); } + PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue)); + } + #if !CPU(ARM_TRADITIONAL) PatchableJump patchableJump() { @@ -377,6 +425,11 @@ public: { return PatchableJump(branch32(cond, reg, imm)); } + + PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm) + { + return PatchableJump(branch32(cond, address, imm)); + } #endif #endif @@ -414,6 +467,19 @@ public: return condition; } + void oops() + { + abortWithReason(B3Oops); + } + + // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return + // consumes some register in some way. 
+ void retVoid() { ret(); } + void ret32(RegisterID) { ret(); } + void ret64(RegisterID) { ret(); } + void retFloat(FPRegisterID) { ret(); } + void retDouble(FPRegisterID) { ret(); } + static const unsigned BlindingModulus = 64; bool shouldConsiderBlinding() { @@ -439,6 +505,11 @@ public: add32(src, dest); } + void addPtr(RegisterID left, RegisterID right, RegisterID dest) + { + add32(left, right, dest); + } + void addPtr(TrustedImm32 imm, RegisterID srcDest) { add32(imm, srcDest); @@ -474,6 +545,21 @@ public: and32(TrustedImm32(imm), srcDest); } + void lshiftPtr(Imm32 imm, RegisterID srcDest) + { + lshift32(trustedImm32ForShift(imm), srcDest); + } + + void rshiftPtr(Imm32 imm, RegisterID srcDest) + { + rshift32(trustedImm32ForShift(imm), srcDest); + } + + void urshiftPtr(Imm32 imm, RegisterID srcDest) + { + urshift32(trustedImm32ForShift(imm), srcDest); + } + void negPtr(RegisterID dest) { neg32(dest); @@ -595,6 +681,11 @@ public: store32(TrustedImm32(imm), address); } + void storePtr(TrustedImm32 imm, ImplicitAddress address) + { + store32(imm, address); + } + void storePtr(TrustedImmPtr imm, BaseIndex address) { store32(TrustedImm32(imm), address); @@ -691,6 +782,11 @@ public: { add64(src, dest); } + + void addPtr(RegisterID left, RegisterID right, RegisterID dest) + { + add64(left, right, dest); + } void addPtr(Address src, RegisterID dest) { @@ -747,6 +843,16 @@ public: lshift64(trustedImm32ForShift(imm), srcDest); } + void rshiftPtr(Imm32 imm, RegisterID srcDest) + { + rshift64(trustedImm32ForShift(imm), srcDest); + } + + void urshiftPtr(Imm32 imm, RegisterID srcDest) + { + urshift64(trustedImm32ForShift(imm), srcDest); + } + void negPtr(RegisterID dest) { neg64(dest); @@ -857,6 +963,11 @@ public: store64(TrustedImm64(imm), address); } + void storePtr(TrustedImm32 imm, ImplicitAddress address) + { + store64(imm, address); + } + void storePtr(TrustedImmPtr imm, BaseIndex address) { store64(TrustedImm64(imm), address); @@ -986,7 +1097,7 @@ public: if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value)) return shouldConsiderBlinding(); - value = abs(value); + value = fabs(value); // Only allow a limited set of fractional components double scaledValue = value * 8; if (scaledValue / 8 != value) @@ -1137,7 +1248,7 @@ public: void convertInt32ToDouble(Imm32 imm, FPRegisterID dest) { - if (shouldBlind(imm)) { + if (shouldBlind(imm) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister); convertInt32ToDouble(scratchRegister, dest); @@ -1173,7 +1284,7 @@ public: Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right) { - if (shouldBlind(right)) { + if (shouldBlind(right) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister); return branchPtr(cond, left, scratchRegister); @@ -1183,7 +1294,7 @@ public: void storePtr(ImmPtr imm, Address dest) { - if (shouldBlind(imm)) { + if (shouldBlind(imm) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister); storePtr(scratchRegister, dest); @@ -1193,7 +1304,7 @@ public: void store64(Imm64 imm, Address dest) { - if (shouldBlind(imm)) { + if (shouldBlind(imm) && haveScratchRegisterForBlinding()) { RegisterID scratchRegister = scratchRegisterForBlinding(); 
loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister); store64(scratchRegister, dest); @@ -1203,6 +1314,37 @@ public: #endif // !CPU(X86_64) +#if ENABLE(B3_JIT) + // We should implement this the right way eventually, but for now, it's fine because it arises so + // infrequently. + void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest) + { + move(TrustedImm32(0), dest); + Jump falseCase = branchDouble(invert(cond), left, right); + move(TrustedImm32(1), dest); + falseCase.link(this); + } + void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest) + { + move(TrustedImm32(0), dest); + Jump falseCase = branchFloat(invert(cond), left, right); + move(TrustedImm32(1), dest); + falseCase.link(this); + } +#endif + + void lea32(Address address, RegisterID dest) + { + add32(TrustedImm32(address.offset), address.base, dest); + } + +#if CPU(X86_64) || CPU(ARM64) + void lea64(Address address, RegisterID dest) + { + add64(TrustedImm32(address.offset), address.base, dest); + } +#endif // CPU(X86_64) || CPU(ARM64) + bool shouldBlind(Imm32 imm) { #if ENABLE(FORCED_JIT_BLINDING) @@ -1316,6 +1458,16 @@ public: } else add32(imm.asTrustedImm32(), dest); } + + void add32(Imm32 imm, RegisterID src, RegisterID dest) + { + if (shouldBlind(imm)) { + BlindedImm32 key = additionBlindedConstant(imm); + add32(key.value1, src, dest); + add32(key.value2, dest); + } else + add32(imm.asTrustedImm32(), src, dest); + } void addPtr(Imm32 imm, RegisterID dest) { @@ -1327,6 +1479,27 @@ public: addPtr(imm.asTrustedImm32(), dest); } + void mul32(Imm32 imm, RegisterID src, RegisterID dest) + { + if (shouldBlind(imm)) { + if (src != dest || haveScratchRegisterForBlinding()) { + if (src == dest) { + move(src, scratchRegisterForBlinding()); + src = scratchRegisterForBlinding(); + } + loadXorBlindedConstant(xorBlindConstant(imm), dest); + mul32(src, dest); + return; + } + // If we don't have a scratch register available for use, we'll just + // place a random number of nops. + uint32_t nopCount = random() & 3; + while (nopCount--) + nop(); + } + mul32(imm.asTrustedImm32(), src, dest); + } + void and32(Imm32 imm, RegisterID dest) { if (shouldBlind(imm)) { @@ -1486,23 +1659,50 @@ public: return branch32(cond, left, right.asTrustedImm32()); } - Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest) + void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest) { - if (src == dest) - ASSERT(haveScratchRegisterForBlinding()); + if (shouldBlind(right)) { + if (left != dest || haveScratchRegisterForBlinding()) { + RegisterID blindedConstantReg = dest; + if (left == dest) + blindedConstantReg = scratchRegisterForBlinding(); + loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg); + compare32(cond, left, blindedConstantReg, dest); + return; + } + // If we don't have a scratch register available for use, we'll just + // place a random number of nops. 
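None of the blinding paths above emit an attacker-chosen immediate verbatim. shouldBlind() decides whether a constant is worth hiding, and the constant is then split into two halves that only recombine to the original value at run time, either by XOR (xorBlindConstant, consumed via loadXorBlindedConstant) or by addition (additionBlindedConstant, applied as two add32 steps). When no scratch register is available, the code falls back to emitting a random number of nops, presumably so the immediate's position in the generated code stays unpredictable. A rough, self-contained sketch of the splitting idea, using illustrative names rather than the real JSC helpers:

    #include <cstdint>

    struct BlindedImm { uint32_t value1, value2; };

    // value == value1 ^ value2, so the plain constant only exists after the XOR.
    BlindedImm xorBlind(uint32_t value, uint32_t randomKey)
    {
        return { randomKey, value ^ randomKey };
    }

    // value == value1 + value2 (mod 2^32), matching the two-step add32 above.
    BlindedImm additionBlind(uint32_t value, uint32_t randomKey)
    {
        return { randomKey, value - randomKey };
    }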
+ uint32_t nopCount = random() & 3; + while (nopCount--) + nop(); + compare32(cond, left, right.asTrustedImm32(), dest); + return; + } + + compare32(cond, left, right.asTrustedImm32(), dest); + } + Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest) + { if (shouldBlind(imm)) { - if (src == dest) { - move(src, scratchRegisterForBlinding()); - src = scratchRegisterForBlinding(); + if (src != dest || haveScratchRegisterForBlinding()) { + if (src == dest) { + move(src, scratchRegisterForBlinding()); + src = scratchRegisterForBlinding(); + } + loadXorBlindedConstant(xorBlindConstant(imm), dest); + return branchAdd32(cond, src, dest); } - loadXorBlindedConstant(xorBlindConstant(imm), dest); - return branchAdd32(cond, src, dest); + // If we don't have a scratch register available for use, we'll just + // place a random number of nops. + uint32_t nopCount = random() & 3; + while (nopCount--) + nop(); } return branchAdd32(cond, src, imm.asTrustedImm32(), dest); } - Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest) { if (src == dest) ASSERT(haveScratchRegisterForBlinding()); @@ -1515,7 +1715,7 @@ public: loadXorBlindedConstant(xorBlindConstant(imm), dest); return branchMul32(cond, src, dest); } - return branchMul32(cond, imm.asTrustedImm32(), src, dest); + return branchMul32(cond, src, imm.asTrustedImm32(), dest); } // branchSub32 takes a scratch register as 32 bit platforms make use of this, @@ -1560,12 +1760,35 @@ public: { urshift32(src, trustedImm32ForShift(amount), dest); } + +#if ENABLE(MASM_PROBE) + using MacroAssemblerBase::probe; + + // Let's you print from your JIT generated code. + // See comments in MacroAssemblerPrinter.h for examples of how to use this. + template<typename... Arguments> + void print(Arguments... args); + + void probe(std::function<void (ProbeContext*)>); +#endif }; } // namespace JSC +namespace WTF { + +class PrintStream; + +void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition); +void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition); +void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition); + +} // namespace WTF + #else // ENABLE(ASSEMBLER) +namespace JSC { + // If there is no assembler for this platform, at least allow code to make references to // some of the things it would otherwise define, albeit without giving that code any way // of doing anything useful. @@ -1579,6 +1802,6 @@ public: enum FPRegisterID { NoFPRegister }; }; -#endif // ENABLE(ASSEMBLER) +} // namespace JSC -#endif // MacroAssembler_h +#endif // ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp index a6f3e65c0..9b1440fed 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. + * Copyright (C) 2013-2015 Apple Inc. * Copyright (C) 2009 University of Szeged * All rights reserved. 
* @@ -31,9 +31,7 @@ #include "MacroAssemblerARM.h" -#if USE(MASM_PROBE) -#include <wtf/StdLibExtras.h> -#endif +#include <wtf/InlineASM.h> #if OS(LINUX) #include <sys/types.h> @@ -50,7 +48,7 @@ static bool isVFPPresent() { #if OS(LINUX) int fd = open("/proc/self/auxv", O_RDONLY); - if (fd > 0) { + if (fd != -1) { Elf32_auxv_t aux; while (read(fd, &aux, sizeof(Elf32_auxv_t))) { if (aux.a_type == AT_HWCAP) { @@ -62,7 +60,7 @@ static bool isVFPPresent() } #endif // OS(LINUX) -#if (COMPILER(GCC) && defined(__VFP_FP__)) +#if (COMPILER(GCC_OR_CLANG) && defined(__VFP_FP__)) return true; #else return false; @@ -99,51 +97,256 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register } #endif // CPU(ARMV5_OR_LOWER) -#if USE(MASM_PROBE) +#if ENABLE(MASM_PROBE) -void MacroAssemblerARM::ProbeContext::dumpCPURegisters(const char* indentation) -{ - #define DUMP_GPREGISTER(_type, _regName) { \ - int32_t value = reinterpret_cast<int32_t>(cpu._regName); \ - dataLogF("%s %5s: 0x%08x %d\n", indentation, #_regName, value, value) ; \ - } - FOR_EACH_CPU_GPREGISTER(DUMP_GPREGISTER) - FOR_EACH_CPU_SPECIAL_REGISTER(DUMP_GPREGISTER) - #undef DUMP_GPREGISTER - - #define DUMP_FPREGISTER(_type, _regName) { \ - uint32_t* u = reinterpret_cast<uint32_t*>(&cpu._regName); \ - double* d = reinterpret_cast<double*>(&cpu._regName); \ - dataLogF("%s %5s: 0x %08x %08x %12g\n", \ - indentation, #_regName, u[1], u[0], d[0]); \ - } - FOR_EACH_CPU_FPREGISTER(DUMP_FPREGISTER) - #undef DUMP_FPREGISTER -} +extern "C" void ctiMasmProbeTrampoline(); -void MacroAssemblerARM::ProbeContext::dump(const char* indentation) -{ - if (!indentation) - indentation = ""; +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerARM::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. 
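For orientation, the offsets defined in the hunk below work out as follows (with PTR_SIZE and GPREG_SIZE both 4 and FPREG_SIZE 8, as declared there): probeFunction, arg1 and arg2 sit at offsets 0, 4 and 8, one pointer-sized slot is left unused, and the general-purpose block starts at 16, putting r0 through r11 at 16 through 60 and ip, sp, lr, pc, apsr and fpscr at 64, 68, 72, 76, 80 and 84. The d0-d15 block then begins at 88, so PROBE_SIZE comes to 88 + 16 * 8 = 216 bytes, which is what the COMPILE_ASSERT against sizeof(ProbeContext) further down checks.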
- dataLogF("%sProbeContext %p {\n", indentation, this); - dataLogF("%s probeFunction: %p\n", indentation, probeFunction); - dataLogF("%s arg1: %p %llu\n", indentation, arg1, reinterpret_cast<int64_t>(arg1)); - dataLogF("%s arg2: %p %llu\n", indentation, arg2, reinterpret_cast<int64_t>(arg2)); - dataLogF("%s cpu: {\n", indentation); +#define PTR_SIZE 4 +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) - dumpCPURegisters(indentation); +#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE) - dataLogF("%s }\n", indentation); - dataLogF("%s}\n", indentation); -} +#define GPREG_SIZE 4 +#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) +#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) +#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) +#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) +#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) +#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) +#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) +#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) +#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) +#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) +#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) +#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) +#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) +#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) +#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) +#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) +#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) +#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) -extern "C" void ctiMasmProbeTrampoline(); +#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) + +#define FPREG_SIZE 8 +#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) +#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) +#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) +#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) +#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) +#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) +#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) +#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) +#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) +#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) +#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) +#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) +#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) +#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) +#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) +#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) + +#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to 
change ctiMasmProbeTrampoline offsets above to match. +#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, 
ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(sizeof(MacroAssemblerARM::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); +#undef PROBE_OFFSETOF + +asm ( + ".text" "\n" + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + INLINE_ARM_FUNCTION(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + // MacroAssemblerARM::probe() has already generated code to store some values. + // The top of stack now looks like this: + // esp[0 * ptrSize]: probeFunction + // esp[1 * ptrSize]: arg1 + // esp[2 * ptrSize]: arg2 + // esp[3 * ptrSize]: saved r3 / S0 + // esp[4 * ptrSize]: saved ip + // esp[5 * ptrSize]: saved lr + // esp[6 * ptrSize]: saved sp + + "mov ip, sp" "\n" + "mov r3, sp" "\n" + "sub r3, r3, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n" + + // The ARM EABI specifies that the stack needs to be 16 byte aligned. 
+ "bic r3, r3, #0xf" "\n" + "mov sp, r3" "\n" + + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "\n" + "stmia lr, { r0-r11 }" "\n" + "mrs lr, APSR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "vmrs lr, FPSCR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + + "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" + "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" + "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R3_OFFSET) "]" "\n" + "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n" + "vstmia.64 ip, { d0-d15 }" "\n" + + "mov fp, sp" "\n" // Save the ProbeContext*. + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "mov r0, sp" "\n" // the ProbeContext* arg. + "blx ip" "\n" + + "mov sp, fp" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n" + "vldmdb.64 ip!, { d0-d15 }" "\n" + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n" + "ldmdb ip, { r0-r11 }" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + "vmsr FPSCR, ip" "\n" + + // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr. + // There are 2 issues that complicate the restoration of these last few + // registers: + // + // 1. Normal ARM calling convention relies on moving lr to pc to return to + // the caller. In our case, the address to return to is specified by + // ProbeContext.cpu.pc. And at that moment, we won't have any available + // scratch registers to hold the return address (lr needs to hold + // ProbeContext.cpu.lr, not the return address). + // + // The solution is to store the return address on the stack and load the + // pc from there. + // + // 2. Issue 1 means we will need to write to the stack location at + // ProbeContext.cpu.sp - 4. But if the user probe function had modified + // the value of ProbeContext.cpu.sp to point in the range between + // &ProbeContext.cpu.ip thru &ProbeContext.cpu.aspr, then the action for + // Issue 1 may trash the values to be restored before we can restore + // them. + // + // The solution is to check if ProbeContext.cpu.sp contains a value in + // the undesirable range. If so, we copy the remaining ProbeContext + // register data to a safe range (at memory lower than where + // ProbeContext.cpu.sp points) first, and restore the remaining register + // from this new range. 
+ + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "cmp lr, ip" "\n" + "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // We get here because the new expected stack pointer location is lower + // than where it's supposed to be. This means the safe range of stack + // memory where we'll be copying the remaining register restore values to + // might be in a region of memory below the sp i.e. unallocated stack + // memory. This in turn makes it vulnerable to interrupts potentially + // trashing the copied values. To prevent that, we must first allocate the + // needed stack memory by adjusting the sp before the copying. + + "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE) + " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n" + + "mov ip, sp" "\n" + "mov sp, lr" "\n" + "mov lr, ip" "\n" + + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n" + "str ip, [lr]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "msr APSR, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "mov lr, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" -// For details on "What code is emitted for the probe?" and "What values are in -// the saved registers?", see comment for MacroAssemblerX86::probe() in -// MacroAssemblerX86_64.h. + "pop { pc }" "\n" +); +#endif // COMPILER(GCC_OR_CLANG) void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* arg1, void* arg2) { @@ -160,7 +363,7 @@ void MacroAssemblerARM::probe(MacroAssemblerARM::ProbeFunction function, void* a m_assembler.blx(RegisterID::S0); } -#endif // USE(MASM_PROBE) +#endif // ENABLE(MASM_PROBE) } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h index 7eae2ee01..7d36034a3 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2013 Apple Inc. + * Copyright (C) 2008, 2013-2016 Apple Inc. * Copyright (C) 2009, 2010 University of Szeged * All rights reserved. * @@ -25,8 +25,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef MacroAssemblerARM_h -#define MacroAssemblerARM_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) @@ -35,11 +34,14 @@ namespace JSC { -class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> { +class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> { static const int DoubleConditionMask = 0x0f; static const int DoubleConditionBitSpecial = 0x10; COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes); public: + static const unsigned numGPRs = 16; + static const unsigned numFPRs = 16; + typedef ARMRegisters::FPRegisterID FPRegisterID; enum RelationalCondition { @@ -228,13 +230,31 @@ public: store32(ARMRegisters::S1, ARMRegisters::S0); } + void or32(TrustedImm32 imm, AbsoluteAddress dest) + { + move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0); + load32(Address(ARMRegisters::S0), ARMRegisters::S1); + or32(imm, ARMRegisters::S1); // It uses S0 as temporary register, we need to reload the address. + move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0); + store32(ARMRegisters::S1, ARMRegisters::S0); + } + + void or32(TrustedImm32 imm, Address address) + { + load32(address, ARMRegisters::S0); + or32(imm, ARMRegisters::S0, ARMRegisters::S0); + store32(ARMRegisters::S0, address); + } + void or32(TrustedImm32 imm, RegisterID dest) { + ASSERT(dest != ARMRegisters::S0); m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) { + ASSERT(src != ARMRegisters::S0); m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); } @@ -263,7 +283,10 @@ public: void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f)); + if (!imm.m_value) + move(src, dest); + else + m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f)); } void urshift32(RegisterID shiftAmount, RegisterID dest) @@ -286,7 +309,10 @@ public: void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f)); + if (!imm.m_value) + move(src, dest); + else + m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f)); } void sub32(RegisterID src, RegisterID dest) @@ -294,6 +320,11 @@ public: m_assembler.subs(dest, dest, src); } + void sub32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.subs(dest, left, right); + } + void sub32(TrustedImm32 imm, RegisterID dest) { m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0)); @@ -370,7 +401,12 @@ public: m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8SignedExtendTo32(Address address, RegisterID dest) + { + m_assembler.dataTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.offset); + } + + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } @@ -385,7 +421,7 @@ public: m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { 
m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } @@ -414,6 +450,18 @@ public: load16(address, dest); } + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), ARMRegisters::S0); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), ARMRegisters::S1); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result(this); @@ -459,16 +507,29 @@ public: m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset); } + void store8(RegisterID src, ImplicitAddress address) + { + m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset); + } + void store8(RegisterID src, const void* address) { move(TrustedImmPtr(address), ARMRegisters::S0); m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0); } + void store8(TrustedImm32 imm, ImplicitAddress address) + { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + move(imm8, ARMRegisters::S1); + store8(ARMRegisters::S1, address); + } + void store8(TrustedImm32 imm, const void* address) { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); - move(imm, ARMRegisters::S1); + move(imm8, ARMRegisters::S1); m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0); } @@ -517,6 +578,12 @@ public: m_assembler.pop(dest); } + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.pop(dest1); + m_assembler.pop(dest2); + } + void push(RegisterID src) { m_assembler.push(src); @@ -534,6 +601,12 @@ public: push(ARMRegisters::S0); } + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.push(src2); + m_assembler.push(src1); + } + void move(TrustedImm32 imm, RegisterID dest) { m_assembler.moveImm(imm.m_value, dest); @@ -571,21 +644,29 @@ public: Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) { - load8(left, ARMRegisters::S1); - return branch32(cond, ARMRegisters::S1, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right8); } Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) { - ASSERT(!(right.m_value & 0xFFFFFF00)); - load8(left, ARMRegisters::S1); - return branch32(cond, ARMRegisters::S1, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right8); } Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) { + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1); - load8(Address(ARMRegisters::S1), ARMRegisters::S1); + MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1); + return branch32(cond, ARMRegisters::S1, right8); + } + + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) + { + load32(left, ARMRegisters::S1); return branch32(cond, ARMRegisters::S1, right); } @@ -633,33 +714,36 @@ public: Jump branchTest8(ResultCondition cond, Address 
address, TrustedImm32 mask = TrustedImm32(-1)) { - load8(address, ARMRegisters::S1); - return branchTest32(cond, ARMRegisters::S1, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1); + return branchTest32(cond, ARMRegisters::S1, mask8); } Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) { - load8(address, ARMRegisters::S1); - return branchTest32(cond, ARMRegisters::S1, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1); + return branchTest32(cond, ARMRegisters::S1, mask8); } Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1); - load8(Address(ARMRegisters::S1), ARMRegisters::S1); - return branchTest32(cond, ARMRegisters::S1, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1); + return branchTest32(cond, ARMRegisters::S1, mask8); } Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) { - ASSERT((cond == Zero) || (cond == NonZero)); + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); m_assembler.tst(reg, mask); return Jump(m_assembler.jmp(ARMCondition(cond))); } Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { - ASSERT((cond == Zero) || (cond == NonZero)); + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true); if (w & ARMAssembler::Op2InvertedImmediate) m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate); @@ -790,7 +874,7 @@ public: return branchMul32(cond, src, dest, dest); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); if (cond == Overflow) { @@ -858,6 +942,14 @@ public: return PatchableJump(jump); } + PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm) + { + internalCompare32(address, imm); + Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), false)); + m_assembler.bx(ARMRegisters::S1, ARMCondition(cond)); + return PatchableJump(jump); + } + void breakpoint() { m_assembler.bkpt(0); @@ -869,6 +961,11 @@ public: return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear); } + Call nearTailCall() + { + return Call(m_assembler.jmp(), Call::LinkableNearTail); + } + Call call(RegisterID target) { return Call(m_assembler.blx(target), Call::None); @@ -900,14 +997,15 @@ public: void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) { - load8(left, ARMRegisters::S1); - compare32(cond, ARMRegisters::S1, right, dest); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1); + compare32(cond, ARMRegisters::S1, right8, dest); } void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) { 
if (mask.m_value == -1) - m_assembler.cmp(0, reg); + m_assembler.tst(reg, reg); else m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0)); m_assembler.mov(dest, ARMAssembler::getOp2Byte(0)); @@ -922,8 +1020,9 @@ public: void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) { - load8(address, ARMRegisters::S1); - test32(cond, ARMRegisters::S1, mask, dest); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1); + test32(cond, ARMRegisters::S1, mask8, dest); } void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) @@ -1021,6 +1120,13 @@ public: return dataLabel; } + DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest) + { + DataLabel32 dataLabel(this); + m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value)); + return dataLabel; + } + Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord)); @@ -1038,6 +1144,15 @@ public: return jump; } + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + load32(left, ARMRegisters::S1); + ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord)); + dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0); + Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true); + return jump; + } + DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1); @@ -1066,6 +1181,7 @@ public: return s_isVFPPresent; } static bool supportsFloatingPointAbs() { return false; } + static bool supportsFloatingPointRounding() { return false; } void loadFloat(BaseIndex address, FPRegisterID dest) { @@ -1082,12 +1198,30 @@ public: m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { - move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); + move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0); m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0); } + NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + void storeFloat(FPRegisterID src, BaseIndex address) { m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset); @@ -1103,9 +1237,9 @@ public: m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset); } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { - move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); + 
move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0); m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0); } @@ -1115,6 +1249,12 @@ public: m_assembler.vmov_f64(dest, src); } + void moveZeroToDouble(FPRegisterID reg) + { + static double zeroConstant = 0.; + loadDouble(TrustedImmPtr(&zeroConstant), reg); + } + void addDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.vadd_f64(dest, dest, src); @@ -1133,7 +1273,7 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, ARMRegisters::SD0); + loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0); addDouble(ARMRegisters::SD0, dest); } @@ -1330,6 +1470,11 @@ public: m_assembler.dmbSY(); } + void storeFence() + { + m_assembler.dmbISHST(); + } + static FunctionPtr readCallTarget(CodeLocationCall call) { return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation()))); @@ -1342,11 +1487,22 @@ public: static ptrdiff_t maxJumpReplacementSize() { - ARMAssembler::maxJumpReplacementSize(); - return 0; + return ARMAssembler::maxJumpReplacementSize(); + } + + static ptrdiff_t patchableJumpSize() + { + return ARMAssembler::patchableJumpSize(); } static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr) { @@ -1364,36 +1520,29 @@ public: ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff); } - static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) { UNREACHABLE_FOR_PLATFORM(); } -#if USE(MASM_PROBE) - struct CPUState { - #define DECLARE_REGISTER(_type, _regName) \ - _type _regName; - FOR_EACH_CPU_REGISTER(DECLARE_REGISTER) - #undef DECLARE_REGISTER - }; - - struct ProbeContext; - typedef void (*ProbeFunction)(struct ProbeContext*); + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) + { + UNREACHABLE_FOR_PLATFORM(); + } - struct ProbeContext { - ProbeFunction probeFunction; - void* arg1; - void* arg2; - CPUState cpu; + static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + { + ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } - void dump(const char* indentation = 0); - private: - void dumpCPURegisters(const char* indentation); - }; + static void repatchCall(CodeLocationCall call, FunctionPtr destination) + { + ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } - // For details about probe(), see comment in MacroAssemblerX86_64.h. 
- void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0); -#endif // USE(MASM_PROBE) +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) protected: ARMAssembler::Condition ARMCondition(RelationalCondition cond) @@ -1424,7 +1573,6 @@ protected: private: friend class LinkBuffer; - friend class RepatchBuffer; void internalCompare32(RegisterID left, TrustedImm32 right) { @@ -1435,22 +1583,26 @@ private: m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0)); } - static void linkCall(void* code, Call call, FunctionPtr function) + void internalCompare32(Address left, TrustedImm32 right) { - ARMAssembler::linkCall(code, call.m_label, function.value()); + ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value); + load32(left, ARMRegisters::S1); + if (tmp != ARMAssembler::InvalidImmediate) + m_assembler.cmn(ARMRegisters::S1, tmp); + else + m_assembler.cmp(ARMRegisters::S1, m_assembler.getImm(right.m_value, ARMRegisters::S0)); } - static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + static void linkCall(void* code, Call call, FunctionPtr function) { - ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); + if (call.isFlagSet(Call::Tail)) + ARMAssembler::linkJump(code, call.m_label, function.value()); + else + ARMAssembler::linkCall(code, call.m_label, function.value()); } - static void repatchCall(CodeLocationCall call, FunctionPtr destination) - { - ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); - } -#if USE(MASM_PROBE) +#if ENABLE(MASM_PROBE) inline TrustedImm32 trustedImm32FromPtr(void* ptr) { return TrustedImm32(TrustedImmPtr(ptr)); @@ -1470,8 +1622,6 @@ private: static const bool s_isVFPPresent; }; -} +} // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL) - -#endif // MacroAssemblerARM_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp new file mode 100644 index 000000000..8e7b51b9f --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.cpp @@ -0,0 +1,507 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(ASSEMBLER) && CPU(ARM64) +#include "MacroAssemblerARM64.h" + +#include <wtf/InlineASM.h> + +namespace JSC { + +#if ENABLE(MASM_PROBE) + +extern "C" void ctiMasmProbeTrampoline(); + +using namespace ARM64Registers; + +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerARM64::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. +#define PTR_SIZE 8 +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) + +#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE) + +#define GPREG_SIZE 8 +#define PROBE_CPU_X0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) +#define PROBE_CPU_X1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) +#define PROBE_CPU_X2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) +#define PROBE_CPU_X3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) +#define PROBE_CPU_X4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) +#define PROBE_CPU_X5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) +#define PROBE_CPU_X6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) +#define PROBE_CPU_X7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) +#define PROBE_CPU_X8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) +#define PROBE_CPU_X9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) +#define PROBE_CPU_X10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) +#define PROBE_CPU_X11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) +#define PROBE_CPU_X12_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) +#define PROBE_CPU_X13_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) +#define PROBE_CPU_X14_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) +#define PROBE_CPU_X15_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) +#define PROBE_CPU_X16_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) +#define PROBE_CPU_X17_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) +#define PROBE_CPU_X18_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) +#define PROBE_CPU_X19_OFFSET (PROBE_FIRST_GPREG_OFFSET + (19 * GPREG_SIZE)) +#define PROBE_CPU_X20_OFFSET (PROBE_FIRST_GPREG_OFFSET + (20 * GPREG_SIZE)) +#define PROBE_CPU_X21_OFFSET (PROBE_FIRST_GPREG_OFFSET + (21 * GPREG_SIZE)) +#define PROBE_CPU_X22_OFFSET (PROBE_FIRST_GPREG_OFFSET + (22 * GPREG_SIZE)) +#define PROBE_CPU_X23_OFFSET (PROBE_FIRST_GPREG_OFFSET + (23 * GPREG_SIZE)) +#define PROBE_CPU_X24_OFFSET (PROBE_FIRST_GPREG_OFFSET + (24 * GPREG_SIZE)) +#define PROBE_CPU_X25_OFFSET (PROBE_FIRST_GPREG_OFFSET + (25 * GPREG_SIZE)) +#define PROBE_CPU_X26_OFFSET (PROBE_FIRST_GPREG_OFFSET + (26 * GPREG_SIZE)) +#define PROBE_CPU_X27_OFFSET (PROBE_FIRST_GPREG_OFFSET + (27 * GPREG_SIZE)) +#define PROBE_CPU_X28_OFFSET (PROBE_FIRST_GPREG_OFFSET + (28 * GPREG_SIZE)) +#define PROBE_CPU_FP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (29 * GPREG_SIZE)) +#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (30 * GPREG_SIZE)) +#define PROBE_CPU_SP_OFFSET 
(PROBE_FIRST_GPREG_OFFSET + (31 * GPREG_SIZE)) + +#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (32 * GPREG_SIZE)) +#define PROBE_CPU_NZCV_OFFSET (PROBE_FIRST_GPREG_OFFSET + (33 * GPREG_SIZE)) +#define PROBE_CPU_FPSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (34 * GPREG_SIZE)) + +#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (35 * GPREG_SIZE)) + +#define FPREG_SIZE 8 +#define PROBE_CPU_Q0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) +#define PROBE_CPU_Q1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) +#define PROBE_CPU_Q2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) +#define PROBE_CPU_Q3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) +#define PROBE_CPU_Q4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) +#define PROBE_CPU_Q5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) +#define PROBE_CPU_Q6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) +#define PROBE_CPU_Q7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) +#define PROBE_CPU_Q8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) +#define PROBE_CPU_Q9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) +#define PROBE_CPU_Q10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) +#define PROBE_CPU_Q11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) +#define PROBE_CPU_Q12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) +#define PROBE_CPU_Q13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) +#define PROBE_CPU_Q14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) +#define PROBE_CPU_Q15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) +#define PROBE_CPU_Q16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) +#define PROBE_CPU_Q17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE)) +#define PROBE_CPU_Q18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE)) +#define PROBE_CPU_Q19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE)) +#define PROBE_CPU_Q20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE)) +#define PROBE_CPU_Q21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE)) +#define PROBE_CPU_Q22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE)) +#define PROBE_CPU_Q23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE)) +#define PROBE_CPU_Q24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE)) +#define PROBE_CPU_Q25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE)) +#define PROBE_CPU_Q26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE)) +#define PROBE_CPU_Q27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE)) +#define PROBE_CPU_Q28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE)) +#define PROBE_CPU_Q29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE)) +#define PROBE_CPU_Q30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE)) +#define PROBE_CPU_Q31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE)) +#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE)) +#define SAVED_CALLER_SP PROBE_SIZE +#define PROBE_SIZE_PLUS_SAVED_CALLER_SP (SAVED_CALLER_SP + PTR_SIZE) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to change ctiMasmProbeTrampoline offsets above to match. 
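The block of COMPILE_ASSERTs that follows is the same safeguard used on the 32-bit ARM side: every hand-maintained offset consumed by the assembly trampoline is checked against offsetof() on the real struct, so a layout change fails at compile time instead of silently corrupting register state at run time. A minimal, generic version of the idiom, with made-up names:

    #include <cstddef>

    struct Context {
        void* probeFunction;
        void* arg1;
        void* arg2;
    };

    // Offset the hand-written trampoline assumes for arg2.
    #define CONTEXT_ARG2_OFFSET (2 * sizeof(void*))

    static_assert(offsetof(Context, arg2) == CONTEXT_ARG2_OFFSET,
        "trampoline offset must match the struct layout");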
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARM64::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x0) == PROBE_CPU_X0_OFFSET, ProbeContext_cpu_x0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x1) == PROBE_CPU_X1_OFFSET, ProbeContext_cpu_x1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x2) == PROBE_CPU_X2_OFFSET, ProbeContext_cpu_x2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x3) == PROBE_CPU_X3_OFFSET, ProbeContext_cpu_x3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x4) == PROBE_CPU_X4_OFFSET, ProbeContext_cpu_x4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x5) == PROBE_CPU_X5_OFFSET, ProbeContext_cpu_x5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x6) == PROBE_CPU_X6_OFFSET, ProbeContext_cpu_x6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x7) == PROBE_CPU_X7_OFFSET, ProbeContext_cpu_x7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x8) == PROBE_CPU_X8_OFFSET, ProbeContext_cpu_x8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x9) == PROBE_CPU_X9_OFFSET, ProbeContext_cpu_x9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x10) == PROBE_CPU_X10_OFFSET, ProbeContext_cpu_x10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x11) == PROBE_CPU_X11_OFFSET, ProbeContext_cpu_x11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x12) == PROBE_CPU_X12_OFFSET, ProbeContext_cpu_x12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x13) == PROBE_CPU_X13_OFFSET, ProbeContext_cpu_x13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x14) == PROBE_CPU_X14_OFFSET, ProbeContext_cpu_x14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x15) == PROBE_CPU_X15_OFFSET, ProbeContext_cpu_x15_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x16) == PROBE_CPU_X16_OFFSET, ProbeContext_cpu_x16_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x17) == PROBE_CPU_X17_OFFSET, ProbeContext_cpu_x17_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x18) == PROBE_CPU_X18_OFFSET, ProbeContext_cpu_x18_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x19) == PROBE_CPU_X19_OFFSET, ProbeContext_cpu_x19_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x20) == PROBE_CPU_X20_OFFSET, ProbeContext_cpu_x20_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x21) == PROBE_CPU_X21_OFFSET, ProbeContext_cpu_x21_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x22) == PROBE_CPU_X22_OFFSET, ProbeContext_cpu_x22_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x23) == PROBE_CPU_X23_OFFSET, ProbeContext_cpu_x23_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x24) == PROBE_CPU_X24_OFFSET, 
ProbeContext_cpu_x24_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x25) == PROBE_CPU_X25_OFFSET, ProbeContext_cpu_x25_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x26) == PROBE_CPU_X26_OFFSET, ProbeContext_cpu_x26_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x27) == PROBE_CPU_X27_OFFSET, ProbeContext_cpu_x27_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.x28) == PROBE_CPU_X28_OFFSET, ProbeContext_cpu_x28_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fp) == PROBE_CPU_FP_OFFSET, ProbeContext_cpu_fp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.nzcv) == PROBE_CPU_NZCV_OFFSET, ProbeContext_cpu_nzcv_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpsr) == PROBE_CPU_FPSR_OFFSET, ProbeContext_cpu_fpsr_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q0) == PROBE_CPU_Q0_OFFSET, ProbeContext_cpu_q0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q1) == PROBE_CPU_Q1_OFFSET, ProbeContext_cpu_q1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q2) == PROBE_CPU_Q2_OFFSET, ProbeContext_cpu_q2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q3) == PROBE_CPU_Q3_OFFSET, ProbeContext_cpu_q3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q4) == PROBE_CPU_Q4_OFFSET, ProbeContext_cpu_q4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q5) == PROBE_CPU_Q5_OFFSET, ProbeContext_cpu_q5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q6) == PROBE_CPU_Q6_OFFSET, ProbeContext_cpu_q6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q7) == PROBE_CPU_Q7_OFFSET, ProbeContext_cpu_q7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q8) == PROBE_CPU_Q8_OFFSET, ProbeContext_cpu_q8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q9) == PROBE_CPU_Q9_OFFSET, ProbeContext_cpu_q9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q10) == PROBE_CPU_Q10_OFFSET, ProbeContext_cpu_q10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q11) == PROBE_CPU_Q11_OFFSET, ProbeContext_cpu_q11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q12) == PROBE_CPU_Q12_OFFSET, ProbeContext_cpu_q12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q13) == PROBE_CPU_Q13_OFFSET, ProbeContext_cpu_q13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q14) == PROBE_CPU_Q14_OFFSET, ProbeContext_cpu_q14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q15) == PROBE_CPU_Q15_OFFSET, ProbeContext_cpu_q15_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q16) == PROBE_CPU_Q16_OFFSET, ProbeContext_cpu_q16_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q17) == PROBE_CPU_Q17_OFFSET, 
ProbeContext_cpu_q17_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q18) == PROBE_CPU_Q18_OFFSET, ProbeContext_cpu_q18_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q19) == PROBE_CPU_Q19_OFFSET, ProbeContext_cpu_q19_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q20) == PROBE_CPU_Q20_OFFSET, ProbeContext_cpu_q20_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q21) == PROBE_CPU_Q21_OFFSET, ProbeContext_cpu_q21_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q22) == PROBE_CPU_Q22_OFFSET, ProbeContext_cpu_q22_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q23) == PROBE_CPU_Q23_OFFSET, ProbeContext_cpu_q23_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q24) == PROBE_CPU_Q24_OFFSET, ProbeContext_cpu_q24_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q25) == PROBE_CPU_Q25_OFFSET, ProbeContext_cpu_q25_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q26) == PROBE_CPU_Q26_OFFSET, ProbeContext_cpu_q26_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q27) == PROBE_CPU_Q27_OFFSET, ProbeContext_cpu_q27_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q28) == PROBE_CPU_Q28_OFFSET, ProbeContext_cpu_q28_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q29) == PROBE_CPU_Q29_OFFSET, ProbeContext_cpu_q29_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q30) == PROBE_CPU_Q30_OFFSET, ProbeContext_cpu_q30_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.q31) == PROBE_CPU_Q31_OFFSET, ProbeContext_cpu_q31_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(sizeof(MacroAssemblerARM64::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); + +#undef PROBE_OFFSETOF + +asm ( + ".text" "\n" + ".align 2" "\n" + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + // MacroAssemblerARM64::probe() has already generated code to store some values. + // The top of stack (the caller save buffer) now looks like this: + // sp[0 * ptrSize]: probeFunction + // sp[1 * ptrSize]: arg1 + // sp[2 * ptrSize]: arg2 + // sp[3 * ptrSize]: address of arm64ProbeTrampoline() + // sp[4 * ptrSize]: saved x27 + // sp[5 * ptrSize]: saved x28 + // sp[6 * ptrSize]: saved lr + // sp[7 * ptrSize]: saved sp + + "mov x27, sp" "\n" + "mov x28, sp" "\n" + + "sub x28, x28, #" STRINGIZE_VALUE_OF(PROBE_SIZE_PLUS_SAVED_CALLER_SP) "\n" + + // The ARM EABI specifies that the stack needs to be 16 byte aligned. 
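    // (Clearing the low four bits with "bic x28, x28, #0xf" rounds the new stack
    // pointer down to a 16-byte boundary, as AAPCS64 requires whenever sp is
    // used to access memory; e.g. an address ending in 0x38 becomes 0x30.)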
+ "bic x28, x28, #0xf" "\n" + "mov sp, x28" "\n" + + "str x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n" + + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n" + "str x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n" + "str x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n" + "str x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n" + "str x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n" + "str x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n" + "str x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n" + "str x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n" + "str x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n" + "str x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n" + "str x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n" + "str x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n" + "str x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n" + "str x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n" + "str x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n" + "str x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n" + "str x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n" + "str x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n" + "str x18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X18_OFFSET) "]" "\n" + "str x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n" + "str x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n" + "str x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n" + "str x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n" + "str x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n" + "str x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n" + "str x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n" + "str x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n" + + "ldr x0, [x27, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n" + "ldr x0, [x27, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n" + + "str fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n" + + "ldr x0, [x27, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr x0, [x27, #7 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + "mrs x0, nzcv" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n" + "mrs x0, fpsr" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n" + + "ldr x0, [x27, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "ldr x0, [x27, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" + "ldr x0, [x27, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" + + "str d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n" + "str d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n" + "str d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n" + "str d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n" + "str d4, [sp, #" 
STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n" + "str d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n" + "str d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n" + "str d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n" + "str d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n" + "str d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n" + "str d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n" + "str d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n" + "str d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n" + "str d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n" + "str d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n" + "str d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n" + "str d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n" + "str d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n" + "str d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n" + "str d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n" + "str d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n" + "str d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n" + "str d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n" + "str d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n" + "str d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n" + "str d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n" + "str d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n" + "str d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n" + "str d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n" + "str d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n" + "str d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n" + "str d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n" + + "mov x28, sp" "\n" // Save the ProbeContext*. + + "mov x0, sp" "\n" // the ProbeContext* arg. + "ldr x27, [x27, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "blr x27" "\n" + + "mov sp, x28" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. That is except for x18, pc and sp. + + // x18 is "reserved for the platform. Conforming software should not make use of it." + // Hence, the JITs would not be using it, and the probe should also not be modifying it. + // See https://developer.apple.com/library/ios/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html. + + // We can't modify the pc, because the only way to set its value on ARM64 is via + // an indirect branch or a ret, which means we'll need a free register to do so. + // The probe mechanism is required to not perturb any registers that the caller + // may use. Hence, we don't have this free register available. + + // In order to return to the caller, we need to ret via lr. The probe mechanism will + // restore lr's value after returning to the caller by loading the restore value + // from the caller save buffer. The caller expects to access the caller save buffer via + // sp. Hence, we cannot allow sp to be modified by the probe. 
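For context, a client of this mechanism supplies a probe function and two opaque arguments via MacroAssemblerARM64::probe() (defined further below); the function runs with every register captured in the ProbeContext, and any values it writes back, other than sp, pc and x18, are reloaded into the real registers by the restore sequence that follows. The snippet below is a minimal illustrative sketch, not part of the patch; it assumes the ProbeContext field names checked by the COMPILE_ASSERTs above, pointer-sized cpu.x0/cpu.x19 fields, and WTF's dataLogF for output.

#include <wtf/DataLog.h>

// Hypothetical debugging probe: logs x0 and copies it into x19. Any write to
// context->cpu.sp or cpu.pc would be reverted by arm64ProbeTrampoline below.
static void logX0IntoX19(MacroAssemblerARM64::ProbeContext* context)
{
    dataLogF("probe hit: x0 = %p\n", reinterpret_cast<void*>(context->cpu.x0));
    context->cpu.x19 = context->cpu.x0; // written back into x19 on return
}

// During code generation, assuming a MacroAssemblerARM64 instance named jit:
//     jit.probe(logX0IntoX19, nullptr, nullptr);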
+ + "ldr d0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q0_OFFSET) "]" "\n" + "ldr d1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q1_OFFSET) "]" "\n" + "ldr d2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q2_OFFSET) "]" "\n" + "ldr d3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q3_OFFSET) "]" "\n" + "ldr d4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q4_OFFSET) "]" "\n" + "ldr d5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q5_OFFSET) "]" "\n" + "ldr d6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q6_OFFSET) "]" "\n" + "ldr d7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q7_OFFSET) "]" "\n" + "ldr d8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q8_OFFSET) "]" "\n" + "ldr d9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q9_OFFSET) "]" "\n" + "ldr d10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q10_OFFSET) "]" "\n" + "ldr d11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q11_OFFSET) "]" "\n" + "ldr d12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q12_OFFSET) "]" "\n" + "ldr d13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q13_OFFSET) "]" "\n" + "ldr d14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q14_OFFSET) "]" "\n" + "ldr d15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q15_OFFSET) "]" "\n" + "ldr d16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q16_OFFSET) "]" "\n" + "ldr d17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q17_OFFSET) "]" "\n" + "ldr d18, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q18_OFFSET) "]" "\n" + "ldr d19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q19_OFFSET) "]" "\n" + "ldr d20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q20_OFFSET) "]" "\n" + "ldr d21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q21_OFFSET) "]" "\n" + "ldr d22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q22_OFFSET) "]" "\n" + "ldr d23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q23_OFFSET) "]" "\n" + "ldr d24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q24_OFFSET) "]" "\n" + "ldr d25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q25_OFFSET) "]" "\n" + "ldr d26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q26_OFFSET) "]" "\n" + "ldr d27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q27_OFFSET) "]" "\n" + "ldr d28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q28_OFFSET) "]" "\n" + "ldr d29, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q29_OFFSET) "]" "\n" + "ldr d30, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q30_OFFSET) "]" "\n" + "ldr d31, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_Q31_OFFSET) "]" "\n" + + "ldr x0, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X0_OFFSET) "]" "\n" + "ldr x1, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X1_OFFSET) "]" "\n" + "ldr x2, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X2_OFFSET) "]" "\n" + "ldr x3, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X3_OFFSET) "]" "\n" + "ldr x4, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X4_OFFSET) "]" "\n" + "ldr x5, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X5_OFFSET) "]" "\n" + "ldr x6, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X6_OFFSET) "]" "\n" + "ldr x7, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X7_OFFSET) "]" "\n" + "ldr x8, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X8_OFFSET) "]" "\n" + "ldr x9, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X9_OFFSET) "]" "\n" + "ldr x10, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X10_OFFSET) "]" "\n" + "ldr x11, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X11_OFFSET) "]" "\n" + "ldr x12, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X12_OFFSET) "]" "\n" + "ldr x13, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X13_OFFSET) "]" "\n" + "ldr x14, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X14_OFFSET) "]" "\n" + "ldr x15, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X15_OFFSET) "]" "\n" + "ldr x16, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X16_OFFSET) "]" "\n" + "ldr x17, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X17_OFFSET) "]" "\n" + // x18 should not be modified by the probe. See comment above for details. 
+ "ldr x19, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X19_OFFSET) "]" "\n" + "ldr x20, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X20_OFFSET) "]" "\n" + "ldr x21, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X21_OFFSET) "]" "\n" + "ldr x22, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X22_OFFSET) "]" "\n" + "ldr x23, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X23_OFFSET) "]" "\n" + "ldr x24, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X24_OFFSET) "]" "\n" + "ldr x25, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X25_OFFSET) "]" "\n" + "ldr x26, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X26_OFFSET) "]" "\n" + + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSR_OFFSET) "]" "\n" + "msr fpsr, x27" "\n" + + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_NZCV_OFFSET) "]" "\n" + "msr nzcv, x27" "\n" + "ldr fp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FP_OFFSET) "]" "\n" + + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n" + "ldr x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n" + + // There are 4 more registers left to restore: x27, x28, lr, sp, and pc. + // The JIT code's lr and sp will be restored by the caller. + + // Restore pc by loading it into lr. The ret below will put in the pc. + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + // We need x27 as a scratch register to help with popping the ProbeContext. + // Hence, before we pop the ProbeContext, we need to copy the restore value + // for x27 from the ProbeContext to the caller save buffer. + "ldr x28, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n" + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X27_OFFSET) "]" "\n" + "str x27, [x28, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + + // Since lr is also restored by the caller, we need to copy its restore + // value to the caller save buffer too. + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "str x27, [x28, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + + // We're now done with x28, and can restore its value. + "ldr x28, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_X28_OFFSET) "]" "\n" + + // We're now done with the ProbeContext, and can pop it to restore sp so that + // it points to the caller save buffer. + "ldr x27, [sp, #" STRINGIZE_VALUE_OF(SAVED_CALLER_SP) "]" "\n" + "mov sp, x27" "\n" + + // We're now done with x27, and can restore it. + "ldr x27, [sp, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + + "ret" "\n" +); +#endif // COMPILER(GCC_OR_CLANG) + +static void arm64ProbeTrampoline(MacroAssemblerARM64::ProbeContext* context) +{ + void* origSP = context->cpu.sp; + void* origPC = context->cpu.pc; + + context->probeFunction(context); + + if (context->cpu.sp != origSP) { + dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the SP. The change will be ignored\n"); + context->cpu.sp = origSP; + } + if (context->cpu.pc != origPC) { + dataLog("MacroAssembler probe ERROR: ARM64 does not support the probe changing the PC. The change will be ignored\n"); + context->cpu.pc = origPC; + } +} + +void MacroAssemblerARM64::probe(MacroAssemblerARM64::ProbeFunction function, void* arg1, void* arg2) +{ + sub64(TrustedImm32(8 * 8), sp); + + store64(x27, Address(sp, 4 * 8)); + store64(x28, Address(sp, 5 * 8)); + store64(lr, Address(sp, 6 * 8)); + + add64(TrustedImm32(8 * 8), sp, x28); + store64(x28, Address(sp, 7 * 8)); // Save original sp value. 
+ + move(TrustedImmPtr(reinterpret_cast<void*>(function)), x28); + store64(x28, Address(sp)); + move(TrustedImmPtr(arg1), x28); + store64(x28, Address(sp, 1 * 8)); + move(TrustedImmPtr(arg2), x28); + store64(x28, Address(sp, 2 * 8)); + move(TrustedImmPtr(reinterpret_cast<void*>(arm64ProbeTrampoline)), x28); + store64(x28, Address(sp, 3 * 8)); + + move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), x28); + m_assembler.blr(x28); + + // ctiMasmProbeTrampoline should have restored every register except for + // lr and the sp. + load64(Address(sp, 6 * 8), lr); + add64(TrustedImm32(8 * 8), sp); +} +#endif // ENABLE(MASM_PROBE) + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) && CPU(ARM64) + diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h index a128923fc..f4cdd36c0 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014-2017 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,20 +23,32 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef MacroAssemblerARM64_h -#define MacroAssemblerARM64_h +#pragma once #if ENABLE(ASSEMBLER) #include "ARM64Assembler.h" #include "AbstractMacroAssembler.h" #include <wtf/MathExtras.h> +#include <wtf/Optional.h> namespace JSC { -class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> { +class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> { +public: + static const unsigned numGPRs = 32; + static const unsigned numFPRs = 32; + static const RegisterID dataTempRegister = ARM64Registers::ip0; static const RegisterID memoryTempRegister = ARM64Registers::ip1; + + RegisterID scratchRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return getCachedDataTempRegisterIDAndInvalidate(); + } + +private: static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31; static const ARM64Assembler::SetFlags S = ARM64Assembler::S; static const intptr_t maskHalfWord0 = 0xffffl; @@ -64,13 +76,11 @@ public: Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); } void* unlinkedCode() { return m_assembler.unlinkedCode(); } - bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); } - JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); } - JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); } - void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); } - int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); } - void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); } - int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); } + static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); } + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const 
uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); } + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); } + static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARM64Assembler::link(record, from, fromInstruction, to); } static const Scale ScalePtr = TimesEight; @@ -130,10 +140,15 @@ public: // FIXME: Get reasonable implementations for these static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; } static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; } - static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; } // Integer operations: + void add32(RegisterID a, RegisterID b, RegisterID dest) + { + ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp); + m_assembler.add<32>(dest, a, b); + } + void add32(RegisterID src, RegisterID dest) { m_assembler.add<32>(dest, dest, src); @@ -150,7 +165,10 @@ public: m_assembler.add<32>(dest, src, UInt12(imm.m_value)); else if (isUInt12(-imm.m_value)) m_assembler.sub<32>(dest, src, UInt12(-imm.m_value)); - else { + else if (src != dest) { + move(imm, dest); + add32(src, dest); + } else { move(imm, getCachedDataTempRegisterIDAndInvalidate()); m_assembler.add<32>(dest, src, dataTempRegister); } @@ -199,9 +217,20 @@ public: add32(dataTempRegister, dest); } + void add64(RegisterID a, RegisterID b, RegisterID dest) + { + ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp); + if (b == ARM64Registers::sp) + std::swap(a, b); + m_assembler.add<64>(dest, a, b); + } + void add64(RegisterID src, RegisterID dest) { - m_assembler.add<64>(dest, dest, src); + if (src == ARM64Registers::sp) + m_assembler.add<64>(dest, src, dest); + else + m_assembler.add<64>(dest, dest, src); } void add64(TrustedImm32 imm, RegisterID dest) @@ -288,6 +317,11 @@ public: store64(dataTempRegister, address.m_ptr); } + void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) + { + add64(imm, srcDest); + } + void add64(Address src, RegisterID dest) { load64(src, getCachedDataTempRegisterIDAndInvalidate()); @@ -334,6 +368,24 @@ public: and32(dataTempRegister, dest); } + void and64(RegisterID src1, RegisterID src2, RegisterID dest) + { + m_assembler.and_<64>(dest, src1, src2); + } + + void and64(TrustedImm64 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.and_<64>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.and_<64>(dest, src, dataTempRegister); + } + void and64(RegisterID src, RegisterID dest) { m_assembler.and_<64>(dest, dest, src); @@ -370,6 +422,31 @@ public: m_assembler.clz<32>(dest, src); } + void countLeadingZeros64(RegisterID src, RegisterID dest) + { + m_assembler.clz<64>(dest, src); + } + + void countTrailingZeros32(RegisterID src, RegisterID dest) + { + // Arm does not have a count trailing zeros only a count leading zeros. + m_assembler.rbit<32>(dest, src); + m_assembler.clz<32>(dest, dest); + } + + void countTrailingZeros64(RegisterID src, RegisterID dest) + { + // Arm does not have a count trailing zeros only a count leading zeros. 
+ m_assembler.rbit<64>(dest, src); + m_assembler.clz<64>(dest, dest); + } + + // Only used for testing purposes. + void illegalInstruction() + { + m_assembler.illegalInstruction(); + } + void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) { m_assembler.lsl<32>(dest, src, shiftAmount); @@ -409,21 +486,81 @@ public: { lshift64(dest, imm, dest); } + + void mul32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.mul<32>(dest, left, right); + } void mul32(RegisterID src, RegisterID dest) { m_assembler.mul<32>(dest, dest, src); } - + + void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) + { + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.mul<32>(dest, src, dataTempRegister); + } + void mul64(RegisterID src, RegisterID dest) { m_assembler.mul<64>(dest, dest, src); } - void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) + void mul64(RegisterID left, RegisterID right, RegisterID dest) { - move(imm, getCachedDataTempRegisterIDAndInvalidate()); - m_assembler.mul<32>(dest, src, dataTempRegister); + m_assembler.mul<64>(dest, left, right); + } + + void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest) + { + m_assembler.madd<32>(dest, mulLeft, mulRight, summand); + } + + void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest) + { + m_assembler.msub<32>(dest, mulLeft, mulRight, minuend); + } + + void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest) + { + m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr); + } + + void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest) + { + m_assembler.madd<64>(dest, mulLeft, mulRight, summand); + } + + void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest) + { + m_assembler.msub<64>(dest, mulLeft, mulRight, minuend); + } + + void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest) + { + m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr); + } + + void div32(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.sdiv<32>(dest, dividend, divisor); + } + + void div64(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.sdiv<64>(dest, dividend, divisor); + } + + void uDiv32(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.udiv<32>(dest, dividend, divisor); + } + + void uDiv64(RegisterID dividend, RegisterID divisor, RegisterID dest) + { + m_assembler.udiv<64>(dest, dividend, divisor); } void neg32(RegisterID dest) @@ -460,6 +597,7 @@ public: return; } + ASSERT(src != dataTempRegister); move(imm, getCachedDataTempRegisterIDAndInvalidate()); m_assembler.orr<32>(dest, src, dataTempRegister); } @@ -471,6 +609,27 @@ public: store32(dataTempRegister, address.m_ptr); } + void or32(TrustedImm32 imm, AbsoluteAddress address) + { + LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); + if (logicalImm.isValid()) { + load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm); + store32(dataTempRegister, address.m_ptr); + } else { + load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); + or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate()); + store32(dataTempRegister, address.m_ptr); + } + } + + void or32(TrustedImm32 imm, Address address) + { + load32(address, 
getCachedDataTempRegisterIDAndInvalidate()); + or32(imm, dataTempRegister, dataTempRegister); + store32(dataTempRegister, address); + } + void or64(RegisterID src, RegisterID dest) { or64(dest, src, dest); @@ -491,14 +650,27 @@ public: LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value))); if (logicalImm.isValid()) { - m_assembler.orr<64>(dest, dest, logicalImm); + m_assembler.orr<64>(dest, src, logicalImm); return; } signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate()); m_assembler.orr<64>(dest, src, dataTempRegister); } - + + void or64(TrustedImm64 imm, RegisterID src, RegisterID dest) + { + LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.orr<64>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.orr<64>(dest, src, dataTempRegister); + } + void or64(TrustedImm64 imm, RegisterID dest) { LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value))); @@ -512,9 +684,34 @@ public: m_assembler.orr<64>(dest, dest, dataTempRegister); } + void rotateRight32(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.ror<32>(dest, src, imm.m_value & 31); + } + + void rotateRight32(TrustedImm32 imm, RegisterID srcDst) + { + rotateRight32(srcDst, imm, srcDst); + } + + void rotateRight32(RegisterID src, RegisterID shiftAmmount, RegisterID dest) + { + m_assembler.ror<32>(dest, src, shiftAmmount); + } + + void rotateRight64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.ror<64>(dest, src, imm.m_value & 63); + } + void rotateRight64(TrustedImm32 imm, RegisterID srcDst) { - m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63); + rotateRight64(srcDst, imm, srcDst); + } + + void rotateRight64(RegisterID src, RegisterID shiftAmmount, RegisterID dest) + { + m_assembler.ror<64>(dest, src, shiftAmmount); } void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) @@ -539,12 +736,12 @@ public: void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) { - m_assembler.lsr<64>(dest, src, shiftAmount); + m_assembler.asr<64>(dest, src, shiftAmount); } void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f); + m_assembler.asr<64>(dest, src, imm.m_value & 0x3f); } void rshift64(RegisterID shiftAmount, RegisterID dest) @@ -562,6 +759,11 @@ public: m_assembler.sub<32>(dest, dest, src); } + void sub32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.sub<32>(dest, left, right); + } + void sub32(TrustedImm32 imm, RegisterID dest) { if (isUInt12(imm.m_value)) { @@ -624,6 +826,11 @@ public: { m_assembler.sub<64>(dest, dest, src); } + + void sub64(RegisterID a, RegisterID b, RegisterID dest) + { + m_assembler.sub<64>(dest, a, b); + } void sub64(TrustedImm32 imm, RegisterID dest) { @@ -677,6 +884,26 @@ public: urshift32(dest, imm, dest); } + void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + m_assembler.lsr<64>(dest, src, shiftAmount); + } + + void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest) + { + m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f); + } + + void urshift64(RegisterID shiftAmount, RegisterID dest) + { + urshift64(dest, shiftAmount, dest); + } + + void urshift64(TrustedImm32 imm, RegisterID dest) + { + urshift64(dest, imm, dest); + } + void xor32(RegisterID src, RegisterID dest) 
{ xor32(dest, src, dest); @@ -700,7 +927,7 @@ public: LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value); if (logicalImm.isValid()) { - m_assembler.eor<32>(dest, dest, logicalImm); + m_assembler.eor<32>(dest, src, logicalImm); return; } @@ -731,6 +958,23 @@ public: xor64(imm, dest, dest); } + void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest) + { + if (imm.m_value == -1) + m_assembler.mvn<64>(dest, src); + else { + LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value); + + if (logicalImm.isValid()) { + m_assembler.eor<64>(dest, src, logicalImm); + return; + } + + move(imm, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.eor<64>(dest, src, dataTempRegister); + } + } + void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest) { if (imm.m_value == -1) @@ -739,7 +983,7 @@ public: LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value))); if (logicalImm.isValid()) { - m_assembler.eor<64>(dest, dest, logicalImm); + m_assembler.eor<64>(dest, src, logicalImm); return; } @@ -748,6 +992,20 @@ public: } } + void not32(RegisterID src, RegisterID dest) + { + m_assembler.mvn<32>(dest, src); + } + + void not64(RegisterID src, RegisterID dest) + { + m_assembler.mvn<64>(dest, src); + } + + void not64(RegisterID srcDst) + { + m_assembler.mvn<64>(srcDst, srcDst); + } // Memory access operations: @@ -777,6 +1035,11 @@ public: load<64>(address, dest); } + void load64(RegisterID src, PostIndex simm, RegisterID dest) + { + m_assembler.ldr<64>(dest, src, simm); + } + DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest) { DataLabel32 label(this); @@ -793,6 +1056,38 @@ public: return label; } + void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2) + { + loadPair64(src, TrustedImm32(0), dest1, dest2); + } + + void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2) + { + m_assembler.ldp<64>(dest1, dest2, src, offset.m_value); + } + + void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2) + { + loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2); + } + + void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2) + { + m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value); + } + + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), dataTempRegister); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm64(misc), memoryTempRegister); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result(this); @@ -874,16 +1169,35 @@ public: load16(address, dest); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister); + } + + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { if (!address.offset && (!address.scale || address.scale == 1)) { - m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); return; } 
signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); - m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister); + m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister); + } + + void zeroExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.uxth<32>(dest, src); + } + + void signExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.sxth<32>(dest, src); } void load8(ImplicitAddress address, RegisterID dest) @@ -909,22 +1223,54 @@ public: void load8(const void* address, RegisterID dest) { - moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister); + moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister()); m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr); if (dest == memoryTempRegister) - m_cachedMemoryTempRegister.invalidate(); + cachedMemoryTempRegister().invalidate(); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8(RegisterID src, PostIndex simm, RegisterID dest) + { + m_assembler.ldrb(dest, src, simm); + } + + void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest) + { + if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister); + } + + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { if (!address.offset && !address.scale) { - m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); + m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale); return; } signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); - m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister); + m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister); + } + + void load8SignedExtendTo32(const void* address, RegisterID dest) + { + moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister()); + m_assembler.ldrsb<32>(dest, memoryTempRegister, ARM64Registers::zr); + if (dest == memoryTempRegister) + cachedMemoryTempRegister().invalidate(); + } + + void zeroExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.uxtb<32>(dest, src); + } + + void signExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.sxtb<32>(dest, src); } void store64(RegisterID src, ImplicitAddress address) @@ -953,6 +1299,11 @@ public: store<64>(src, address); } + void store64(TrustedImm32 imm, ImplicitAddress address) + { + store64(TrustedImm64(imm.m_value), address); + } + void store64(TrustedImm64 imm, ImplicitAddress address) { if (!imm.m_value) { @@ -960,7 +1311,7 @@ public: return; } - moveToCachedReg(imm, m_dataMemoryTempRegister); + moveToCachedReg(imm, dataMemoryTempRegister()); store64(dataTempRegister, address); } @@ -971,9 +1322,14 @@ public: return; } - moveToCachedReg(imm, m_dataMemoryTempRegister); + moveToCachedReg(imm, dataMemoryTempRegister()); store64(dataTempRegister, address); } + + void store64(RegisterID src, RegisterID dest, PostIndex simm) + { + m_assembler.str<64>(src, dest, simm); + } DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) { @@ -983,6 +1339,26 @@ public: return label; } + void 
storePair64(RegisterID src1, RegisterID src2, RegisterID dest) + { + storePair64(src1, src2, dest, TrustedImm32(0)); + } + + void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset) + { + m_assembler.stp<64>(src1, src2, dest, offset.m_value); + } + + void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest) + { + storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0)); + } + + void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset) + { + m_assembler.stnp<64>(src1, src2, dest, offset.m_value); + } + void store32(RegisterID src, ImplicitAddress address) { if (tryStoreWithOffset<32>(src, address.base, address.offset)) @@ -1016,7 +1392,7 @@ public: return; } - moveToCachedReg(imm, m_dataMemoryTempRegister); + moveToCachedReg(imm, dataMemoryTempRegister()); store32(dataTempRegister, address); } @@ -1027,7 +1403,7 @@ public: return; } - moveToCachedReg(imm, m_dataMemoryTempRegister); + moveToCachedReg(imm, dataMemoryTempRegister()); store32(dataTempRegister, address); } @@ -1038,10 +1414,20 @@ public: return; } - moveToCachedReg(imm, m_dataMemoryTempRegister); + moveToCachedReg(imm, dataMemoryTempRegister()); store32(dataTempRegister, address); } + void storeZero32(ImplicitAddress address) + { + store32(ARM64Registers::zr, address); + } + + void storeZero32(BaseIndex address) + { + store32(ARM64Registers::zr, address); + } + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) { DataLabel32 label(this); @@ -1050,6 +1436,15 @@ public: return label; } + void store16(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<16>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.strh(src, address.base, memoryTempRegister); + } + void store16(RegisterID src, BaseIndex address) { if (!address.offset && (!address.scale || address.scale == 1)) { @@ -1080,17 +1475,43 @@ public: m_assembler.strb(src, memoryTempRegister, 0); } + void store8(RegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<8>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.strb(src, address.base, memoryTempRegister); + } + void store8(TrustedImm32 imm, void* address) { - if (!imm.m_value) { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + if (!imm8.m_value) { store8(ARM64Registers::zr, address); return; } - move(imm, getCachedDataTempRegisterIDAndInvalidate()); + move(imm8, getCachedDataTempRegisterIDAndInvalidate()); store8(dataTempRegister, address); } + void store8(TrustedImm32 imm, ImplicitAddress address) + { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + if (!imm8.m_value) { + store8(ARM64Registers::zr, address); + return; + } + + move(imm8, getCachedDataTempRegisterIDAndInvalidate()); + store8(dataTempRegister, address); + } + + void store8(RegisterID src, RegisterID dest, PostIndex simm) + { + m_assembler.strb(src, dest, simm); + } // Floating-point operations: @@ -1098,6 +1519,7 @@ public: static bool supportsFloatingPointTruncate() { return true; } static bool supportsFloatingPointSqrt() { return true; } static bool supportsFloatingPointAbs() { return true; } + static bool supportsFloatingPointRounding() { return true; } enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful }; @@ -1106,6 
+1528,11 @@ public: m_assembler.fabs<64>(dest, src); } + void absFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fabs<32>(dest, src); + } + void addDouble(FPRegisterID src, FPRegisterID dest) { addDouble(dest, src, dest); @@ -1124,20 +1551,56 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, fpTempRegister); + loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); addDouble(fpTempRegister, dest); } + void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fadd<32>(dest, op1, op2); + } + void ceilDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.frintp<64>(dest, src); } + void ceilFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintp<32>(dest, src); + } + void floorDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.frintm<64>(dest, src); } + void floorFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintm<32>(dest, src); + } + + void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintn<64>(dest, src); + } + + void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintn<32>(dest, src); + } + + void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintz<64>(dest, src); + } + + void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.frintz<32>(dest, src); + } + + // Convert 'src' to an integer, and places the resulting 'dest'. // If the result is not representable as a 32 bit value, branch. // May also branch for some values that are representable in 32 bits @@ -1150,32 +1613,26 @@ public: m_assembler.scvtf<64, 32>(fpTempRegister, dest); failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister)); - // If the result is zero, it might have been -0.0, and the double comparison won't catch this! - if (negZeroCheck) - failureCases.append(branchTest32(Zero, dest)); + // Test for negative zero. + if (negZeroCheck) { + Jump valueIsNonZero = branchTest32(NonZero, dest); + RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate(); + m_assembler.fmov<64>(scratch, src); + failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero)); + valueIsNonZero.link(this); + } } Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right) { m_assembler.fcmp<64>(left, right); + return jumpAfterFloatingPointCompare(cond); + } - if (cond == DoubleNotEqual) { - // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump. - Jump unordered = makeBranch(ARM64Assembler::ConditionVS); - Jump result = makeBranch(ARM64Assembler::ConditionNE); - unordered.link(this); - return result; - } - if (cond == DoubleEqualOrUnordered) { - Jump unordered = makeBranch(ARM64Assembler::ConditionVS); - Jump notEqual = makeBranch(ARM64Assembler::ConditionNE); - unordered.link(this); - // We get here if either unordered or equal. - Jump result = jump(); - notEqual.link(this); - return result; - } - return makeBranch(cond); + Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + m_assembler.fcmp<32>(left, right); + return jumpAfterFloatingPointCompare(cond); } Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID) @@ -1204,7 +1661,7 @@ public: // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest. 
m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src); zeroExtend32ToPtr(dataTempRegister, dest); - // Check thlow 32-bits sign extend to be equal to the full value. + // Check the low 32-bits sign extend to be equal to the full value. m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0); return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual)); } @@ -1241,12 +1698,32 @@ public: load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); convertInt32ToDouble(dataTempRegister, dest); } + + void convertInt32ToFloat(RegisterID src, FPRegisterID dest) + { + m_assembler.scvtf<32, 32>(dest, src); + } void convertInt64ToDouble(RegisterID src, FPRegisterID dest) { m_assembler.scvtf<64, 64>(dest, src); } - + + void convertInt64ToFloat(RegisterID src, FPRegisterID dest) + { + m_assembler.scvtf<32, 64>(dest, src); + } + + void convertUInt64ToDouble(RegisterID src, FPRegisterID dest) + { + m_assembler.ucvtf<64, 64>(dest, src); + } + + void convertUInt64ToFloat(RegisterID src, FPRegisterID dest) + { + m_assembler.ucvtf<32, 64>(dest, src); + } + void divDouble(FPRegisterID src, FPRegisterID dest) { divDouble(dest, src, dest); @@ -1257,6 +1734,11 @@ public: m_assembler.fdiv<64>(dest, op1, op2); } + void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fdiv<32>(dest, op1, op2); + } + void loadDouble(ImplicitAddress address, FPRegisterID dest) { if (tryLoadWithOffset<64>(dest, address.base, address.offset)) @@ -1278,12 +1760,21 @@ public: m_assembler.ldr<64>(dest, address.base, memoryTempRegister); } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { - moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister); + moveToCachedReg(address, cachedMemoryTempRegister()); m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr); } + void loadFloat(ImplicitAddress address, FPRegisterID dest) + { + if (tryLoadWithOffset<32>(dest, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.ldr<32>(dest, address.base, memoryTempRegister); + } + void loadFloat(BaseIndex address, FPRegisterID dest) { if (!address.offset && (!address.scale || address.scale == 2)) { @@ -1301,16 +1792,130 @@ public: m_assembler.fmov<64>(dest, src); } + void moveZeroToDouble(FPRegisterID reg) + { + m_assembler.fmov<64>(reg, ARM64Registers::zr); + } + void moveDoubleTo64(FPRegisterID src, RegisterID dest) { m_assembler.fmov<64>(dest, src); } + void moveFloatTo32(FPRegisterID src, RegisterID dest) + { + m_assembler.fmov<32>(dest, src); + } + void move64ToDouble(RegisterID src, FPRegisterID dest) { m_assembler.fmov<64>(dest, src); } + void move32ToFloat(RegisterID src, FPRegisterID dest) + { + m_assembler.fmov<32>(dest, src); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.fcmp<64>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.fcmp<64>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, 
RegisterID dest) + { + m_assembler.fcmp<32>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.fcmp<32>(left, right); + moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + + template<int datasize> + void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest) + { + if (cond == DoubleNotEqual) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE); + unordered.link(this); + return; + } + if (cond == DoubleEqualOrUnordered) { + // If the compare is unordered, src is copied to dest and the + // next csel has all arguments equal to src. + // If the compare is ordered, dest is unchanged and EQ decides + // what value to set. + m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ); + return; + } + m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond)); + } + + template<int datasize> + void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (cond == DoubleNotEqual) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE); + unordered.link(this); + return; + } + if (cond == DoubleEqualOrUnordered) { + // If the compare is unordered, thenCase is copied to elseCase and the + // next csel has all arguments equal to thenCase. + // If the compare is ordered, dest is unchanged and EQ decides + // what value to set. + m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS); + m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ); + return; + } + m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + template<int datasize> + void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (cond == DoubleNotEqual) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE); + unordered.link(this); + return; + } + if (cond == DoubleEqualOrUnordered) { + // If the compare is unordered, thenCase is copied to elseCase and the + // next csel has all arguments equal to thenCase. + // If the compare is ordered, dest is unchanged and EQ decides + // what value to set. 
+ m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS); + m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ); + return; + } + m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.fcmp<64>(left, right); + moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + + void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.fcmp<32>(left, right); + moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest); + } + void mulDouble(FPRegisterID src, FPRegisterID dest) { mulDouble(dest, src, dest); @@ -1327,16 +1932,51 @@ public: mulDouble(fpTempRegister, dest); } + void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fmul<32>(dest, op1, op2); + } + + void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vand<64>(dest, op1, op2); + } + + void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + andDouble(op1, op2, dest); + } + + void orDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vorr<64>(dest, op1, op2); + } + + void orFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + orDouble(op1, op2, dest); + } + void negateDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.fneg<64>(dest, src); } + void negateFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fneg<32>(dest, src); + } + void sqrtDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.fsqrt<64>(dest, src); } + void sqrtFloat(FPRegisterID src, FPRegisterID dest) + { + m_assembler.fsqrt<32>(dest, src); + } + void storeDouble(FPRegisterID src, ImplicitAddress address) { if (tryStoreWithOffset<64>(src, address.base, address.offset)) @@ -1346,9 +1986,9 @@ public: m_assembler.str<64>(src, address.base, memoryTempRegister); } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { - moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister); + moveToCachedReg(address, cachedMemoryTempRegister()); m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr); } @@ -1363,6 +2003,15 @@ public: m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale); m_assembler.str<64>(src, address.base, memoryTempRegister); } + + void storeFloat(FPRegisterID src, ImplicitAddress address) + { + if (tryStoreWithOffset<32>(src, address.base, address.offset)) + return; + + signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate()); + m_assembler.str<32>(src, address.base, memoryTempRegister); + } void storeFloat(FPRegisterID src, BaseIndex address) { @@ -1392,6 +2041,11 @@ public: subDouble(fpTempRegister, dest); } + void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.fsub<32>(dest, op1, op2); + } + // Result is undefined if the value is outside of the integer range. 
void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) { @@ -1403,6 +2057,45 @@ public: m_assembler.fcvtzu<32, 64>(dest, src); } + void truncateDoubleToInt64(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzs<64, 64>(dest, src); + } + + void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID) + { + truncateDoubleToUint64(src, dest); + } + + void truncateDoubleToUint64(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzu<64, 64>(dest, src); + } + + void truncateFloatToInt32(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzs<32, 32>(dest, src); + } + + void truncateFloatToUint32(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzu<32, 32>(dest, src); + } + + void truncateFloatToInt64(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzs<64, 32>(dest, src); + } + + void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID) + { + truncateFloatToUint64(src, dest); + } + + void truncateFloatToUint64(FPRegisterID src, RegisterID dest) + { + m_assembler.fcvtzu<64, 32>(dest, src); + } // Stack manipulation operations: // @@ -1437,6 +2130,16 @@ public: CRASH(); } + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16)); + } + + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16)); + } + void popToRestore(RegisterID dest) { m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16)); @@ -1446,6 +2149,15 @@ public: { m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16)); } + + void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm) + { + RegisterID reg = dataTempRegister; + pushPair(reg, reg); + move(imm, reg); + store64(reg, stackPointerRegister); + load64(Address(stackPointerRegister, 8), reg); + } void pushToSave(Address address) { @@ -1471,6 +2183,7 @@ public: storeDouble(src, stackPointerRegister); } + static ptrdiff_t pushToSaveByteOffset() { return 16; } // Register move operations: @@ -1501,6 +2214,11 @@ public: move(reg2, reg1); move(dataTempRegister, reg2); } + + void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest) + { + move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest); + } void signExtend32ToPtr(RegisterID src, RegisterID dest) { @@ -1512,6 +2230,169 @@ public: m_assembler.uxtw(dest, src); } + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<32>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<32>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<32>(left, 
dataTempRegister); + } + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<64>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<64>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<64>(left, dataTempRegister); + } + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.tst<32>(testReg, mask); + m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.tst<32>(left, right); + m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + test32(left, right); + m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.tst<64>(testReg, mask); + m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond)); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.tst<64>(left, right); + m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.cmp<32>(left, right); + m_assembler.fcsel<32>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<32>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<32>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<32>(left, dataTempRegister); + } + m_assembler.fcsel<64>(dest, thenCase, elseCase, 
ARM64Condition(cond)); + } + + void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.cmp<64>(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<64>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<64>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<64>(left, dataTempRegister); + } + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.tst<32>(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + test32(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } + + void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + m_assembler.tst<64>(left, right); + m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond)); + } // Forwards / external control flow operations: // @@ -1539,12 +2420,17 @@ public: Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest32(*resultCondition, left, left); + } + if (isUInt12(right.m_value)) m_assembler.cmp<32>(left, UInt12(right.m_value)); else if (isUInt12(-right.m_value)) m_assembler.cmn<32>(left, UInt12(-right.m_value)); else { - moveToCachedReg(right, m_dataMemoryTempRegister); + moveToCachedReg(right, dataMemoryTempRegister()); m_assembler.cmp<32>(left, dataTempRegister); } return Jump(makeBranch(cond)); @@ -1588,19 +2474,52 @@ public: Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right) { + if (right == ARM64Registers::sp) { + if (cond == Equal && left != ARM64Registers::sp) { + // CMP can only use SP for the left argument, since we are testing for equality, the order + // does not matter here. 
+ std::swap(left, right); + } else { + move(right, getCachedDataTempRegisterIDAndInvalidate()); + right = dataTempRegister; + } + } m_assembler.cmp<64>(left, right); return Jump(makeBranch(cond)); } + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest64(*resultCondition, left, left); + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<64>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + m_assembler.cmn<64>(left, UInt12(-right.m_value)); + else { + moveToCachedReg(right, dataMemoryTempRegister()); + m_assembler.cmp<64>(left, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right) { intptr_t immediate = right.m_value; + if (!immediate) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest64(*resultCondition, left, left); + } + if (isUInt12(immediate)) m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate))); else if (isUInt12(-immediate)) m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate))); else { - moveToCachedReg(right, m_dataMemoryTempRegister); + moveToCachedReg(right, dataMemoryTempRegister()); m_assembler.cmp<64>(left, dataTempRegister); } return Jump(makeBranch(cond)); @@ -1630,33 +2549,62 @@ public: return branch64(cond, memoryTempRegister, right); } + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) + { + load64(left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch64(cond, memoryTempRegister, right); + } + Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) { - ASSERT(!(0xffffff00 & right.m_value)); - load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); - return branch32(cond, memoryTempRegister, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right8); } Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) { - ASSERT(!(0xffffff00 & right.m_value)); - load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); - return branch32(cond, memoryTempRegister, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right8); } Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) { - ASSERT(!(0xffffff00 & right.m_value)); - load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); - return branch32(cond, memoryTempRegister, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); + return branch32(cond, memoryTempRegister, right8); } Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) { + if (reg == mask && (cond == Zero || cond == NonZero)) + return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg)); m_assembler.tst<32>(reg, mask); return Jump(makeBranch(cond)); } + void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) + { + if (mask.m_value == -1) + m_assembler.tst<32>(reg, reg); + else { + 
LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value); + + if (logicalImm.isValid()) + m_assembler.tst<32>(reg, logicalImm); + else { + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<32>(reg, dataTempRegister); + } + } + } + + Jump branch(ResultCondition cond) + { + return Jump(makeBranch(cond)); + } + Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { if (mask.m_value == -1) { @@ -1666,13 +2614,10 @@ public: } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero))) return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond))); else { - if ((cond == Zero) || (cond == NonZero)) { - LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value); - - if (logicalImm.isValid()) { - m_assembler.tst<32>(reg, logicalImm); - return Jump(makeBranch(cond)); - } + LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value); + if (logicalImm.isValid()) { + m_assembler.tst<32>(reg, logicalImm); + return Jump(makeBranch(cond)); } move(mask, getCachedDataTempRegisterIDAndInvalidate()); @@ -1695,6 +2640,8 @@ public: Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask) { + if (reg == mask && (cond == Zero || cond == NonZero)) + return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg)); m_assembler.tst<64>(reg, mask); return Jump(makeBranch(cond)); } @@ -1708,13 +2655,11 @@ public: } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero))) return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond))); else { - if ((cond == Zero) || (cond == NonZero)) { - LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value); + LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value); - if (logicalImm.isValid()) { - m_assembler.tst<64>(reg, logicalImm); - return Jump(makeBranch(cond)); - } + if (logicalImm.isValid()) { + m_assembler.tst<64>(reg, logicalImm); + return Jump(makeBranch(cond)); } signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate()); @@ -1723,6 +2668,28 @@ public: return Jump(makeBranch(cond)); } + Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask) + { + if (mask.m_value == -1) { + if ((cond == Zero) || (cond == NonZero)) + return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg)); + m_assembler.tst<64>(reg, reg); + } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero))) + return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond))); + else { + LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value); + + if (logicalImm.isValid()) { + m_assembler.tst<64>(reg, logicalImm); + return Jump(makeBranch(cond)); + } + + move(mask, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.tst<64>(reg, dataTempRegister); + } + return Jump(makeBranch(cond)); + } + Jump branchTest64(ResultCondition cond, Address address, RegisterID mask) { load64(address, getCachedDataTempRegisterIDAndInvalidate()); @@ -1749,27 +2716,36 @@ public: Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { - load8(address, getCachedDataTempRegisterIDAndInvalidate()); - return branchTest32(cond, dataTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, 
getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask8); } Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { - load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); - return branchTest32(cond, dataTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address.m_ptr, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask8); } Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1)) { + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate()); - m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask); + + if (MacroAssemblerHelpers::isUnsigned<MacroAssemblerARM64>(cond)) + m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister); + else + m_assembler.ldrsb<32>(dataTempRegister, address.base, dataTempRegister); + + return branchTest32(cond, dataTempRegister, mask8); } Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) { - load8(address, getCachedDataTempRegisterIDAndInvalidate()); - return branchTest32(cond, dataTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate()); + return branchTest32(cond, dataTempRegister, mask8); } Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right) @@ -1875,7 +2851,14 @@ public: return branchAdd64(cond, dest, imm, dest); } - Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest) + { + ASSERT(isUInt12(imm.m_value)); + m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + + Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest) { ASSERT(cond != Signed); @@ -1886,14 +2869,19 @@ public: // This is a signed multiple of two 32-bit values, producing a 64-bit result. m_assembler.smull(dest, src1, src2); - // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister. - m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32); - // Splat bit 31 of the result to bits 31..0 of memoryTempRegister. - m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31); + // Copy bits 63..32 of the result to bits 31..0 of scratch1. + m_assembler.asr<64>(scratch1, dest, 32); + // Splat bit 31 of the result to bits 31..0 of scratch2. + m_assembler.asr<32>(scratch2, dest, 31); // After a mul32 the top 32 bits of the register should be clear. zeroExtend32ToPtr(dest, dest); // Check that bits 31..63 of the original result were all equal. 
- return branch32(NotEqual, memoryTempRegister, dataTempRegister); + return branch32(NotEqual, scratch2, scratch1); + } + + Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest); } Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) @@ -1901,13 +2889,13 @@ public: return branchMul32(cond, dest, src, dest); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { move(imm, getCachedDataTempRegisterIDAndInvalidate()); return branchMul32(cond, dataTempRegister, src, dest); } - Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest) { ASSERT(cond != Signed); @@ -1917,12 +2905,17 @@ public: if (cond != Overflow) return branchTest64(cond, dest); - // Compute bits 127..64 of the result into dataTempRegister. - m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2); - // Splat bit 63 of the result to bits 63..0 of memoryTempRegister. - m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63); + // Compute bits 127..64 of the result into scratch1. + m_assembler.smulh(scratch1, src1, src2); + // Splat bit 63 of the result to bits 63..0 of scratch2. + m_assembler.asr<64>(scratch2, dest, 63); // Check that bits 31..63 of the original result were all equal. - return branch64(NotEqual, memoryTempRegister, dataTempRegister); + return branch64(NotEqual, scratch2, scratch1); + } + + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest); } Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest) @@ -2010,6 +3003,13 @@ public: return branchSub64(cond, dest, imm, dest); } + Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest) + { + ASSERT(isUInt12(imm.m_value)); + m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value)); + return Jump(makeBranch(cond)); + } + // Jumps, calls, returns @@ -2054,6 +3054,12 @@ public: load64(address, getCachedDataTempRegisterIDAndInvalidate()); m_assembler.br(dataTempRegister); } + + void jump(BaseIndex address) + { + load64(address, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.br(dataTempRegister); + } void jump(AbsoluteAddress address) { @@ -2074,6 +3080,13 @@ public: return Call(m_assembler.label(), Call::LinkableNear); } + ALWAYS_INLINE Call nearTailCall() + { + AssemblerLabel label = m_assembler.label(); + m_assembler.b(); + return Call(label, Call::LinkableNearTail); + } + ALWAYS_INLINE void ret() { m_assembler.ret(); @@ -2108,8 +3121,21 @@ public: void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { - move(right, getCachedDataTempRegisterIDAndInvalidate()); - m_assembler.cmp<32>(left, dataTempRegister); + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test32(*resultCondition, left, left, dest); + return; + } + } + + if (isUInt12(right.m_value)) + m_assembler.cmp<32>(left, UInt12(right.m_value)); + else if (isUInt12(-right.m_value)) + 
m_assembler.cmn<32>(left, UInt12(-right.m_value)); + else { + move(right, getCachedDataTempRegisterIDAndInvalidate()); + m_assembler.cmp<32>(left, dataTempRegister); + } m_assembler.cset<32>(dest, ARM64Condition(cond)); } @@ -2121,6 +3147,13 @@ public: void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test64(*resultCondition, left, left, dest); + return; + } + } + signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate()); m_assembler.cmp<64>(left, dataTempRegister); m_assembler.cset<32>(dest, ARM64Condition(cond)); @@ -2128,32 +3161,35 @@ public: void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) { - load8(left, getCachedMemoryTempRegisterIDAndInvalidate()); - move(right, getCachedDataTempRegisterIDAndInvalidate()); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate()); + move(right8, getCachedDataTempRegisterIDAndInvalidate()); compare32(cond, memoryTempRegister, dataTempRegister, dest); } - + + void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest) + { + m_assembler.tst<32>(src, mask); + m_assembler.cset<32>(dest, ARM64Condition(cond)); + } + void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest) { - if (mask.m_value == -1) - m_assembler.tst<32>(src, src); - else { - signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate()); - m_assembler.tst<32>(src, dataTempRegister); - } + test32(src, mask); m_assembler.cset<32>(dest, ARM64Condition(cond)); } void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) { - load32(address, getCachedDataTempRegisterIDAndInvalidate()); - test32(cond, dataTempRegister, mask, dest); + load32(address, getCachedMemoryTempRegisterIDAndInvalidate()); + test32(cond, memoryTempRegister, mask, dest); } void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) { - load8(address, getCachedDataTempRegisterIDAndInvalidate()); - test32(cond, dataTempRegister, mask, dest); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedMemoryTempRegisterIDAndInvalidate()); + test32(cond, memoryTempRegister, mask8, dest); } void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) @@ -2173,6 +3209,10 @@ public: m_assembler.cset<32>(dest, ARM64Condition(cond)); } + void setCarry(RegisterID dest) + { + m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS); + } // Patchable operations @@ -2204,10 +3244,17 @@ public: return branch64(cond, left, dataTempRegister); } - PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0)) + ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + dataLabel = DataLabel32(this); + moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate()); + return branch32(cond, left, dataTempRegister); + } + + PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right) { m_makeJumpPatchable = true; - Jump result = branch32(cond, left, TrustedImm32(right)); + Jump result = branch64(cond, 
left, TrustedImm64(right)); m_makeJumpPatchable = false; return PatchableJump(result); } @@ -2228,6 +3275,30 @@ public: return PatchableJump(result); } + PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm) + { + m_makeJumpPatchable = true; + Jump result = branch32(cond, left, imm); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm) + { + m_makeJumpPatchable = true; + Jump result = branch64(cond, reg, imm); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right) + { + m_makeJumpPatchable = true; + Jump result = branch64(cond, left, right); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { m_makeJumpPatchable = true; @@ -2236,6 +3307,14 @@ public: return PatchableJump(result); } + PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + m_makeJumpPatchable = true; + Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + PatchableJump patchableJump() { m_makeJumpPatchable = true; @@ -2274,11 +3353,26 @@ public: m_assembler.nop(); } + // We take memoryFence to mean acqrel. This has acqrel semantics on ARM64. void memoryFence() { - m_assembler.dmbSY(); + m_assembler.dmbISH(); } + // We take this to mean that it prevents motion of normal stores. That's a store fence on ARM64 (hence the "ST"). + void storeFence() + { + m_assembler.dmbISHST(); + } + + // We take this to mean that it prevents motion of normal loads. Ideally we'd have expressed this + // using dependencies or half fences, but there are cases where this is as good as it gets. The only + // way to get a standalone load fence instruction on ARM is to use the ISH fence, which is just like + // the memoryFence(). + void loadFence() + { + m_assembler.dmbISH(); + } // Misc helper functions. @@ -2288,6 +3382,23 @@ public: return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond))); } + static std::optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond) + { + switch (cond) { + case Equal: + return Zero; + case NotEqual: + return NonZero; + case LessThan: + return Signed; + case GreaterThanOrEqual: + return PositiveOrZero; + break; + default: + return std::nullopt; + } + } + static FunctionPtr readCallTarget(CodeLocationCall call) { return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation()))); @@ -2303,9 +3414,20 @@ public: return ARM64Assembler::maxJumpReplacementSize(); } - RegisterID scratchRegisterForBlinding() { return getCachedDataTempRegisterIDAndInvalidate(); } + static ptrdiff_t patchableJumpSize() + { + return ARM64Assembler::patchableJumpSize(); + } + + RegisterID scratchRegisterForBlinding() + { + // We *do not* have a scratch register for blinding. 
+ RELEASE_ASSERT_NOT_REACHED(); + return getCachedDataTempRegisterIDAndInvalidate(); + } static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -2318,6 +3440,12 @@ public: return CodeLocationLabel(); } + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) { reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue); @@ -2328,6 +3456,25 @@ public: UNREACHABLE_FOR_PLATFORM(); } + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) + { + UNREACHABLE_FOR_PLATFORM(); + } + + static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + { + ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); + } + + static void repatchCall(CodeLocationCall call, FunctionPtr destination) + { + ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); + } + +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) + protected: ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond) { @@ -2381,8 +3528,26 @@ protected: } private: - ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); } - ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); } + ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() + { + RELEASE_ASSERT(m_allowScratchRegister); + return dataMemoryTempRegister().registerIDInvalidate(); + } + ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() + { + RELEASE_ASSERT(m_allowScratchRegister); + return cachedMemoryTempRegister().registerIDInvalidate(); + } + ALWAYS_INLINE CachedTempRegister& dataMemoryTempRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return m_dataMemoryTempRegister; + } + ALWAYS_INLINE CachedTempRegister& cachedMemoryTempRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return m_cachedMemoryTempRegister; + } ALWAYS_INLINE bool isInIntRange(intptr_t value) { @@ -2461,6 +3626,18 @@ private: } template<int datasize> + ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) + { + loadUnsignedImmediate<datasize>(rt, rn, pimm); + } + + template<int datasize> + ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm) + { + loadUnscaledImmediate<datasize>(rt, rn, simm); + } + + template<int datasize> ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm) { m_assembler.str<datasize>(rt, rn, pimm); @@ -2498,21 +3675,16 @@ private: } } - void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest) - { - move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest); - } - template<int datasize> ALWAYS_INLINE void load(const void* address, RegisterID dest) { intptr_t currentRegisterContents; - if 
(m_cachedMemoryTempRegister.value(currentRegisterContents)) { + if (cachedMemoryTempRegister().value(currentRegisterContents)) { intptr_t addressAsInt = reinterpret_cast<intptr_t>(address); intptr_t addressDelta = addressAsInt - currentRegisterContents; if (dest == memoryTempRegister) - m_cachedMemoryTempRegister.invalidate(); + cachedMemoryTempRegister().invalidate(); if (isInIntRange(addressDelta)) { if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) { @@ -2528,7 +3700,7 @@ private: if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) { m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0); - m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address)); + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr); return; } @@ -2536,17 +3708,18 @@ private: move(TrustedImmPtr(address), memoryTempRegister); if (dest == memoryTempRegister) - m_cachedMemoryTempRegister.invalidate(); + cachedMemoryTempRegister().invalidate(); else - m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address)); + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr); } template<int datasize> ALWAYS_INLINE void store(RegisterID src, const void* address) { + ASSERT(src != memoryTempRegister); intptr_t currentRegisterContents; - if (m_cachedMemoryTempRegister.value(currentRegisterContents)) { + if (cachedMemoryTempRegister().value(currentRegisterContents)) { intptr_t addressAsInt = reinterpret_cast<intptr_t>(address); intptr_t addressDelta = addressAsInt - currentRegisterContents; @@ -2564,14 +3737,14 @@ private: if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) { m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0); - m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address)); + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr); return; } } move(TrustedImmPtr(address), memoryTempRegister); - m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address)); + cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address)); m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr); } @@ -2648,6 +3821,20 @@ private: } template<int datasize> + ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset) + { + if (ARM64Assembler::canEncodeSImmOffset(offset)) { + loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset); + return true; + } + if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) { + loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset)); + return true; + } + return false; + } + + template<int datasize> ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset) { if (ARM64Assembler::canEncodeSImmOffset(offset)) { @@ -2689,25 +3876,37 @@ private: return false; } - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkCall(void* code, Call call, FunctionPtr function) + Jump jumpAfterFloatingPointCompare(DoubleCondition cond) { - if (call.isFlagSet(Call::Near)) - ARM64Assembler::linkCall(code, call.m_label, function.value()); - else - ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), 
function.value()); + if (cond == DoubleNotEqual) { + // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump. + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump result = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + return result; + } + if (cond == DoubleEqualOrUnordered) { + Jump unordered = makeBranch(ARM64Assembler::ConditionVS); + Jump notEqual = makeBranch(ARM64Assembler::ConditionNE); + unordered.link(this); + // We get here if either unordered or equal. + Jump result = jump(); + notEqual.link(this); + return result; + } + return makeBranch(cond); } - static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) - { - ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); - } + friend class LinkBuffer; - static void repatchCall(CodeLocationCall call, FunctionPtr destination) + static void linkCall(void* code, Call call, FunctionPtr function) { - ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress()); + if (!call.isFlagSet(Call::Near)) + ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value()); + else if (call.isFlagSet(Call::Tail)) + ARM64Assembler::linkJump(code, call.m_label, function.value()); + else + ARM64Assembler::linkCall(code, call.m_label, function.value()); } CachedTempRegister m_dataMemoryTempRegister; @@ -2729,6 +3928,18 @@ ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, } template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrsb<64>(rt, rn, pimm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm) +{ + m_assembler.ldrsh<64>(rt, rn, pimm); +} + +template<> ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) { m_assembler.ldurb(rt, rn, simm); @@ -2741,6 +3952,18 @@ ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, } template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldursb<64>(rt, rn, simm); +} + +template<> +ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm) +{ + m_assembler.ldursh<64>(rt, rn, simm); +} + +template<> ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm) { m_assembler.strb(rt, rn, pimm); @@ -2767,5 +3990,3 @@ ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // MacroAssemblerARM64_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp new file mode 100644 index 000000000..7119697bb --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.cpp @@ -0,0 +1,348 @@ +/* + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) +#include "MacroAssemblerARMv7.h" + +#include <wtf/InlineASM.h> + +namespace JSC { + +#if ENABLE(MASM_PROBE) + +extern "C" void ctiMasmProbeTrampoline(); + +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerARMv7::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. + +#define PTR_SIZE 4 +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) + +#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE) + +#define GPREG_SIZE 4 +#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) +#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE)) +#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE)) +#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE)) +#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE)) +#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE)) +#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE)) +#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE)) +#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE)) +#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE)) +#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE)) +#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE)) +#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE)) +#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE)) +#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE)) +#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE)) + +#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE)) +#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE)) + +#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE)) + +#define FPREG_SIZE 8 +#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE)) +#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE)) +#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE)) +#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE)) +#define PROBE_CPU_D4_OFFSET 
(PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE)) +#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE)) +#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE)) +#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE)) +#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE)) +#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE)) +#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE)) +#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE)) +#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE)) +#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) +#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) +#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) +#define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) +#define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE)) +#define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE)) +#define PROBE_CPU_D19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE)) +#define PROBE_CPU_D20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE)) +#define PROBE_CPU_D21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE)) +#define PROBE_CPU_D22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE)) +#define PROBE_CPU_D23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE)) +#define PROBE_CPU_D24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE)) +#define PROBE_CPU_D25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE)) +#define PROBE_CPU_D26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE)) +#define PROBE_CPU_D27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE)) +#define PROBE_CPU_D28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE)) +#define PROBE_CPU_D29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE)) +#define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE)) +#define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE)) +#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE)) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to change ctiMasmProbeTrampoline offsets above to match. 
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerARMv7::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, 
ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d19) == PROBE_CPU_D19_OFFSET, ProbeContext_cpu_d19_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d20) == PROBE_CPU_D20_OFFSET, ProbeContext_cpu_d20_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d21) == PROBE_CPU_D21_OFFSET, ProbeContext_cpu_d21_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d22) == PROBE_CPU_D22_OFFSET, ProbeContext_cpu_d22_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d23) == PROBE_CPU_D23_OFFSET, ProbeContext_cpu_d23_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d24) == PROBE_CPU_D24_OFFSET, ProbeContext_cpu_d24_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d25) == PROBE_CPU_D25_OFFSET, ProbeContext_cpu_d25_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d26) == PROBE_CPU_D26_OFFSET, ProbeContext_cpu_d26_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d27) == PROBE_CPU_D27_OFFSET, ProbeContext_cpu_d27_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == PROBE_CPU_D28_OFFSET, ProbeContext_cpu_d28_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(sizeof(MacroAssemblerARMv7::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); + +#undef PROBE_OFFSETOF + +asm ( + ".text" "\n" + ".align 2" "\n" + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + ".thumb" "\n" + ".thumb_func " 
THUMB_FUNC_PARAM(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + // MacroAssemblerARMv7::probe() has already generated code to store some values. + // The top of stack now looks like this: + // esp[0 * ptrSize]: probeFunction + // esp[1 * ptrSize]: arg1 + // esp[2 * ptrSize]: arg2 + // esp[3 * ptrSize]: saved r0 + // esp[4 * ptrSize]: saved ip + // esp[5 * ptrSize]: saved lr + // esp[6 * ptrSize]: saved sp + + "mov ip, sp" "\n" + "mov r0, sp" "\n" + "sub r0, r0, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n" + + // The ARM EABI specifies that the stack needs to be 16 byte aligned. + "bic r0, r0, #0xf" "\n" + "mov sp, r0" "\n" + + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R1_OFFSET) "\n" + "stmia lr, { r1-r11 }" "\n" + "mrs lr, APSR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "vmrs lr, FPSCR" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + + "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n" + "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n" + "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "]" "\n" + "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n" + "vstmia.64 ip!, { d0-d15 }" "\n" + "vstmia.64 ip!, { d16-d31 }" "\n" + + "mov fp, sp" "\n" // Save the ProbeContext*. + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n" + "mov r0, sp" "\n" // the ProbeContext* arg. + "blx ip" "\n" + + "mov sp, fp" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n" + "vldmdb.64 ip!, { d16-d31 }" "\n" + "vldmdb.64 ip!, { d0-d15 }" "\n" + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n" + "ldmdb ip, { r0-r11 }" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" + "vmsr FPSCR, ip" "\n" + + // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr. + // There are 2 issues that complicate the restoration of these last few + // registers: + // + // 1. Normal ARM calling convention relies on moving lr to pc to return to + // the caller. In our case, the address to return to is specified by + // ProbeContext.cpu.pc. And at that moment, we won't have any available + // scratch registers to hold the return address (lr needs to hold + // ProbeContext.cpu.lr, not the return address). + // + // The solution is to store the return address on the stack and load the + // pc from there. + // + // 2. Issue 1 means we will need to write to the stack location at + // ProbeContext.cpu.sp - 4. 
But if the user probe function had modified + // the value of ProbeContext.cpu.sp to point in the range between + // &ProbeContext.cpu.ip thru &ProbeContext.cpu.aspr, then the action for + // Issue 1 may trash the values to be restored before we can restore + // them. + // + // The solution is to check if ProbeContext.cpu.sp contains a value in + // the undesirable range. If so, we copy the remaining ProbeContext + // register data to a safe range (at memory lower than where + // ProbeContext.cpu.sp points) first, and restore the remaining register + // from this new range. + + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "cmp lr, ip" "\n" + "it gt" "\n" + "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // We get here because the new expected stack pointer location is lower + // than where it's supposed to be. This means the safe range of stack + // memory where we'll be copying the remaining register restore values to + // might be in a region of memory below the sp i.e. unallocated stack + // memory. This, in turn, makes it vulnerable to interrupts potentially + // trashing the copied values. To prevent that, we must first allocate the + // needed stack memory by adjusting the sp before the copying. + + "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE) + " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n" + + "mov ip, sp" "\n" + "mov sp, lr" "\n" + "mov lr, ip" "\n" + + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + + ".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampolineEnd) "\n" + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" + "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n" + "str ip, [lr]" "\n" + "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n" + "msr APSR, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n" + "mov lr, ip" "\n" + "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n" + "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n" + + "pop { pc }" "\n" +); +#endif // COMPILER(GCC_OR_CLANG) + +void MacroAssemblerARMv7::probe(MacroAssemblerARMv7::ProbeFunction function, void* arg1, void* arg2) +{ + push(RegisterID::lr); + push(RegisterID::lr); + add32(TrustedImm32(8), RegisterID::sp, RegisterID::lr); + store32(RegisterID::lr, ArmAddress(RegisterID::sp, 4)); + push(RegisterID::ip); + push(RegisterID::r0); + // The following uses RegisterID::ip. So, they must come after we push ip above. 
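For readers tracing the restore path in the trampoline above, the sp-overlap test it performs can be restated in C++ rather than Thumb assembly. The sketch below is illustrative only and is not part of the patch: SketchProbeContext is a hypothetical, simplified stand-in for MacroAssemblerARMv7::ProbeContext (field order following the PROBE_CPU_* offsets asserted earlier), and the function name is invented for this note.

    #include <cstdint>

    // Hypothetical, simplified layout standing in for MacroAssemblerARMv7::ProbeContext.
    struct SketchProbeContext {
        struct {
            uintptr_t r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
            uintptr_t ip, sp, lr, pc, apsr, fpscr;
            // d0..d31 follow in the real context.
        } cpu;
    };

    // Restates the "cmp lr, ip / bgt ctiMasmProbeTrampolineEnd" check: if the
    // probe function left cpu.sp at or below &cpu.apsr, writing the return pc
    // at cpu.sp - 4 could clobber fields that have not been restored yet, so
    // ip, sp, lr, pc and apsr must first be copied below the new sp (after
    // moving sp down so an interrupt cannot trash the copy) and restored from
    // that safe copy instead of in place.
    inline bool mustRelocateRemainingRegisters(const SketchProbeContext& context)
    {
        return context.cpu.sp <= reinterpret_cast<uintptr_t>(&context.cpu.apsr);
    }

When this check is false, the trampoline branches straight to ctiMasmProbeTrampolineEnd and restores the remaining registers directly from the ProbeContext.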
+ push(trustedImm32FromPtr(arg2)); + push(trustedImm32FromPtr(arg1)); + push(trustedImm32FromPtr(function)); + + move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::ip); + m_assembler.blx(RegisterID::ip); +} +#endif // ENABLE(MASM_PROBE) + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h index 68a04fd22..3c95f2802 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2010 Apple Inc. All rights reserved. + * Copyright (C) 2009-2010, 2014-2016 Apple Inc. All rights reserved. * Copyright (C) 2010 University of Szeged * * Redistribution and use in source and binary forms, with or without @@ -24,8 +24,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef MacroAssemblerARMv7_h -#define MacroAssemblerARMv7_h +#pragma once #if ENABLE(ASSEMBLER) @@ -34,7 +33,7 @@ namespace JSC { -class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> { +class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler, MacroAssemblerARMv7> { static const RegisterID dataTempRegister = ARMRegisters::ip; static const RegisterID addressTempRegister = ARMRegisters::r6; @@ -42,6 +41,9 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> { inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); } public: + static const unsigned numGPRs = 16; + static const unsigned numFPRs = 16; + MacroAssemblerARMv7() : m_makeJumpPatchable(false) { @@ -62,12 +64,11 @@ public: Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); } void* unlinkedCode() { return m_assembler.unlinkedCode(); } - bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); } - JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); } - JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); } - void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); } - int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); } - void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); } + static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); } + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); } + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); } + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); } + static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARMv7Assembler::link(record, from, fromInstruction, to); } struct ArmAddress { enum AddressType { @@ -156,6 +157,11 @@ public: m_assembler.add(dest, dest, src); } + void add32(RegisterID left, RegisterID right, RegisterID 
dest) + { + m_assembler.add(dest, left, right); + } + void add32(TrustedImm32 imm, RegisterID dest) { add32(imm, dest, dest); @@ -170,6 +176,14 @@ public: void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) { ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value); + + // For adds with stack pointer destination, moving the src first to sp is + // needed to avoid unpredictable instruction + if (dest == ARMRegisters::sp && src != dest) { + move(src, ARMRegisters::sp); + src = ARMRegisters::sp; + } + if (armImm.isValid()) m_assembler.add(dest, src, armImm); else { @@ -218,6 +232,11 @@ public: store32(dataTempRegister, address.m_ptr); } + void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) + { + add32(imm, srcDest); + } + void add64(TrustedImm32 imm, AbsoluteAddress address) { move(TrustedImmPtr(address.m_ptr), addressTempRegister); @@ -305,6 +324,11 @@ public: m_assembler.smull(dest, dataTempRegister, dest, src); } + void mul32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.smull(dest, dataTempRegister, left, right); + } + void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) { move(imm, dataTempRegister); @@ -329,6 +353,31 @@ public: store32(dataTempRegister, addressTempRegister); } + void or32(TrustedImm32 imm, AbsoluteAddress address) + { + ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value); + if (armImm.isValid()) { + move(TrustedImmPtr(address.m_ptr), addressTempRegister); + load32(addressTempRegister, dataTempRegister); + m_assembler.orr(dataTempRegister, dataTempRegister, armImm); + store32(dataTempRegister, addressTempRegister); + } else { + move(TrustedImmPtr(address.m_ptr), addressTempRegister); + load32(addressTempRegister, dataTempRegister); + move(imm, addressTempRegister); + m_assembler.orr(dataTempRegister, dataTempRegister, addressTempRegister); + move(TrustedImmPtr(address.m_ptr), addressTempRegister); + store32(dataTempRegister, addressTempRegister); + } + } + + void or32(TrustedImm32 imm, Address address) + { + load32(address, dataTempRegister); + or32(imm, dataTempRegister, dataTempRegister); + store32(dataTempRegister, address); + } + void or32(TrustedImm32 imm, RegisterID dest) { or32(imm, dest, dest); @@ -345,6 +394,7 @@ public: if (armImm.isValid()) m_assembler.orr(dest, src, armImm); else { + ASSERT(src != dataTempRegister); move(imm, dataTempRegister); m_assembler.orr(dest, src, dataTempRegister); } @@ -362,7 +412,10 @@ public: void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.asr(dest, src, imm.m_value & 0x1f); + if (!imm.m_value) + move(src, dest); + else + m_assembler.asr(dest, src, imm.m_value & 0x1f); } void rshift32(RegisterID shiftAmount, RegisterID dest) @@ -387,7 +440,10 @@ public: void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - m_assembler.lsr(dest, src, imm.m_value & 0x1f); + if (!imm.m_value) + move(src, dest); + else + m_assembler.lsr(dest, src, imm.m_value & 0x1f); } void urshift32(RegisterID shiftAmount, RegisterID dest) @@ -405,6 +461,11 @@ public: m_assembler.sub(dest, dest, src); } + void sub32(RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.sub(dest, left, right); + } + void sub32(TrustedImm32 imm, RegisterID dest) { ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value); @@ -527,7 +588,7 @@ private: } } - void load16Signed(ArmAddress address, RegisterID dest) + void load16SignedExtendTo32(ArmAddress address, RegisterID dest) { ASSERT(address.type == 
ArmAddress::HasIndex); m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale); @@ -547,7 +608,7 @@ private: } } - void load8Signed(ArmAddress address, RegisterID dest) + void load8SignedExtendTo32(ArmAddress address, RegisterID dest) { ASSERT(address.type == ArmAddress::HasIndex); m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale); @@ -624,6 +685,18 @@ public: m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0)); } + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), dataTempRegister); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), addressTempRegister); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result(this); @@ -637,7 +710,7 @@ public: load8(setupArmAddress(address), dest); } - void load8Signed(ImplicitAddress, RegisterID) + void load8SignedExtendTo32(ImplicitAddress, RegisterID) { UNREACHABLE_FOR_PLATFORM(); } @@ -647,9 +720,9 @@ public: load8(setupArmAddress(address), dest); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { - load8Signed(setupArmAddress(address), dest); + load8SignedExtendTo32(setupArmAddress(address), dest); } void load8(const void* address, RegisterID dest) @@ -683,9 +756,9 @@ public: m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { - load16Signed(setupArmAddress(address), dest); + load16SignedExtendTo32(setupArmAddress(address), dest); } void load16(ImplicitAddress address, RegisterID dest) @@ -699,7 +772,7 @@ public: } } - void load16Signed(ImplicitAddress, RegisterID) + void load16SignedExtendTo32(ImplicitAddress, RegisterID) { UNREACHABLE_FOR_PLATFORM(); } @@ -745,6 +818,11 @@ public: store32(dataTempRegister, address); } + void store8(RegisterID src, Address address) + { + store8(src, setupArmAddress(address)); + } + void store8(RegisterID src, BaseIndex address) { store8(src, setupArmAddress(address)); @@ -758,7 +836,15 @@ public: void store8(TrustedImm32 imm, void* address) { - move(imm, dataTempRegister); + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + move(imm8, dataTempRegister); + store8(dataTempRegister, address); + } + + void store8(TrustedImm32 imm, Address address) + { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + move(imm8, dataTempRegister); store8(dataTempRegister, address); } @@ -803,6 +889,7 @@ public: static bool supportsFloatingPointTruncate() { return true; } static bool supportsFloatingPointSqrt() { return true; } static bool supportsFloatingPointAbs() { return true; } + static bool supportsFloatingPointRounding() { return false; } void loadDouble(ImplicitAddress address, FPRegisterID dest) { @@ -856,9 +943,15 @@ public: m_assembler.vmov(dest, src); } - void loadDouble(const void* address, FPRegisterID dest) + void moveZeroToDouble(FPRegisterID reg) { - move(TrustedImmPtr(address), addressTempRegister); + static double zeroConstant = 0.; + loadDouble(TrustedImmPtr(&zeroConstant), reg); + } + + void loadDouble(TrustedImmPtr address, FPRegisterID dest) + { + move(address, addressTempRegister); m_assembler.vldr(dest, addressTempRegister, 0); } @@ -892,9 +985,9 @@ public: m_assembler.fsts(ARMRegisters::asSingle(src), base, offset); } - void storeDouble(FPRegisterID 
src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { - move(TrustedImmPtr(address), addressTempRegister); + move(address, addressTempRegister); storeDouble(src, addressTempRegister); } @@ -932,7 +1025,7 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, fpTempRegister); + loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); m_assembler.vadd(dest, dest, fpTempRegister); } @@ -993,6 +1086,24 @@ public: m_assembler.vneg(dest, src); } + NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + void convertInt32ToDouble(RegisterID src, FPRegisterID dest) { m_assembler.vmov(fpTempRegister, src, src); @@ -1138,14 +1249,12 @@ public: void pop(RegisterID dest) { - // store postindexed with writeback - m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true); + m_assembler.pop(dest); } void push(RegisterID src) { - // store preindexed with writeback - m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true); + m_assembler.push(src); } void push(Address address) @@ -1160,6 +1269,16 @@ public: push(dataTempRegister); } + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.pop(1 << dest1 | 1 << dest2); + } + + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.push(1 << src1 | 1 << src2); + } + // Register move operations: // // Move values in registers. @@ -1225,6 +1344,11 @@ public: m_assembler.dmbSY(); } + void storeFence() + { + m_assembler.dmbISHST(); + } + static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) { ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); @@ -1235,6 +1359,11 @@ public: return ARMv7Assembler::maxJumpReplacementSize(); } + static ptrdiff_t patchableJumpSize() + { + return ARMv7Assembler::patchableJumpSize(); + } + // Forwards / external control flow operations: // // This set of jump and conditional branch operations return a Jump @@ -1255,25 +1384,22 @@ public: private: // Should we be using TEQ for equal/not-equal? 
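One detail worth calling out from the push/pop changes above: the new pushPair and popPair helpers hand the assembler a Thumb-2 register-list bitmask built by OR-ing two one-hot bits. The following self-contained illustration assumes the ARM architectural numbering (r0 = bit 0 through r15 = bit 15); the function name is made up for this sketch and does not appear in the patch.

    #include <cassert>
    #include <cstdint>

    // Build the register-list operand for an ARM/Thumb PUSH or POP of two
    // registers: one bit per register, with bit N selecting rN.
    inline uint16_t registerListForPair(unsigned reg1, unsigned reg2)
    {
        assert(reg1 < 16 && reg2 < 16 && reg1 != reg2);
        return static_cast<uint16_t>((1u << reg1) | (1u << reg2));
    }

    // Example: a pair of r4 and r5 yields 0x0030, i.e. "push { r4, r5 }".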
- void compare32(RegisterID left, TrustedImm32 right) + void compare32AndSetFlags(RegisterID left, TrustedImm32 right) { int32_t imm = right.m_value; - if (!imm) - m_assembler.tst(left, left); + ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); + if (armImm.isValid()) + m_assembler.cmp(left, armImm); + else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) + m_assembler.cmn(left, armImm); else { - ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); - if (armImm.isValid()) - m_assembler.cmp(left, armImm); - else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) - m_assembler.cmn(left, armImm); - else { - move(TrustedImm32(imm), dataTempRegister); - m_assembler.cmp(left, dataTempRegister); - } + move(TrustedImm32(imm), dataTempRegister); + m_assembler.cmp(left, dataTempRegister); } } - void test32(RegisterID reg, TrustedImm32 mask) +public: + void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { int32_t imm = mask.m_value; @@ -1281,16 +1407,28 @@ private: m_assembler.tst(reg, reg); else { ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); - if (armImm.isValid()) - m_assembler.tst(reg, armImm); - else { + if (armImm.isValid()) { + if (reg == ARMRegisters::sp) { + move(reg, addressTempRegister); + m_assembler.tst(addressTempRegister, armImm); + } else + m_assembler.tst(reg, armImm); + } else { move(mask, dataTempRegister); - m_assembler.tst(reg, dataTempRegister); + if (reg == ARMRegisters::sp) { + move(reg, addressTempRegister); + m_assembler.tst(addressTempRegister, dataTempRegister); + } else + m_assembler.tst(reg, dataTempRegister); } } } + + Jump branch(ResultCondition cond) + { + return Jump(makeBranch(cond)); + } -public: Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right) { m_assembler.cmp(left, right); @@ -1299,7 +1437,7 @@ public: Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) { - compare32(left, right); + compare32AndSetFlags(left, right); return Jump(makeBranch(cond)); } @@ -1349,44 +1487,54 @@ public: return branch32(cond, addressTempRegister, right); } + Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) + { + load32(left, dataTempRegister); + return branch32(cond, dataTempRegister, right); + } + Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right) { - compare32(left, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + compare32AndSetFlags(left, right8); return Jump(makeBranch(cond)); } Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) { - ASSERT(!(0xffffff00 & right.m_value)); // use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/ - load8(left, addressTempRegister); - return branch8(cond, addressTempRegister, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister); + return branch8(cond, addressTempRegister, right8); } Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) { - ASSERT(!(0xffffff00 & right.m_value)); // use addressTempRegister incase the branch32 we call uses dataTempRegister. 
:-/ - load8(left, addressTempRegister); - return branch32(cond, addressTempRegister, right); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister); + return branch32(cond, addressTempRegister, right8); } Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right) { // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister. + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); move(TrustedImmPtr(address.m_ptr), addressTempRegister); - load8(Address(addressTempRegister), addressTempRegister); - return branch32(cond, addressTempRegister, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister); + return branch32(cond, addressTempRegister, right8); } Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) { + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); m_assembler.tst(reg, mask); return Jump(makeBranch(cond)); } Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { + ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero); test32(reg, mask); return Jump(makeBranch(cond)); } @@ -1408,23 +1556,26 @@ public: Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) { // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/ - load8(address, addressTempRegister); - return branchTest32(cond, addressTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister); + return branchTest32(cond, addressTempRegister, mask8); } Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/ - load8(address, addressTempRegister); - return branchTest32(cond, addressTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, addressTempRegister); + return branchTest32(cond, addressTempRegister, mask8); } Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { // use addressTempRegister incase the branchTest8 we call uses dataTempRegister. 
:-/ + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); move(TrustedImmPtr(address.m_ptr), addressTempRegister); - load8(Address(addressTempRegister), addressTempRegister); - return branchTest32(cond, addressTempRegister, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(addressTempRegister), addressTempRegister); + return branchTest32(cond, addressTempRegister, mask8); } void jump(RegisterID target) @@ -1534,7 +1685,7 @@ public: return branchMul32(cond, src, dest, dest); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { move(imm, dataTempRegister); return branchMul32(cond, dataTempRegister, src, dest); @@ -1607,6 +1758,12 @@ public: return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear); } + ALWAYS_INLINE Call nearTailCall() + { + moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister); + return Call(m_assembler.bx(dataTempRegister), Call::LinkableNearTail); + } + ALWAYS_INLINE Call call() { moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister); @@ -1645,13 +1802,14 @@ public: void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) { - load8(left, addressTempRegister); - compare32(cond, addressTempRegister, right, dest); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, addressTempRegister); + compare32(cond, addressTempRegister, right8, dest); } void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { - compare32(left, right); + compare32AndSetFlags(left, right); m_assembler.it(armV7Condition(cond), false); m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1)); m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0)); @@ -1672,8 +1830,9 @@ public: void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) { - load8(address, dataTempRegister); - test32(dataTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister); + test32(dataTempRegister, mask8); m_assembler.it(armV7Condition(cond), false); m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1)); m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0)); @@ -1706,6 +1865,13 @@ public: return branch32(cond, addressTempRegister, dataTempRegister); } + ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + load32(left, addressTempRegister); + dataLabel = moveWithPatch(initialRightValue, dataTempRegister); + return branch32(cond, addressTempRegister, dataTempRegister); + } + PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0)) { m_makeJumpPatchable = true; @@ -1730,6 +1896,14 @@ public: return PatchableJump(result); } + PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm) + { + m_makeJumpPatchable = true; + Jump result = branch32(cond, left, imm); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { m_makeJumpPatchable = true; @@ -1738,6 +1912,14 @@ 
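The mask8OnCondition/load8OnCondition pattern used throughout the branch8/branchTest8 rewrites above replaces the old ASSERT(!(0xffffff00 & ...)) restriction with explicit 8-bit normalization: the loaded byte is zero- or sign-extended to 32 bits depending on whether the condition is unsigned or signed, and the immediate is truncated the same way so the 8-bit comparison becomes an equivalent 32-bit one. The real helpers are templates added later in this diff (MacroAssemblerHelpers.h); the sketch below only restates the truncation rule, with function names invented for illustration.

    #include <cstdint>

    // Unsigned conditions (Below, AboveOrEqual, ...) view the byte as 0..255,
    // so the immediate is zero-extended; signed conditions (LessThan,
    // GreaterThan, ...) view it as -128..127, so it is sign-extended.
    inline int32_t mask8ForUnsignedCondition(int32_t value)
    {
        return static_cast<uint8_t>(value); // e.g. 0xFF -> 255
    }

    inline int32_t mask8ForSignedCondition(int32_t value)
    {
        return static_cast<int8_t>(value); // e.g. 0xFF -> -1
    }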
public: return PatchableJump(result); } + PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + m_makeJumpPatchable = true; + Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue); + m_makeJumpPatchable = false; + return PatchableJump(result); + } + PatchableJump patchableJump() { padBeforePatch(); @@ -1770,17 +1952,13 @@ public: } - int executableOffsetFor(int location) - { - return m_assembler.executableOffsetFor(location); - } - static FunctionPtr readCallTarget(CodeLocationCall call) { return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation()))); } static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -1804,36 +1982,35 @@ public: return CodeLocationLabel(); } + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) { UNREACHABLE_FOR_PLATFORM(); } -#if USE(MASM_PROBE) - struct CPUState { - #define DECLARE_REGISTER(_type, _regName) \ - _type _regName; - FOR_EACH_CPU_REGISTER(DECLARE_REGISTER) - #undef DECLARE_REGISTER - }; - - struct ProbeContext; - typedef void (*ProbeFunction)(struct ProbeContext*); + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) + { + UNREACHABLE_FOR_PLATFORM(); + } - struct ProbeContext { - ProbeFunction probeFunction; - void* arg1; - void* arg2; - CPUState cpu; + static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) + { + ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } - void dump(const char* indentation = 0); - private: - void dumpCPURegisters(const char* indentation); - }; + static void repatchCall(CodeLocationCall call, FunctionPtr destination) + { + ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); + } - // For details about probe(), see comment in MacroAssemblerX86_64.h. 
- void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0); -#endif // USE(MASM_PROBE) +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) protected: ALWAYS_INLINE Jump jump() @@ -1928,24 +2105,16 @@ protected: private: friend class LinkBuffer; - friend class RepatchBuffer; static void linkCall(void* code, Call call, FunctionPtr function) { - ARMv7Assembler::linkCall(code, call.m_label, function.value()); - } - - static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) - { - ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); - } - - static void repatchCall(CodeLocationCall call, FunctionPtr destination) - { - ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); + if (call.isFlagSet(Call::Tail)) + ARMv7Assembler::linkJump(code, call.m_label, function.value()); + else + ARMv7Assembler::linkCall(code, call.m_label, function.value()); } -#if USE(MASM_PROBE) +#if ENABLE(MASM_PROBE) inline TrustedImm32 trustedImm32FromPtr(void* ptr) { return TrustedImm32(TrustedImmPtr(ptr)); @@ -1968,5 +2137,3 @@ private: } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // MacroAssemblerARMv7_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp new file mode 100644 index 000000000..06460c9cb --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.cpp @@ -0,0 +1,86 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "MacroAssemblerCodeRef.h" + +#include "JSCInlines.h" +#include "LLIntData.h" + +namespace JSC { + +MacroAssemblerCodePtr MacroAssemblerCodePtr::createLLIntCodePtr(OpcodeID codeId) +{ + return createFromExecutableAddress(LLInt::getCodePtr(codeId)); +} + +void MacroAssemblerCodePtr::dumpWithName(const char* name, PrintStream& out) const +{ + if (!m_value) { + out.print(name, "(null)"); + return; + } + if (executableAddress() == dataLocation()) { + out.print(name, "(", RawPointer(executableAddress()), ")"); + return; + } + out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")"); +} + +void MacroAssemblerCodePtr::dump(PrintStream& out) const +{ + dumpWithName("CodePtr", out); +} + +MacroAssemblerCodeRef MacroAssemblerCodeRef::createLLIntCodeRef(OpcodeID codeId) +{ + return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId))); +} + +bool MacroAssemblerCodeRef::tryToDisassemble(PrintStream& out, const char* prefix) const +{ + return JSC::tryToDisassemble(m_codePtr, size(), prefix, out); +} + +bool MacroAssemblerCodeRef::tryToDisassemble(const char* prefix) const +{ + return tryToDisassemble(WTF::dataFile(), prefix); +} + +CString MacroAssemblerCodeRef::disassembly() const +{ + StringPrintStream out; + if (!tryToDisassemble(out, "")) + return CString(); + return out.toCString(); +} + +void MacroAssemblerCodeRef::dump(PrintStream& out) const +{ + m_codePtr.dumpWithName("CodeRef", out); +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h index 5f8ba8a92..c31cf8526 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2012, 2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,20 +23,17 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef MacroAssemblerCodeRef_h -#define MacroAssemblerCodeRef_h +#pragma once -#include "Disassembler.h" #include "ExecutableAllocator.h" -#include "LLIntData.h" #include <wtf/DataLog.h> -#include <wtf/PassRefPtr.h> #include <wtf/PrintStream.h> #include <wtf/RefPtr.h> +#include <wtf/text/CString.h> // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid // instruction address on the platform (for example, check any alignment requirements). -#if CPU(ARM_THUMB2) && !ENABLE(LLINT_C_LOOP) +#if CPU(ARM_THUMB2) && ENABLE(JIT) // ARM instructions must be 16-bit aligned. Thumb2 code pointers to be loaded into // into the processor are decorated with the bottom bit set, while traditional ARM has // the lower bit clear. Since we don't know what kind of pointer, we check for both @@ -51,34 +48,10 @@ #define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes! 
#endif -#if CPU(X86) && OS(WINDOWS) -#define CALLING_CONVENTION_IS_STDCALL 1 -#ifndef CDECL -#if COMPILER(MSVC) -#define CDECL __cdecl -#else -#define CDECL __attribute__ ((__cdecl)) -#endif // COMPILER(MSVC) -#endif // CDECL -#else -#define CALLING_CONVENTION_IS_STDCALL 0 -#endif - -#if CPU(X86) -#define HAS_FASTCALL_CALLING_CONVENTION 1 -#ifndef FASTCALL -#if COMPILER(MSVC) -#define FASTCALL __fastcall -#else -#define FASTCALL __attribute__ ((fastcall)) -#endif // COMPILER(MSVC) -#endif // FASTCALL -#else -#define HAS_FASTCALL_CALLING_CONVENTION 0 -#endif // CPU(X86) - namespace JSC { +enum OpcodeID : unsigned; + // FunctionPtr: // // FunctionPtr should be used to wrap pointers to C/C++ functions in JSC @@ -132,6 +105,12 @@ public: ASSERT_VALID_CODE_POINTER(m_value); } + template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6> + FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6)) + : m_value((void*)value) + { + ASSERT_VALID_CODE_POINTER(m_value); + } // MSVC doesn't seem to treat functions with different calling conventions as // different types; these methods already defined for fastcall, below. #if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS) @@ -172,7 +151,7 @@ public: } #endif -#if HAS_FASTCALL_CALLING_CONVENTION +#if COMPILER_SUPPORTS(FASTCALL_CALLING_CONVENTION) template<typename returnType> FunctionPtr(returnType (FASTCALL *value)()) @@ -254,6 +233,11 @@ public: } void* value() const { return m_value; } + + void dump(PrintStream& out) const + { + out.print(RawPointer(m_value)); + } private: void* m_value; @@ -288,12 +272,7 @@ public: return result; } -#if ENABLE(LLINT) - static MacroAssemblerCodePtr createLLIntCodePtr(LLIntCode codeId) - { - return createFromExecutableAddress(LLInt::getCodePtr(codeId)); - } -#endif + static MacroAssemblerCodePtr createLLIntCodePtr(OpcodeID codeId); explicit MacroAssemblerCodePtr(ReturnAddressPtr ra) : m_value(ra.value()) @@ -309,29 +288,16 @@ public: void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; } #endif - bool operator!() const - { - return !m_value; - } + explicit operator bool() const { return m_value; } bool operator==(const MacroAssemblerCodePtr& other) const { return m_value == other.m_value; } - void dumpWithName(const char* name, PrintStream& out) const - { - if (executableAddress() == dataLocation()) { - out.print(name, "(", RawPointer(executableAddress()), ")"); - return; - } - out.print(name, "(executable = ", RawPointer(executableAddress()), ", dataLocation = ", RawPointer(dataLocation()), ")"); - } + void dumpWithName(const char* name, PrintStream& out) const; - void dump(PrintStream& out) const - { - dumpWithName("CodePtr", out); - } + void dump(PrintStream& out) const; enum EmptyValueTag { EmptyValue }; enum DeletedValueTag { DeletedValue }; @@ -387,9 +353,9 @@ public: { } - MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory) + MacroAssemblerCodeRef(Ref<ExecutableMemoryHandle>&& executableMemory) : m_codePtr(executableMemory->start()) - , m_executableMemory(executableMemory) + , m_executableMemory(WTFMove(executableMemory)) { ASSERT(m_executableMemory->isManaged()); ASSERT(m_executableMemory->start()); @@ -404,13 +370,8 @@ public: return MacroAssemblerCodeRef(codePtr); } -#if ENABLE(LLINT) // Helper for creating self-managed code refs from LLInt. 
- static MacroAssemblerCodeRef createLLIntCodeRef(LLIntCode codeId) - { - return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId))); - } -#endif + static MacroAssemblerCodeRef createLLIntCodeRef(OpcodeID codeId); ExecutableMemoryHandle* executableMemory() const { @@ -428,18 +389,16 @@ public: return 0; return m_executableMemory->sizeInBytes(); } + + bool tryToDisassemble(PrintStream& out, const char* prefix = "") const; - bool tryToDisassemble(const char* prefix) const - { - return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile()); - } + bool tryToDisassemble(const char* prefix = "") const; - bool operator!() const { return !m_codePtr; } + JS_EXPORT_PRIVATE CString disassembly() const; - void dump(PrintStream& out) const - { - m_codePtr.dumpWithName("CodeRef", out); - } + explicit operator bool() const { return !!m_codePtr; } + + void dump(PrintStream& out) const; private: MacroAssemblerCodePtr m_codePtr; @@ -459,5 +418,3 @@ template<typename T> struct HashTraits; template<> struct HashTraits<JSC::MacroAssemblerCodePtr> : public CustomHashTraits<JSC::MacroAssemblerCodePtr> { }; } // namespace WTF - -#endif // MacroAssemblerCodeRef_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerHelpers.h b/Source/JavaScriptCore/assembler/MacroAssemblerHelpers.h new file mode 100644 index 000000000..047e94c27 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerHelpers.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#pragma once + +namespace JSC { +namespace MacroAssemblerHelpers { + +// True if this: +// branch8(cond, value, value) +// Is the same as this: +// branch32(cond, signExt8(value), signExt8(value)) +template<typename MacroAssemblerType> +inline bool isSigned(typename MacroAssemblerType::RelationalCondition cond) +{ + switch (cond) { + case MacroAssemblerType::Equal: + case MacroAssemblerType::NotEqual: + case MacroAssemblerType::GreaterThan: + case MacroAssemblerType::GreaterThanOrEqual: + case MacroAssemblerType::LessThan: + case MacroAssemblerType::LessThanOrEqual: + return true; + default: + return false; + } +} + +// True if this: +// branch8(cond, value, value) +// Is the same as this: +// branch32(cond, zeroExt8(value), zeroExt8(value)) +template<typename MacroAssemblerType> +inline bool isUnsigned(typename MacroAssemblerType::RelationalCondition cond) +{ + switch (cond) { + case MacroAssemblerType::Equal: + case MacroAssemblerType::NotEqual: + case MacroAssemblerType::Above: + case MacroAssemblerType::AboveOrEqual: + case MacroAssemblerType::Below: + case MacroAssemblerType::BelowOrEqual: + return true; + default: + return false; + } +} + +// True if this: +// test8(cond, value, value) +// Is the same as this: +// test32(cond, signExt8(value), signExt8(value)) +template<typename MacroAssemblerType> +inline bool isSigned(typename MacroAssemblerType::ResultCondition cond) +{ + switch (cond) { + case MacroAssemblerType::Signed: + case MacroAssemblerType::PositiveOrZero: + case MacroAssemblerType::Zero: + case MacroAssemblerType::NonZero: + return true; + default: + return false; + } +} + +// True if this: +// test8(cond, value, value) +// Is the same as this: +// test32(cond, zeroExt8(value), zeroExt8(value)) +template<typename MacroAssemblerType> +inline bool isUnsigned(typename MacroAssemblerType::ResultCondition cond) +{ + switch (cond) { + case MacroAssemblerType::Zero: + case MacroAssemblerType::NonZero: + return true; + default: + return false; + } +} + +template<typename MacroAssemblerType> +inline typename MacroAssemblerType::TrustedImm32 mask8OnCondition(MacroAssemblerType&, typename MacroAssemblerType::RelationalCondition cond, typename MacroAssemblerType::TrustedImm32 value) +{ + if (isUnsigned<MacroAssemblerType>(cond)) + return typename MacroAssemblerType::TrustedImm32(static_cast<uint8_t>(value.m_value)); + return typename MacroAssemblerType::TrustedImm32(static_cast<int8_t>(value.m_value)); +} + +template<typename MacroAssemblerType> +inline typename MacroAssemblerType::TrustedImm32 mask8OnCondition(MacroAssemblerType&, typename MacroAssemblerType::ResultCondition cond, typename MacroAssemblerType::TrustedImm32 value) +{ + if (isUnsigned<MacroAssemblerType>(cond)) + return typename MacroAssemblerType::TrustedImm32(static_cast<uint8_t>(value.m_value)); + ASSERT_WITH_MESSAGE(cond != MacroAssemblerType::Overflow, "Overflow is not used for 8bit test operations."); + ASSERT(isSigned<MacroAssemblerType>(cond)); + return typename MacroAssemblerType::TrustedImm32(static_cast<int8_t>(value.m_value)); +} + +template<typename MacroAssemblerType, typename Condition, typename ...Args> +void load8OnCondition(MacroAssemblerType& jit, Condition cond, Args... 
args) +{ + if (isUnsigned<MacroAssemblerType>(cond)) + return jit.load8(std::forward<Args>(args)...); + return jit.load8SignedExtendTo32(std::forward<Args>(args)...); +} + +} } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h index a30247d33..8d0ac915a 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014 Apple Inc. All rights reserved. * Copyright (C) 2010 MIPS Technologies, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -24,8 +24,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef MacroAssemblerMIPS_h -#define MacroAssemblerMIPS_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(MIPS) @@ -34,7 +33,7 @@ namespace JSC { -class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler> { +class MacroAssemblerMIPS : public AbstractMacroAssembler<MIPSAssembler, MacroAssemblerMIPS> { public: typedef MIPSRegisters::FPRegisterID FPRegisterID; @@ -55,9 +54,9 @@ public: // For storing data loaded from the memory static const RegisterID dataTempRegister = MIPSRegisters::t1; // For storing address base - static const RegisterID addrTempRegister = MIPSRegisters::t2; + static const RegisterID addrTempRegister = MIPSRegisters::t7; // For storing compare result - static const RegisterID cmpTempRegister = MIPSRegisters::t3; + static const RegisterID cmpTempRegister = MIPSRegisters::t8; // FP temp register static const FPRegisterID fpTempRegister = MIPSRegisters::f16; @@ -289,7 +288,7 @@ public: { if (!imm.m_value && !m_fixedWidth) move(MIPSRegisters::zero, dest); - else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) + else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) m_assembler.andi(dest, dest, imm.m_value); else { /* @@ -305,7 +304,7 @@ public: { if (!imm.m_value && !m_fixedWidth) move(MIPSRegisters::zero, dest); - else if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) + else if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) m_assembler.andi(dest, src, imm.m_value); else { move(imm, immTempRegister); @@ -313,6 +312,15 @@ public: } } + void countLeadingZeros32(RegisterID src, RegisterID dest) + { +#if WTF_MIPS_ISA_AT_LEAST(32) + m_assembler.clz(dest, src); +#else + static_assert(false, "CLZ opcode is not available for this ISA"); +#endif + } + void lshift32(RegisterID shiftAmount, RegisterID dest) { m_assembler.sllv(dest, dest, shiftAmount); @@ -376,12 +384,23 @@ public: m_assembler.orInsn(dest, op1, op2); } + void or32(TrustedImm32 imm, AbsoluteAddress dest) + { + if (!imm.m_value && !m_fixedWidth) + return; + + // TODO: Swap dataTempRegister and immTempRegister usage + load32(dest.m_ptr, immTempRegister); + or32(imm, immTempRegister); + store32(immTempRegister, dest.m_ptr); + } + void or32(TrustedImm32 imm, RegisterID dest) { if (!imm.m_value && !m_fixedWidth) return; - if (imm.m_value > 0 && imm.m_value < 65535 + if (imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) { m_assembler.ori(dest, dest, imm.m_value); return; @@ -397,10 +416,12 @@ public: void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) { - if (!imm.m_value && !m_fixedWidth) + if (!imm.m_value && !m_fixedWidth) { + move(src, dest); return; + } - if (imm.m_value > 0 && imm.m_value < 65535 && !m_fixedWidth) { + if 
(imm.m_value > 0 && imm.m_value <= 65535 && !m_fixedWidth) { m_assembler.ori(dest, src, imm.m_value); return; } @@ -619,10 +640,28 @@ public: { m_assembler.sqrtd(dst, src); } - - void absDouble(FPRegisterID, FPRegisterID) + + void absDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.absd(dst, src); + } + + NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID) + { + ASSERT(!supportsFloatingPointRounding()); + CRASH(); + } + + NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID) { - RELEASE_ASSERT_NOT_REACHED(); + ASSERT(!supportsFloatingPointRounding()); + CRASH(); } ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) @@ -707,7 +746,24 @@ public: m_assembler.lbu(dest, addrTempRegister, 0); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest) + { + if (address.offset >= -32768 && address.offset <= 32767 + && !m_fixedWidth) + m_assembler.lb(dest, address.base, address.offset); + else { + /* + lui addrTemp, (offset + 0x8000) >> 16 + addu addrTemp, addrTemp, base + lb dest, (offset & 0xffff)(addrTemp) + */ + m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); + m_assembler.addu(addrTempRegister, addrTempRegister, address.base); + m_assembler.lb(dest, addrTempRegister, address.offset); + } + } + + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) { @@ -735,6 +791,22 @@ public: } } + ALWAYS_INLINE void load8SignedExtendTo32(AbsoluteAddress address, RegisterID dest) + { + load8SignedExtendTo32(address.m_ptr, dest); + } + + void load8SignedExtendTo32(const void* address, RegisterID dest) + { + /* + li addrTemp, address + lb dest, 0(addrTemp) + */ + move(TrustedImmPtr(address), addrTempRegister); + m_assembler.lb(dest, addrTempRegister, 0); + } + + void load32(ImplicitAddress address, RegisterID dest) { if (address.offset >= -32768 && address.offset <= 32767 @@ -782,7 +854,53 @@ public: void load16Unaligned(BaseIndex address, RegisterID dest) { - load16(address, dest); + if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) { + /* + sll addrtemp, address.index, address.scale + addu addrtemp, addrtemp, address.base + lbu immTemp, address.offset+x(addrtemp) (x=0 for LE, x=1 for BE) + lbu dest, address.offset+x(addrtemp) (x=1 for LE, x=0 for BE) + sll dest, dest, 8 + or dest, dest, immTemp + */ + m_assembler.sll(addrTempRegister, address.index, address.scale); + m_assembler.addu(addrTempRegister, addrTempRegister, address.base); +#if CPU(BIG_ENDIAN) + m_assembler.lbu(immTempRegister, addrTempRegister, address.offset + 1); + m_assembler.lbu(dest, addrTempRegister, address.offset); +#else + m_assembler.lbu(immTempRegister, addrTempRegister, address.offset); + m_assembler.lbu(dest, addrTempRegister, address.offset + 1); +#endif + m_assembler.sll(dest, dest, 8); + m_assembler.orInsn(dest, dest, immTempRegister); + } else { + /* + sll addrTemp, address.index, address.scale + addu addrTemp, addrTemp, address.base + lui immTemp, address.offset >> 16 + ori immTemp, immTemp, address.offset & 0xffff + addu addrTemp, addrTemp, immTemp + lbu immTemp, x(addrtemp) (x=0 for LE, x=1 for BE) + lbu dest, x(addrtemp) (x=1 for LE, x=0 for BE) + sll dest, dest, 8 + or dest, dest, immTemp + */ + 
m_assembler.sll(addrTempRegister, address.index, address.scale); + m_assembler.addu(addrTempRegister, addrTempRegister, address.base); + m_assembler.lui(immTempRegister, address.offset >> 16); + m_assembler.ori(immTempRegister, immTempRegister, address.offset); + m_assembler.addu(addrTempRegister, addrTempRegister, immTempRegister); +#if CPU(BIG_ENDIAN) + m_assembler.lbu(immTempRegister, addrTempRegister, 1); + m_assembler.lbu(dest, addrTempRegister, 0); +#else + m_assembler.lbu(immTempRegister, addrTempRegister, 0); + m_assembler.lbu(dest, addrTempRegister, 1); +#endif + m_assembler.sll(dest, dest, 8); + m_assembler.orInsn(dest, dest, immTempRegister); + } } void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) @@ -919,7 +1037,7 @@ public: } } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { if (address.offset >= -32768 && address.offset <= 32767 && !m_fixedWidth) { @@ -1005,16 +1123,45 @@ public: li addrTemp, address sb src, 0(addrTemp) */ - if (!imm.m_value && !m_fixedWidth) { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + if (!imm8.m_value && !m_fixedWidth) { move(TrustedImmPtr(address), addrTempRegister); m_assembler.sb(MIPSRegisters::zero, addrTempRegister, 0); } else { - move(imm, immTempRegister); + move(imm8, immTempRegister); move(TrustedImmPtr(address), addrTempRegister); m_assembler.sb(immTempRegister, addrTempRegister, 0); } } + void store8(TrustedImm32 imm, ImplicitAddress address) + { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + if (address.offset >= -32768 && address.offset <= 32767 + && !m_fixedWidth) { + if (!imm8.m_value) + m_assembler.sb(MIPSRegisters::zero, address.base, address.offset); + else { + move(imm8, immTempRegister); + m_assembler.sb(immTempRegister, address.base, address.offset); + } + } else { + /* + lui addrTemp, (offset + 0x8000) >> 16 + addu addrTemp, addrTemp, base + sb immTemp, (offset & 0xffff)(addrTemp) + */ + m_assembler.lui(addrTempRegister, (address.offset + 0x8000) >> 16); + m_assembler.addu(addrTempRegister, addrTempRegister, address.base); + if (!imm8.m_value && !m_fixedWidth) + m_assembler.sb(MIPSRegisters::zero, addrTempRegister, address.offset); + else { + move(imm8, immTempRegister); + m_assembler.sb(immTempRegister, addrTempRegister, address.offset); + } + } + } + void store16(RegisterID src, BaseIndex address) { if (address.offset >= -32768 && address.offset <= 32767 @@ -1208,7 +1355,17 @@ public: return false; #endif } - static bool supportsFloatingPointAbs() { return false; } + + static bool supportsFloatingPointAbs() + { +#if WTF_MIPS_DOUBLE_FLOAT && WTF_MIPS_ISA_AT_LEAST(2) + return true; +#else + return false; +#endif + } + + static bool supportsFloatingPointRounding() { return false; } // Stack manipulation operations: // @@ -1224,6 +1381,13 @@ public: m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 4); } + void popPair(RegisterID dest1, RegisterID dest2) + { + m_assembler.lw(dest1, MIPSRegisters::sp, 0); + m_assembler.lw(dest2, MIPSRegisters::sp, 4); + m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, 8); + } + void push(RegisterID src) { m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -4); @@ -1242,6 +1406,13 @@ public: push(immTempRegister); } + void pushPair(RegisterID src1, RegisterID src2) + { + m_assembler.addiu(MIPSRegisters::sp, MIPSRegisters::sp, -8); + m_assembler.sw(src2, MIPSRegisters::sp, 4); + m_assembler.sw(src1, MIPSRegisters::sp, 0); + } + // Register move operations: // // Move 
values in registers. @@ -1307,38 +1478,35 @@ public: Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) { - // Make sure the immediate value is unsigned 8 bits. - ASSERT(!(right.m_value & 0xFFFFFF00)); - load8(left, dataTempRegister); - move(right, immTempRegister); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister); + move(right8, immTempRegister); return branch32(cond, dataTempRegister, immTempRegister); } Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) { - // Make sure the immediate value is unsigned 8 bits. - ASSERT(!(right.m_value & 0xFFFFFF00)); - load8(left, dataTempRegister); - move(right, immTempRegister); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister); + move(right8, immTempRegister); return branch32(cond, dataTempRegister, immTempRegister); } void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) { - // Make sure the immediate value is unsigned 8 bits. - ASSERT(!(right.m_value & 0xFFFFFF00)); - load8(left, dataTempRegister); - move(right, immTempRegister); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister); + move(right8, immTempRegister); compare32(cond, dataTempRegister, immTempRegister, dest); } Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) { - ASSERT(!(right.m_value & 0xFFFFFF00)); - load8(left, dataTempRegister); + TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right); + MacroAssemblerHelpers::load8OnCondition(*this, cond, left, dataTempRegister); // Be careful that the previous load8() uses immTempRegister. // So, we need to put move() after load8(). 
- move(right, immTempRegister); + move(right8, immTempRegister); return branch32(cond, dataTempRegister, immTempRegister); } @@ -1444,20 +1612,36 @@ public: Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) { - ASSERT((cond == Zero) || (cond == NonZero)); + ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed)); m_assembler.andInsn(cmpTempRegister, reg, mask); - if (cond == Zero) + switch (cond) { + case Zero: return branchEqual(cmpTempRegister, MIPSRegisters::zero); - return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); + case NonZero: + return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); + case Signed: + m_assembler.slt(cmpTempRegister, cmpTempRegister, MIPSRegisters::zero); + return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); + default: + RELEASE_ASSERT_NOT_REACHED(); + } } Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) { - ASSERT((cond == Zero) || (cond == NonZero)); + ASSERT((cond == Zero) || (cond == NonZero) || (cond == Signed)); if (mask.m_value == -1 && !m_fixedWidth) { - if (cond == Zero) + switch (cond) { + case Zero: return branchEqual(reg, MIPSRegisters::zero); - return branchNotEqual(reg, MIPSRegisters::zero); + case NonZero: + return branchNotEqual(reg, MIPSRegisters::zero); + case Signed: + m_assembler.slt(cmpTempRegister, reg, MIPSRegisters::zero); + return branchNotEqual(cmpTempRegister, MIPSRegisters::zero); + default: + RELEASE_ASSERT_NOT_REACHED(); + } } move(mask, immTempRegister); return branchTest32(cond, reg, immTempRegister); @@ -1477,21 +1661,24 @@ public: Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) { - load8(address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister); + return branchTest32(cond, dataTempRegister, mask8); } Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { - load8(address, dataTempRegister); - return branchTest32(cond, dataTempRegister, mask); + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister); + return branchTest32(cond, dataTempRegister, mask8); } Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); move(TrustedImmPtr(address.m_ptr), dataTempRegister); - load8(Address(dataTempRegister), dataTempRegister); - return branchTest32(cond, dataTempRegister, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(dataTempRegister), dataTempRegister); + return branchTest32(cond, dataTempRegister, mask8); } Jump jump() @@ -1657,6 +1844,12 @@ public: return branchAdd32(cond, immTempRegister, dest); } + Jump branchAdd32(ResultCondition cond, Address address, RegisterID dest) + { + load32(address, immTempRegister); + return branchAdd32(cond, immTempRegister, dest); + } + Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { move(imm, immTempRegister); @@ -1819,7 +2012,7 @@ public: return Jump(); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { 
move(imm, immTempRegister); return branchMul32(cond, immTempRegister, src, dest); @@ -1975,6 +2168,16 @@ public: return Call(m_assembler.label(), Call::LinkableNear); } + Call nearTailCall() + { + m_assembler.nop(); + m_assembler.nop(); + m_assembler.beq(MIPSRegisters::zero, MIPSRegisters::zero, 0); + m_assembler.nop(); + insertRelaxationWords(); + return Call(m_assembler.label(), Call::LinkableNearTail); + } + Call call() { m_assembler.lui(MIPSRegisters::t9, 0); @@ -2048,14 +2251,15 @@ public: void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) { ASSERT((cond == Zero) || (cond == NonZero)); - load8(address, dataTempRegister); - if (mask.m_value == -1 && !m_fixedWidth) { + TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask); + MacroAssemblerHelpers::load8OnCondition(*this, cond, address, dataTempRegister); + if ((mask8.m_value & 0xff) == 0xff && !m_fixedWidth) { if (cond == Zero) m_assembler.sltiu(dest, dataTempRegister, 1); else m_assembler.sltu(dest, MIPSRegisters::zero, dataTempRegister); } else { - move(mask, immTempRegister); + move(mask8, immTempRegister); m_assembler.andInsn(cmpTempRegister, dataTempRegister, immTempRegister); if (cond == Zero) m_assembler.sltiu(dest, cmpTempRegister, 1); @@ -2120,6 +2324,16 @@ public: return temp; } + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + m_fixedWidth = true; + load32(left, dataTempRegister); + dataLabel = moveWithPatch(initialRightValue, immTempRegister); + Jump temp = branch32(cond, dataTempRegister, immTempRegister); + m_fixedWidth = false; + return temp; + } + DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { m_fixedWidth = true; @@ -2268,7 +2482,7 @@ public: #endif } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { #if WTF_MIPS_ISA(1) /* @@ -2276,7 +2490,7 @@ public: lwc1 dest, 0(addrTemp) lwc1 dest+1, 4(addrTemp) */ - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.lwc1(dest, addrTempRegister, 0); m_assembler.lwc1(FPRegisterID(dest + 1), addrTempRegister, 4); #else @@ -2284,7 +2498,7 @@ public: li addrTemp, address ldc1 dest, 0(addrTemp) */ - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.ldc1(dest, addrTempRegister, 0); #endif } @@ -2406,14 +2620,14 @@ public: #endif } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { #if WTF_MIPS_ISA(1) - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.swc1(src, addrTempRegister, 0); m_assembler.swc1(FPRegisterID(src + 1), addrTempRegister, 4); #else - move(TrustedImmPtr(address), addrTempRegister); + move(address, addrTempRegister); m_assembler.sdc1(src, addrTempRegister, 0); #endif } @@ -2424,6 +2638,11 @@ public: m_assembler.movd(dest, src); } + void moveZeroToDouble(FPRegisterID reg) + { + convertInt32ToDouble(MIPSRegisters::zero, reg); + } + void swapDouble(FPRegisterID fr1, FPRegisterID fr2) { moveDouble(fr1, fpTempRegister); @@ -2449,7 +2668,7 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - loadDouble(address.m_ptr, fpTempRegister); + loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister); m_assembler.addd(dest, dest, fpTempRegister); } @@ -2656,7 +2875,7 @@ public: { 
m_assembler.truncwd(fpTempRegister, src); m_assembler.mfc1(dest, fpTempRegister); - return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0)); + return branch32(branchType == BranchIfTruncateFailed ? Equal : NotEqual, dest, TrustedImm32(0x7fffffff)); } // Result is undefined if the value is outside of the integer range. @@ -2740,6 +2959,18 @@ public: m_assembler.sync(); } + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), dataTempRegister); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), immTempRegister); + abortWithReason(reason); + } + static FunctionPtr readCallTarget(CodeLocationCall call) { return FunctionPtr(reinterpret_cast<void(*)()>(MIPSAssembler::readCallTarget(call.dataLocation()))); @@ -2757,6 +2988,13 @@ public: } static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } + static bool canJumpReplacePatchableBranch32WithPatch() { return false; } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32) + { + UNREACHABLE_FOR_PLATFORM(); + return CodeLocationLabel(); + } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -2774,23 +3012,14 @@ public: return CodeLocationLabel(); } - static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t) { UNREACHABLE_FOR_PLATFORM(); } - -private: - // If m_fixedWidth is true, we will generate a fixed number of instructions. - // Otherwise, we can emit any number of instructions. - bool m_fixedWidth; - - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkCall(void* code, Call call, FunctionPtr function) + static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) { - MIPSAssembler::linkCall(code, call.m_label, function.value()); + UNREACHABLE_FOR_PLATFORM(); } static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) @@ -2803,10 +3032,23 @@ private: MIPSAssembler::relinkCall(call.dataLocation(), destination.executableAddress()); } +private: + // If m_fixedWidth is true, we will generate a fixed number of instructions. + // Otherwise, we can emit any number of instructions. + bool m_fixedWidth; + + friend class LinkBuffer; + + static void linkCall(void* code, Call call, FunctionPtr function) + { + if (call.isFlagSet(Call::Tail)) + MIPSAssembler::linkJump(code, call.m_label, function.value()); + else + MIPSAssembler::linkCall(code, call.m_label, function.value()); + } + }; -} +} // namespace JSC #endif // ENABLE(ASSEMBLER) && CPU(MIPS) - -#endif // MacroAssemblerMIPS_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp new file mode 100644 index 000000000..c6c175752 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.cpp @@ -0,0 +1,216 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "MacroAssemblerPrinter.h" + +#if ENABLE(MASM_PROBE) + +#include "MacroAssembler.h" + +namespace JSC { + +using CPUState = MacroAssembler::CPUState; +using ProbeContext = MacroAssembler::ProbeContext; +using RegisterID = MacroAssembler::RegisterID; +using FPRegisterID = MacroAssembler::FPRegisterID; + +static void printIndent(int indentation) +{ + for (; indentation > 0; indentation--) + dataLog(" "); +} + +#define INDENT printIndent(indentation) + +void printCPU(CPUState& cpu, int indentation) +{ + INDENT, dataLog("cpu: {\n"); + printCPURegisters(cpu, indentation + 1); + INDENT, dataLog("}\n"); +} + +void printCPURegisters(CPUState& cpu, int indentation) +{ +#if USE(JSVALUE32_64) + #define INTPTR_HEX_VALUE_FORMAT "0x%08lx" +#else + #define INTPTR_HEX_VALUE_FORMAT "0x%016lx" +#endif + + #define PRINT_GPREGISTER(_type, _regName) { \ + intptr_t value = reinterpret_cast<intptr_t>(cpu._regName); \ + INDENT, dataLogF("%6s: " INTPTR_HEX_VALUE_FORMAT " %ld\n", #_regName, value, value) ; \ + } + FOR_EACH_CPU_GPREGISTER(PRINT_GPREGISTER) + FOR_EACH_CPU_SPECIAL_REGISTER(PRINT_GPREGISTER) + #undef PRINT_GPREGISTER + #undef INTPTR_HEX_VALUE_FORMAT + + #define PRINT_FPREGISTER(_type, _regName) { \ + uint64_t* u = reinterpret_cast<uint64_t*>(&cpu._regName); \ + double* d = reinterpret_cast<double*>(&cpu._regName); \ + INDENT, dataLogF("%6s: 0x%016llx %.13g\n", #_regName, *u, *d); \ + } + FOR_EACH_CPU_FPREGISTER(PRINT_FPREGISTER) + #undef PRINT_FPREGISTER +} + +static void printPC(CPUState& cpu) +{ + union { + void* voidPtr; + intptr_t intptrValue; + } u; +#if CPU(X86) || CPU(X86_64) + u.voidPtr = cpu.eip; +#elif CPU(ARM_TRADITIONAL) || CPU(ARM_THUMB2) || CPU(ARM64) + u.voidPtr = cpu.pc; +#else +#error "Unsupported CPU" +#endif + dataLogF("pc:<%p %ld>", u.voidPtr, u.intptrValue); +} + +void printRegister(CPUState& cpu, RegisterID regID) +{ + const char* name = CPUState::gprName(regID); + union { + void* voidPtr; + intptr_t intptrValue; + } u; + u.voidPtr = cpu.gpr(regID); + dataLogF("%s:<%p %ld>", name, u.voidPtr, u.intptrValue); +} + +void printRegister(CPUState& cpu, FPRegisterID regID) +{ + const char* name = CPUState::fprName(regID); + union { + double doubleValue; + uint64_t uint64Value; + } u; + u.doubleValue = cpu.fpr(regID); + dataLogF("%s:<0x%016llx %.13g>", name, u.uint64Value, u.doubleValue); +} + +void printMemory(CPUState& cpu, const Memory& memory) +{ + uint8_t* ptr = nullptr; + switch (memory.addressType) { + case Memory::AddressType::Address: { + ptr = 
reinterpret_cast<uint8_t*>(cpu.gpr(memory.u.address.base)); + ptr += memory.u.address.offset; + break; + } + case Memory::AddressType::AbsoluteAddress: { + ptr = reinterpret_cast<uint8_t*>(const_cast<void*>(memory.u.absoluteAddress.m_ptr)); + break; + } + } + + if (memory.dumpStyle == Memory::SingleWordDump) { + if (memory.numBytes == sizeof(int8_t)) { + auto p = reinterpret_cast<int8_t*>(ptr); + dataLogF("%p:<0x%02x %d>", p, *p, *p); + return; + } + if (memory.numBytes == sizeof(int16_t)) { + auto p = reinterpret_cast<int16_t*>(ptr); + dataLogF("%p:<0x%04x %d>", p, *p, *p); + return; + } + if (memory.numBytes == sizeof(int32_t)) { + auto p = reinterpret_cast<int32_t*>(ptr); + dataLogF("%p:<0x%08x %d>", p, *p, *p); + return; + } + if (memory.numBytes == sizeof(int64_t)) { + auto p = reinterpret_cast<int64_t*>(ptr); + dataLogF("%p:<0x%016llx %lld>", p, *p, *p); + return; + } + // Else, unknown word size. Fall thru and dump in the generic way. + } + + // Generic dump: dump rows of 16 bytes in 4 byte groupings. + size_t numBytes = memory.numBytes; + for (size_t i = 0; i < numBytes; i++) { + if (!(i % 16)) + dataLogF("%p: ", &ptr[i]); + else if (!(i % 4)) + dataLog(" "); + + dataLogF("%02x", ptr[i]); + + if (i % 16 == 15) + dataLog("\n"); + } + if (numBytes % 16 < 15) + dataLog("\n"); +} + +void MacroAssemblerPrinter::printCallback(ProbeContext* context) +{ + typedef PrintArg Arg; + PrintArgsList& argsList = + *reinterpret_cast<PrintArgsList*>(context->arg1); + for (size_t i = 0; i < argsList.size(); i++) { + auto& arg = argsList[i]; + switch (arg.type) { + case Arg::Type::AllRegisters: + printCPU(context->cpu, 1); + break; + case Arg::Type::PCRegister: + printPC(context->cpu); + break; + case Arg::Type::RegisterID: + printRegister(context->cpu, arg.u.gpRegisterID); + break; + case Arg::Type::FPRegisterID: + printRegister(context->cpu, arg.u.fpRegisterID); + break; + case Arg::Type::Memory: + printMemory(context->cpu, arg.u.memory); + break; + case Arg::Type::ConstCharPtr: + dataLog(arg.u.constCharPtr); + break; + case Arg::Type::ConstVoidPtr: + dataLogF("%p", arg.u.constVoidPtr); + break; + case Arg::Type::IntptrValue: + dataLog(arg.u.intptrValue); + break; + case Arg::Type::UintptrValue: + dataLog(arg.u.uintptrValue); + break; + } + } +} + +} // namespace JSC + +#endif // ENABLE(MASM_PROBE) diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h new file mode 100644 index 000000000..bbce7ee58 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MacroAssemblerPrinter.h @@ -0,0 +1,302 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#if ENABLE(MASM_PROBE) + +#include "MacroAssembler.h" + +namespace JSC { + +// What is MacroAssembler::print()? +// =============================== +// MacroAssembler::print() makes it easy to add print logging +// from JIT compiled code, and can be used to print all types of values +// at runtime, e.g. CPU register values being operated on by the compiled +// code. +// +// print() is built on top of MacroAssembler::probe(), and hence +// inserting logging in JIT compiled code will not perturb register values. +// The only register value that is perturbed is the PC (program counter) +// since there is now more compiled code to do the printing. +// +// How to use the MacroAssembler print()? +// ===================================== +// 1. #include "MacroAssemblerPrinter.h" in the JIT file where you want to use print(). +// +// 2. Add print() calls like these in your JIT code: +// +// jit.print("Hello world\n"); // Emits code to print the string. +// +// CodeBlock* cb = ...; +// jit.print(cb, "\n"); // Emits code to print the pointer value. +// +// RegisterID regID = ...; +// jit.print(regID, "\n"); // Emits code to print the register value (not the id). +// +// // Emits code to print all registers. Unlike other items, this prints +// // multiple lines as follows: +// // cpu { +// // eax: 0x123456789 +// // ebx: 0x000000abc +// // ... +// // } +// jit.print(AllRegisters()); +// +// jit.print(MemWord<uint8_t>(regID), "\n"); // Emits code to print a byte pointed to by the register. +// jit.print(MemWord<uint32_t>(regID), "\n"); // Emits code to print a 32-bit word pointed to by the register. +// +// jit.print(MemWord<uint8_t>(Address(regID, 23)), "\n"); // Emits code to print a byte at the address. +// jit.print(MemWord<intptr_t>(AbsoluteAddress(&cb)), "\n"); // Emits code to print an intptr_t sized word at the address. +// +// jit.print(Memory(reg, 100), "\n"); // Emits code to print 100 bytes at the address pointed to by the register. +// jit.print(Memory(Address(reg, 4), 100), "\n"); // Emits code to print 100 bytes at the address. +// +// // Print multiple things at once. This incurs the probe overhead only once +// // to print all the items. +// jit.print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters()); +// +// The type of values that can be printed is encapsulated in the PrintArg struct below. +// +// Note: print() does not automatically insert a '\n' at the end of the line. +// If you want a '\n', you'll have to add it explicitly (as in the examples above). + + +// This is a marker type only used with MacroAssemblerPrinter::print(). +// See MacroAssemblerPrinter::print() below for details.
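The print() machinery described above relies on a small variadic-template pattern (appendPrintArg, further down in this header): each argument passed to print() is wrapped in a tagged PrintArg and appended to a list that the probe callback later walks. The following is a minimal, self-contained sketch of that recursion pattern, using hypothetical DemoArg/appendDemoArg names and standard library types in place of the JSC classes; it is illustrative only and not part of the patch.

#include <cstdio>
#include <vector>

// Hypothetical stand-ins for JSC's PrintArg / PrintArgsList.
struct DemoArg {
    enum class Type { IntValue, CString };
    Type type;
    long intValue = 0;
    const char* cstr = nullptr;

    DemoArg(long value) : type(Type::IntValue), intValue(value) { }
    DemoArg(const char* s) : type(Type::CString), cstr(s) { }
};
using DemoArgsList = std::vector<DemoArg>;

// Base case: nothing left to append.
static void appendDemoArg(DemoArgsList&) { }

// Recursive case: tag the first argument, then recurse on the rest.
template<typename FirstArg, typename... Arguments>
static void appendDemoArg(DemoArgsList& list, FirstArg firstArg, Arguments... otherArgs)
{
    list.push_back(DemoArg(firstArg));
    appendDemoArg(list, otherArgs...);
}

int main()
{
    DemoArgsList args;
    appendDemoArg(args, "x = ", 42L, "\n");
    for (const DemoArg& arg : args) {
        if (arg.type == DemoArg::Type::CString)
            std::printf("%s", arg.cstr);
        else
            std::printf("%ld", arg.intValue);
    }
    return 0; // prints: x = 42
}

The header below terminates its recursion the same way, with a no-argument appendPrintArg overload, and hands the finished list to probe() so the printing happens at runtime without perturbing register state.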
+struct AllRegisters { }; +struct PCRegister { }; + +struct Memory { + using Address = MacroAssembler::Address; + using AbsoluteAddress = MacroAssembler::AbsoluteAddress; + using RegisterID = MacroAssembler::RegisterID; + + enum class AddressType { + Address, + AbsoluteAddress, + }; + + enum DumpStyle { + SingleWordDump, + GenericDump, + }; + + Memory(RegisterID& reg, size_t bytes, DumpStyle style = GenericDump) + : addressType(AddressType::Address) + , dumpStyle(style) + , numBytes(bytes) + { + u.address = Address(reg, 0); + } + + Memory(const Address& address, size_t bytes, DumpStyle style = GenericDump) + : addressType(AddressType::Address) + , dumpStyle(style) + , numBytes(bytes) + { + u.address = address; + } + + Memory(const AbsoluteAddress& address, size_t bytes, DumpStyle style = GenericDump) + : addressType(AddressType::AbsoluteAddress) + , dumpStyle(style) + , numBytes(bytes) + { + u.absoluteAddress = address; + } + + AddressType addressType; + DumpStyle dumpStyle; + size_t numBytes; + union UnionedAddress { + UnionedAddress() { } + + Address address; + AbsoluteAddress absoluteAddress; + } u; +}; + +template <typename IntType> +struct MemWord : public Memory { + MemWord(RegisterID& reg) + : Memory(reg, sizeof(IntType), Memory::SingleWordDump) + { } + + MemWord(const Address& address) + : Memory(address, sizeof(IntType), Memory::SingleWordDump) + { } + + MemWord(const AbsoluteAddress& address) + : Memory(address, sizeof(IntType), Memory::SingleWordDump) + { } +}; + + +class MacroAssemblerPrinter { + using CPUState = MacroAssembler::CPUState; + using ProbeContext = MacroAssembler::ProbeContext; + using RegisterID = MacroAssembler::RegisterID; + using FPRegisterID = MacroAssembler::FPRegisterID; + +public: + template<typename... Arguments> + static void print(MacroAssembler* masm, Arguments... args) + { + auto argsList = std::make_unique<PrintArgsList>(); + appendPrintArg(argsList.get(), args...); + masm->probe(printCallback, argsList.release(), 0); + } + +private: + struct PrintArg { + + enum class Type { + AllRegisters, + PCRegister, + RegisterID, + FPRegisterID, + Memory, + ConstCharPtr, + ConstVoidPtr, + IntptrValue, + UintptrValue, + }; + + PrintArg(AllRegisters&) + : type(Type::AllRegisters) + { + } + + PrintArg(PCRegister&) + : type(Type::PCRegister) + { + } + + PrintArg(RegisterID regID) + : type(Type::RegisterID) + { + u.gpRegisterID = regID; + } + + PrintArg(FPRegisterID regID) + : type(Type::FPRegisterID) + { + u.fpRegisterID = regID; + } + + PrintArg(const Memory& memory) + : type(Type::Memory) + { + u.memory = memory; + } + + PrintArg(const char* ptr) + : type(Type::ConstCharPtr) + { + u.constCharPtr = ptr; + } + + PrintArg(const void* ptr) + : type(Type::ConstVoidPtr) + { + u.constVoidPtr = ptr; + } + + PrintArg(int value) + : type(Type::IntptrValue) + { + u.intptrValue = value; + } + + PrintArg(unsigned value) + : type(Type::UintptrValue) + { + u.intptrValue = value; + } + + PrintArg(intptr_t value) + : type(Type::IntptrValue) + { + u.intptrValue = value; + } + + PrintArg(uintptr_t value) + : type(Type::UintptrValue) + { + u.uintptrValue = value; + } + + Type type; + union Value { + Value() { } + + RegisterID gpRegisterID; + FPRegisterID fpRegisterID; + Memory memory; + const char* constCharPtr; + const void* constVoidPtr; + intptr_t intptrValue; + uintptr_t uintptrValue; + } u; + }; + + typedef Vector<PrintArg> PrintArgsList; + + template<typename FirstArg, typename... Arguments> + static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... 
otherArgs) + { + argsList->append(PrintArg(firstArg)); + appendPrintArg(argsList, otherArgs...); + } + + static void appendPrintArg(PrintArgsList*) { } + +private: + static void printCallback(ProbeContext*); +}; + +template<typename... Arguments> +void MacroAssembler::print(Arguments... args) +{ + MacroAssemblerPrinter::print(this, args...); +} + + +// These printers will print a block of information. That block may be +// indented with the specified indentation. +void printCPU(MacroAssembler::CPUState&, int indentation = 0); +void printCPURegisters(MacroAssembler::CPUState&, int indentation = 0); + +// These printers will print the specified information in line in the +// print stream. Hence, no indentation will be applied. +void printRegister(MacroAssembler::CPUState&, MacroAssembler::RegisterID); +void printRegister(MacroAssembler::CPUState&, MacroAssembler::FPRegisterID); +void printMemory(MacroAssembler::CPUState&, const Memory&); + +} // namespace JSC + +#endif // ENABLE(MASM_PROBE) diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h b/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h deleted file mode 100644 index 32ea2b1b5..000000000 --- a/Source/JavaScriptCore/assembler/MacroAssemblerSH4.h +++ /dev/null @@ -1,2543 +0,0 @@ -/* - * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved. - * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved. - * Copyright (C) 2008 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-*/ - -#ifndef MacroAssemblerSH4_h -#define MacroAssemblerSH4_h - -#if ENABLE(ASSEMBLER) && CPU(SH4) - -#include "SH4Assembler.h" -#include "AbstractMacroAssembler.h" -#include <wtf/Assertions.h> - -namespace JSC { - -class MacroAssemblerSH4 : public AbstractMacroAssembler<SH4Assembler> { -public: - typedef SH4Assembler::FPRegisterID FPRegisterID; - - static const Scale ScalePtr = TimesFour; - static const FPRegisterID fscratch = SH4Registers::dr10; - static const RegisterID stackPointerRegister = SH4Registers::sp; - static const RegisterID framePointerRegister = SH4Registers::fp; - static const RegisterID linkRegister = SH4Registers::pr; - static const RegisterID scratchReg3 = SH4Registers::r13; - - static const int MaximumCompactPtrAlignedAddressOffset = 60; - - static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) - { - return (value >= 0) && (value <= MaximumCompactPtrAlignedAddressOffset) && (!(value & 3)); - } - - enum RelationalCondition { - Equal = SH4Assembler::EQ, - NotEqual = SH4Assembler::NE, - Above = SH4Assembler::HI, - AboveOrEqual = SH4Assembler::HS, - Below = SH4Assembler::LI, - BelowOrEqual = SH4Assembler::LS, - GreaterThan = SH4Assembler::GT, - GreaterThanOrEqual = SH4Assembler::GE, - LessThan = SH4Assembler::LT, - LessThanOrEqual = SH4Assembler::LE - }; - - enum ResultCondition { - Overflow = SH4Assembler::OF, - Signed = SH4Assembler::SI, - PositiveOrZero = SH4Assembler::NS, - Zero = SH4Assembler::EQ, - NonZero = SH4Assembler::NE - }; - - enum DoubleCondition { - // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. - DoubleEqual = SH4Assembler::EQ, - DoubleNotEqual = SH4Assembler::NE, - DoubleGreaterThan = SH4Assembler::GT, - DoubleGreaterThanOrEqual = SH4Assembler::GE, - DoubleLessThan = SH4Assembler::LT, - DoubleLessThanOrEqual = SH4Assembler::LE, - // If either operand is NaN, these conditions always evaluate to true. 
- DoubleEqualOrUnordered = SH4Assembler::EQU, - DoubleNotEqualOrUnordered = SH4Assembler::NEU, - DoubleGreaterThanOrUnordered = SH4Assembler::GTU, - DoubleGreaterThanOrEqualOrUnordered = SH4Assembler::GEU, - DoubleLessThanOrUnordered = SH4Assembler::LTU, - DoubleLessThanOrEqualOrUnordered = SH4Assembler::LEU, - }; - - RegisterID claimScratch() - { - return m_assembler.claimScratch(); - } - - void releaseScratch(RegisterID reg) - { - m_assembler.releaseScratch(reg); - } - - static RelationalCondition invert(RelationalCondition cond) - { - switch (cond) { - case Equal: - return NotEqual; - case NotEqual: - return Equal; - case Above: - return BelowOrEqual; - case AboveOrEqual: - return Below; - case Below: - return AboveOrEqual; - case BelowOrEqual: - return Above; - case GreaterThan: - return LessThanOrEqual; - case GreaterThanOrEqual: - return LessThan; - case LessThan: - return GreaterThanOrEqual; - case LessThanOrEqual: - return GreaterThan; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - // Integer arithmetic operations - - void add32(RegisterID src, RegisterID dest) - { - m_assembler.addlRegReg(src, dest); - } - - void add32(RegisterID src1, RegisterID src2, RegisterID dest) - { - if (src1 == dest) - add32(src2, dest); - else { - move(src2, dest); - add32(src1, dest); - } - } - - void add32(TrustedImm32 imm, RegisterID dest) - { - if (!imm.m_value) - return; - - if (m_assembler.isImmediate(imm.m_value)) { - m_assembler.addlImm8r(imm.m_value, dest); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - m_assembler.addlRegReg(scr, dest); - releaseScratch(scr); - } - - void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - move(src, dest); - add32(imm, dest); - } - - void add32(TrustedImm32 imm, Address address) - { - if (!imm.m_value) - return; - - RegisterID scr = claimScratch(); - load32(address, scr); - add32(imm, scr); - store32(scr, address); - releaseScratch(scr); - } - - void add32(Address src, RegisterID dest) - { - RegisterID scr = claimScratch(); - load32(src, scr); - m_assembler.addlRegReg(scr, dest); - releaseScratch(scr); - } - - void add32(AbsoluteAddress src, RegisterID dest) - { - RegisterID scr = claimScratch(); - load32(src.m_ptr, scr); - m_assembler.addlRegReg(scr, dest); - releaseScratch(scr); - } - - void and32(RegisterID src, RegisterID dest) - { - m_assembler.andlRegReg(src, dest); - } - - void and32(RegisterID src1, RegisterID src2, RegisterID dest) - { - if (src1 == dest) - and32(src2, dest); - else { - move(src2, dest); - and32(src1, dest); - } - } - - void and32(Address src, RegisterID dest) - { - RegisterID scr = claimScratch(); - load32(src, scr); - and32(scr, dest); - releaseScratch(scr); - } - - void and32(TrustedImm32 imm, RegisterID dest) - { - if (!imm.m_value) { - m_assembler.movImm8(0, dest); - return; - } - - if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) { - m_assembler.andlImm8r(imm.m_value, dest); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - m_assembler.andlRegReg(scr, dest); - releaseScratch(scr); - } - - void and32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (src != dest) { - move(imm, dest); - and32(src, dest); - return; - } - - and32(imm, dest); - } - - void lshift32(RegisterID shiftamount, RegisterID dest) - { - RegisterID shiftTmp = claimScratch(); - m_assembler.loadConstant(0x1f, shiftTmp); - m_assembler.andlRegReg(shiftamount, shiftTmp); - m_assembler.shldRegReg(dest, 
shiftTmp); - releaseScratch(shiftTmp); - } - - void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) - { - move(src, dest); - lshift32(shiftAmount, dest); - } - - void lshift32(TrustedImm32 imm, RegisterID dest) - { - int immMasked = imm.m_value & 0x1f; - if (!immMasked) - return; - - if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) { - m_assembler.shllImm8r(immMasked, dest); - return; - } - - RegisterID shiftTmp = claimScratch(); - m_assembler.loadConstant(immMasked, shiftTmp); - m_assembler.shldRegReg(dest, shiftTmp); - releaseScratch(shiftTmp); - } - - void lshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest) - { - move(src, dest); - lshift32(shiftamount, dest); - } - - void mul32(RegisterID src, RegisterID dest) - { - mul32(src, dest, dest); - } - - void mul32(RegisterID src1, RegisterID src2, RegisterID dest) - { - m_assembler.imullRegReg(src1, src2); - m_assembler.stsmacl(dest); - } - - void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (src == dest) { - RegisterID immval = claimScratch(); - move(imm, immval); - mul32(immval, dest); - releaseScratch(immval); - } else { - move(imm, dest); - mul32(src, dest); - } - } - - void or32(RegisterID src, RegisterID dest) - { - m_assembler.orlRegReg(src, dest); - } - - void or32(TrustedImm32 imm, RegisterID dest) - { - if ((imm.m_value <= 255) && (imm.m_value >= 0) && (dest == SH4Registers::r0)) { - m_assembler.orlImm8r(imm.m_value, dest); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - m_assembler.orlRegReg(scr, dest); - releaseScratch(scr); - } - - void or32(RegisterID op1, RegisterID op2, RegisterID dest) - { - if (op1 == op2) - move(op1, dest); - else if (op1 == dest) - or32(op2, dest); - else { - move(op2, dest); - or32(op1, dest); - } - } - - void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (src != dest) { - move(imm, dest); - or32(src, dest); - return; - } - - or32(imm, dest); - } - - void or32(RegisterID src, AbsoluteAddress address) - { - RegisterID destptr = claimScratch(); - move(TrustedImmPtr(address.m_ptr), destptr); - RegisterID destval = claimScratch(); - m_assembler.movlMemReg(destptr, destval); - m_assembler.orlRegReg(src, destval); - m_assembler.movlRegMem(destval, destptr); - releaseScratch(destval); - releaseScratch(destptr); - } - - void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest) - { - if (src != dest) { - move(imm, dest); - xor32(src, dest); - return; - } - - xor32(imm, dest); - } - - void rshift32(RegisterID shiftamount, RegisterID dest) - { - RegisterID shiftTmp = claimScratch(); - m_assembler.loadConstant(0x1f, shiftTmp); - m_assembler.andlRegReg(shiftamount, shiftTmp); - m_assembler.neg(shiftTmp, shiftTmp); - m_assembler.shadRegReg(dest, shiftTmp); - releaseScratch(shiftTmp); - } - - void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) - { - move(src, dest); - rshift32(shiftAmount, dest); - } - - void rshift32(TrustedImm32 imm, RegisterID dest) - { - int immMasked = imm.m_value & 0x1f; - if (!immMasked) - return; - - if (immMasked == 1) { - m_assembler.sharImm8r(immMasked, dest); - return; - } - - RegisterID shiftTmp = claimScratch(); - m_assembler.loadConstant(-immMasked, shiftTmp); - m_assembler.shadRegReg(dest, shiftTmp); - releaseScratch(shiftTmp); - } - - void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) - { - move(src, dest); - rshift32(imm, dest); - } - - void sub32(RegisterID src, RegisterID dest) - { - 
m_assembler.sublRegReg(src, dest); - } - - void sub32(TrustedImm32 imm, AbsoluteAddress address) - { - if (!imm.m_value) - return; - - RegisterID result = claimScratch(); - RegisterID scratchReg = claimScratch(); - - move(TrustedImmPtr(address.m_ptr), scratchReg); - m_assembler.movlMemReg(scratchReg, result); - - if (m_assembler.isImmediate(-imm.m_value)) - m_assembler.addlImm8r(-imm.m_value, result); - else { - m_assembler.loadConstant(imm.m_value, scratchReg3); - m_assembler.sublRegReg(scratchReg3, result); - } - - store32(result, scratchReg); - releaseScratch(result); - releaseScratch(scratchReg); - } - - void sub32(TrustedImm32 imm, Address address) - { - add32(TrustedImm32(-imm.m_value), address); - } - - void add32(TrustedImm32 imm, AbsoluteAddress address) - { - if (!imm.m_value) - return; - - RegisterID result = claimScratch(); - RegisterID scratchReg = claimScratch(); - - move(TrustedImmPtr(address.m_ptr), scratchReg); - m_assembler.movlMemReg(scratchReg, result); - - if (m_assembler.isImmediate(imm.m_value)) - m_assembler.addlImm8r(imm.m_value, result); - else { - m_assembler.loadConstant(imm.m_value, scratchReg3); - m_assembler.addlRegReg(scratchReg3, result); - } - - store32(result, scratchReg); - releaseScratch(result); - releaseScratch(scratchReg); - } - - void add64(TrustedImm32 imm, AbsoluteAddress address) - { - RegisterID scr1 = claimScratch(); - RegisterID scr2 = claimScratch(); - - // Add 32-bit LSB first. - move(TrustedImmPtr(address.m_ptr), scratchReg3); - m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = 32-bit LSB of int64 @ address - m_assembler.loadConstant(imm.m_value, scr2); - m_assembler.clrt(); - m_assembler.addclRegReg(scr1, scr2); - m_assembler.movlRegMem(scr2, scratchReg3); // Update address with 32-bit LSB result. - - // Then add 32-bit MSB. - m_assembler.addlImm8r(4, scratchReg3); - m_assembler.movlMemReg(scratchReg3, scr1); // scr1 = 32-bit MSB of int64 @ address - m_assembler.movt(scr2); - if (imm.m_value < 0) - m_assembler.addlImm8r(-1, scr2); // Sign extend imm value if needed. - m_assembler.addvlRegReg(scr2, scr1); - m_assembler.movlRegMem(scr1, scratchReg3); // Update (address + 4) with 32-bit MSB result. 
- - releaseScratch(scr2); - releaseScratch(scr1); - } - - void sub32(TrustedImm32 imm, RegisterID dest) - { - if (!imm.m_value) - return; - - if (m_assembler.isImmediate(-imm.m_value)) { - m_assembler.addlImm8r(-imm.m_value, dest); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - m_assembler.sublRegReg(scr, dest); - releaseScratch(scr); - } - - void sub32(Address src, RegisterID dest) - { - RegisterID scr = claimScratch(); - load32(src, scr); - m_assembler.sublRegReg(scr, dest); - releaseScratch(scr); - } - - void xor32(RegisterID src, RegisterID dest) - { - m_assembler.xorlRegReg(src, dest); - } - - void xor32(RegisterID src1, RegisterID src2, RegisterID dest) - { - if (src1 == dest) - xor32(src2, dest); - else { - move(src2, dest); - xor32(src1, dest); - } - } - - void xor32(TrustedImm32 imm, RegisterID srcDest) - { - if (imm.m_value == -1) { - m_assembler.notlReg(srcDest, srcDest); - return; - } - - if ((srcDest != SH4Registers::r0) || (imm.m_value > 255) || (imm.m_value < 0)) { - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - m_assembler.xorlRegReg(scr, srcDest); - releaseScratch(scr); - return; - } - - m_assembler.xorlImm8r(imm.m_value, srcDest); - } - - void compare32(int imm, RegisterID dst, RelationalCondition cond) - { - if (((cond == Equal) || (cond == NotEqual)) && (dst == SH4Registers::r0) && m_assembler.isImmediate(imm)) { - m_assembler.cmpEqImmR0(imm, dst); - return; - } - - if (((cond == Equal) || (cond == NotEqual)) && !imm) { - m_assembler.testlRegReg(dst, dst); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm, scr); - m_assembler.cmplRegReg(scr, dst, SH4Condition(cond)); - releaseScratch(scr); - } - - void compare32(int offset, RegisterID base, RegisterID left, RelationalCondition cond) - { - RegisterID scr = claimScratch(); - if (!offset) { - m_assembler.movlMemReg(base, scr); - m_assembler.cmplRegReg(scr, left, SH4Condition(cond)); - releaseScratch(scr); - return; - } - - if ((offset < 0) || (offset >= 64)) { - m_assembler.loadConstant(offset, scr); - m_assembler.addlRegReg(base, scr); - m_assembler.movlMemReg(scr, scr); - m_assembler.cmplRegReg(scr, left, SH4Condition(cond)); - releaseScratch(scr); - return; - } - - m_assembler.movlMemReg(offset >> 2, base, scr); - m_assembler.cmplRegReg(scr, left, SH4Condition(cond)); - releaseScratch(scr); - } - - void testImm(int imm, int offset, RegisterID base) - { - RegisterID scr = claimScratch(); - load32(base, offset, scr); - - RegisterID scr1 = claimScratch(); - move(TrustedImm32(imm), scr1); - - m_assembler.testlRegReg(scr, scr1); - releaseScratch(scr); - releaseScratch(scr1); - } - - void testlImm(int imm, RegisterID dst) - { - if ((dst == SH4Registers::r0) && (imm <= 255) && (imm >= 0)) { - m_assembler.testlImm8r(imm, dst); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm, scr); - m_assembler.testlRegReg(scr, dst); - releaseScratch(scr); - } - - void compare32(RegisterID right, int offset, RegisterID base, RelationalCondition cond) - { - if (!offset) { - RegisterID scr = claimScratch(); - m_assembler.movlMemReg(base, scr); - m_assembler.cmplRegReg(right, scr, SH4Condition(cond)); - releaseScratch(scr); - return; - } - - if ((offset < 0) || (offset >= 64)) { - RegisterID scr = claimScratch(); - m_assembler.loadConstant(offset, scr); - m_assembler.addlRegReg(base, scr); - m_assembler.movlMemReg(scr, scr); - m_assembler.cmplRegReg(right, scr, SH4Condition(cond)); - 
releaseScratch(scr); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.movlMemReg(offset >> 2, base, scr); - m_assembler.cmplRegReg(right, scr, SH4Condition(cond)); - releaseScratch(scr); - } - - void compare32(int imm, int offset, RegisterID base, RelationalCondition cond) - { - RegisterID scr = claimScratch(); - load32(base, offset, scr); - - RegisterID scr1 = claimScratch(); - move(TrustedImm32(imm), scr1); - - m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond)); - - releaseScratch(scr1); - releaseScratch(scr); - } - - // Memory access operation - - ALWAYS_INLINE void loadEffectiveAddress(BaseIndex address, RegisterID dest, int extraoffset = 0) - { - if (dest == address.base) { - RegisterID scaledIndex = claimScratch(); - move(address.index, scaledIndex); - lshift32(TrustedImm32(address.scale), scaledIndex); - add32(scaledIndex, dest); - releaseScratch(scaledIndex); - } else { - move(address.index, dest); - lshift32(TrustedImm32(address.scale), dest); - add32(address.base, dest); - } - - add32(TrustedImm32(address.offset + extraoffset), dest); - } - - void load32(ImplicitAddress address, RegisterID dest) - { - load32(address.base, address.offset, dest); - } - - void load8(ImplicitAddress address, RegisterID dest) - { - load8(address.base, address.offset, dest); - } - - void load8(BaseIndex address, RegisterID dest) - { - RegisterID scr = claimScratch(); - move(address.index, scr); - lshift32(TrustedImm32(address.scale), scr); - add32(address.base, scr); - load8(scr, address.offset, dest); - releaseScratch(scr); - } - - void load8(AbsoluteAddress address, RegisterID dest) - { - move(TrustedImmPtr(address.m_ptr), dest); - m_assembler.movbMemReg(dest, dest); - m_assembler.extub(dest, dest); - } - - void load8(const void* address, RegisterID dest) - { - load8(AbsoluteAddress(address), dest); - } - - void load8PostInc(RegisterID base, RegisterID dest) - { - m_assembler.movbMemRegIn(base, dest); - m_assembler.extub(dest, dest); - } - - void load8Signed(BaseIndex address, RegisterID dest) - { - RegisterID scr = claimScratch(); - move(address.index, scr); - lshift32(TrustedImm32(address.scale), scr); - add32(address.base, scr); - load8Signed(scr, address.offset, dest); - releaseScratch(scr); - } - - void load32(BaseIndex address, RegisterID dest) - { - RegisterID scr = claimScratch(); - move(address.index, scr); - lshift32(TrustedImm32(address.scale), scr); - add32(address.base, scr); - load32(scr, address.offset, dest); - releaseScratch(scr); - } - - void load32(const void* address, RegisterID dest) - { - move(TrustedImmPtr(address), dest); - m_assembler.movlMemReg(dest, dest); - } - - void load32(RegisterID base, int offset, RegisterID dest) - { - if (!offset) { - m_assembler.movlMemReg(base, dest); - return; - } - - if ((offset >= 0) && (offset < 64)) { - m_assembler.movlMemReg(offset >> 2, base, dest); - return; - } - - RegisterID scr = (dest == base) ? claimScratch() : dest; - - m_assembler.loadConstant(offset, scr); - if (base == SH4Registers::r0) - m_assembler.movlR0mr(scr, dest); - else { - m_assembler.addlRegReg(base, scr); - m_assembler.movlMemReg(scr, dest); - } - - if (dest == base) - releaseScratch(scr); - } - - void load8Signed(RegisterID base, int offset, RegisterID dest) - { - if (!offset) { - m_assembler.movbMemReg(base, dest); - return; - } - - if ((offset > 0) && (offset <= 15) && (dest == SH4Registers::r0)) { - m_assembler.movbMemReg(offset, base, dest); - return; - } - - RegisterID scr = (dest == base) ? 
claimScratch() : dest; - - m_assembler.loadConstant(offset, scr); - if (base == SH4Registers::r0) - m_assembler.movbR0mr(scr, dest); - else { - m_assembler.addlRegReg(base, scr); - m_assembler.movbMemReg(scr, dest); - } - - if (dest == base) - releaseScratch(scr); - } - - void load8(RegisterID base, int offset, RegisterID dest) - { - load8Signed(base, offset, dest); - m_assembler.extub(dest, dest); - } - - void load32(RegisterID src, RegisterID dst) - { - m_assembler.movlMemReg(src, dst); - } - - void load16(ImplicitAddress address, RegisterID dest) - { - if (!address.offset) { - m_assembler.movwMemReg(address.base, dest); - m_assembler.extuw(dest, dest); - return; - } - - if ((address.offset > 0) && (address.offset <= 30) && (dest == SH4Registers::r0)) { - m_assembler.movwMemReg(address.offset >> 1, address.base, dest); - m_assembler.extuw(dest, dest); - return; - } - - RegisterID scr = (dest == address.base) ? claimScratch() : dest; - - m_assembler.loadConstant(address.offset, scr); - if (address.base == SH4Registers::r0) - m_assembler.movwR0mr(scr, dest); - else { - m_assembler.addlRegReg(address.base, scr); - m_assembler.movwMemReg(scr, dest); - } - m_assembler.extuw(dest, dest); - - if (dest == address.base) - releaseScratch(scr); - } - - void load16Unaligned(BaseIndex address, RegisterID dest) - { - RegisterID scr = claimScratch(); - - loadEffectiveAddress(address, scr); - - RegisterID scr1 = claimScratch(); - load8PostInc(scr, scr1); - load8(scr, dest); - m_assembler.shllImm8r(8, dest); - or32(scr1, dest); - - releaseScratch(scr); - releaseScratch(scr1); - } - - void load16(RegisterID src, RegisterID dest) - { - m_assembler.movwMemReg(src, dest); - m_assembler.extuw(dest, dest); - } - - void load16Signed(RegisterID src, RegisterID dest) - { - m_assembler.movwMemReg(src, dest); - } - - void load16(BaseIndex address, RegisterID dest) - { - load16Signed(address, dest); - m_assembler.extuw(dest, dest); - } - - void load16PostInc(RegisterID base, RegisterID dest) - { - m_assembler.movwMemRegIn(base, dest); - m_assembler.extuw(dest, dest); - } - - void load16Signed(BaseIndex address, RegisterID dest) - { - RegisterID scr = claimScratch(); - - move(address.index, scr); - lshift32(TrustedImm32(address.scale), scr); - add32(TrustedImm32(address.offset), scr); - - if (address.base == SH4Registers::r0) - m_assembler.movwR0mr(scr, dest); - else { - add32(address.base, scr); - load16Signed(scr, dest); - } - - releaseScratch(scr); - } - - void store8(RegisterID src, BaseIndex address) - { - RegisterID scr = claimScratch(); - - move(address.index, scr); - lshift32(TrustedImm32(address.scale), scr); - add32(TrustedImm32(address.offset), scr); - - if (address.base == SH4Registers::r0) - m_assembler.movbRegMemr0(src, scr); - else { - add32(address.base, scr); - m_assembler.movbRegMem(src, scr); - } - - releaseScratch(scr); - } - - void store8(RegisterID src, void* address) - { - RegisterID destptr = claimScratch(); - move(TrustedImmPtr(address), destptr); - m_assembler.movbRegMem(src, destptr); - releaseScratch(destptr); - } - - void store8(TrustedImm32 imm, void* address) - { - ASSERT((imm.m_value >= -128) && (imm.m_value <= 127)); - RegisterID dstptr = claimScratch(); - move(TrustedImmPtr(address), dstptr); - RegisterID srcval = claimScratch(); - move(imm, srcval); - m_assembler.movbRegMem(srcval, dstptr); - releaseScratch(dstptr); - releaseScratch(srcval); - } - - void store16(RegisterID src, BaseIndex address) - { - RegisterID scr = claimScratch(); - - move(address.index, scr); - 
lshift32(TrustedImm32(address.scale), scr); - add32(TrustedImm32(address.offset), scr); - - if (address.base == SH4Registers::r0) - m_assembler.movwRegMemr0(src, scr); - else { - add32(address.base, scr); - m_assembler.movwRegMem(src, scr); - } - - releaseScratch(scr); - } - - void store32(RegisterID src, ImplicitAddress address) - { - if (!address.offset) { - m_assembler.movlRegMem(src, address.base); - return; - } - - if ((address.offset >= 0) && (address.offset < 64)) { - m_assembler.movlRegMem(src, address.offset >> 2, address.base); - return; - } - - RegisterID scr = claimScratch(); - m_assembler.loadConstant(address.offset, scr); - if (address.base == SH4Registers::r0) - m_assembler.movlRegMemr0(src, scr); - else { - m_assembler.addlRegReg(address.base, scr); - m_assembler.movlRegMem(src, scr); - } - releaseScratch(scr); - } - - void store32(RegisterID src, RegisterID dst) - { - m_assembler.movlRegMem(src, dst); - } - - void store32(TrustedImm32 imm, ImplicitAddress address) - { - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - store32(scr, address); - releaseScratch(scr); - } - - void store32(RegisterID src, BaseIndex address) - { - RegisterID scr = claimScratch(); - - move(address.index, scr); - lshift32(TrustedImm32(address.scale), scr); - add32(address.base, scr); - store32(src, Address(scr, address.offset)); - - releaseScratch(scr); - } - - void store32(TrustedImm32 imm, void* address) - { - RegisterID scr = claimScratch(); - RegisterID scr1 = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - move(TrustedImmPtr(address), scr1); - m_assembler.movlRegMem(scr, scr1); - releaseScratch(scr); - releaseScratch(scr1); - } - - void store32(RegisterID src, void* address) - { - RegisterID scr = claimScratch(); - move(TrustedImmPtr(address), scr); - m_assembler.movlRegMem(src, scr); - releaseScratch(scr); - } - - void store32(TrustedImm32 imm, BaseIndex address) - { - RegisterID destptr = claimScratch(); - - loadEffectiveAddress(address, destptr); - - RegisterID srcval = claimScratch(); - move(imm, srcval); - m_assembler.movlRegMem(srcval, destptr); - releaseScratch(srcval); - releaseScratch(destptr); - } - - DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) - { - RegisterID scr = claimScratch(); - DataLabel32 label(this); - m_assembler.loadConstantUnReusable(address.offset, scr); - m_assembler.addlRegReg(address.base, scr); - m_assembler.movlMemReg(scr, dest); - releaseScratch(scr); - return label; - } - - DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) - { - RegisterID scr = claimScratch(); - DataLabel32 label(this); - m_assembler.loadConstantUnReusable(address.offset, scr); - m_assembler.addlRegReg(address.base, scr); - m_assembler.movlRegMem(src, scr); - releaseScratch(scr); - return label; - } - - DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest) - { - DataLabelCompact dataLabel(this); - ASSERT(isCompactPtrAlignedAddressOffset(address.offset)); - m_assembler.movlMemRegCompact(address.offset >> 2, address.base, dest); - return dataLabel; - } - - ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) - { - ConvertibleLoadLabel result(this); - - RegisterID scr = claimScratch(); - m_assembler.movImm8(address.offset, scr); - m_assembler.addlRegReg(address.base, scr); - m_assembler.movlMemReg(scr, dest); - releaseScratch(scr); - - return result; - } - - // Floating-point operations - - static bool supportsFloatingPoint() { return true; } - 
static bool supportsFloatingPointTruncate() { return true; } - static bool supportsFloatingPointSqrt() { return true; } - static bool supportsFloatingPointAbs() { return true; } - - void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2) - { - m_assembler.fldsfpul((FPRegisterID)(src + 1)); - m_assembler.stsfpulReg(dest1); - m_assembler.fldsfpul(src); - m_assembler.stsfpulReg(dest2); - } - - void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID) - { - m_assembler.ldsrmfpul(src1); - m_assembler.fstsfpul((FPRegisterID)(dest + 1)); - m_assembler.ldsrmfpul(src2); - m_assembler.fstsfpul(dest); - } - - void moveDouble(FPRegisterID src, FPRegisterID dest) - { - if (src != dest) { - m_assembler.fmovsRegReg((FPRegisterID)(src + 1), (FPRegisterID)(dest + 1)); - m_assembler.fmovsRegReg(src, dest); - } - } - - void swapDouble(FPRegisterID fr1, FPRegisterID fr2) - { - if (fr1 != fr2) { - m_assembler.fldsfpul((FPRegisterID)(fr1 + 1)); - m_assembler.fmovsRegReg((FPRegisterID)(fr2 + 1), (FPRegisterID)(fr1 + 1)); - m_assembler.fstsfpul((FPRegisterID)(fr2 + 1)); - m_assembler.fldsfpul(fr1); - m_assembler.fmovsRegReg(fr2, fr1); - m_assembler.fstsfpul(fr2); - } - } - - void loadFloat(BaseIndex address, FPRegisterID dest) - { - RegisterID scr = claimScratch(); - - loadEffectiveAddress(address, scr); - - m_assembler.fmovsReadrm(scr, dest); - releaseScratch(scr); - } - - void loadDouble(BaseIndex address, FPRegisterID dest) - { - RegisterID scr = claimScratch(); - - loadEffectiveAddress(address, scr); - - m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1)); - m_assembler.fmovsReadrm(scr, dest); - releaseScratch(scr); - } - - void loadDouble(ImplicitAddress address, FPRegisterID dest) - { - RegisterID scr = claimScratch(); - - m_assembler.loadConstant(address.offset, scr); - if (address.base == SH4Registers::r0) { - m_assembler.fmovsReadr0r(scr, (FPRegisterID)(dest + 1)); - m_assembler.addlImm8r(4, scr); - m_assembler.fmovsReadr0r(scr, dest); - releaseScratch(scr); - return; - } - - m_assembler.addlRegReg(address.base, scr); - m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1)); - m_assembler.fmovsReadrm(scr, dest); - releaseScratch(scr); - } - - void loadDouble(const void* address, FPRegisterID dest) - { - RegisterID scr = claimScratch(); - move(TrustedImmPtr(address), scr); - m_assembler.fmovsReadrminc(scr, (FPRegisterID)(dest + 1)); - m_assembler.fmovsReadrm(scr, dest); - releaseScratch(scr); - } - - void storeFloat(FPRegisterID src, BaseIndex address) - { - RegisterID scr = claimScratch(); - loadEffectiveAddress(address, scr); - m_assembler.fmovsWriterm(src, scr); - releaseScratch(scr); - } - - void storeDouble(FPRegisterID src, ImplicitAddress address) - { - RegisterID scr = claimScratch(); - m_assembler.loadConstant(address.offset + 8, scr); - m_assembler.addlRegReg(address.base, scr); - m_assembler.fmovsWriterndec(src, scr); - m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr); - releaseScratch(scr); - } - - void storeDouble(FPRegisterID src, BaseIndex address) - { - RegisterID scr = claimScratch(); - - loadEffectiveAddress(address, scr, 8); - - m_assembler.fmovsWriterndec(src, scr); - m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr); - - releaseScratch(scr); - } - - void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - if (op1 == dest) - addDouble(op2, dest); - else { - moveDouble(op2, dest); - addDouble(op1, dest); - } - } - - void storeDouble(FPRegisterID src, const void* address) - { - RegisterID scr 
= claimScratch(); - m_assembler.loadConstant(reinterpret_cast<uint32_t>(const_cast<void*>(address)) + 8, scr); - m_assembler.fmovsWriterndec(src, scr); - m_assembler.fmovsWriterndec((FPRegisterID)(src + 1), scr); - releaseScratch(scr); - } - - void addDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.daddRegReg(src, dest); - } - - void addDouble(AbsoluteAddress address, FPRegisterID dest) - { - loadDouble(address.m_ptr, fscratch); - addDouble(fscratch, dest); - } - - void addDouble(Address address, FPRegisterID dest) - { - loadDouble(address, fscratch); - addDouble(fscratch, dest); - } - - void subDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.dsubRegReg(src, dest); - } - - void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - if (op2 == dest) { - moveDouble(op1, fscratch); - subDouble(op2, fscratch); - moveDouble(fscratch, dest); - } else { - moveDouble(op1, dest); - subDouble(op2, dest); - } - } - - void subDouble(Address address, FPRegisterID dest) - { - loadDouble(address, fscratch); - subDouble(fscratch, dest); - } - - void mulDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.dmulRegReg(src, dest); - } - - void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - if (op1 == dest) - mulDouble(op2, dest); - else { - moveDouble(op2, dest); - mulDouble(op1, dest); - } - } - - void mulDouble(Address address, FPRegisterID dest) - { - loadDouble(address, fscratch); - mulDouble(fscratch, dest); - } - - void divDouble(FPRegisterID src, FPRegisterID dest) - { - m_assembler.ddivRegReg(src, dest); - } - - void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) - { - if (op2 == dest) { - moveDouble(op1, fscratch); - divDouble(op2, fscratch); - moveDouble(fscratch, dest); - } else { - moveDouble(op1, dest); - divDouble(op2, dest); - } - } - - void negateDouble(FPRegisterID src, FPRegisterID dest) - { - moveDouble(src, dest); - m_assembler.dneg(dest); - } - - void convertFloatToDouble(FPRegisterID src, FPRegisterID dst) - { - m_assembler.fldsfpul(src); - m_assembler.dcnvsd(dst); - } - - void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst) - { - m_assembler.dcnvds(src); - m_assembler.fstsfpul(dst); - } - - void convertInt32ToDouble(RegisterID src, FPRegisterID dest) - { - m_assembler.ldsrmfpul(src); - m_assembler.floatfpulDreg(dest); - } - - void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) - { - RegisterID scr = claimScratch(); - load32(src.m_ptr, scr); - convertInt32ToDouble(scr, dest); - releaseScratch(scr); - } - - void convertInt32ToDouble(Address src, FPRegisterID dest) - { - RegisterID scr = claimScratch(); - load32(src, scr); - convertInt32ToDouble(scr, dest); - releaseScratch(scr); - } - - void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest) - { - RegisterID scr = claimScratch(); - Jump m_jump; - JumpList end; - - loadEffectiveAddress(address, scr); - - RegisterID scr1 = claimScratch(); - if (dest != SH4Registers::r0) - move(SH4Registers::r0, scr1); - - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 58, sizeof(uint32_t)); - move(scr, SH4Registers::r0); - m_assembler.testlImm8r(0x3, SH4Registers::r0); - m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear); - - if (dest != SH4Registers::r0) - move(scr1, SH4Registers::r0); - - load32(scr, dest); - end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear)); - m_assembler.nop(); - m_jump.link(this); - m_assembler.testlImm8r(0x1, SH4Registers::r0); - - if (dest != SH4Registers::r0) - move(scr1, 
SH4Registers::r0); - - m_jump = Jump(m_assembler.jne(), SH4Assembler::JumpNear); - load16PostInc(scr, scr1); - load16(scr, dest); - m_assembler.shllImm8r(16, dest); - or32(scr1, dest); - end.append(Jump(m_assembler.bra(), SH4Assembler::JumpNear)); - m_assembler.nop(); - m_jump.link(this); - load8PostInc(scr, scr1); - load16PostInc(scr, dest); - m_assembler.shllImm8r(8, dest); - or32(dest, scr1); - load8(scr, dest); - m_assembler.shllImm8r(8, dest); - m_assembler.shllImm8r(16, dest); - or32(scr1, dest); - end.link(this); - - releaseScratch(scr); - releaseScratch(scr1); - } - - Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right) - { - RegisterID scr = scratchReg3; - load32WithUnalignedHalfWords(left, scr); - if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) - m_assembler.testlRegReg(scr, scr); - else - compare32(right.m_value, scr, cond); - - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch) - { - m_assembler.movImm8(0, scratchReg3); - convertInt32ToDouble(scratchReg3, scratch); - return branchDouble(DoubleNotEqual, reg, scratch); - } - - Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch) - { - m_assembler.movImm8(0, scratchReg3); - convertInt32ToDouble(scratchReg3, scratch); - return branchDouble(DoubleEqualOrUnordered, reg, scratch); - } - - Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right) - { - if (cond == DoubleEqual) { - m_assembler.dcmppeq(right, left); - return branchTrue(); - } - - if (cond == DoubleNotEqual) { - JumpList end; - m_assembler.dcmppeq(left, left); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t)); - end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(right, right); - end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(right, left); - Jump m_jump = branchFalse(); - end.link(this); - return m_jump; - } - - if (cond == DoubleGreaterThan) { - m_assembler.dcmppgt(right, left); - return branchTrue(); - } - - if (cond == DoubleGreaterThanOrEqual) { - JumpList end; - m_assembler.dcmppeq(left, left); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t)); - end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(right, right); - end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppgt(left, right); - Jump m_jump = branchFalse(); - end.link(this); - return m_jump; - } - - if (cond == DoubleLessThan) { - m_assembler.dcmppgt(left, right); - return branchTrue(); - } - - if (cond == DoubleLessThanOrEqual) { - JumpList end; - m_assembler.dcmppeq(left, left); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t)); - end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(right, right); - end.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppgt(right, left); - Jump m_jump = branchFalse(); - end.link(this); - return m_jump; - } - - if (cond == DoubleEqualOrUnordered) { - JumpList takeBranch; - m_assembler.dcmppeq(left, left); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t)); - takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(right, right); - takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(left, right); - m_assembler.branch(BF_OPCODE, 
2); - takeBranch.link(this); - return Jump(m_assembler.extraInstrForBranch(scratchReg3)); - } - - if (cond == DoubleGreaterThanOrUnordered) { - JumpList takeBranch; - m_assembler.dcmppeq(left, left); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t)); - takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(right, right); - takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppgt(right, left); - m_assembler.branch(BF_OPCODE, 2); - takeBranch.link(this); - return Jump(m_assembler.extraInstrForBranch(scratchReg3)); - } - - if (cond == DoubleGreaterThanOrEqualOrUnordered) { - m_assembler.dcmppgt(left, right); - return branchFalse(); - } - - if (cond == DoubleLessThanOrUnordered) { - JumpList takeBranch; - m_assembler.dcmppeq(left, left); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 22, sizeof(uint32_t)); - takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppeq(right, right); - takeBranch.append(Jump(m_assembler.jne(), SH4Assembler::JumpNear)); - m_assembler.dcmppgt(left, right); - m_assembler.branch(BF_OPCODE, 2); - takeBranch.link(this); - return Jump(m_assembler.extraInstrForBranch(scratchReg3)); - } - - if (cond == DoubleLessThanOrEqualOrUnordered) { - m_assembler.dcmppgt(right, left); - return branchFalse(); - } - - ASSERT(cond == DoubleNotEqualOrUnordered); - m_assembler.dcmppeq(right, left); - return branchFalse(); - } - - Jump branchTrue() - { - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t)); - m_assembler.branch(BF_OPCODE, 2); - return Jump(m_assembler.extraInstrForBranch(scratchReg3)); - } - - Jump branchFalse() - { - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 6, sizeof(uint32_t)); - m_assembler.branch(BT_OPCODE, 2); - return Jump(m_assembler.extraInstrForBranch(scratchReg3)); - } - - Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right) - { - RegisterID scr = claimScratch(); - move(left.index, scr); - lshift32(TrustedImm32(left.scale), scr); - add32(left.base, scr); - load32(scr, left.offset, scr); - compare32(right.m_value, scr, cond); - releaseScratch(scr); - - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - void sqrtDouble(FPRegisterID src, FPRegisterID dest) - { - moveDouble(src, dest); - m_assembler.dsqrt(dest); - } - - void absDouble(FPRegisterID src, FPRegisterID dest) - { - moveDouble(src, dest); - m_assembler.dabs(dest); - } - - Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) - { - RegisterID addressTempRegister = claimScratch(); - load8(address, addressTempRegister); - Jump jmp = branchTest32(cond, addressTempRegister, mask); - releaseScratch(addressTempRegister); - return jmp; - } - - Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) - { - RegisterID addressTempRegister = claimScratch(); - load8(address, addressTempRegister); - Jump jmp = branchTest32(cond, addressTempRegister, mask); - releaseScratch(addressTempRegister); - return jmp; - } - - Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) - { - RegisterID addressTempRegister = claimScratch(); - move(TrustedImmPtr(address.m_ptr), addressTempRegister); - load8(Address(addressTempRegister), addressTempRegister); - Jump jmp = branchTest32(cond, addressTempRegister, mask); - releaseScratch(addressTempRegister); - return jmp; - } - - void 
signExtend32ToPtr(RegisterID src, RegisterID dest) - { - move(src, dest); - } - - void zeroExtend32ToPtr(RegisterID src, RegisterID dest) - { - move(src, dest); - } - - Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) - { - RegisterID addressTempRegister = claimScratch(); - load8(left, addressTempRegister); - Jump jmp = branch32(cond, addressTempRegister, right); - releaseScratch(addressTempRegister); - return jmp; - } - - Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) - { - RegisterID addressTempRegister = claimScratch(); - load8(left, addressTempRegister); - Jump jmp = branch32(cond, addressTempRegister, right); - releaseScratch(addressTempRegister); - return jmp; - } - - void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) - { - RegisterID addressTempRegister = claimScratch(); - load8(left, addressTempRegister); - compare32(cond, addressTempRegister, right, dest); - releaseScratch(addressTempRegister); - } - - enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful }; - Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) - { - Jump result; - truncateDoubleToInt32(src, dest); - RegisterID intscr = claimScratch(); - m_assembler.loadConstant(0x7fffffff, intscr); - m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal)); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 12, sizeof(uint32_t)); - if (branchType == BranchIfTruncateFailed) { - m_assembler.branch(BT_OPCODE, 2); - m_assembler.addlImm8r(1, intscr); - m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal)); - result = branchTrue(); - } else { - Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear); - m_assembler.addlImm8r(1, intscr); - m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal)); - result = branchFalse(); - out.link(this); - } - releaseScratch(intscr); - return result; - } - - Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) - { - Jump result; - RegisterID intscr = claimScratch(); - m_assembler.loadConstant(0x80000000, intscr); - convertInt32ToDouble(intscr, fscratch); - addDouble(src, fscratch); - truncateDoubleToInt32(fscratch, dest); - m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal)); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 16, sizeof(uint32_t)); - if (branchType == BranchIfTruncateFailed) { - m_assembler.branch(BT_OPCODE, 4); - m_assembler.addlImm8r(-1, intscr); - m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal)); - m_assembler.addlImm8r(1, intscr); - m_assembler.sublRegReg(intscr, dest); - result = branchTrue(); - } else { - Jump out = Jump(m_assembler.je(), SH4Assembler::JumpNear); - m_assembler.addlImm8r(-1, intscr); - m_assembler.cmplRegReg(dest, intscr, SH4Condition(Equal)); - m_assembler.addlImm8r(1, intscr); - m_assembler.sublRegReg(intscr, dest); - result = branchFalse(); - out.link(this); - } - releaseScratch(intscr); - return result; - } - - void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) - { - m_assembler.ftrcdrmfpul(src); - m_assembler.stsfpulReg(dest); - } - - void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) - { - RegisterID intscr = claimScratch(); - m_assembler.loadConstant(0x80000000, intscr); - convertInt32ToDouble(intscr, fscratch); - addDouble(src, fscratch); - m_assembler.ftrcdrmfpul(fscratch); - m_assembler.stsfpulReg(dest); - 
m_assembler.sublRegReg(intscr, dest); - releaseScratch(intscr); - } - - // Stack manipulation operations - - void pop(RegisterID dest) - { - m_assembler.popReg(dest); - } - - void push(RegisterID src) - { - m_assembler.pushReg(src); - } - - void push(TrustedImm32 imm) - { - RegisterID scr = claimScratch(); - m_assembler.loadConstant(imm.m_value, scr); - push(scr); - releaseScratch(scr); - } - - // Register move operations - - void move(TrustedImm32 imm, RegisterID dest) - { - m_assembler.loadConstant(imm.m_value, dest); - } - - DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest) - { - m_assembler.ensureSpace(m_assembler.maxInstructionSize, sizeof(uint32_t)); - DataLabelPtr dataLabel(this); - m_assembler.loadConstantUnReusable(reinterpret_cast<uint32_t>(initialValue.m_value), dest); - return dataLabel; - } - - void move(RegisterID src, RegisterID dest) - { - if (src != dest) - m_assembler.movlRegReg(src, dest); - } - - void move(TrustedImmPtr imm, RegisterID dest) - { - m_assembler.loadConstant(imm.asIntptr(), dest); - } - - void swap(RegisterID reg1, RegisterID reg2) - { - if (reg1 != reg2) { - xor32(reg1, reg2); - xor32(reg2, reg1); - xor32(reg1, reg2); - } - } - - void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) - { - m_assembler.cmplRegReg(right, left, SH4Condition(cond)); - if (cond != NotEqual) { - m_assembler.movt(dest); - return; - } - - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4); - m_assembler.movImm8(0, dest); - m_assembler.branch(BT_OPCODE, 0); - m_assembler.movImm8(1, dest); - } - - void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) - { - if (left != dest) { - move(right, dest); - compare32(cond, left, dest, dest); - return; - } - - RegisterID scr = claimScratch(); - move(right, scr); - compare32(cond, left, scr, dest); - releaseScratch(scr); - } - - void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) - { - ASSERT((cond == Zero) || (cond == NonZero)); - - load8(address, dest); - if (mask.m_value == -1) - compare32(0, dest, static_cast<RelationalCondition>(cond)); - else - testlImm(mask.m_value, dest); - if (cond != NonZero) { - m_assembler.movt(dest); - return; - } - - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4); - m_assembler.movImm8(0, dest); - m_assembler.branch(BT_OPCODE, 0); - m_assembler.movImm8(1, dest); - } - - void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) - { - ASSERT((cond == Zero) || (cond == NonZero)); - - load32(address, dest); - if (mask.m_value == -1) - compare32(0, dest, static_cast<RelationalCondition>(cond)); - else - testlImm(mask.m_value, dest); - if (cond != NonZero) { - m_assembler.movt(dest); - return; - } - - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 4); - m_assembler.movImm8(0, dest); - m_assembler.branch(BT_OPCODE, 0); - m_assembler.movImm8(1, dest); - } - - void loadPtrLinkReg(ImplicitAddress address) - { - RegisterID scr = claimScratch(); - load32(address, scr); - m_assembler.ldspr(scr); - releaseScratch(scr); - } - - Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right) - { - m_assembler.cmplRegReg(right, left, SH4Condition(cond)); - /* BT label => BF off - nop LDR reg - nop braf @reg - nop nop - */ - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) - { - if (((cond == Equal) || (cond == NotEqual)) 
&& !right.m_value) - m_assembler.testlRegReg(left, left); - else - compare32(right.m_value, left, cond); - - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branch32(RelationalCondition cond, RegisterID left, Address right) - { - compare32(right.offset, right.base, left, cond); - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branch32(RelationalCondition cond, Address left, RegisterID right) - { - compare32(right, left.offset, left.base, cond); - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right) - { - compare32(right.m_value, left.offset, left.base, cond); - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) - { - RegisterID scr = claimScratch(); - - load32(left.m_ptr, scr); - m_assembler.cmplRegReg(right, scr, SH4Condition(cond)); - releaseScratch(scr); - - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) - { - RegisterID addressTempRegister = claimScratch(); - - move(TrustedImmPtr(left.m_ptr), addressTempRegister); - m_assembler.movlMemReg(addressTempRegister, addressTempRegister); - compare32(right.m_value, addressTempRegister, cond); - releaseScratch(addressTempRegister); - - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) - { - ASSERT(!(right.m_value & 0xFFFFFF00)); - RegisterID lefttmp = claimScratch(); - - loadEffectiveAddress(left, lefttmp); - - load8(lefttmp, lefttmp); - RegisterID righttmp = claimScratch(); - m_assembler.loadConstant(right.m_value, righttmp); - - Jump result = branch32(cond, lefttmp, righttmp); - releaseScratch(lefttmp); - releaseScratch(righttmp); - return result; - } - - Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask) - { - ASSERT((cond == Zero) || (cond == NonZero)); - - m_assembler.testlRegReg(reg, mask); - - if (cond == NonZero) // NotEqual - return branchFalse(); - return branchTrue(); - } - - Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) - { - ASSERT((cond == Zero) || (cond == NonZero)); - - if (mask.m_value == -1) - m_assembler.testlRegReg(reg, reg); - else - testlImm(mask.m_value, reg); - - if (cond == NonZero) // NotEqual - return branchFalse(); - return branchTrue(); - } - - Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) - { - ASSERT((cond == Zero) || (cond == NonZero)); - - if (mask.m_value == -1) - compare32(0, address.offset, address.base, static_cast<RelationalCondition>(cond)); - else - testImm(mask.m_value, address.offset, address.base); - - if (cond == NonZero) // NotEqual - return branchFalse(); - return branchTrue(); - } - - Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) - { - ASSERT((cond == Zero) || (cond == NonZero)); - - RegisterID scr = claimScratch(); - - move(address.index, scr); - lshift32(TrustedImm32(address.scale), scr); - add32(address.base, scr); - load32(scr, address.offset, scr); - - if (mask.m_value == -1) - m_assembler.testlRegReg(scr, scr); - else - testlImm(mask.m_value, scr); - - releaseScratch(scr); - - if (cond == NonZero) // NotEqual - return branchFalse(); - return 
branchTrue(); - } - - Jump jump() - { - return Jump(m_assembler.jmp()); - } - - void jump(RegisterID target) - { - m_assembler.jmpReg(target); - } - - void jump(Address address) - { - RegisterID scr = claimScratch(); - load32(address, scr); - m_assembler.jmpReg(scr); - releaseScratch(scr); - } - - void jump(AbsoluteAddress address) - { - RegisterID scr = claimScratch(); - - move(TrustedImmPtr(address.m_ptr), scr); - m_assembler.movlMemReg(scr, scr); - m_assembler.jmpReg(scr); - releaseScratch(scr); - } - - // Arithmetic control flow operations - - Jump branchNeg32(ResultCondition cond, RegisterID srcDest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - if (cond == Overflow) - return branchMul32(cond, TrustedImm32(-1), srcDest, srcDest); - - neg32(srcDest); - - if (cond == Signed) { - m_assembler.cmppz(srcDest); - return branchFalse(); - } - - compare32(0, srcDest, Equal); - return (cond == NonZero) ? branchFalse() : branchTrue(); - } - - Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - - if (cond == Overflow) { - m_assembler.addvlRegReg(src, dest); - return branchTrue(); - } - - m_assembler.addlRegReg(src, dest); - - if ((cond == Signed) || (cond == PositiveOrZero)) { - m_assembler.cmppz(dest); - return (cond == Signed) ? branchFalse() : branchTrue(); - } - - compare32(0, dest, Equal); - return (cond == NonZero) ? branchFalse() : branchTrue(); - } - - Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - - if (cond == Overflow) { - if (src1 == dest) - m_assembler.addvlRegReg(src2, dest); - else { - move(src2, dest); - m_assembler.addvlRegReg(src1, dest); - } - return branchTrue(); - } - - add32(src1, src2, dest); - - if ((cond == Signed) || (cond == PositiveOrZero)) { - m_assembler.cmppz(dest); - return (cond == Signed) ? branchFalse() : branchTrue(); - } - - compare32(0, dest, Equal); - return (cond == NonZero) ? branchFalse() : branchTrue(); - } - - Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - - RegisterID immval = claimScratch(); - move(imm, immval); - Jump result = branchAdd32(cond, immval, dest); - releaseScratch(immval); - return result; - } - - Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - - move(src, dest); - - if (cond == Overflow) { - move(imm, scratchReg3); - m_assembler.addvlRegReg(scratchReg3, dest); - return branchTrue(); - } - - add32(imm, dest); - - if ((cond == Signed) || (cond == PositiveOrZero)) { - m_assembler.cmppz(dest); - return (cond == Signed) ? branchFalse() : branchTrue(); - } - - compare32(0, dest, Equal); - return (cond == NonZero) ? 
branchFalse() : branchTrue(); - } - - Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero)); - bool result; - - move(imm, scratchReg3); - RegisterID destptr = claimScratch(); - RegisterID destval = claimScratch(); - move(TrustedImmPtr(dest.m_ptr), destptr); - m_assembler.movlMemReg(destptr, destval); - if (cond == Overflow) { - m_assembler.addvlRegReg(scratchReg3, destval); - result = true; - } else { - m_assembler.addlRegReg(scratchReg3, destval); - if ((cond == Signed) || (cond == PositiveOrZero)) { - m_assembler.cmppz(destval); - result = (cond == PositiveOrZero); - } else { - m_assembler.testlRegReg(destval, destval); - result = (cond != NonZero); - } - } - m_assembler.movlRegMem(destval, destptr); - releaseScratch(destval); - releaseScratch(destptr); - return result ? branchTrue() : branchFalse(); - } - - Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - if (cond == Overflow) { - RegisterID scrsign = claimScratch(); - RegisterID msbres = claimScratch(); - m_assembler.dmulslRegReg(src, dest); - m_assembler.stsmacl(dest); - m_assembler.cmppz(dest); - m_assembler.movt(scrsign); - m_assembler.addlImm8r(-1, scrsign); - m_assembler.stsmach(msbres); - m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal)); - releaseScratch(msbres); - releaseScratch(scrsign); - return branchFalse(); - } - - mul32(src, dest); - - if (cond == Signed) { - m_assembler.cmppz(dest); - return branchFalse(); - } - - compare32(0, dest, static_cast<RelationalCondition>(cond)); - return (cond == NonZero) ? branchFalse() : branchTrue(); - } - - Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - if (cond == Overflow) { - RegisterID scrsign = claimScratch(); - RegisterID msbres = claimScratch(); - m_assembler.dmulslRegReg(src1, src2); - m_assembler.stsmacl(dest); - m_assembler.cmppz(dest); - m_assembler.movt(scrsign); - m_assembler.addlImm8r(-1, scrsign); - m_assembler.stsmach(msbres); - m_assembler.cmplRegReg(msbres, scrsign, SH4Condition(Equal)); - releaseScratch(msbres); - releaseScratch(scrsign); - return branchFalse(); - } - - mul32(src1, src2, dest); - - if (cond == Signed) { - m_assembler.cmppz(dest); - return branchFalse(); - } - - compare32(0, dest, Equal); - return (cond == NonZero) ? branchFalse() : branchTrue(); - } - - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - if (src == dest) { - move(imm, scratchReg3); - return branchMul32(cond, scratchReg3, dest); - } - - move(imm, dest); - return branchMul32(cond, src, dest); - } - - Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - if (cond == Overflow) { - m_assembler.subvlRegReg(src, dest); - return branchTrue(); - } - - sub32(src, dest); - - if (cond == Signed) { - m_assembler.cmppz(dest); - return branchFalse(); - } - - compare32(0, dest, static_cast<RelationalCondition>(cond)); - return (cond == NonZero) ? 
branchFalse() : branchTrue(); - } - - Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - RegisterID immval = claimScratch(); - move(imm, immval); - Jump result = branchSub32(cond, immval, dest); - releaseScratch(immval); - return result; - } - - Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - move(src, dest); - return branchSub32(cond, imm, dest); - } - - Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) - { - ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); - - if (src2 != dest) { - move(src1, dest); - return branchSub32(cond, src2, dest); - } - - if (cond == Overflow) { - RegisterID tmpval = claimScratch(); - move(src1, tmpval); - m_assembler.subvlRegReg(src2, tmpval); - move(tmpval, dest); - releaseScratch(tmpval); - return branchTrue(); - } - - RegisterID tmpval = claimScratch(); - move(src1, tmpval); - sub32(src2, tmpval); - move(tmpval, dest); - releaseScratch(tmpval); - - if (cond == Signed) { - m_assembler.cmppz(dest); - return branchFalse(); - } - - compare32(0, dest, static_cast<RelationalCondition>(cond)); - return (cond == NonZero) ? branchFalse() : branchTrue(); - } - - Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest) - { - ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero)); - - or32(src, dest); - - if (cond == Signed) { - m_assembler.cmppz(dest); - return branchFalse(); - } - - compare32(0, dest, static_cast<RelationalCondition>(cond)); - return (cond == NonZero) ? branchFalse() : branchTrue(); - } - - void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true) - { - truncateDoubleToInt32(src, dest); - convertInt32ToDouble(dest, fscratch); - failureCases.append(branchDouble(DoubleNotEqualOrUnordered, fscratch, src)); - - if (negZeroCheck) - failureCases.append(branch32(Equal, dest, TrustedImm32(0))); - } - - void neg32(RegisterID dst) - { - m_assembler.neg(dst, dst); - } - - void urshift32(RegisterID shiftamount, RegisterID dest) - { - RegisterID shiftTmp = claimScratch(); - m_assembler.loadConstant(0x1f, shiftTmp); - m_assembler.andlRegReg(shiftamount, shiftTmp); - m_assembler.neg(shiftTmp, shiftTmp); - m_assembler.shldRegReg(dest, shiftTmp); - releaseScratch(shiftTmp); - } - - void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) - { - move(src, dest); - urshift32(shiftAmount, dest); - } - - void urshift32(TrustedImm32 imm, RegisterID dest) - { - int immMasked = imm.m_value & 0x1f; - if (!immMasked) - return; - - if ((immMasked == 1) || (immMasked == 2) || (immMasked == 8) || (immMasked == 16)) { - m_assembler.shlrImm8r(immMasked, dest); - return; - } - - RegisterID shiftTmp = claimScratch(); - m_assembler.loadConstant(-immMasked, shiftTmp); - m_assembler.shldRegReg(dest, shiftTmp); - releaseScratch(shiftTmp); - } - - void urshift32(RegisterID src, TrustedImm32 shiftamount, RegisterID dest) - { - move(src, dest); - urshift32(shiftamount, dest); - } - - Call call() - { - return Call(m_assembler.call(), Call::Linkable); - } - - Call nearCall() - { - return Call(m_assembler.call(), Call::LinkableNear); - } - - Call call(RegisterID target) - { - return Call(m_assembler.call(target), Call::None); - } - - void call(Address 
address) - { - RegisterID target = claimScratch(); - load32(address.base, address.offset, target); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2); - m_assembler.branch(JSR_OPCODE, target); - m_assembler.nop(); - releaseScratch(target); - } - - void breakpoint() - { - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 2); - m_assembler.bkpt(); - m_assembler.nop(); - } - - Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) - { - RegisterID dataTempRegister = claimScratch(); - - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t)); - dataLabel = moveWithPatch(initialRightValue, dataTempRegister); - m_assembler.cmplRegReg(dataTempRegister, left, SH4Condition(cond)); - releaseScratch(dataTempRegister); - - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) - { - RegisterID scr = claimScratch(); - - m_assembler.loadConstant(left.offset, scr); - m_assembler.addlRegReg(left.base, scr); - m_assembler.movlMemReg(scr, scr); - RegisterID scr1 = claimScratch(); - m_assembler.ensureSpace(m_assembler.maxInstructionSize + 10, 2 * sizeof(uint32_t)); - dataLabel = moveWithPatch(initialRightValue, scr1); - m_assembler.cmplRegReg(scr1, scr, SH4Condition(cond)); - releaseScratch(scr); - releaseScratch(scr1); - - if (cond == NotEqual) - return branchFalse(); - return branchTrue(); - } - - void ret() - { - m_assembler.ret(); - m_assembler.nop(); - } - - DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) - { - RegisterID scr = claimScratch(); - DataLabelPtr label = moveWithPatch(initialValue, scr); - store32(scr, address); - releaseScratch(scr); - return label; - } - - DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); } - - int sizeOfConstantPool() - { - return m_assembler.sizeOfConstantPool(); - } - - Call tailRecursiveCall() - { - RegisterID scr = claimScratch(); - - m_assembler.loadConstantUnReusable(0x0, scr, true); - Jump m_jump = Jump(m_assembler.jmp(scr)); - releaseScratch(scr); - - return Call::fromTailJump(m_jump); - } - - Call makeTailRecursiveCall(Jump oldJump) - { - oldJump.link(this); - return tailRecursiveCall(); - } - - void nop() - { - m_assembler.nop(); - } - - void memoryFence() - { - m_assembler.synco(); - } - - static FunctionPtr readCallTarget(CodeLocationCall call) - { - return FunctionPtr(reinterpret_cast<void(*)()>(SH4Assembler::readCallTarget(call.dataLocation()))); - } - - static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) - { - SH4Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); - } - - static ptrdiff_t maxJumpReplacementSize() - { - return SH4Assembler::maxJumpReplacementSize(); - } - - static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } - - static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) - { - return label.labelAtOffset(0); - } - - static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue) - { - SH4Assembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart.dataLocation(), rd, reinterpret_cast<int>(initialValue)); - } - - static CodeLocationLabel 
startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr) - { - UNREACHABLE_FOR_PLATFORM(); - return CodeLocationLabel(); - } - - static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*) - { - UNREACHABLE_FOR_PLATFORM(); - } - -protected: - SH4Assembler::Condition SH4Condition(RelationalCondition cond) - { - return static_cast<SH4Assembler::Condition>(cond); - } - - SH4Assembler::Condition SH4Condition(ResultCondition cond) - { - return static_cast<SH4Assembler::Condition>(cond); - } -private: - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkCall(void* code, Call call, FunctionPtr function) - { - SH4Assembler::linkCall(code, call.m_label, function.value()); - } - - static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) - { - SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); - } - - static void repatchCall(CodeLocationCall call, FunctionPtr destination) - { - SH4Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); - } -}; - -} // namespace JSC - -#endif // ENABLE(ASSEMBLER) - -#endif // MacroAssemblerSH4_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h index 547158fa7..75f35456d 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,21 +23,19 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef MacroAssemblerX86_h -#define MacroAssemblerX86_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(X86) #include "MacroAssemblerX86Common.h" -#if USE(MASM_PROBE) -#include <wtf/StdLibExtras.h> -#endif - namespace JSC { class MacroAssemblerX86 : public MacroAssemblerX86Common { public: + static const unsigned numGPRs = 8; + static const unsigned numFPRs = 8; + static const Scale ScalePtr = TimesFour; using MacroAssemblerX86Common::add32; @@ -111,6 +109,18 @@ public: m_assembler.movzbl_mr(address, dest); } + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), X86Registers::eax); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm32(misc), X86Registers::edx); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result = ConvertibleLoadLabel(this); @@ -123,11 +133,11 @@ public: m_assembler.addsd_mr(address.m_ptr, dest); } - void storeDouble(FPRegisterID src, const void* address) + void storeDouble(FPRegisterID src, TrustedImmPtr address) { ASSERT(isSSE2Present()); - ASSERT(address); - m_assembler.movsd_rm(src, address); + ASSERT(address.m_value); + m_assembler.movsd_rm(src, address.m_value); } void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) @@ -152,22 +162,24 @@ public: void store8(TrustedImm32 imm, void* address) { - ASSERT(-128 <= imm.m_value && imm.m_value < 128); - m_assembler.movb_i8m(imm.m_value, address); + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + m_assembler.movb_i8m(imm8.m_value, address); } - // Possibly clobbers src. 
void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2) { - movePackedToInt32(src, dest1); - rshiftPacked(TrustedImm32(32), src); - movePackedToInt32(src, dest2); + ASSERT(isSSE2Present()); + m_assembler.pextrw_irr(3, src, dest1); + m_assembler.pextrw_irr(2, src, dest2); + lshift32(TrustedImm32(16), dest1); + or32(dest1, dest2); + moveFloatTo32(src, dest1); } void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch) { - moveInt32ToPacked(src1, dest); - moveInt32ToPacked(src2, scratch); + move32ToFloat(src1, dest); + move32ToFloat(src2, scratch); lshiftPacked(TrustedImm32(32), scratch); orPacked(scratch, dest); } @@ -227,17 +239,18 @@ public: Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) { - m_assembler.cmpb_im(right.m_value, left.m_ptr); + TrustedImm32 right8(static_cast<int8_t>(right.m_value)); + m_assembler.cmpb_im(right8.m_value, left.m_ptr); return Jump(m_assembler.jCC(x86Condition(cond))); } Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { - ASSERT(mask.m_value >= -128 && mask.m_value <= 255); - if (mask.m_value == -1) + TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); + if (mask8.m_value == -1) m_assembler.cmpb_im(0, address.m_ptr); else - m_assembler.testb_im(mask.m_value, address.m_ptr); + m_assembler.testb_im(mask8.m_value, address.m_ptr); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -257,6 +270,14 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + padBeforePatch(); + m_assembler.cmpl_im_force32(initialRightValue.m_value, left.offset, left.base); + dataLabel = DataLabel32(this); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { padBeforePatch(); @@ -265,7 +286,6 @@ public: } static bool supportsFloatingPoint() { return isSSE2Present(); } - // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate() static bool supportsFloatingPointTruncate() { return isSSE2Present(); } static bool supportsFloatingPointSqrt() { return isSSE2Present(); } static bool supportsFloatingPointAbs() { return isSSE2Present(); } @@ -277,6 +297,7 @@ public: } static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; } + static bool canJumpReplacePatchableBranch32WithPatch() { return true; } static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -299,6 +320,17 @@ public: return label.labelAtOffset(-totalBytes); } + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label) + { + const int opcodeBytes = 1; + const int modRMBytes = 1; + const int offsetBytes = 0; + const int immediateBytes = 4; + const int totalBytes = opcodeBytes + modRMBytes + offsetBytes + immediateBytes; + ASSERT(totalBytes >= maxJumpReplacementSize()); + return label.labelAtOffset(-totalBytes); + } + static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue) { X86Assembler::revertJumpTo_cmpl_ir_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), reg); @@ -310,18 +342,10 @@ public: X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), 0, address.base); } -#if 
USE(MASM_PROBE) - // For details about probe(), see comment in MacroAssemblerX86_64.h. - void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0); -#endif // USE(MASM_PROBE) - -private: - friend class LinkBuffer; - friend class RepatchBuffer; - - static void linkCall(void* code, Call call, FunctionPtr function) + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address address, int32_t initialValue) { - X86Assembler::linkCall(code, call.m_label, function.value()); + ASSERT(!address.offset); + X86Assembler::revertJumpTo_cmpl_im_force32(instructionStart.executableAddress(), initialValue, 0, address.base); } static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) @@ -334,47 +358,18 @@ private: X86Assembler::relinkCall(call.dataLocation(), destination.executableAddress()); } -#if USE(MASM_PROBE) - inline TrustedImm32 trustedImm32FromPtr(void* ptr) - { - return TrustedImm32(TrustedImmPtr(ptr)); - } - - inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function) - { - return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function))); - } +private: + friend class LinkBuffer; - inline TrustedImm32 trustedImm32FromPtr(void (*function)()) + static void linkCall(void* code, Call call, FunctionPtr function) { - return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function))); + if (call.isFlagSet(Call::Tail)) + X86Assembler::linkJump(code, call.m_label, function.value()); + else + X86Assembler::linkCall(code, call.m_label, function.value()); } -#endif }; -#if USE(MASM_PROBE) - -extern "C" void ctiMasmProbeTrampoline(); - -// For details on "What code is emitted for the probe?" and "What values are in -// the saved registers?", see comment for MacroAssemblerX86::probe() in -// MacroAssemblerX86_64.h. - -inline void MacroAssemblerX86::probe(MacroAssemblerX86::ProbeFunction function, void* arg1, void* arg2) -{ - push(RegisterID::esp); - push(RegisterID::eax); - push(trustedImm32FromPtr(arg2)); - push(trustedImm32FromPtr(arg1)); - push(trustedImm32FromPtr(function)); - - move(trustedImm32FromPtr(ctiMasmProbeTrampoline), RegisterID::eax); - call(RegisterID::eax); -} -#endif // USE(MASM_PROBE) - } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // MacroAssemblerX86_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp index 0fab05fb5..528c60fa5 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 Apple Inc. All rights reserved. + * Copyright (C) 2013-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,55 +28,534 @@ #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) #include "MacroAssemblerX86Common.h" +#include <wtf/InlineASM.h> + namespace JSC { -#if USE(MASM_PROBE) +#if ENABLE(MASM_PROBE) + +extern "C" void ctiMasmProbeTrampoline(); + +#if COMPILER(GCC_OR_CLANG) + +// The following are offsets for MacroAssemblerX86Common::ProbeContext fields accessed +// by the ctiMasmProbeTrampoline stub. 
-void MacroAssemblerX86Common::ProbeContext::dumpCPURegisters(const char* indentation) -{ #if CPU(X86) - #define DUMP_GPREGISTER(_type, _regName) { \ - int32_t value = reinterpret_cast<int32_t>(cpu._regName); \ - dataLogF("%s %6s: 0x%08x %d\n", indentation, #_regName, value, value) ; \ - } -#elif CPU(X86_64) - #define DUMP_GPREGISTER(_type, _regName) { \ - int64_t value = reinterpret_cast<int64_t>(cpu._regName); \ - dataLogF("%s %6s: 0x%016llx %lld\n", indentation, #_regName, value, value) ; \ - } +#define PTR_SIZE 4 +#else // CPU(X86_64) +#define PTR_SIZE 8 #endif - FOR_EACH_CPU_GPREGISTER(DUMP_GPREGISTER) - FOR_EACH_CPU_SPECIAL_REGISTER(DUMP_GPREGISTER) - #undef DUMP_GPREGISTER - - #define DUMP_FPREGISTER(_type, _regName) { \ - uint32_t* u = reinterpret_cast<uint32_t*>(&cpu._regName); \ - double* d = reinterpret_cast<double*>(&cpu._regName); \ - dataLogF("%s %6s: 0x%08x%08x 0x%08x%08x %12g %12g\n", \ - indentation, #_regName, u[3], u[2], u[1], u[0], d[1], d[0]); \ - } - FOR_EACH_CPU_FPREGISTER(DUMP_FPREGISTER) - #undef DUMP_FPREGISTER -} -void MacroAssemblerX86Common::ProbeContext::dump(const char* indentation) -{ - if (!indentation) - indentation = ""; +#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE) +#define PROBE_ARG1_OFFSET (1 * PTR_SIZE) +#define PROBE_ARG2_OFFSET (2 * PTR_SIZE) + +#define PROBE_FIRST_GPR_OFFSET (3 * PTR_SIZE) +#define PROBE_CPU_EAX_OFFSET (PROBE_FIRST_GPR_OFFSET + (0 * PTR_SIZE)) +#define PROBE_CPU_ECX_OFFSET (PROBE_FIRST_GPR_OFFSET + (1 * PTR_SIZE)) +#define PROBE_CPU_EDX_OFFSET (PROBE_FIRST_GPR_OFFSET + (2 * PTR_SIZE)) +#define PROBE_CPU_EBX_OFFSET (PROBE_FIRST_GPR_OFFSET + (3 * PTR_SIZE)) +#define PROBE_CPU_ESP_OFFSET (PROBE_FIRST_GPR_OFFSET + (4 * PTR_SIZE)) +#define PROBE_CPU_EBP_OFFSET (PROBE_FIRST_GPR_OFFSET + (5 * PTR_SIZE)) +#define PROBE_CPU_ESI_OFFSET (PROBE_FIRST_GPR_OFFSET + (6 * PTR_SIZE)) +#define PROBE_CPU_EDI_OFFSET (PROBE_FIRST_GPR_OFFSET + (7 * PTR_SIZE)) + +#if CPU(X86) +#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE)) +#else // CPU(X86_64) +#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE)) +#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPR_OFFSET + (9 * PTR_SIZE)) +#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPR_OFFSET + (10 * PTR_SIZE)) +#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPR_OFFSET + (11 * PTR_SIZE)) +#define PROBE_CPU_R12_OFFSET (PROBE_FIRST_GPR_OFFSET + (12 * PTR_SIZE)) +#define PROBE_CPU_R13_OFFSET (PROBE_FIRST_GPR_OFFSET + (13 * PTR_SIZE)) +#define PROBE_CPU_R14_OFFSET (PROBE_FIRST_GPR_OFFSET + (14 * PTR_SIZE)) +#define PROBE_CPU_R15_OFFSET (PROBE_FIRST_GPR_OFFSET + (15 * PTR_SIZE)) +#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (16 * PTR_SIZE)) +#endif // CPU(X86_64) + +#define PROBE_CPU_EIP_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (0 * PTR_SIZE)) +#define PROBE_CPU_EFLAGS_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (1 * PTR_SIZE)) +#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE)) + +#define XMM_SIZE 8 +#define PROBE_CPU_XMM0_OFFSET (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE)) +#define PROBE_CPU_XMM1_OFFSET (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE)) +#define PROBE_CPU_XMM2_OFFSET (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE)) +#define PROBE_CPU_XMM3_OFFSET (PROBE_FIRST_XMM_OFFSET + (3 * XMM_SIZE)) +#define PROBE_CPU_XMM4_OFFSET (PROBE_FIRST_XMM_OFFSET + (4 * XMM_SIZE)) +#define PROBE_CPU_XMM5_OFFSET (PROBE_FIRST_XMM_OFFSET + (5 * XMM_SIZE)) +#define PROBE_CPU_XMM6_OFFSET (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE)) +#define PROBE_CPU_XMM7_OFFSET 
(PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE)) + +#if CPU(X86) +#define PROBE_SIZE (PROBE_CPU_XMM7_OFFSET + XMM_SIZE) +#else // CPU(X86_64) +#define PROBE_CPU_XMM8_OFFSET (PROBE_FIRST_XMM_OFFSET + (8 * XMM_SIZE)) +#define PROBE_CPU_XMM9_OFFSET (PROBE_FIRST_XMM_OFFSET + (9 * XMM_SIZE)) +#define PROBE_CPU_XMM10_OFFSET (PROBE_FIRST_XMM_OFFSET + (10 * XMM_SIZE)) +#define PROBE_CPU_XMM11_OFFSET (PROBE_FIRST_XMM_OFFSET + (11 * XMM_SIZE)) +#define PROBE_CPU_XMM12_OFFSET (PROBE_FIRST_XMM_OFFSET + (12 * XMM_SIZE)) +#define PROBE_CPU_XMM13_OFFSET (PROBE_FIRST_XMM_OFFSET + (13 * XMM_SIZE)) +#define PROBE_CPU_XMM14_OFFSET (PROBE_FIRST_XMM_OFFSET + (14 * XMM_SIZE)) +#define PROBE_CPU_XMM15_OFFSET (PROBE_FIRST_XMM_OFFSET + (15 * XMM_SIZE)) +#define PROBE_SIZE (PROBE_CPU_XMM15_OFFSET + XMM_SIZE) +#endif // CPU(X86_64) + +// These ASSERTs remind you that if you change the layout of ProbeContext, +// you need to change ctiMasmProbeTrampoline offsets above to match. +#define PROBE_OFFSETOF(x) offsetof(struct MacroAssemblerX86Common::ProbeContext, x) +COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline); + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eax) == PROBE_CPU_EAX_OFFSET, ProbeContext_cpu_eax_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ecx) == PROBE_CPU_ECX_OFFSET, ProbeContext_cpu_ecx_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edx) == PROBE_CPU_EDX_OFFSET, ProbeContext_cpu_edx_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebx) == PROBE_CPU_EBX_OFFSET, ProbeContext_cpu_ebx_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esp) == PROBE_CPU_ESP_OFFSET, ProbeContext_cpu_esp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebp) == PROBE_CPU_EBP_OFFSET, ProbeContext_cpu_ebp_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esi) == PROBE_CPU_ESI_OFFSET, ProbeContext_cpu_esi_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edi) == PROBE_CPU_EDI_OFFSET, ProbeContext_cpu_edi_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eip) == PROBE_CPU_EIP_OFFSET, ProbeContext_cpu_eip_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eflags) == PROBE_CPU_EFLAGS_OFFSET, ProbeContext_cpu_eflags_offset_matches_ctiMasmProbeTrampoline); + +#if CPU(X86_64) +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r12) == PROBE_CPU_R12_OFFSET, ProbeContext_cpu_r12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r13) == PROBE_CPU_R13_OFFSET, ProbeContext_cpu_r13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r14) == PROBE_CPU_R14_OFFSET, 
ProbeContext_cpu_r14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r15) == PROBE_CPU_R15_OFFSET, ProbeContext_cpu_r15_offset_matches_ctiMasmProbeTrampoline); +#endif // CPU(X86_64) + +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm0) == PROBE_CPU_XMM0_OFFSET, ProbeContext_cpu_xmm0_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm1) == PROBE_CPU_XMM1_OFFSET, ProbeContext_cpu_xmm1_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm2) == PROBE_CPU_XMM2_OFFSET, ProbeContext_cpu_xmm2_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm3) == PROBE_CPU_XMM3_OFFSET, ProbeContext_cpu_xmm3_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm4) == PROBE_CPU_XMM4_OFFSET, ProbeContext_cpu_xmm4_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm5) == PROBE_CPU_XMM5_OFFSET, ProbeContext_cpu_xmm5_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm6) == PROBE_CPU_XMM6_OFFSET, ProbeContext_cpu_xmm6_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm7) == PROBE_CPU_XMM7_OFFSET, ProbeContext_cpu_xmm7_offset_matches_ctiMasmProbeTrampoline); + +#if CPU(X86_64) +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm8) == PROBE_CPU_XMM8_OFFSET, ProbeContext_cpu_xmm8_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm9) == PROBE_CPU_XMM9_OFFSET, ProbeContext_cpu_xmm9_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm10) == PROBE_CPU_XMM10_OFFSET, ProbeContext_cpu_xmm10_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm11) == PROBE_CPU_XMM11_OFFSET, ProbeContext_cpu_xmm11_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm12) == PROBE_CPU_XMM12_OFFSET, ProbeContext_cpu_xmm12_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm13) == PROBE_CPU_XMM13_OFFSET, ProbeContext_cpu_xmm13_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm14) == PROBE_CPU_XMM14_OFFSET, ProbeContext_cpu_xmm14_offset_matches_ctiMasmProbeTrampoline); +COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm15) == PROBE_CPU_XMM15_OFFSET, ProbeContext_cpu_xmm15_offset_matches_ctiMasmProbeTrampoline); +#endif // CPU(X86_64) + +COMPILE_ASSERT(sizeof(MacroAssemblerX86Common::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); + +#undef PROBE_OFFSETOF + +#if CPU(X86) +asm ( + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + "pushfd" "\n" + + // MacroAssemblerX86Common::probe() has already generated code to store some values. + // Together with the eflags pushed above, the top of stack now looks like + // this: + // esp[0 * ptrSize]: eflags + // esp[1 * ptrSize]: return address / saved eip + // esp[2 * ptrSize]: probeFunction + // esp[3 * ptrSize]: arg1 + // esp[4 * ptrSize]: arg2 + // esp[5 * ptrSize]: saved eax + // esp[6 * ptrSize]: saved esp + + "movl %esp, %eax" "\n" + "subl $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %esp" "\n" + + // The X86_64 ABI specifies that the worse case stack alignment requirement + // is 32 bytes. + "andl $~0x1f, %esp" "\n" + + "movl %ebp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%esp)" "\n" + "movl %esp, %ebp" "\n" // Save the ProbeContext*. 
+ + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp)" "\n" + "movl %edx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp)" "\n" + "movl %ebx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp)" "\n" + "movl %esi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp)" "\n" + "movl %edi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp)" "\n" + + "movl 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp)" "\n" + "movl 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp)" "\n" + "movl 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n" + "movl 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%ebp)" "\n" + "movl 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%ebp)" "\n" + "movl 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp)" "\n" + "movl 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n" + + "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n" + "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n" + "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n" + "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n" + "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n" + "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n" + "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n" + "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n" + + // Reserve stack space for the arg while maintaining the required stack + // pointer 32 byte alignment: + "subl $0x20, %esp" "\n" + "movl %ebp, 0(%esp)" "\n" // the ProbeContext* arg. + + "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. 
+ + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp), %edx" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp), %ebx" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp), %esi" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp), %edi" "\n" - dataLogF("%sProbeContext %p {\n", indentation, this); - dataLogF("%s probeFunction: %p\n", indentation, probeFunction); - dataLogF("%s arg1: %p %llu\n", indentation, arg1, reinterpret_cast<int64_t>(arg1)); - dataLogF("%s arg2: %p %llu\n", indentation, arg2, reinterpret_cast<int64_t>(arg2)); - dataLogF("%s cpu: {\n", indentation); + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n" - dumpCPURegisters(indentation); + // There are 6 more registers left to restore: + // eax, ecx, ebp, esp, eip, and eflags. + // We need to handle these last few restores carefully because: + // + // 1. We need to push the return address on the stack for ret to use. + // That means we need to write to the stack. + // 2. The user probe function may have altered the restore value of esp to + // point to the vicinity of one of the restore values for the remaining + // registers left to be restored. + // That means, for requirement 1, we may end up writing over some of the + // restore values. We can check for this, and first copy the restore + // values to a "safe area" on the stack before commencing with the action + // for requirement 1. + // 3. For requirement 2, we need to ensure that the "safe area" is + // protected from interrupt handlers overwriting it. Hence, the esp needs + // to be adjusted to include the "safe area" before we start copying the + // the restore values. - dataLogF("%s }\n", indentation); - dataLogF("%s}\n", indentation); + "movl %ebp, %eax" "\n" + "addl $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %eax" "\n" + "cmpl %eax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n" + "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new + // rsp will be. This time we don't have to 32-byte align it because we're + // not using to store any xmm regs. 
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n" + "subl $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %eax" "\n" + "movl %eax, %esp" "\n" + + "subl $" STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) ", %eax" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%eax)" "\n" + "movl %eax, %ebp" "\n" + + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n" + "subl $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %eax" "\n" + // At this point, %esp should be < %eax. + + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n" + "movl %ecx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n" + "movl %eax, %esp" "\n" + + "popfd" "\n" + "popl %eax" "\n" + "popl %ecx" "\n" + "popl %ebp" "\n" + "ret" "\n" +); +#endif // CPU(X86) + +#if CPU(X86_64) +asm ( + ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" + HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" + SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" + + "pushfq" "\n" + + // MacroAssemblerX86Common::probe() has already generated code to store some values. + // Together with the rflags pushed above, the top of stack now looks like + // this: + // esp[0 * ptrSize]: rflags + // esp[1 * ptrSize]: return address / saved rip + // esp[2 * ptrSize]: probeFunction + // esp[3 * ptrSize]: arg1 + // esp[4 * ptrSize]: arg2 + // esp[5 * ptrSize]: saved rax + // esp[6 * ptrSize]: saved rsp + + "movq %rsp, %rax" "\n" + "subq $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rsp" "\n" + + // The X86_64 ABI specifies that the worse case stack alignment requirement + // is 32 bytes. + "andq $~0x1f, %rsp" "\n" + + "movq %rbp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rsp)" "\n" + "movq %rsp, %rbp" "\n" // Save the ProbeContext*. 
+ + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp)" "\n" + "movq %rdx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp)" "\n" + "movq %rbx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp)" "\n" + "movq %rsi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp)" "\n" + "movq %rdi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp)" "\n" + + "movq 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp)" "\n" + "movq 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp)" "\n" + "movq 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n" + "movq 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%rbp)" "\n" + "movq 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%rbp)" "\n" + "movq 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp)" "\n" + "movq 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n" + + "movq %r8, " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp)" "\n" + "movq %r9, " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp)" "\n" + "movq %r10, " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp)" "\n" + "movq %r11, " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp)" "\n" + "movq %r12, " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp)" "\n" + "movq %r13, " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp)" "\n" + "movq %r14, " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp)" "\n" + "movq %r15, " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp)" "\n" + + "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n" + "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n" + "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n" + "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n" + "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n" + "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n" + "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n" + "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n" + "movq %xmm8, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp)" "\n" + "movq %xmm9, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp)" "\n" + "movq %xmm10, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp)" "\n" + "movq %xmm11, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp)" "\n" + "movq %xmm12, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp)" "\n" + "movq %xmm13, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp)" "\n" + "movq %xmm14, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp)" "\n" + "movq %xmm15, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp)" "\n" + + "movq %rbp, %rdi" "\n" // the ProbeContext* arg. + "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n" + + // To enable probes to modify register state, we copy all registers + // out of the ProbeContext before returning. 
+ + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp), %rdx" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp), %rbx" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp), %rsi" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp), %rdi" "\n" + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp), %r8" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp), %r9" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp), %r10" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp), %r11" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp), %r12" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp), %r13" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp), %r14" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp), %r15" "\n" + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp), %xmm8" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp), %xmm9" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp), %xmm10" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp), %xmm11" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp), %xmm12" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp), %xmm13" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp), %xmm14" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp), %xmm15" "\n" + + // There are 6 more registers left to restore: + // rax, rcx, rbp, rsp, rip, and rflags. + // We need to handle these last few restores carefully because: + // + // 1. We need to push the return address on the stack for ret to use + // That means we need to write to the stack. + // 2. The user probe function may have altered the restore value of esp to + // point to the vicinity of one of the restore values for the remaining + // registers left to be restored. + // That means, for requirement 1, we may end up writing over some of the + // restore values. We can check for this, and first copy the restore + // values to a "safe area" on the stack before commencing with the action + // for requirement 1. + // 3. For both requirement 2, we need to ensure that the "safe area" is + // protected from interrupt handlers overwriting it. Hence, the esp needs + // to be adjusted to include the "safe area" before we start copying the + // the restore values. + + "movq %rbp, %rax" "\n" + "addq $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %rax" "\n" + "cmpq %rax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n" + "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n" + + // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new + // rsp will be. This time we don't have to 32-byte align it because we're + // not using to store any xmm regs. 
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n" + "subq $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rax" "\n" + "movq %rax, %rsp" "\n" + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rax)" "\n" + "movq %rax, %rbp" "\n" + + SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n" + "subq $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %rax" "\n" + // At this point, %rsp should be < %rax. + + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n" + "movq %rcx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n" + "movq %rax, %rsp" "\n" + + "popfq" "\n" + "popq %rax" "\n" + "popq %rcx" "\n" + "popq %rbp" "\n" + "ret" "\n" +); +#endif // CPU(X86_64) + +#endif // COMPILER(GCC_OR_CLANG) + +// What code is emitted for the probe? +// ================================== +// We want to keep the size of the emitted probe invocation code as compact as +// possible to minimize the perturbation to the JIT generated code. However, +// we also need to preserve the CPU registers and set up the ProbeContext to be +// passed to the user probe function. +// +// Hence, we do only the minimum here to preserve a scratch register (i.e. rax +// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments. +// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation +// work i.e. saving the CPUState (and setting up the ProbeContext), calling the +// user probe function, and restoring the CPUState before returning to JIT +// generated code. +// +// What registers need to be saved? +// =============================== +// The registers are saved for 2 reasons: +// 1. To preserve their state in the JITted code. This means that all registers +// that are not callee saved needs to be saved. We also need to save the +// condition code registers because the probe can be inserted between a test +// and a branch. +// 2. To allow the probe to inspect the values of the registers for debugging +// purposes. This means all registers need to be saved. +// +// In summary, save everything. 
But for reasons stated above, we should do the +// minimum here and let ctiMasmProbeTrampoline do the heavy lifting to save the +// full set. +// +// What values are in the saved registers? +// ====================================== +// Conceptually, the saved registers should contain values as if the probe +// is not present in the JIT generated code. Hence, they should contain values +// that are expected at the start of the instruction immediately following the +// probe. +// +// Specifically, the saved stack pointer register will point to the stack +// position before we push the ProbeContext frame. The saved rip will point to +// the address of the instruction immediately following the probe. + +void MacroAssemblerX86Common::probe(MacroAssemblerX86Common::ProbeFunction function, void* arg1, void* arg2) +{ + push(RegisterID::esp); + push(RegisterID::eax); + move(TrustedImmPtr(arg2), RegisterID::eax); + push(RegisterID::eax); + move(TrustedImmPtr(arg1), RegisterID::eax); + push(RegisterID::eax); + move(TrustedImmPtr(reinterpret_cast<void*>(function)), RegisterID::eax); + push(RegisterID::eax); + move(TrustedImmPtr(reinterpret_cast<void*>(ctiMasmProbeTrampoline)), RegisterID::eax); + call(RegisterID::eax); } -#endif // USE(MASM_PROBE) +#endif // ENABLE(MASM_PROBE) + +#if CPU(X86) && !OS(MAC_OS_X) +MacroAssemblerX86Common::SSE2CheckState MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2; +#endif + +MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_sse4_1CheckState = CPUIDCheckState::NotChecked; +MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_avxCheckState = CPUIDCheckState::NotChecked; +MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_lzcntCheckState = CPUIDCheckState::NotChecked; +MacroAssemblerX86Common::CPUIDCheckState MacroAssemblerX86Common::s_bmi1CheckState = CPUIDCheckState::NotChecked; } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h index ac09eaca4..695e640f0 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86Common.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2014-2017 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,22 +23,35 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef MacroAssemblerX86Common_h -#define MacroAssemblerX86Common_h +#pragma once #if ENABLE(ASSEMBLER) #include "X86Assembler.h" #include "AbstractMacroAssembler.h" +#include <wtf/Optional.h> + +#if COMPILER(MSVC) +#include <intrin.h> +#endif namespace JSC { -class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> { +class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> { public: #if CPU(X86_64) - static const X86Registers::RegisterID scratchRegister = X86Registers::r11; -#endif + // Use this directly only if you're not generating code with it. + static const X86Registers::RegisterID s_scratchRegister = X86Registers::r11; + // Use this when generating code so that we get enforcement of the disallowing of scratch register + // usage. 
+ X86Registers::RegisterID scratchRegister() + { + RELEASE_ASSERT(m_allowScratchRegister); + return s_scratchRegister; + } +#endif + protected: static const int DoubleConditionBitInvert = 0x10; static const int DoubleConditionBitSpecial = 0x20; @@ -73,6 +86,7 @@ public: NonZero = X86Assembler::ConditionNE }; + // FIXME: it would be neat to rename this to FloatingPointCondition in every assembler. enum DoubleCondition { // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN. DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial, @@ -117,6 +131,33 @@ public: m_assembler.addl_im(imm.m_value, address.offset, address.base); } + void add32(TrustedImm32 imm, BaseIndex address) + { + m_assembler.addl_im(imm.m_value, address.offset, address.base, address.index, address.scale); + } + + void add8(TrustedImm32 imm, Address address) + { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + m_assembler.addb_im(imm8.m_value, address.offset, address.base); + } + + void add8(TrustedImm32 imm, BaseIndex address) + { + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + m_assembler.addb_im(imm8.m_value, address.offset, address.base, address.index, address.scale); + } + + void add16(TrustedImm32 imm, Address address) + { + m_assembler.addw_im(imm.m_value, address.offset, address.base); + } + + void add16(TrustedImm32 imm, BaseIndex address) + { + m_assembler.addw_im(imm.m_value, address.offset, address.base, address.index, address.scale); + } + void add32(TrustedImm32 imm, RegisterID dest) { if (imm.m_value == 1) @@ -135,11 +176,66 @@ public: m_assembler.addl_rm(src, dest.offset, dest.base); } + void add32(RegisterID src, BaseIndex dest) + { + m_assembler.addl_rm(src, dest.offset, dest.base, dest.index, dest.scale); + } + + void add8(RegisterID src, Address dest) + { + m_assembler.addb_rm(src, dest.offset, dest.base); + } + + void add8(RegisterID src, BaseIndex dest) + { + m_assembler.addb_rm(src, dest.offset, dest.base, dest.index, dest.scale); + } + + void add16(RegisterID src, Address dest) + { + m_assembler.addw_rm(src, dest.offset, dest.base); + } + + void add16(RegisterID src, BaseIndex dest) + { + m_assembler.addw_rm(src, dest.offset, dest.base, dest.index, dest.scale); + } + void add32(TrustedImm32 imm, RegisterID src, RegisterID dest) { + if (!imm.m_value) { + zeroExtend32ToPtr(src, dest); + return; + } + + if (src == dest) { + add32(imm, dest); + return; + } + m_assembler.leal_mr(imm.m_value, src, dest); } - + + void add32(RegisterID a, RegisterID b, RegisterID dest) + { + x86Lea32(BaseIndex(a, b, TimesOne), dest); + } + + void x86Lea32(BaseIndex index, RegisterID dest) + { + if (!index.scale && !index.offset) { + if (index.base == dest) { + add32(index.index, dest); + return; + } + if (index.index == dest) { + add32(index.base, dest); + return; + } + } + m_assembler.leal_mr(index.offset, index.base, index.index, index.scale, dest); + } + void and32(RegisterID src, RegisterID dest) { m_assembler.andl_rr(src, dest); @@ -172,24 +268,77 @@ public: else if (op1 == dest) and32(op2, dest); else { - move(op2, dest); + move32IfNeeded(op2, dest); and32(op1, dest); } } + void and32(Address op1, RegisterID op2, RegisterID dest) + { + if (op2 == dest) + and32(op1, dest); + else if (op1.base == dest) { + load32(op1, dest); + and32(op2, dest); + } else { + zeroExtend32ToPtr(op2, dest); + and32(op1, dest); + } + } + + void and32(RegisterID op1, Address op2, RegisterID dest) + { + and32(op2, op1, dest); + } + void and32(TrustedImm32 imm, 
RegisterID src, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); and32(imm, dest); } - void lshift32(RegisterID shift_amount, RegisterID dest) + void countLeadingZeros32(RegisterID src, RegisterID dst) { - ASSERT(shift_amount != dest); + if (supportsLZCNT()) { + m_assembler.lzcnt_rr(src, dst); + return; + } + m_assembler.bsr_rr(src, dst); + clz32AfterBsr(dst); + } + + void countLeadingZeros32(Address src, RegisterID dst) + { + if (supportsLZCNT()) { + m_assembler.lzcnt_mr(src.offset, src.base, dst); + return; + } + m_assembler.bsr_mr(src.offset, src.base, dst); + clz32AfterBsr(dst); + } + + void countTrailingZeros32(RegisterID src, RegisterID dst) + { + if (supportsBMI1()) { + m_assembler.tzcnt_rr(src, dst); + return; + } + m_assembler.bsf_rr(src, dst); + ctzAfterBsf<32>(dst); + } + // Only used for testing purposes. + void illegalInstruction() + { + m_assembler.illegalInstruction(); + } + + void lshift32(RegisterID shift_amount, RegisterID dest) + { if (shift_amount == X86Registers::ecx) m_assembler.shll_CLr(dest); else { + ASSERT(shift_amount != dest); // On x86 we can only shift by ecx; if asked to shift by another register we'll // need rejig the shift amount into ecx first, and restore the registers afterwards. // If we dest is ecx, then shift the swapped register! @@ -203,8 +352,7 @@ public: { ASSERT(shift_amount != dest); - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); lshift32(shift_amount, dest); } @@ -215,8 +363,7 @@ public: void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); lshift32(imm, dest); } @@ -225,16 +372,80 @@ public: m_assembler.imull_rr(src, dest); } + void mul32(RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src2 == dest) { + m_assembler.imull_rr(src1, dest); + return; + } + move32IfNeeded(src1, dest); + m_assembler.imull_rr(src2, dest); + } + void mul32(Address src, RegisterID dest) { m_assembler.imull_mr(src.offset, src.base, dest); } + + void mul32(Address op1, RegisterID op2, RegisterID dest) + { + if (op2 == dest) + mul32(op1, dest); + else if (op1.base == dest) { + load32(op1, dest); + mul32(op2, dest); + } else { + zeroExtend32ToPtr(op2, dest); + mul32(op1, dest); + } + } + + void mul32(RegisterID src1, Address src2, RegisterID dest) + { + mul32(src2, src1, dest); + } void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) { m_assembler.imull_i32r(src, imm.m_value, dest); } + void x86ConvertToDoubleWord32() + { + m_assembler.cdq(); + } + + void x86ConvertToDoubleWord32(RegisterID eax, RegisterID edx) + { + ASSERT_UNUSED(eax, eax == X86Registers::eax); + ASSERT_UNUSED(edx, edx == X86Registers::edx); + x86ConvertToDoubleWord32(); + } + + void x86Div32(RegisterID denominator) + { + m_assembler.idivl_r(denominator); + } + + void x86Div32(RegisterID eax, RegisterID edx, RegisterID denominator) + { + ASSERT_UNUSED(eax, eax == X86Registers::eax); + ASSERT_UNUSED(edx, edx == X86Registers::edx); + x86Div32(denominator); + } + + void x86UDiv32(RegisterID denominator) + { + m_assembler.divl_r(denominator); + } + + void x86UDiv32(RegisterID eax, RegisterID edx, RegisterID denominator) + { + ASSERT_UNUSED(eax, eax == X86Registers::eax); + ASSERT_UNUSED(edx, edx == X86Registers::edx); + x86UDiv32(denominator); + } + void neg32(RegisterID srcDest) { m_assembler.negl_r(srcDest); @@ -277,24 +488,42 @@ public: else if (op1 == dest) or32(op2, dest); else { - move(op2, dest); + move32IfNeeded(op2, dest); or32(op1, dest); } } + void 
or32(Address op1, RegisterID op2, RegisterID dest) + { + if (op2 == dest) + or32(op1, dest); + else if (op1.base == dest) { + load32(op1, dest); + or32(op2, dest); + } else { + zeroExtend32ToPtr(op2, dest); + or32(op1, dest); + } + } + + void or32(RegisterID op1, Address op2, RegisterID dest) + { + or32(op2, op1, dest); + } + void or32(TrustedImm32 imm, RegisterID src, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); or32(imm, dest); } void rshift32(RegisterID shift_amount, RegisterID dest) { - ASSERT(shift_amount != dest); - if (shift_amount == X86Registers::ecx) m_assembler.sarl_CLr(dest); else { + ASSERT(shift_amount != dest); + // On x86 we can only shift by ecx; if asked to shift by another register we'll // need rejig the shift amount into ecx first, and restore the registers afterwards. // If we dest is ecx, then shift the swapped register! @@ -308,8 +537,7 @@ public: { ASSERT(shift_amount != dest); - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); rshift32(shift_amount, dest); } @@ -320,18 +548,17 @@ public: void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); rshift32(imm, dest); } void urshift32(RegisterID shift_amount, RegisterID dest) { - ASSERT(shift_amount != dest); - if (shift_amount == X86Registers::ecx) m_assembler.shrl_CLr(dest); else { + ASSERT(shift_amount != dest); + // On x86 we can only shift by ecx; if asked to shift by another register we'll // need rejig the shift amount into ecx first, and restore the registers afterwards. // If we dest is ecx, then shift the swapped register! @@ -345,8 +572,7 @@ public: { ASSERT(shift_amount != dest); - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); urshift32(shift_amount, dest); } @@ -357,16 +583,64 @@ public: void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest) { - if (src != dest) - move(src, dest); + move32IfNeeded(src, dest); urshift32(imm, dest); } - + + void rotateRight32(TrustedImm32 imm, RegisterID dest) + { + m_assembler.rorl_i8r(imm.m_value, dest); + } + + void rotateRight32(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.rorl_CLr(dest); + else { + ASSERT(src != dest); + + // Can only rotate by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.rorl_CLr(dest == X86Registers::ecx ? src : dest); + swap(src, X86Registers::ecx); + } + } + + void rotateLeft32(TrustedImm32 imm, RegisterID dest) + { + m_assembler.roll_i8r(imm.m_value, dest); + } + + void rotateLeft32(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.roll_CLr(dest); + else { + ASSERT(src != dest); + + // Can only rotate by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.roll_CLr(dest == X86Registers::ecx ? 
src : dest); + swap(src, X86Registers::ecx); + } + } + void sub32(RegisterID src, RegisterID dest) { m_assembler.subl_rr(src, dest); } - + + void sub32(RegisterID left, RegisterID right, RegisterID dest) + { + if (dest == right) { + neg32(dest); + add32(left, dest); + return; + } + move(left, dest); + sub32(right, dest); + } + void sub32(TrustedImm32 imm, RegisterID dest) { if (imm.m_value == 1) @@ -406,9 +680,9 @@ public: void xor32(TrustedImm32 imm, RegisterID dest) { if (imm.m_value == -1) - m_assembler.notl_r(dest); + m_assembler.notl_r(dest); else - m_assembler.xorl_ir(imm.m_value, dest); + m_assembler.xorl_ir(imm.m_value, dest); } void xor32(RegisterID src, Address dest) @@ -428,27 +702,70 @@ public: else if (op1 == dest) xor32(op2, dest); else { - move(op2, dest); + move32IfNeeded(op2, dest); + xor32(op1, dest); + } + } + + void xor32(Address op1, RegisterID op2, RegisterID dest) + { + if (op2 == dest) + xor32(op1, dest); + else if (op1.base == dest) { + load32(op1, dest); + xor32(op2, dest); + } else { + zeroExtend32ToPtr(op2, dest); xor32(op1, dest); } } + void xor32(RegisterID op1, Address op2, RegisterID dest) + { + xor32(op2, op1, dest); + } + void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); xor32(imm, dest); } + void not32(RegisterID srcDest) + { + m_assembler.notl_r(srcDest); + } + + void not32(Address dest) + { + m_assembler.notl_m(dest.offset, dest.base); + } + void sqrtDouble(FPRegisterID src, FPRegisterID dst) { m_assembler.sqrtsd_rr(src, dst); } + void sqrtDouble(Address src, FPRegisterID dst) + { + m_assembler.sqrtsd_mr(src.offset, src.base, dst); + } + + void sqrtFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.sqrtss_rr(src, dst); + } + + void sqrtFloat(Address src, FPRegisterID dst) + { + m_assembler.sqrtss_mr(src.offset, src.base, dst); + } + void absDouble(FPRegisterID src, FPRegisterID dst) { ASSERT(src != dst); static const double negativeZeroConstant = -0.0; - loadDouble(&negativeZeroConstant, dst); + loadDouble(TrustedImmPtr(&negativeZeroConstant), dst); m_assembler.andnpd_rr(src, dst); } @@ -456,10 +773,79 @@ public: { ASSERT(src != dst); static const double negativeZeroConstant = -0.0; - loadDouble(&negativeZeroConstant, dst); + loadDouble(TrustedImmPtr(&negativeZeroConstant), dst); m_assembler.xorpd_rr(src, dst); } + void ceilDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void ceilDouble(Address src, FPRegisterID dst) + { + m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void ceilFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void ceilFloat(Address src, FPRegisterID dst) + { + m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardInfiniti); + } + + void floorDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti); + } + + void floorDouble(Address src, FPRegisterID dst) + { + m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardNegativeInfiniti); + } + + void floorFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardNegativeInfiniti); + } + + void floorFloat(Address src, FPRegisterID dst) + { + m_assembler.roundss_mr(src.offset, src.base, dst, 
X86Assembler::RoundingType::TowardNegativeInfiniti); + } + + void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven); + } + + void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::ToNearestWithTiesToEven); + } + + void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundsd_rr(src, dst, X86Assembler::RoundingType::TowardZero); + } + + void roundTowardZeroDouble(Address src, FPRegisterID dst) + { + m_assembler.roundsd_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero); + } + + void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.roundss_rr(src, dst, X86Assembler::RoundingType::TowardZero); + } + + void roundTowardZeroFloat(Address src, FPRegisterID dst) + { + m_assembler.roundss_mr(src.offset, src.base, dst, X86Assembler::RoundingType::TowardZero); + } // Memory access operations: // @@ -525,15 +911,25 @@ public: m_assembler.movzbl_mr(address.offset, address.base, dest); } - void load8Signed(BaseIndex address, RegisterID dest) + void load8SignedExtendTo32(BaseIndex address, RegisterID dest) { m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest); } - void load8Signed(ImplicitAddress address, RegisterID dest) + void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest) { m_assembler.movsbl_mr(address.offset, address.base, dest); } + + void zeroExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.movzbl_rr(src, dest); + } + + void signExtend8To32(RegisterID src, RegisterID dest) + { + m_assembler.movsbl_rr(src, dest); + } void load16(BaseIndex address, RegisterID dest) { @@ -545,16 +941,26 @@ public: m_assembler.movzwl_mr(address.offset, address.base, dest); } - void load16Signed(BaseIndex address, RegisterID dest) + void load16SignedExtendTo32(BaseIndex address, RegisterID dest) { m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest); } - void load16Signed(Address address, RegisterID dest) + void load16SignedExtendTo32(Address address, RegisterID dest) { m_assembler.movswl_mr(address.offset, address.base, dest); } + void zeroExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.movzwl_rr(src, dest); + } + + void signExtend16To32(RegisterID src, RegisterID dest) + { + m_assembler.movswl_rr(src, dest); + } + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) { padBeforePatch(); @@ -582,16 +988,26 @@ public: m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale); } + void storeZero32(ImplicitAddress address) + { + store32(TrustedImm32(0), address); + } + + void storeZero32(BaseIndex address) + { + store32(TrustedImm32(0), address); + } + void store8(TrustedImm32 imm, Address address) { - ASSERT(-128 <= imm.m_value && imm.m_value < 128); - m_assembler.movb_i8m(imm.m_value, address.offset, address.base); + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + m_assembler.movb_i8m(imm8.m_value, address.offset, address.base); } void store8(TrustedImm32 imm, BaseIndex address) { - ASSERT(-128 <= imm.m_value && imm.m_value < 128); - m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale); + TrustedImm32 imm8(static_cast<int8_t>(imm.m_value)); + m_assembler.movb_i8m(imm8.m_value, address.offset, address.base, address.index, address.scale); } static 
ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address) @@ -672,6 +1088,25 @@ public: m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale); } + void store16(RegisterID src, Address address) + { +#if CPU(X86) + // On 32-bit x86 we can only store from the first 4 registers; + // esp..edi are mapped to the 'h' registers! + if (src >= 4) { + // Pick a temporary register. + RegisterID temp = getUnusedRegister(address); + + // Swap to the temporary register to perform the store. + swap(src, temp); + m_assembler.movw_rm(temp, address.offset, address.base); + swap(src, temp); + return; + } +#endif + m_assembler.movw_rm(src, address.offset, address.base); + } + // Floating-point operation: // @@ -681,17 +1116,17 @@ public: { ASSERT(isSSE2Present()); if (src != dest) - m_assembler.movsd_rr(src, dest); + m_assembler.movaps_rr(src, dest); } - void loadDouble(const void* address, FPRegisterID dest) + void loadDouble(TrustedImmPtr address, FPRegisterID dest) { #if CPU(X86) ASSERT(isSSE2Present()); - m_assembler.movsd_mr(address, dest); + m_assembler.movsd_mr(address.m_value, dest); #else - move(TrustedImmPtr(address), scratchRegister); - loadDouble(scratchRegister, dest); + move(address, scratchRegister()); + loadDouble(scratchRegister(), dest); #endif } @@ -700,12 +1135,19 @@ public: ASSERT(isSSE2Present()); m_assembler.movsd_mr(address.offset, address.base, dest); } - + void loadDouble(BaseIndex address, FPRegisterID dest) { ASSERT(isSSE2Present()); m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest); } + + void loadFloat(ImplicitAddress address, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.movss_mr(address.offset, address.base, dest); + } + void loadFloat(BaseIndex address, FPRegisterID dest) { ASSERT(isSSE2Present()); @@ -723,7 +1165,13 @@ public: ASSERT(isSSE2Present()); m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale); } - + + void storeFloat(FPRegisterID src, ImplicitAddress address) + { + ASSERT(isSSE2Present()); + m_assembler.movss_rm(src, address.offset, address.base); + } + void storeFloat(FPRegisterID src, BaseIndex address) { ASSERT(isSSE2Present()); @@ -736,33 +1184,144 @@ public: m_assembler.cvtsd2ss_rr(src, dst); } + void convertDoubleToFloat(Address address, FPRegisterID dst) + { + ASSERT(isSSE2Present()); + m_assembler.cvtsd2ss_mr(address.offset, address.base, dst); + } + void convertFloatToDouble(FPRegisterID src, FPRegisterID dst) { ASSERT(isSSE2Present()); m_assembler.cvtss2sd_rr(src, dst); } - void addDouble(FPRegisterID src, FPRegisterID dest) + void convertFloatToDouble(Address address, FPRegisterID dst) { ASSERT(isSSE2Present()); - m_assembler.addsd_rr(src, dest); + m_assembler.cvtss2sd_mr(address.offset, address.base, dst); + } + + void addDouble(FPRegisterID src, FPRegisterID dest) + { + addDouble(src, dest, dest); } void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) { - ASSERT(isSSE2Present()); - if (op1 == dest) - addDouble(op2, dest); + if (supportsAVX()) + m_assembler.vaddsd_rr(op1, op2, dest); else { - moveDouble(op2, dest); - addDouble(op1, dest); + ASSERT(isSSE2Present()); + if (op1 == dest) + m_assembler.addsd_rr(op2, dest); + else { + moveDouble(op2, dest); + m_assembler.addsd_rr(op1, dest); + } } } void addDouble(Address src, FPRegisterID dest) { - ASSERT(isSSE2Present()); - m_assembler.addsd_mr(src.offset, src.base, dest); + addDouble(src, dest, dest); + } + + void addDouble(Address op1, FPRegisterID op2, FPRegisterID 
dest) + { + if (supportsAVX()) + m_assembler.vaddsd_mr(op1.offset, op1.base, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.addsd_mr(op1.offset, op1.base, dest); + return; + } + + loadDouble(op1, dest); + addDouble(op2, dest); + } + } + + void addDouble(FPRegisterID op1, Address op2, FPRegisterID dest) + { + addDouble(op2, op1, dest); + } + + void addDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vaddsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.addsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest); + return; + } + loadDouble(op1, dest); + addDouble(op2, dest); + } + } + + void addFloat(FPRegisterID src, FPRegisterID dest) + { + addFloat(src, dest, dest); + } + + void addFloat(Address src, FPRegisterID dest) + { + addFloat(src, dest, dest); + } + + void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vaddss_rr(op1, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op1 == dest) + m_assembler.addss_rr(op2, dest); + else { + moveDouble(op2, dest); + m_assembler.addss_rr(op1, dest); + } + } + } + + void addFloat(Address op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vaddss_mr(op1.offset, op1.base, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.addss_mr(op1.offset, op1.base, dest); + return; + } + + loadFloat(op1, dest); + addFloat(op2, dest); + } + } + + void addFloat(FPRegisterID op1, Address op2, FPRegisterID dest) + { + addFloat(op2, op1, dest); + } + + void addFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vaddss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.addss_mr(op1.offset, op1.base, op1.index, op1.scale, dest); + return; + } + loadFloat(op1, dest); + addFloat(op2, dest); + } } void divDouble(FPRegisterID src, FPRegisterID dest) @@ -786,48 +1345,314 @@ public: m_assembler.divsd_mr(src.offset, src.base, dest); } - void subDouble(FPRegisterID src, FPRegisterID dest) + void divFloat(FPRegisterID src, FPRegisterID dest) { ASSERT(isSSE2Present()); - m_assembler.subsd_rr(src, dest); + m_assembler.divss_rr(src, dest); + } + + void divFloat(Address src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.divss_mr(src.offset, src.base, dest); + } + + void subDouble(FPRegisterID src, FPRegisterID dest) + { + subDouble(dest, src, dest); } void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) { - // B := A - B is invalid. - ASSERT(op1 == dest || op2 != dest); + if (supportsAVX()) + m_assembler.vsubsd_rr(op1, op2, dest); + else { + ASSERT(isSSE2Present()); - moveDouble(op1, dest); - subDouble(op2, dest); + // B := A - B is invalid. 
+ ASSERT(op1 == dest || op2 != dest); + moveDouble(op1, dest); + m_assembler.subsd_rr(op2, dest); + } + } + + void subDouble(FPRegisterID op1, Address op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vsubsd_mr(op1, op2.offset, op2.base, dest); + else { + moveDouble(op1, dest); + m_assembler.subsd_mr(op2.offset, op2.base, dest); + } + } + + void subDouble(FPRegisterID op1, BaseIndex op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vsubsd_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest); + else { + moveDouble(op1, dest); + m_assembler.subsd_mr(op2.offset, op2.base, op2.index, op2.scale, dest); + } } void subDouble(Address src, FPRegisterID dest) { - ASSERT(isSSE2Present()); - m_assembler.subsd_mr(src.offset, src.base, dest); + subDouble(dest, src, dest); + } + + void subFloat(FPRegisterID src, FPRegisterID dest) + { + subFloat(dest, src, dest); + } + + void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vsubss_rr(op1, op2, dest); + else { + ASSERT(isSSE2Present()); + // B := A - B is invalid. + ASSERT(op1 == dest || op2 != dest); + moveDouble(op1, dest); + m_assembler.subss_rr(op2, dest); + } + } + + void subFloat(FPRegisterID op1, Address op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vsubss_mr(op1, op2.offset, op2.base, dest); + else { + moveDouble(op1, dest); + m_assembler.subss_mr(op2.offset, op2.base, dest); + } + } + + void subFloat(FPRegisterID op1, BaseIndex op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vsubss_mr(op1, op2.offset, op2.base, op2.index, op2.scale, dest); + else { + moveDouble(op1, dest); + m_assembler.subss_mr(op2.offset, op2.base, op2.index, op2.scale, dest); + } + } + + void subFloat(Address src, FPRegisterID dest) + { + subFloat(dest, src, dest); } void mulDouble(FPRegisterID src, FPRegisterID dest) { - ASSERT(isSSE2Present()); - m_assembler.mulsd_rr(src, dest); + mulDouble(src, dest, dest); } void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) { - ASSERT(isSSE2Present()); - if (op1 == dest) - mulDouble(op2, dest); + if (supportsAVX()) + m_assembler.vmulsd_rr(op1, op2, dest); else { - moveDouble(op2, dest); - mulDouble(op1, dest); + ASSERT(isSSE2Present()); + if (op1 == dest) + m_assembler.mulsd_rr(op2, dest); + else { + moveDouble(op2, dest); + m_assembler.mulsd_rr(op1, dest); + } } } void mulDouble(Address src, FPRegisterID dest) { - ASSERT(isSSE2Present()); - m_assembler.mulsd_mr(src.offset, src.base, dest); + mulDouble(src, dest, dest); + } + + void mulDouble(Address op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vmulsd_mr(op1.offset, op1.base, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.mulsd_mr(op1.offset, op1.base, dest); + return; + } + loadDouble(op1, dest); + mulDouble(op2, dest); + } + } + + void mulDouble(FPRegisterID op1, Address op2, FPRegisterID dest) + { + return mulDouble(op2, op1, dest); + } + + void mulDouble(BaseIndex op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vmulsd_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.mulsd_mr(op1.offset, op1.base, op1.index, op1.scale, dest); + return; + } + loadDouble(op1, dest); + mulDouble(op2, dest); + } + } + + void mulFloat(FPRegisterID src, FPRegisterID dest) + { + mulFloat(src, dest, dest); + } + + void mulFloat(Address src, FPRegisterID dest) + { + mulFloat(src, dest, dest); + 
} + + void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vmulss_rr(op1, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op1 == dest) + m_assembler.mulss_rr(op2, dest); + else { + moveDouble(op2, dest); + m_assembler.mulss_rr(op1, dest); + } + } + } + + void mulFloat(Address op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vmulss_mr(op1.offset, op1.base, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.mulss_mr(op1.offset, op1.base, dest); + return; + } + loadFloat(op1, dest); + mulFloat(op2, dest); + } + } + + void mulFloat(FPRegisterID op1, Address op2, FPRegisterID dest) + { + mulFloat(op2, op1, dest); + } + + void mulFloat(BaseIndex op1, FPRegisterID op2, FPRegisterID dest) + { + if (supportsAVX()) + m_assembler.vmulss_mr(op1.offset, op1.base, op1.index, op1.scale, op2, dest); + else { + ASSERT(isSSE2Present()); + if (op2 == dest) { + m_assembler.mulss_mr(op1.offset, op1.base, op1.index, op1.scale, dest); + return; + } + loadFloat(op1, dest); + mulFloat(op2, dest); + } + } + + void andDouble(FPRegisterID src, FPRegisterID dst) + { + // ANDPS is defined on 128bits and is shorter than ANDPD. + m_assembler.andps_rr(src, dst); + } + + void andDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + andDouble(src2, dst); + else { + moveDouble(src2, dst); + andDouble(src1, dst); + } + } + + void andFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.andps_rr(src, dst); + } + + void andFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + andFloat(src2, dst); + else { + moveDouble(src2, dst); + andFloat(src1, dst); + } + } + + void orDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.orps_rr(src, dst); + } + + void orDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + orDouble(src2, dst); + else { + moveDouble(src2, dst); + orDouble(src1, dst); + } + } + + void orFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.orps_rr(src, dst); + } + + void orFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + orFloat(src2, dst); + else { + moveDouble(src2, dst); + orFloat(src1, dst); + } + } + + void xorDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.xorps_rr(src, dst); + } + + void xorDouble(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + xorDouble(src2, dst); + else { + moveDouble(src2, dst); + xorDouble(src1, dst); + } + } + + void xorFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.xorps_rr(src, dst); + } + + void xorFloat(FPRegisterID src1, FPRegisterID src2, FPRegisterID dst) + { + if (src1 == dst) + xorFloat(src2, dst); + else { + moveDouble(src2, dst); + xorFloat(src1, dst); + } } void convertInt32ToDouble(RegisterID src, FPRegisterID dest) @@ -842,6 +1667,18 @@ public: m_assembler.cvtsi2sd_mr(src.offset, src.base, dest); } + void convertInt32ToFloat(RegisterID src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.cvtsi2ss_rr(src, dest); + } + + void convertInt32ToFloat(Address src, FPRegisterID dest) + { + ASSERT(isSSE2Present()); + m_assembler.cvtsi2ss_mr(src.offset, src.base, dest); + } + Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right) { ASSERT(isSSE2Present()); @@ -850,27 +1687,18 @@ public: m_assembler.ucomisd_rr(left, right); else m_assembler.ucomisd_rr(right, left); + return 
jumpAfterFloatingPointCompare(cond, left, right); + } - if (cond == DoubleEqual) { - if (left == right) - return Jump(m_assembler.jnp()); - Jump isUnordered(m_assembler.jp()); - Jump result = Jump(m_assembler.je()); - isUnordered.link(this); - return result; - } else if (cond == DoubleNotEqualOrUnordered) { - if (left == right) - return Jump(m_assembler.jp()); - Jump isUnordered(m_assembler.jp()); - Jump isEqual(m_assembler.je()); - isUnordered.link(this); - Jump result = jump(); - isEqual.link(this); - return result; - } + Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + ASSERT(isSSE2Present()); - ASSERT(!(cond & DoubleConditionBitSpecial)); - return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits))); + if (cond & DoubleConditionBitInvert) + m_assembler.ucomiss_rr(left, right); + else + m_assembler.ucomiss_rr(right, left); + return jumpAfterFloatingPointCompare(cond, left, right); } // Truncates 'src' to an integer, and places the resulting 'dest'. @@ -890,15 +1718,13 @@ public: ASSERT(isSSE2Present()); m_assembler.cvttsd2si_rr(src, dest); } - -#if CPU(X86_64) - void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) + + void truncateFloatToInt32(FPRegisterID src, RegisterID dest) { ASSERT(isSSE2Present()); - m_assembler.cvttsd2siq_rr(src, dest); + m_assembler.cvttss2si_rr(src, dest); } -#endif - + // Convert 'src' to an integer, and places the resulting 'dest'. // If the result is not representable as a 32 bit value, branch. // May also branch for some values that are representable in 32 bits @@ -909,8 +1735,17 @@ public: m_assembler.cvttsd2si_rr(src, dest); // If the result is zero, it might have been -0.0, and the double comparison won't catch this! +#if CPU(X86_64) + if (negZeroCheck) { + Jump valueIsNonZero = branchTest32(NonZero, dest); + m_assembler.movmskpd_rr(src, scratchRegister()); + failureCases.append(branchTest32(NonZero, scratchRegister(), TrustedImm32(1))); + valueIsNonZero.link(this); + } +#else if (negZeroCheck) failureCases.append(branchTest32(Zero, dest)); +#endif // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump. 
convertInt32ToDouble(dest, fpTemp); @@ -919,6 +1754,11 @@ public: failureCases.append(m_assembler.jne()); } + void moveZeroToDouble(FPRegisterID reg) + { + m_assembler.xorps_rr(reg, reg); + } + Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch) { ASSERT(isSSE2Present()); @@ -951,13 +1791,13 @@ public: m_assembler.por_rr(src, dst); } - void moveInt32ToPacked(RegisterID src, XMMRegisterID dst) + void move32ToFloat(RegisterID src, XMMRegisterID dst) { ASSERT(isSSE2Present()); m_assembler.movd_rr(src, dst); } - void movePackedToInt32(XMMRegisterID src, RegisterID dst) + void moveFloatTo32(XMMRegisterID src, RegisterID dst) { ASSERT(isSSE2Present()); m_assembler.movd_rr(src, dst); @@ -1017,20 +1857,104 @@ public: void move(TrustedImmPtr imm, RegisterID dest) { - m_assembler.movq_i64r(imm.asIntptr(), dest); + if (!imm.m_value) + m_assembler.xorq_rr(dest, dest); + else + m_assembler.movq_i64r(imm.asIntptr(), dest); } void move(TrustedImm64 imm, RegisterID dest) { - m_assembler.movq_i64r(imm.m_value, dest); + if (!imm.m_value) + m_assembler.xorq_rr(dest, dest); + else + m_assembler.movq_i64r(imm.m_value, dest); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + RegisterID src; + if (elseCase == dest) + src = thenCase; + else { + cond = invert(cond); + src = elseCase; + } + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); + } + + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomiss_rr(left, right); + else + m_assembler.ucomiss_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); } + void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + RegisterID src; + if (elseCase == dest) + src = thenCase; + else { + cond = invert(cond); + src = elseCase; + } + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomiss_rr(left, right); + else + m_assembler.ucomiss_rr(right, left); + moveConditionallyAfterFloatingPointCompare(cond, left, right, src, dest); + } + void swap(RegisterID reg1, RegisterID reg2) { if (reg1 != reg2) m_assembler.xchgq_rr(reg1, reg2); } + void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest) + { + if (!imm.m_value) + m_assembler.xorq_rr(dest, dest); + else + m_assembler.mov_i32r(imm.m_value, dest); + } + void signExtend32ToPtr(RegisterID src, RegisterID dest) { m_assembler.movsxd_rr(src, dest); @@ -1040,6 +1964,11 @@ public: { m_assembler.movl_rr(src, dest); } + + void zeroExtend32ToPtr(TrustedImm32 src, RegisterID 
dest) + { + m_assembler.movl_i32r(src.m_value, dest); + } #else void move(RegisterID src, RegisterID dest) { @@ -1049,7 +1978,46 @@ public: void move(TrustedImmPtr imm, RegisterID dest) { - m_assembler.movl_i32r(imm.asIntptr(), dest); + if (!imm.m_value) + m_assembler.xorl_rr(dest, dest); + else + m_assembler.movl_i32r(imm.asIntptr(), dest); + } + + void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + ASSERT(isSSE2Present()); + + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + + if (cond == DoubleEqual) { + if (left == right) { + m_assembler.cmovnpl_rr(src, dest); + return; + } + + Jump isUnordered(m_assembler.jp()); + m_assembler.cmovel_rr(src, dest); + isUnordered.link(this); + return; + } + + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) { + m_assembler.cmovpl_rr(src, dest); + return; + } + + m_assembler.cmovpl_rr(src, dest); + m_assembler.cmovnel_rr(src, dest); + return; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + m_assembler.cmovl_rr(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest); } void swap(RegisterID reg1, RegisterID reg2) @@ -1069,6 +2037,190 @@ public: } #endif + void swap32(RegisterID src, RegisterID dest) + { + m_assembler.xchgl_rr(src, dest); + } + + void swap32(RegisterID src, Address dest) + { + m_assembler.xchgl_rm(src, dest.offset, dest.base); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmpl_rr(right, left); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmpl_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + m_assembler.cmpl_ir(right.m_value, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.testl_rr(testReg, mask); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag."); + + m_assembler.testl_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID 
testReg, TrustedImm32 mask, RegisterID src, RegisterID dest) + { + test32(testReg, mask); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag."); + + test32(testReg, mask); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + template<typename LeftType, typename RightType> + void moveDoubleConditionally32(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. Use moveDoubleConditionallyDouble()."); + + if (thenCase != dest && elseCase != dest) { + moveDouble(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) { + Jump falseCase = branch32(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else { + Jump trueCase = branch32(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } + } + + template<typename TestType, typename MaskType> + void moveDoubleConditionallyTest32(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. 
Use moveDoubleConditionallyDouble()."); + + if (elseCase == dest && isInvertible(cond)) { + Jump falseCase = branchTest32(invert(cond), test, mask); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchTest32(cond, test, mask); + moveDouble(elseCase, dest); + trueCase.link(this); + } + + Jump trueCase = branchTest32(cond, test, mask); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + + void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (elseCase == dest) { + Jump falseCase = branchDouble(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchDouble(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } else { + Jump trueCase = branchDouble(cond, left, right); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + } + + void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + if (elseCase == dest) { + Jump falseCase = branchFloat(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchFloat(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } else { + Jump trueCase = branchFloat(cond, left, right); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + } // Forwards / external control flow operations: // @@ -1091,7 +2243,8 @@ public: public: Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right) { - m_assembler.cmpb_im(right.m_value, left.offset, left.base); + TrustedImm32 right8(static_cast<int8_t>(right.m_value)); + m_assembler.cmpb_im(right8.m_value, left.offset, left.base); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -1103,10 +2256,12 @@ public: Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right) { - if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) - m_assembler.testl_rr(left, left); - else - m_assembler.cmpl_ir(right.m_value, left); + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest32(*resultCondition, left, left); + } + + m_assembler.cmpl_ir(right.m_value, left); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -1149,9 +2304,12 @@ public: { if (mask.m_value == -1) m_assembler.testl_rr(reg, reg); - else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah. - m_assembler.testb_i8r(mask.m_value, reg); - else + else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah. 
+ if (mask.m_value == 0xff) + m_assembler.testb_rr(reg, reg); + else + m_assembler.testb_i8r(mask.m_value, reg); + } else m_assembler.testl_i32r(mask.m_value, reg); } @@ -1183,31 +2341,28 @@ public: Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) { - // Byte in TrustedImm32 is not well defined, so be a little permisive here, but don't accept nonsense values. - ASSERT(mask.m_value >= -128 && mask.m_value <= 255); - if (mask.m_value == -1) + TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); + if (mask8.m_value == -1) m_assembler.cmpb_im(0, address.offset, address.base); else - m_assembler.testb_im(mask.m_value, address.offset, address.base); + m_assembler.testb_im(mask8.m_value, address.offset, address.base); return Jump(m_assembler.jCC(x86Condition(cond))); } Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1)) { - // Byte in TrustedImm32 is not well defined, so be a little permisive here, but don't accept nonsense values. - ASSERT(mask.m_value >= -128 && mask.m_value <= 255); - if (mask.m_value == -1) + TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); + if (mask8.m_value == -1) m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale); else - m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale); + m_assembler.testb_im(mask8.m_value, address.offset, address.base, address.index, address.scale); return Jump(m_assembler.jCC(x86Condition(cond))); } Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right) { - ASSERT(!(right.m_value & 0xFFFFFF00)); - - m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale); + TrustedImm32 right8(static_cast<int8_t>(right.m_value)); + m_assembler.cmpb_im(right8.m_value, left.offset, left.base, left.index, left.scale); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -1227,6 +2382,12 @@ public: m_assembler.jmp_m(address.offset, address.base); } + // Address is a memory location containing the address to jump to + void jump(BaseIndex address) + { + m_assembler.jmp_m(address.offset, address.base, address.index, address.scale); + } + // Arithmetic control flow operations: // @@ -1272,13 +2433,30 @@ public: { if (src1 == dest) return branchAdd32(cond, src2, dest); - move(src2, dest); + move32IfNeeded(src2, dest); return branchAdd32(cond, src1, dest); } + Jump branchAdd32(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest) + { + if (op2 == dest) + return branchAdd32(cond, op1, dest); + if (op1.base == dest) { + load32(op1, dest); + return branchAdd32(cond, op2, dest); + } + zeroExtend32ToPtr(op2, dest); + return branchAdd32(cond, op1, dest); + } + + Jump branchAdd32(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest) + { + return branchAdd32(cond, src2, src1, dest); + } + Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { - move(src, dest); + move32IfNeeded(src, dest); return branchAdd32(cond, imm, dest); } @@ -1298,7 +2476,7 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } - Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest) { mul32(imm, src, dest); if (cond != Overflow) @@ -1310,7 +2488,7 @@ public: { if (src1 == dest) return branchMul32(cond, src2, dest); - move(src2, dest); + move32IfNeeded(src2, dest); return branchMul32(cond, src1, 
dest); } @@ -1349,13 +2527,13 @@ public: // B := A - B is invalid. ASSERT(src1 == dest || src2 != dest); - move(src1, dest); + move32IfNeeded(src1, dest); return branchSub32(cond, src2, dest); } Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest) { - move(src1, dest); + move32IfNeeded(src1, dest); return branchSub32(cond, src2, dest); } @@ -1379,6 +2557,11 @@ public: m_assembler.int3(); } + Call nearTailCall() + { + return Call(m_assembler.jmp(), Call::LinkableNearTail); + } + Call nearCall() { return Call(m_assembler.call(), Call::LinkableNear); @@ -1401,7 +2584,8 @@ public: void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) { - m_assembler.cmpb_im(right.m_value, left.offset, left.base); + TrustedImm32 right8(static_cast<int8_t>(right.m_value)); + m_assembler.cmpb_im(right8.m_value, left.offset, left.base); set32(x86Condition(cond), dest); } @@ -1413,10 +2597,14 @@ public: void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { - if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) - m_assembler.testl_rr(left, left); - else - m_assembler.cmpl_ir(right.m_value, left); + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test32(*resultCondition, left, left, dest); + return; + } + } + + m_assembler.cmpl_ir(right.m_value, left); set32(x86Condition(cond), dest); } @@ -1427,10 +2615,11 @@ public: void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) { - if (mask.m_value == -1) + TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); + if (mask8.m_value == -1) m_assembler.cmpb_im(0, address.offset, address.base); else - m_assembler.testb_im(mask.m_value, address.offset, address.base); + m_assembler.testb_im(mask8.m_value, address.offset, address.base); set32(x86Condition(cond), dest); } @@ -1440,20 +2629,129 @@ public: set32(x86Condition(cond), dest); } + void test32(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest) + { + m_assembler.testl_rr(reg, mask); + set32(x86Condition(cond), dest); + } + + void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) + { + test32(reg, mask); + set32(x86Condition(cond), dest); + } + + void setCarry(RegisterID dest) + { + set32(X86Assembler::ConditionC, dest); + } + // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc. 
static RelationalCondition invert(RelationalCondition cond) { return static_cast<RelationalCondition>(cond ^ 1); } + static DoubleCondition invert(DoubleCondition cond) + { + switch (cond) { + case DoubleEqual: + return DoubleNotEqualOrUnordered; + case DoubleNotEqual: + return DoubleEqualOrUnordered; + case DoubleGreaterThan: + return DoubleLessThanOrEqualOrUnordered; + case DoubleGreaterThanOrEqual: + return DoubleLessThanOrUnordered; + case DoubleLessThan: + return DoubleGreaterThanOrEqualOrUnordered; + case DoubleLessThanOrEqual: + return DoubleGreaterThanOrUnordered; + case DoubleEqualOrUnordered: + return DoubleNotEqual; + case DoubleNotEqualOrUnordered: + return DoubleEqual; + case DoubleGreaterThanOrUnordered: + return DoubleLessThanOrEqual; + case DoubleGreaterThanOrEqualOrUnordered: + return DoubleLessThan; + case DoubleLessThanOrUnordered: + return DoubleGreaterThanOrEqual; + case DoubleLessThanOrEqualOrUnordered: + return DoubleGreaterThan; + } + RELEASE_ASSERT_NOT_REACHED(); + return DoubleEqual; // make compiler happy + } + + static bool isInvertible(ResultCondition cond) + { + switch (cond) { + case Zero: + case NonZero: + case Signed: + case PositiveOrZero: + return true; + default: + return false; + } + } + + static ResultCondition invert(ResultCondition cond) + { + switch (cond) { + case Zero: + return NonZero; + case NonZero: + return Zero; + case Signed: + return PositiveOrZero; + case PositiveOrZero: + return Signed; + default: + RELEASE_ASSERT_NOT_REACHED(); + return Zero; // Make compiler happy for release builds. + } + } + + static std::optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond) + { + switch (cond) { + case Equal: + return Zero; + case NotEqual: + return NonZero; + case LessThan: + return Signed; + case GreaterThanOrEqual: + return PositiveOrZero; + break; + default: + return std::nullopt; + } + } + void nop() { m_assembler.nop(); } + // We take memoryFence to mean acqrel. This has acqrel semantics on x86. void memoryFence() { - m_assembler.mfence(); + // lock; orl $0, (%rsp) + m_assembler.lock(); + m_assembler.orl_im(0, 0, X86Registers::esp); + } + + // We take this to mean that it prevents motion of normal stores. So, it's a no-op on x86. + void storeFence() + { + } + + // We take this to mean that it prevents motion of normal loads. So, it's a no-op on x86. + void loadFence() + { } static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) @@ -1466,28 +2764,61 @@ public: return X86Assembler::maxJumpReplacementSize(); } -#if USE(MASM_PROBE) - struct CPUState { - #define DECLARE_REGISTER(_type, _regName) \ - _type _regName; - FOR_EACH_CPU_REGISTER(DECLARE_REGISTER) - #undef DECLARE_REGISTER - }; + static ptrdiff_t patchableJumpSize() + { + return X86Assembler::patchableJumpSize(); + } - struct ProbeContext; - typedef void (*ProbeFunction)(struct ProbeContext*); + static bool supportsFloatingPointRounding() + { + if (s_sse4_1CheckState == CPUIDCheckState::NotChecked) + updateEax1EcxFlags(); + return s_sse4_1CheckState == CPUIDCheckState::Set; + } - struct ProbeContext { - ProbeFunction probeFunction; - void* arg1; - void* arg2; - CPUState cpu; + static bool supportsAVX() + { + // AVX still causes mysterious regressions and those regressions can be massive. 
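// --- Illustrative aside (not part of the patch) ---------------------------
// The memoryFence()/storeFence()/loadFence() definitions above mirror what C++
// compilers do for std::atomic_thread_fence on x86-64: a seq_cst fence becomes
// a locked read-modify-write on the stack (or MFENCE), while acquire and
// release fences need no instruction at all, because x86 loads already have
// acquire semantics and stores already have release semantics.
#include <atomic>

inline void fullFence()    { std::atomic_thread_fence(std::memory_order_seq_cst); } // ~ lock; orl $0, (%rsp)
inline void acquireFence() { std::atomic_thread_fence(std::memory_order_acquire); } // no code emitted on x86
inline void releaseFence() { std::atomic_thread_fence(std::memory_order_release); } // no code emitted on x86
// ---------------------------------------------------------------------------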
+ return false; + } - void dump(const char* indentation = 0); - private: - void dumpCPURegisters(const char* indentation); - }; -#endif // USE(MASM_PROBE) + static void updateEax1EcxFlags() + { + int flags = 0; +#if COMPILER(MSVC) + int cpuInfo[4]; + __cpuid(cpuInfo, 0x1); + flags = cpuInfo[2]; +#elif COMPILER(GCC_OR_CLANG) +#if CPU(X86_64) + asm ( + "movl $0x1, %%eax;" + "cpuid;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ebx", "%ecx", "%edx" + ); +#else + asm ( + "movl $0x1, %%eax;" + "pushl %%ebx;" + "cpuid;" + "popl %%ebx;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ecx", "%edx" + ); +#endif +#endif // COMPILER(GCC_OR_CLANG) + s_sse4_1CheckState = (flags & (1 << 19)) ? CPUIDCheckState::Set : CPUIDCheckState::Clear; + s_avxCheckState = (flags & (1 << 28)) ? CPUIDCheckState::Set : CPUIDCheckState::Clear; + } + +#if ENABLE(MASM_PROBE) + void probe(ProbeFunction, void* arg1, void* arg2); +#endif // ENABLE(MASM_PROBE) protected: X86Assembler::Condition x86Condition(RelationalCondition cond) @@ -1517,6 +2848,84 @@ protected: m_assembler.movzbl_rr(dest, dest); } + void cmov(X86Assembler::Condition cond, RegisterID src, RegisterID dest) + { +#if CPU(X86_64) + m_assembler.cmovq_rr(cond, src, dest); +#else + m_assembler.cmovl_rr(cond, src, dest); +#endif + } + + static bool supportsLZCNT() + { + if (s_lzcntCheckState == CPUIDCheckState::NotChecked) { + int flags = 0; +#if COMPILER(MSVC) + int cpuInfo[4]; + __cpuid(cpuInfo, 0x80000001); + flags = cpuInfo[2]; +#elif COMPILER(GCC_OR_CLANG) +#if CPU(X86_64) + asm ( + "movl $0x80000001, %%eax;" + "cpuid;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ebx", "%ecx", "%edx" + ); +#else + asm ( + "movl $0x80000001, %%eax;" + "pushl %%ebx;" + "cpuid;" + "popl %%ebx;" + "movl %%ecx, %0;" + : "=g" (flags) + : + : "%eax", "%ecx", "%edx" + ); +#endif +#endif // COMPILER(GCC_OR_CLANG) + s_lzcntCheckState = (flags & 0x20) ? CPUIDCheckState::Set : CPUIDCheckState::Clear; + } + return s_lzcntCheckState == CPUIDCheckState::Set; + } + + static bool supportsBMI1() + { + if (s_bmi1CheckState == CPUIDCheckState::NotChecked) { + int flags = 0; +#if COMPILER(MSVC) + int cpuInfo[4]; + __cpuid(cpuInfo, 0x80000001); + flags = cpuInfo[2]; +#elif COMPILER(GCC_OR_CLANG) + asm ( + "movl $0x7, %%eax;" + "movl $0x0, %%ecx;" + "cpuid;" + "movl %%ebx, %0;" + : "=g" (flags) + : + : "%eax", "%ebx", "%ecx", "%edx" + ); +#endif // COMPILER(GCC_OR_CLANG) + static int BMI1FeatureBit = 1 << 3; + s_bmi1CheckState = (flags & BMI1FeatureBit) ? CPUIDCheckState::Set : CPUIDCheckState::Clear; + } + return s_bmi1CheckState == CPUIDCheckState::Set; + } + + template<int sizeOfRegister> + void ctzAfterBsf(RegisterID dst) + { + Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero)); + move(TrustedImm32(sizeOfRegister), dst); + srcIsNonZero.link(this); + } + private: // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'. @@ -1538,6 +2947,84 @@ private: m_assembler.testl_i32m(mask.m_value, address.offset, address.base); } + // If lzcnt is not available, use this after BSR + // to count the leading zeros. 
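// --- Illustrative aside (not part of the patch) ---------------------------
// The BSR fallback referred to here works because BSR returns the bit index of
// the highest set bit, so for non-zero x: clz32(x) == 31 - bsr(x) == bsr(x) ^ 0x1f,
// which is why clz32AfterBsr() below finishes with xor32(TrustedImm32(0x1f), dst).
// The zero case is handled separately because BSR leaves its destination
// undefined when the source is zero. A host-side sketch of the identity:
#include <cassert>
#include <cstdint>

static unsigned clzViaBsr(uint32_t x)
{
    if (!x)
        return 32;                         // matches the move(TrustedImm32(32), dst) path
    unsigned bsr = 31 - __builtin_clz(x);  // index of the highest set bit, as BSR reports it (GCC/Clang builtin)
    return bsr ^ 0x1f;                     // equivalent to 31 - bsr
}

int main()
{
    assert(clzViaBsr(1) == 31);
    assert(clzViaBsr(0x80000000u) == 0);
    assert(clzViaBsr(0) == 32);
}
// ---------------------------------------------------------------------------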
+ void clz32AfterBsr(RegisterID dst) + { + Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero)); + move(TrustedImm32(32), dst); + + Jump skipNonZeroCase = jump(); + srcIsNonZero.link(this); + xor32(TrustedImm32(0x1f), dst); + skipNonZeroCase.link(this); + } + + Jump jumpAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right) + { + if (cond == DoubleEqual) { + if (left == right) + return Jump(m_assembler.jnp()); + Jump isUnordered(m_assembler.jp()); + Jump result = Jump(m_assembler.je()); + isUnordered.link(this); + return result; + } + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) + return Jump(m_assembler.jp()); + Jump isUnordered(m_assembler.jp()); + Jump isEqual(m_assembler.je()); + isUnordered.link(this); + Jump result = jump(); + isEqual.link(this); + return result; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits))); + } + + // The 32bit Move does not need the REX byte for low registers, making it shorter. + // Use this if the top bits are irrelevant because they will be reset by the next instruction. + void move32IfNeeded(RegisterID src, RegisterID dest) + { + if (src == dest) + return; + m_assembler.movl_rr(src, dest); + } + +#if CPU(X86_64) + void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest) + { + if (cond == DoubleEqual) { + if (left == right) { + m_assembler.cmovnpq_rr(src, dest); + return; + } + + Jump isUnordered(m_assembler.jp()); + m_assembler.cmoveq_rr(src, dest); + isUnordered.link(this); + return; + } + + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) { + m_assembler.cmovpq_rr(src, dest); + return; + } + + m_assembler.cmovpq_rr(src, dest); + m_assembler.cmovneq_rr(src, dest); + return; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + cmov(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), src, dest); + } +#endif + #if CPU(X86) #if OS(MAC_OS_X) @@ -1567,7 +3054,7 @@ private: cpuid; mov flags, edx; } -#elif COMPILER(GCC) +#elif COMPILER(GCC_OR_CLANG) asm ( "movl $0x1, %%eax;" "pushl %%ebx;" @@ -1588,7 +3075,7 @@ private: return s_sse2CheckState == HasSSE2; } - static SSE2CheckState s_sse2CheckState; + JS_EXPORTDATA static SSE2CheckState s_sse2CheckState; #endif // OS(MAC_OS_X) #elif !defined(NDEBUG) // CPU(X86) @@ -1601,10 +3088,18 @@ private: } #endif + + enum class CPUIDCheckState { + NotChecked, + Clear, + Set + }; + JS_EXPORT_PRIVATE static CPUIDCheckState s_sse4_1CheckState; + JS_EXPORT_PRIVATE static CPUIDCheckState s_avxCheckState; + static CPUIDCheckState s_bmi1CheckState; + static CPUIDCheckState s_lzcntCheckState; }; } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // MacroAssemblerX86Common_h diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h index 4fbc5a3dd..7e1841270 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2014-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,27 +23,28 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef MacroAssemblerX86_64_h -#define MacroAssemblerX86_64_h +#pragma once #if ENABLE(ASSEMBLER) && CPU(X86_64) #include "MacroAssemblerX86Common.h" -#if USE(MASM_PROBE) -#include <wtf/StdLibExtras.h> -#endif +#define REPATCH_OFFSET_CALL_R11 3 -#define REPTACH_OFFSET_CALL_R11 3 +inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; } namespace JSC { class MacroAssemblerX86_64 : public MacroAssemblerX86Common { public: + static const unsigned numGPRs = 16; + static const unsigned numFPRs = 16; + static const Scale ScalePtr = TimesEight; using MacroAssemblerX86Common::add32; using MacroAssemblerX86Common::and32; + using MacroAssemblerX86Common::branch32; using MacroAssemblerX86Common::branchAdd32; using MacroAssemblerX86Common::or32; using MacroAssemblerX86Common::sub32; @@ -59,38 +60,38 @@ public: void add32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - add32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + add32(imm, Address(scratchRegister())); } void and32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - and32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + and32(imm, Address(scratchRegister())); } void add32(AbsoluteAddress address, RegisterID dest) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - add32(Address(scratchRegister), dest); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + add32(Address(scratchRegister()), dest); } void or32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - or32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + or32(imm, Address(scratchRegister())); } void or32(RegisterID reg, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - or32(reg, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + or32(reg, Address(scratchRegister())); } void sub32(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - sub32(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + sub32(imm, Address(scratchRegister())); } void load8(const void* address, RegisterID dest) @@ -111,70 +112,145 @@ public: void addDouble(AbsoluteAddress address, FPRegisterID dest) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - m_assembler.addsd_mr(0, scratchRegister, dest); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + m_assembler.addsd_mr(0, scratchRegister(), dest); } void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest) { - move(imm, scratchRegister); - m_assembler.cvtsi2sd_rr(scratchRegister, dest); + move(imm, scratchRegister()); + m_assembler.cvtsi2sd_rr(scratchRegister(), dest); } void store32(TrustedImm32 imm, void* address) { - move(TrustedImmPtr(address), scratchRegister); - store32(imm, scratchRegister); + move(TrustedImmPtr(address), scratchRegister()); + store32(imm, scratchRegister()); + } + + void store32(RegisterID source, void* address) + { + if (source == X86Registers::eax) + m_assembler.movl_EAXm(address); + else { + move(TrustedImmPtr(address), scratchRegister()); + store32(source, scratchRegister()); + } } void store8(TrustedImm32 imm, void* address) { - move(TrustedImmPtr(address), scratchRegister); - store8(imm, Address(scratchRegister)); + TrustedImm32 
imm8(static_cast<int8_t>(imm.m_value)); + move(TrustedImmPtr(address), scratchRegister()); + store8(imm8, Address(scratchRegister())); } void store8(RegisterID reg, void* address) { - move(TrustedImmPtr(address), scratchRegister); - store8(reg, Address(scratchRegister)); + move(TrustedImmPtr(address), scratchRegister()); + store8(reg, Address(scratchRegister())); } +#if OS(WINDOWS) + Call callWithSlowPathReturnType() + { + // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value. + // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right, + // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument. + // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx. + // We then need to copy the 16 byte return value into rax and rdx, since JIT expects the return value to be split between the two. + // It is assumed that the parameters are already shifted to the right, when entering this method. + // Note: this implementation supports up to 3 parameters. + + // JIT relies on the CallerFrame (frame pointer) being put on the stack, + // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit. + // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer. + store64(X86Registers::ebp, Address(X86Registers::esp, -16)); + + // We also need to allocate the shadow space on the stack for the 4 parameter registers. + // In addition, we need to allocate 16 bytes for the return value. + // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated). + sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); + + // The first parameter register should contain a pointer to the stack allocated space for the return value. + move(X86Registers::esp, X86Registers::ecx); + add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx); + + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); + + add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); + + // Copy the return value into rax and rdx. + load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx); + load64(Address(X86Registers::eax), X86Registers::eax); + + ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); + return result; + } +#endif + Call call() { - DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister); - Call result = Call(m_assembler.call(scratchRegister), Call::Linkable); - ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11); +#if OS(WINDOWS) + // JIT relies on the CallerFrame (frame pointer) being put on the stack, + // On Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit. + // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer. + store64(X86Registers::ebp, Address(X86Registers::esp, -16)); + + // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them. + // We don't know the number of arguments at this point, so the arguments (5, 6, ...) should always be copied. 
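// --- Illustrative aside (not part of the patch) ---------------------------
// The Win64 ABI passes the first four integer arguments in RCX/RDX/R8/R9 but
// still makes the caller reserve a 32-byte "shadow" area for them; arguments
// beyond the fourth go on the stack. The 8 * sizeof(int64_t) stack adjustment
// used in call() below breaks down as shown here; callWithSlowPathReturnType()
// above uses the same total, with 16 bytes for the oversized return value in
// place of the two spilled argument slots.
#include <cstddef>
#include <cstdint>

constexpr std::size_t shadowSpace      = 4 * sizeof(int64_t); // home slots for RCX, RDX, R8, R9
constexpr std::size_t extraArgSlots    = 2 * sizeof(int64_t); // spilled arguments 5 and 6
constexpr std::size_t framePointerArea = 2 * sizeof(int64_t); // saved rbp + (unpopulated) return address slot
static_assert(shadowSpace + extraArgSlots + framePointerArea == 8 * sizeof(int64_t),
              "matches sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp)");
// ---------------------------------------------------------------------------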
+ + // Copy argument 5 + load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister()); + store64(scratchRegister(), Address(X86Registers::esp, -4 * static_cast<int32_t>(sizeof(int64_t)))); + + // Copy argument 6 + load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister()); + store64(scratchRegister(), Address(X86Registers::esp, -3 * static_cast<int32_t>(sizeof(int64_t)))); + + // We also need to allocate the shadow space on the stack for the 4 parameter registers. + // Also, we should allocate 16 bytes for the frame pointer, and return address (not populated). + // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters. + sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); +#endif + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Call result = Call(m_assembler.call(scratchRegister()), Call::Linkable); +#if OS(WINDOWS) + add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp); +#endif + ASSERT_UNUSED(label, differenceBetween(label, result) == REPATCH_OFFSET_CALL_R11); return result; } // Address is a memory location containing the address to jump to void jump(AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - jump(Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + jump(Address(scratchRegister())); } Call tailRecursiveCall() { - DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister); - Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); - ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister())); + ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11); return Call::fromTailJump(newJump); } Call makeTailRecursiveCall(Jump oldJump) { oldJump.link(this); - DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister); - Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); - ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister()); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister())); + ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPATCH_OFFSET_CALL_R11); return Call::fromTailJump(newJump); } Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest) { - move(TrustedImmPtr(dest.m_ptr), scratchRegister); - add32(src, Address(scratchRegister)); + move(TrustedImmPtr(dest.m_ptr), scratchRegister()); + add32(src, Address(scratchRegister())); return Jump(m_assembler.jCC(x86Condition(cond))); } @@ -188,10 +264,15 @@ public: m_assembler.addq_mr(src.offset, src.base, dest); } + void add64(RegisterID src, Address dest) + { + m_assembler.addq_rm(src, dest.offset, dest.base); + } + void add64(AbsoluteAddress src, RegisterID dest) { - move(TrustedImmPtr(src.m_ptr), scratchRegister); - add64(Address(scratchRegister), dest); + move(TrustedImmPtr(src.m_ptr), scratchRegister()); + add64(Address(scratchRegister()), dest); } void add64(TrustedImm32 imm, RegisterID srcDest) @@ -207,8 +288,8 @@ public: if (imm.m_value == 1) m_assembler.incq_r(dest); else { - move(imm, scratchRegister); - add64(scratchRegister, dest); + move(imm, scratchRegister()); + add64(scratchRegister(), dest); } } @@ -219,13 +300,36 @@ public: void add64(TrustedImm32 imm, Address address) { - 
m_assembler.addq_im(imm.m_value, address.offset, address.base); + if (imm.m_value == 1) + m_assembler.incq_m(address.offset, address.base); + else + m_assembler.addq_im(imm.m_value, address.offset, address.base); } void add64(TrustedImm32 imm, AbsoluteAddress address) { - move(TrustedImmPtr(address.m_ptr), scratchRegister); - add64(imm, Address(scratchRegister)); + move(TrustedImmPtr(address.m_ptr), scratchRegister()); + add64(imm, Address(scratchRegister())); + } + + void add64(RegisterID a, RegisterID b, RegisterID dest) + { + x86Lea64(BaseIndex(a, b, TimesOne), dest); + } + + void x86Lea64(BaseIndex index, RegisterID dest) + { + if (!index.scale && !index.offset) { + if (index.base == dest) { + add64(index.index, dest); + return; + } + if (index.index == dest) { + add64(index.base, dest); + return; + } + } + m_assembler.leaq_mr(index.offset, index.base, index.index, index.scale, dest); } void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest) @@ -245,8 +349,56 @@ public: void and64(TrustedImmPtr imm, RegisterID srcDest) { - move(imm, scratchRegister); - and64(scratchRegister, srcDest); + intptr_t intValue = imm.asIntptr(); + if (intValue <= std::numeric_limits<int32_t>::max() + && intValue >= std::numeric_limits<int32_t>::min()) { + and64(TrustedImm32(static_cast<int32_t>(intValue)), srcDest); + return; + } + move(imm, scratchRegister()); + and64(scratchRegister(), srcDest); + } + + void and64(RegisterID op1, RegisterID op2, RegisterID dest) + { + if (op1 == op2 && op1 != dest && op2 != dest) + move(op1, dest); + else if (op1 == dest) + and64(op2, dest); + else { + move(op2, dest); + and64(op1, dest); + } + } + + void countLeadingZeros64(RegisterID src, RegisterID dst) + { + if (supportsLZCNT()) { + m_assembler.lzcntq_rr(src, dst); + return; + } + m_assembler.bsrq_rr(src, dst); + clz64AfterBsr(dst); + } + + void countLeadingZeros64(Address src, RegisterID dst) + { + if (supportsLZCNT()) { + m_assembler.lzcntq_mr(src.offset, src.base, dst); + return; + } + m_assembler.bsrq_mr(src.offset, src.base, dst); + clz64AfterBsr(dst); + } + + void countTrailingZeros64(RegisterID src, RegisterID dst) + { + if (supportsBMI1()) { + m_assembler.tzcntq_rr(src, dst); + return; + } + m_assembler.bsfq_rr(src, dst); + ctzAfterBsf<64>(dst); } void lshift64(TrustedImm32 imm, RegisterID dest) @@ -254,16 +406,147 @@ public: m_assembler.shlq_i8r(imm.m_value, dest); } + void lshift64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.shlq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only shift by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.shlq_CLr(dest == X86Registers::ecx ? src : dest); + swap(src, X86Registers::ecx); + } + } + void rshift64(TrustedImm32 imm, RegisterID dest) { m_assembler.sarq_i8r(imm.m_value, dest); } - + + void rshift64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.sarq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only shift by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.sarq_CLr(dest == X86Registers::ecx ? src : dest); + swap(src, X86Registers::ecx); + } + } + + void urshift64(TrustedImm32 imm, RegisterID dest) + { + m_assembler.shrq_i8r(imm.m_value, dest); + } + + void urshift64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.shrq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only shift by ecx, so we do some swapping if we see anything else. 
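// --- Illustrative aside (not part of the patch) ---------------------------
// x86 variable shifts and rotates take their count only in CL, so when the count
// register is not ecx the helpers here swap it into ecx, emit the shift, and swap
// back. The subtle expression "dest == X86Registers::ecx ? src : dest" accounts
// for the case where dest itself is ecx: after the first swap, dest's old value
// lives in src, so that is the register that must be shifted. A register-file
// model of that sequence, with hypothetical names:
#include <cassert>
#include <cstdint>
#include <utility>

int main()
{
    // urshift64(src = rax, dest = rcx): rax holds the count, rcx holds the value.
    uint64_t rax = 3;      // shift amount (the "src" register)
    uint64_t rcx = 0x80;   // value to shift (the "dest" register, which is ecx)
    std::swap(rax, rcx);   // swap(src, ecx): count is now in rcx, the value is now in rax
    rax >>= (rcx & 63);    // shrq %cl, applied to "src" because dest aliased ecx
    std::swap(rax, rcx);   // swap back: the shifted value ends up in rcx (dest)
    assert(rcx == 0x10 && rax == 3);
}
// ---------------------------------------------------------------------------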
+ swap(src, X86Registers::ecx); + m_assembler.shrq_CLr(dest == X86Registers::ecx ? src : dest); + swap(src, X86Registers::ecx); + } + } + + void rotateRight64(TrustedImm32 imm, RegisterID dest) + { + m_assembler.rorq_i8r(imm.m_value, dest); + } + + void rotateRight64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.rorq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only rotate by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.rorq_CLr(dest == X86Registers::ecx ? src : dest); + swap(src, X86Registers::ecx); + } + } + + void rotateLeft64(TrustedImm32 imm, RegisterID dest) + { + m_assembler.rolq_i8r(imm.m_value, dest); + } + + void rotateLeft64(RegisterID src, RegisterID dest) + { + if (src == X86Registers::ecx) + m_assembler.rolq_CLr(dest); + else { + ASSERT(src != dest); + + // Can only rotate by ecx, so we do some swapping if we see anything else. + swap(src, X86Registers::ecx); + m_assembler.rolq_CLr(dest == X86Registers::ecx ? src : dest); + swap(src, X86Registers::ecx); + } + } + void mul64(RegisterID src, RegisterID dest) { m_assembler.imulq_rr(src, dest); } + + void mul64(RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src2 == dest) { + m_assembler.imulq_rr(src1, dest); + return; + } + move(src1, dest); + m_assembler.imulq_rr(src2, dest); + } + void x86ConvertToQuadWord64() + { + m_assembler.cqo(); + } + + void x86ConvertToQuadWord64(RegisterID rax, RegisterID rdx) + { + ASSERT_UNUSED(rax, rax == X86Registers::eax); + ASSERT_UNUSED(rdx, rdx == X86Registers::edx); + x86ConvertToQuadWord64(); + } + + void x86Div64(RegisterID denominator) + { + m_assembler.idivq_r(denominator); + } + + void x86Div64(RegisterID rax, RegisterID rdx, RegisterID denominator) + { + ASSERT_UNUSED(rax, rax == X86Registers::eax); + ASSERT_UNUSED(rdx, rdx == X86Registers::edx); + x86Div64(denominator); + } + + void x86UDiv64(RegisterID denominator) + { + m_assembler.divq_r(denominator); + } + + void x86UDiv64(RegisterID rax, RegisterID rdx, RegisterID denominator) + { + ASSERT_UNUSED(rax, rax == X86Registers::eax); + ASSERT_UNUSED(rdx, rdx == X86Registers::edx); + x86UDiv64(denominator); + } + void neg64(RegisterID dest) { m_assembler.negq_r(dest); @@ -274,10 +557,15 @@ public: m_assembler.orq_rr(src, dest); } - void or64(TrustedImm64 imm, RegisterID dest) + void or64(TrustedImm64 imm, RegisterID srcDest) { - move(imm, scratchRegister); - or64(scratchRegister, dest); + if (imm.m_value <= std::numeric_limits<int32_t>::max() + && imm.m_value >= std::numeric_limits<int32_t>::min()) { + or64(TrustedImm32(static_cast<int32_t>(imm.m_value)), srcDest); + return; + } + move(imm, scratchRegister()); + or64(scratchRegister(), srcDest); } void or64(TrustedImm32 imm, RegisterID dest) @@ -302,11 +590,6 @@ public: move(src, dest); or64(imm, dest); } - - void rotateRight64(TrustedImm32 imm, RegisterID srcDst) - { - m_assembler.rorq_i8r(imm.m_value, srcDst); - } void sub64(RegisterID src, RegisterID dest) { @@ -326,15 +609,42 @@ public: if (imm.m_value == 1) m_assembler.decq_r(dest); else { - move(imm, scratchRegister); - sub64(scratchRegister, dest); + move(imm, scratchRegister()); + sub64(scratchRegister(), dest); } } + void sub64(TrustedImm32 imm, Address address) + { + m_assembler.subq_im(imm.m_value, address.offset, address.base); + } + + void sub64(Address src, RegisterID dest) + { + m_assembler.subq_mr(src.offset, src.base, dest); + } + + void sub64(RegisterID src, Address dest) + { + m_assembler.subq_rm(src, 
dest.offset, dest.base); + } + void xor64(RegisterID src, RegisterID dest) { m_assembler.xorq_rr(src, dest); } + + void xor64(RegisterID op1, RegisterID op2, RegisterID dest) + { + if (op1 == op2) + move(TrustedImm32(0), dest); + else if (op1 == dest) + xor64(op2, dest); + else { + move(op2, dest); + xor64(op1, dest); + } + } void xor64(RegisterID src, Address dest) { @@ -346,6 +656,16 @@ public: m_assembler.xorq_ir(imm.m_value, srcDest); } + void not64(RegisterID srcDest) + { + m_assembler.notq_r(srcDest); + } + + void not64(Address dest) + { + m_assembler.notq_m(dest.offset, dest.base); + } + void load64(ImplicitAddress address, RegisterID dest) { m_assembler.movq_mr(address.offset, address.base, dest); @@ -395,21 +715,31 @@ public: if (src == X86Registers::eax) m_assembler.movq_EAXm(address); else { - move(TrustedImmPtr(address), scratchRegister); - store64(src, scratchRegister); + move(TrustedImmPtr(address), scratchRegister()); + store64(src, scratchRegister()); } } + void store64(TrustedImm32 imm, ImplicitAddress address) + { + m_assembler.movq_i32m(imm.m_value, address.offset, address.base); + } + void store64(TrustedImm64 imm, ImplicitAddress address) { - move(imm, scratchRegister); - store64(scratchRegister, address); + if (CAN_SIGN_EXTEND_32_64(imm.m_value)) { + store64(TrustedImm32(static_cast<int32_t>(imm.m_value)), address); + return; + } + + move(imm, scratchRegister()); + store64(scratchRegister(), address); } void store64(TrustedImm64 imm, BaseIndex address) { - move(imm, scratchRegister); - m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale); + move(imm, scratchRegister()); + m_assembler.movq_rm(scratchRegister(), address.offset, address.base, address.index, address.scale); } DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address) @@ -419,6 +749,16 @@ public: return DataLabel32(this); } + void swap64(RegisterID src, RegisterID dest) + { + m_assembler.xchgq_rr(src, dest); + } + + void swap64(RegisterID src, Address dest) + { + m_assembler.xchgq_rm(src, dest.offset, dest.base); + } + void move64ToDouble(RegisterID src, FPRegisterID dest) { m_assembler.movq_rr(src, dest); @@ -431,35 +771,81 @@ public: void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { - if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) - m_assembler.testq_rr(left, left); - else - m_assembler.cmpq_ir(right.m_value, left); - m_assembler.setCC_r(x86Condition(cond), dest); - m_assembler.movzbl_rr(dest, dest); + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + test64(*resultCondition, left, left, dest); + return; + } + } + + m_assembler.cmpq_ir(right.m_value, left); + set32(x86Condition(cond), dest); } void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) { m_assembler.cmpq_rr(right, left); - m_assembler.setCC_r(x86Condition(cond), dest); - m_assembler.movzbl_rr(dest, dest); + set32(x86Condition(cond), dest); } - + + void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest) + { + if (cond & DoubleConditionBitInvert) + m_assembler.ucomisd_rr(left, right); + else + m_assembler.ucomisd_rr(right, left); + + if (cond == DoubleEqual) { + if (left == right) { + m_assembler.setnp_r(dest); + return; + } + + Jump isUnordered(m_assembler.jp()); + m_assembler.sete_r(dest); + isUnordered.link(this); + return; + } + + if (cond == DoubleNotEqualOrUnordered) { + if (left == right) { + 
m_assembler.setp_r(dest); + return; + } + + m_assembler.setp_r(dest); + m_assembler.setne_r(dest); + return; + } + + ASSERT(!(cond & DoubleConditionBitSpecial)); + m_assembler.setCC_r(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits), dest); + } + Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right) { m_assembler.cmpq_rr(right, left); return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) + return branchTest64(*resultCondition, left, left); + } + m_assembler.cmpq_ir(right.m_value, left); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right) { if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) { m_assembler.testq_rr(left, left); return Jump(m_assembler.jCC(x86Condition(cond))); } - move(right, scratchRegister); - return branch64(cond, left, scratchRegister); + move(right, scratchRegister()); + return branch64(cond, left, scratchRegister()); } Jump branch64(RelationalCondition cond, RegisterID left, Address right) @@ -470,8 +856,8 @@ public: Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right) { - move(TrustedImmPtr(left.m_ptr), scratchRegister); - return branch64(cond, Address(scratchRegister), right); + move(TrustedImmPtr(left.m_ptr), scratchRegister()); + return branch64(cond, Address(scratchRegister()), right); } Jump branch64(RelationalCondition cond, Address left, RegisterID right) @@ -480,10 +866,16 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branch64(RelationalCondition cond, Address left, TrustedImm32 right) + { + m_assembler.cmpq_im(right.m_value, left.offset, left.base); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right) { - move(right, scratchRegister); - return branch64(cond, left, scratchRegister); + move(right, scratchRegister()); + return branch64(cond, left, scratchRegister()); } Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right) @@ -491,6 +883,12 @@ public: m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale); return Jump(m_assembler.jCC(x86Condition(cond))); } + + Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) + { + load32(left.m_ptr, scratchRegister()); + return branch32(cond, scratchRegister(), right); + } Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right) { @@ -499,8 +897,8 @@ public: Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right) { - move(right, scratchRegister); - return branchPtr(cond, left, scratchRegister); + move(right, scratchRegister()); + return branchPtr(cond, left, scratchRegister()); } Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask) @@ -521,6 +919,12 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask) + { + move(mask, scratchRegister()); + return branchTest64(cond, reg, scratchRegister()); + } + void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest) { if (mask.m_value == -1) @@ -540,8 +944,8 @@ public: Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { - load64(address.m_ptr, scratchRegister); 
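// --- Illustrative aside (not part of the patch) ---------------------------
// compareDouble() and jumpAfterFloatingPointCompare() above special-case NaN
// because UCOMISD reports an unordered result (parity flag set) when either
// operand is NaN; without the extra setp/jp handling, DoubleEqual would wrongly
// report NaN == NaN as true. The IEEE behaviour being preserved:
#include <cassert>
#include <cmath>

int main()
{
    double nan = std::nan("");
    assert(!(nan == nan));                 // DoubleEqual must be false on unordered inputs
    assert(nan != nan);                    // DoubleNotEqualOrUnordered must be true
    assert(!(nan < 1.0) && !(nan > 1.0));  // all ordered relations are false when NaN is involved
}
// ---------------------------------------------------------------------------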
- return branchTest64(cond, scratchRegister, mask); + load64(address.m_ptr, scratchRegister()); + return branchTest64(cond, scratchRegister(), mask); } Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1)) @@ -575,12 +979,43 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchAdd64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src1 == dest) + return branchAdd64(cond, src2, dest); + move(src2, dest); + return branchAdd64(cond, src1, dest); + } + + Jump branchAdd64(ResultCondition cond, Address op1, RegisterID op2, RegisterID dest) + { + if (op2 == dest) + return branchAdd64(cond, op1, dest); + if (op1.base == dest) { + load32(op1, dest); + return branchAdd64(cond, op2, dest); + } + move(op2, dest); + return branchAdd64(cond, op1, dest); + } + + Jump branchAdd64(ResultCondition cond, RegisterID src1, Address src2, RegisterID dest) + { + return branchAdd64(cond, src2, src1, dest); + } + Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest) { add64(src, dest); return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchAdd64(ResultCondition cond, Address src, RegisterID dest) + { + add64(src, dest); + return Jump(m_assembler.jCC(x86Condition(cond))); + } + Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest) { mul64(src, dest); @@ -589,6 +1024,14 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) + { + if (src1 == dest) + return branchMul64(cond, src2, dest); + move(src2, dest); + return branchMul64(cond, src1, dest); + } + Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest) { sub64(imm, dest); @@ -613,6 +1056,164 @@ public: return Jump(m_assembler.jCC(x86Condition(cond))); } + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest) + { + m_assembler.cmpq_rr(right, left); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + m_assembler.cmpq_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + if (!right.m_value) { + if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) { + moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest); + return; + } + } + + m_assembler.cmpq_ir(right.m_value, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest) + { + m_assembler.testq_rr(testReg, mask); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST 
does not set the Overflow Flag."); + + m_assembler.testq_rr(right, left); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID src, RegisterID dest) + { + // if we are only interested in the low seven bits, this can be tested with a testb + if (mask.m_value == -1) + m_assembler.testq_rr(testReg, testReg); + else if ((mask.m_value & ~0x7f) == 0) + m_assembler.testb_i8r(mask.m_value, testReg); + else + m_assembler.testq_i32r(mask.m_value, testReg); + cmov(x86Condition(cond), src, dest); + } + + void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, TrustedImm32 mask, RegisterID thenCase, RegisterID elseCase, RegisterID dest) + { + ASSERT(isInvertible(cond)); + ASSERT_WITH_MESSAGE(cond != Overflow, "TEST does not set the Overflow Flag."); + + if (mask.m_value == -1) + m_assembler.testq_rr(testReg, testReg); + else if (!(mask.m_value & ~0x7f)) + m_assembler.testb_i8r(mask.m_value, testReg); + else + m_assembler.testq_i32r(mask.m_value, testReg); + + if (thenCase != dest && elseCase != dest) { + move(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) + cmov(x86Condition(cond), thenCase, dest); + else + cmov(x86Condition(invert(cond)), elseCase, dest); + } + + template<typename LeftType, typename RightType> + void moveDoubleConditionally64(RelationalCondition cond, LeftType left, RightType right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<LeftType, FPRegisterID>::value && !std::is_same<RightType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. Use moveDoubleConditionallyDouble()."); + + if (thenCase != dest && elseCase != dest) { + moveDouble(elseCase, dest); + elseCase = dest; + } + + if (elseCase == dest) { + Jump falseCase = branch64(invert(cond), left, right); + moveDouble(thenCase, dest); + falseCase.link(this); + } else { + Jump trueCase = branch64(cond, left, right); + moveDouble(elseCase, dest); + trueCase.link(this); + } + } + + template<typename TestType, typename MaskType> + void moveDoubleConditionallyTest64(ResultCondition cond, TestType test, MaskType mask, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest) + { + static_assert(!std::is_same<TestType, FPRegisterID>::value && !std::is_same<MaskType, FPRegisterID>::value, "One of the tested argument could be aliased on dest. 
Use moveDoubleConditionallyDouble()."); + + if (elseCase == dest && isInvertible(cond)) { + Jump falseCase = branchTest64(invert(cond), test, mask); + moveDouble(thenCase, dest); + falseCase.link(this); + } else if (thenCase == dest) { + Jump trueCase = branchTest64(cond, test, mask); + moveDouble(elseCase, dest); + trueCase.link(this); + } + + Jump trueCase = branchTest64(cond, test, mask); + moveDouble(elseCase, dest); + Jump falseCase = jump(); + trueCase.link(this); + moveDouble(thenCase, dest); + falseCase.link(this); + } + + void abortWithReason(AbortReason reason) + { + move(TrustedImm32(reason), X86Registers::r11); + breakpoint(); + } + + void abortWithReason(AbortReason reason, intptr_t misc) + { + move(TrustedImm64(misc), X86Registers::r10); + abortWithReason(reason); + } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) { ConvertibleLoadLabel result = ConvertibleLoadLabel(this); @@ -627,44 +1228,137 @@ public: return DataLabelPtr(this); } + DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest) + { + padBeforePatch(); + m_assembler.movq_i64r(initialValue.m_value, dest); + return DataLabelPtr(this); + } + Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { - dataLabel = moveWithPatch(initialRightValue, scratchRegister); - return branch64(cond, left, scratchRegister); + dataLabel = moveWithPatch(initialRightValue, scratchRegister()); + return branch64(cond, left, scratchRegister()); } Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) { - dataLabel = moveWithPatch(initialRightValue, scratchRegister); - return branch64(cond, left, scratchRegister); + dataLabel = moveWithPatch(initialRightValue, scratchRegister()); + return branch64(cond, left, scratchRegister()); + } + + Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0)) + { + padBeforePatch(); + m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister()); + dataLabel = DataLabel32(this); + return branch32(cond, left, scratchRegister()); } DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) { - DataLabelPtr label = moveWithPatch(initialValue, scratchRegister); - store64(scratchRegister, address); + DataLabelPtr label = moveWithPatch(initialValue, scratchRegister()); + store64(scratchRegister(), address); return label; } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm) + { + return PatchableJump(branch64(cond, reg, imm)); + } + + PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right) + { + return PatchableJump(branch64(cond, left, right)); + } using MacroAssemblerX86Common::branch8; Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right) { - MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister); - return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right); + MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister()); + return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister()), right); } using MacroAssemblerX86Common::branchTest8; Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1)) { + TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); TrustedImmPtr 
addr(reinterpret_cast<void*>(address.offset)); - MacroAssemblerX86Common::move(addr, scratchRegister); - return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask); + MacroAssemblerX86Common::move(addr, scratchRegister()); + return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister(), address.base, TimesOne), mask8); } Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1)) { - MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister); - return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask); + TrustedImm32 mask8(static_cast<int8_t>(mask.m_value)); + MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister()); + return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister()), mask8); + } + + void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) + { + m_assembler.cvttsd2siq_rr(src, dest); + } + + void truncateDoubleToInt64(FPRegisterID src, RegisterID dest) + { + m_assembler.cvttsd2siq_rr(src, dest); + } + + // int64Min should contain exactly 0x43E0000000000000 == static_cast<double>(int64_t::min()). scratch may + // be the same FPR as src. + void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min) + { + ASSERT(scratch != int64Min); + + // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed + // integer conversion instruction. If the src is less than int64_t::min() then the results of the two + // instructions are the same. Otherwise, we need to: subtract int64_t::min(); truncate double to + // uint64_t; then add back int64_t::min() in the destination gpr. + + Jump large = branchDouble(DoubleGreaterThanOrEqual, src, int64Min); + m_assembler.cvttsd2siq_rr(src, dest); + Jump done = jump(); + large.link(this); + moveDouble(src, scratch); + m_assembler.subsd_rr(int64Min, scratch); + m_assembler.movq_i64r(0x8000000000000000, scratchRegister()); + m_assembler.cvttsd2siq_rr(scratch, dest); + m_assembler.orq_rr(scratchRegister(), dest); + done.link(this); + } + + void truncateFloatToUint32(FPRegisterID src, RegisterID dest) + { + m_assembler.cvttss2siq_rr(src, dest); + } + + void truncateFloatToInt64(FPRegisterID src, RegisterID dest) + { + m_assembler.cvttss2siq_rr(src, dest); + } + + // int64Min should contain exactly 0x5f000000 == static_cast<float>(int64_t::min()). scratch may be the + // same FPR as src. + void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID scratch, FPRegisterID int64Min) + { + ASSERT(scratch != int64Min); + + // Since X86 does not have a floating point to unsigned integer instruction, we need to use the signed + // integer conversion instruction. If the src is less than int64_t::min() then the results of the two + // instructions are the same. Otherwise, we need to: subtract int64_t::min(); truncate double to + // uint64_t; then add back int64_t::min() in the destination gpr. 
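// --- Illustrative aside (not part of the patch) ---------------------------
// A host-side model of the unsigned-truncation trick described above (the same
// idea is used for both the double and the float variant): values below 2^63
// convert directly with the signed instruction, while values at or above 2^63
// are rebased by subtracting 2^63, converted, and have the sign bit OR-ed back in.
#include <cassert>
#include <cstdint>

static uint64_t truncateDoubleToUint64Model(double x)
{
    const double twoTo63 = 9223372036854775808.0; // 0x43E0000000000000, i.e. 2^63
    if (x < twoTo63)
        return static_cast<uint64_t>(static_cast<int64_t>(x));     // plain cvttsd2siq
    uint64_t rebased = static_cast<uint64_t>(static_cast<int64_t>(x - twoTo63));
    return rebased | 0x8000000000000000ull;                        // orq of the sign bit
}

int main()
{
    assert(truncateDoubleToUint64Model(1.5) == 1);
    assert(truncateDoubleToUint64Model(9223372036854775808.0) == 0x8000000000000000ull);
    assert(truncateDoubleToUint64Model(18446744073709547520.0) == 0xFFFFFFFFFFFFF000ull);
}
// ---------------------------------------------------------------------------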
+ + Jump large = branchFloat(DoubleGreaterThanOrEqual, src, int64Min); + m_assembler.cvttss2siq_rr(src, dest); + Jump done = jump(); + large.link(this); + moveDouble(src, scratch); + m_assembler.subss_rr(int64Min, scratch); + m_assembler.movq_i64r(0x8000000000000000, scratchRegister()); + m_assembler.cvttss2siq_rr(scratch, dest); + m_assembler.orq_rr(scratchRegister(), dest); + done.link(this); } void convertInt64ToDouble(RegisterID src, FPRegisterID dest) @@ -672,21 +1366,77 @@ public: m_assembler.cvtsi2sdq_rr(src, dest); } + void convertInt64ToDouble(Address src, FPRegisterID dest) + { + m_assembler.cvtsi2sdq_mr(src.offset, src.base, dest); + } + + void convertInt64ToFloat(RegisterID src, FPRegisterID dest) + { + m_assembler.cvtsi2ssq_rr(src, dest); + } + + void convertInt64ToFloat(Address src, FPRegisterID dest) + { + m_assembler.cvtsi2ssq_mr(src.offset, src.base, dest); + } + + // One of scratch or scratch2 may be the same as src + void convertUInt64ToDouble(RegisterID src, FPRegisterID dest, RegisterID scratch) + { + RegisterID scratch2 = scratchRegister(); + + m_assembler.testq_rr(src, src); + AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed)); + m_assembler.cvtsi2sdq_rr(src, dest); + AssemblerLabel done = m_assembler.jmp(); + m_assembler.linkJump(signBitSet, m_assembler.label()); + if (scratch != src) + m_assembler.movq_rr(src, scratch); + m_assembler.movq_rr(src, scratch2); + m_assembler.shrq_i8r(1, scratch); + m_assembler.andq_ir(1, scratch2); + m_assembler.orq_rr(scratch, scratch2); + m_assembler.cvtsi2sdq_rr(scratch2, dest); + m_assembler.addsd_rr(dest, dest); + m_assembler.linkJump(done, m_assembler.label()); + } + + // One of scratch or scratch2 may be the same as src + void convertUInt64ToFloat(RegisterID src, FPRegisterID dest, RegisterID scratch) + { + RegisterID scratch2 = scratchRegister(); + m_assembler.testq_rr(src, src); + AssemblerLabel signBitSet = m_assembler.jCC(x86Condition(Signed)); + m_assembler.cvtsi2ssq_rr(src, dest); + AssemblerLabel done = m_assembler.jmp(); + m_assembler.linkJump(signBitSet, m_assembler.label()); + if (scratch != src) + m_assembler.movq_rr(src, scratch); + m_assembler.movq_rr(src, scratch2); + m_assembler.shrq_i8r(1, scratch); + m_assembler.andq_ir(1, scratch2); + m_assembler.orq_rr(scratch, scratch2); + m_assembler.cvtsi2ssq_rr(scratch2, dest); + m_assembler.addss_rr(dest, dest); + m_assembler.linkJump(done, m_assembler.label()); + } + static bool supportsFloatingPoint() { return true; } - // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate() static bool supportsFloatingPointTruncate() { return true; } static bool supportsFloatingPointSqrt() { return true; } static bool supportsFloatingPointAbs() { return true; } static FunctionPtr readCallTarget(CodeLocationCall call) { - return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation())); + return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation())); } - static bool haveScratchRegisterForBlinding() { return true; } - static RegisterID scratchRegisterForBlinding() { return scratchRegister; } + bool haveScratchRegisterForBlinding() { return m_allowScratchRegister; } + RegisterID scratchRegisterForBlinding() { return scratchRegister(); } static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; } + static bool canJumpReplacePatchableBranch32WithPatch() { return true; } static CodeLocationLabel 
startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) { @@ -698,127 +1448,78 @@ public: return label.labelAtOffset(-totalBytes); } + static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label) + { + const int rexBytes = 1; + const int opcodeBytes = 1; + const int immediateBytes = 4; + const int totalBytes = rexBytes + opcodeBytes + immediateBytes; + ASSERT(totalBytes >= maxJumpReplacementSize()); + return label.labelAtOffset(-totalBytes); + } + static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label) { return startOfBranchPtrWithPatchOnRegister(label); } + + static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label) + { + return startOfBranch32WithPatchOnRegister(label); + } static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue) { - X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister); + X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister); } - static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) + static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue) { - X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister); - } - -#if USE(MASM_PROBE) - // This function emits code to preserve the CPUState (e.g. registers), - // call a user supplied probe function, and restore the CPUState before - // continuing with other JIT generated code. - // - // The user supplied probe function will be called with a single pointer to - // a ProbeContext struct (defined above) which contains, among other things, - // the preserved CPUState. This allows the user probe function to inspect - // the CPUState at that point in the JIT generated code. - // - // If the user probe function alters the register values in the ProbeContext, - // the altered values will be loaded into the CPU registers when the probe - // returns. - // - // The ProbeContext is stack allocated and is only valid for the duration - // of the call to the user probe function. 
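// --- Illustrative aside (not part of the patch) ---------------------------
// convertUInt64ToDouble()/convertUInt64ToFloat() earlier in this file cannot use
// CVTSI2SD directly when the top bit of the source is set, because that
// instruction treats its operand as signed. The shr/and/or + addsd sequence
// halves the value while keeping the discarded low bit as a sticky bit (so the
// final doubling still rounds to nearest-even), converts, then doubles. A
// host-side model of that path:
#include <cassert>
#include <cstdint>

static double uint64ToDoubleModel(uint64_t x)
{
    if (static_cast<int64_t>(x) >= 0)                               // sign bit clear: signed convert works directly
        return static_cast<double>(static_cast<int64_t>(x));
    uint64_t halved = (x >> 1) | (x & 1);                           // shrq 1 / andq 1 / orq
    return static_cast<double>(static_cast<int64_t>(halved)) * 2.0; // cvtsi2sdq + addsd dest, dest
}

int main()
{
    assert(uint64ToDoubleModel(0xFFFFFFFFFFFFFFFFull) == static_cast<double>(0xFFFFFFFFFFFFFFFFull));
    assert(uint64ToDoubleModel(0x8000000000000001ull) == static_cast<double>(0x8000000000000001ull));
    assert(uint64ToDoubleModel(12345) == 12345.0);
}
// ---------------------------------------------------------------------------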
- - void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0); -#endif // USE(MASM_PROBE) - -private: - friend class LinkBuffer; - friend class RepatchBuffer; + X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, s_scratchRegister); + } - static void linkCall(void* code, Call call, FunctionPtr function) + static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue) { - if (!call.isFlagSet(Call::Near)) - X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value()); - else - X86Assembler::linkCall(code, call.m_label, function.value()); + X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), s_scratchRegister); } static void repatchCall(CodeLocationCall call, CodeLocationLabel destination) { - X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); + X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); } static void repatchCall(CodeLocationCall call, FunctionPtr destination) { - X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); + X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPATCH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress()); } -#if USE(MASM_PROBE) - inline TrustedImm64 trustedImm64FromPtr(void* ptr) +private: + // If lzcnt is not available, use this after BSR + // to count the leading zeros. + void clz64AfterBsr(RegisterID dst) { - return TrustedImm64(TrustedImmPtr(ptr)); - } + Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero)); + move(TrustedImm32(64), dst); - inline TrustedImm64 trustedImm64FromPtr(ProbeFunction function) - { - return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function))); + Jump skipNonZeroCase = jump(); + srcIsNonZero.link(this); + xor64(TrustedImm32(0x3f), dst); + skipNonZeroCase.link(this); } - inline TrustedImm64 trustedImm64FromPtr(void (*function)()) + friend class LinkBuffer; + + static void linkCall(void* code, Call call, FunctionPtr function) { - return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function))); + if (!call.isFlagSet(Call::Near)) + X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPATCH_OFFSET_CALL_R11), function.value()); + else if (call.isFlagSet(Call::Tail)) + X86Assembler::linkJump(code, call.m_label, function.value()); + else + X86Assembler::linkCall(code, call.m_label, function.value()); } -#endif }; -#if USE(MASM_PROBE) - -extern "C" void ctiMasmProbeTrampoline(); - -// What code is emitted for the probe? -// ================================== -// We want to keep the size of the emitted probe invocation code as compact as -// possible to minimize the perturbation to the JIT generated code. However, -// we also need to preserve the CPU registers and set up the ProbeContext to be -// passed to the user probe function. -// -// Hence, we do only the minimum here to preserve a scratch register (i.e. rax -// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments. -// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation -// work i.e. saving the CPUState (and setting up the ProbeContext), calling the -// user probe function, and restoring the CPUState before returning to JIT -// generated code. 
-// -// What values are in the saved registers? -// ====================================== -// Conceptually, the saved registers should contain values as if the probe -// is not present in the JIT generated code. Hence, they should contain values -// that are expected at the start of the instruction immediately following the -// probe. -// -// Specifcally, the saved stack pointer register will point to the stack -// position before we push the ProbeContext frame. The saved rip will point to -// the address of the instruction immediately following the probe. - -inline void MacroAssemblerX86_64::probe(MacroAssemblerX86_64::ProbeFunction function, void* arg1, void* arg2) -{ - push(RegisterID::esp); - push(RegisterID::eax); - move(trustedImm64FromPtr(arg2), RegisterID::eax); - push(RegisterID::eax); - move(trustedImm64FromPtr(arg1), RegisterID::eax); - push(RegisterID::eax); - move(trustedImm64FromPtr(function), RegisterID::eax); - push(RegisterID::eax); - move(trustedImm64FromPtr(ctiMasmProbeTrampoline), RegisterID::eax); - call(RegisterID::eax); -} -#endif // USE(MASM_PROBE) - } // namespace JSC #endif // ENABLE(ASSEMBLER) - -#endif // MacroAssemblerX86_64_h diff --git a/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h new file mode 100644 index 000000000..c6f53b347 --- /dev/null +++ b/Source/JavaScriptCore/assembler/MaxFrameExtentForSlowPathCall.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2013, 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "Register.h" +#include "StackAlignment.h" +#include <wtf/Assertions.h> + +namespace JSC { + +// The maxFrameExtentForSlowPathCall is the max amount of stack space (in bytes) +// that can be used for outgoing args when calling a slow path C function +// from JS code. + +#if !ENABLE(JIT) +static const size_t maxFrameExtentForSlowPathCall = 0; + +#elif CPU(X86_64) && OS(WINDOWS) +// 4 args in registers, but stack space needs to be allocated for all args. +static const size_t maxFrameExtentForSlowPathCall = 64; + +#elif CPU(X86_64) +// All args in registers. +static const size_t maxFrameExtentForSlowPathCall = 0; + +#elif CPU(X86) +// 7 args on stack (28 bytes). 
+static const size_t maxFrameExtentForSlowPathCall = 40; + +#elif CPU(ARM64) +// All args in registers. +static const size_t maxFrameExtentForSlowPathCall = 0; + +#elif CPU(ARM) +// First four args in registers, remaining 4 args on stack. +static const size_t maxFrameExtentForSlowPathCall = 24; + +#elif CPU(MIPS) +// Though args are in registers, there need to be space on the stack for all args. +static const size_t maxFrameExtentForSlowPathCall = 40; + +#else +#error "Unsupported CPU: need value for maxFrameExtentForSlowPathCall" + +#endif + +COMPILE_ASSERT(!(maxFrameExtentForSlowPathCall % sizeof(Register)), extent_must_be_in_multiples_of_registers); + +#if ENABLE(JIT) +// Make sure that cfr - maxFrameExtentForSlowPathCall bytes will make the stack pointer aligned +COMPILE_ASSERT((maxFrameExtentForSlowPathCall % 16) == 16 - sizeof(CallerFrameAndPC), extent_must_align_stack_from_callframe_pointer); +#endif + +static const size_t maxFrameExtentForSlowPathCallInRegisters = maxFrameExtentForSlowPathCall / sizeof(Register); + +} // namespace JSC diff --git a/Source/JavaScriptCore/assembler/RepatchBuffer.h b/Source/JavaScriptCore/assembler/RepatchBuffer.h deleted file mode 100644 index 41e950ad8..000000000 --- a/Source/JavaScriptCore/assembler/RepatchBuffer.h +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright (C) 2009 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef RepatchBuffer_h -#define RepatchBuffer_h - -#if ENABLE(JIT) - -#include "CodeBlock.h" -#include <MacroAssembler.h> -#include <wtf/Noncopyable.h> - -namespace JSC { - -// RepatchBuffer: -// -// This class is used to modify code after code generation has been completed, -// and after the code has potentially already been executed. This mechanism is -// used to apply optimizations to the code. 
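A rough sketch of how a repatching site drove this class; the helper below is invented for illustration, while the RepatchBuffer calls themselves match the API defined next:

    // Hypothetical helper: redirect a previously patched jump to a slow path
    // after the containing CodeBlock has already executed.
    static void repatchJumpToSlowPath(CodeBlock* codeBlock, CodeLocationJump jump, CodeLocationLabel slowPath)
    {
        RepatchBuffer repatchBuffer(codeBlock); // makes the code writable under ASSEMBLER_WX_EXCLUSIVE
        repatchBuffer.relink(jump, slowPath);   // rewrites the jump target in place
    }                                           // destructor re-marks the region executable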
-// -class RepatchBuffer { - typedef MacroAssemblerCodePtr CodePtr; - -public: - RepatchBuffer(CodeBlock* codeBlock) - : m_codeBlock(codeBlock) - { -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - RefPtr<JITCode> code = codeBlock->jitCode(); - m_start = code->start(); - m_size = code->size(); - - ExecutableAllocator::makeWritable(m_start, m_size); -#endif - } - - ~RepatchBuffer() - { -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - ExecutableAllocator::makeExecutable(m_start, m_size); -#endif - } - - CodeBlock* codeBlock() const { return m_codeBlock; } - - void relink(CodeLocationJump jump, CodeLocationLabel destination) - { - MacroAssembler::repatchJump(jump, destination); - } - - void relink(CodeLocationCall call, CodeLocationLabel destination) - { - MacroAssembler::repatchCall(call, destination); - } - - void relink(CodeLocationCall call, FunctionPtr destination) - { - MacroAssembler::repatchCall(call, destination); - } - - void relink(CodeLocationNearCall nearCall, CodePtr destination) - { - MacroAssembler::repatchNearCall(nearCall, CodeLocationLabel(destination)); - } - - void relink(CodeLocationNearCall nearCall, CodeLocationLabel destination) - { - MacroAssembler::repatchNearCall(nearCall, destination); - } - - void repatch(CodeLocationDataLabel32 dataLabel32, int32_t value) - { - MacroAssembler::repatchInt32(dataLabel32, value); - } - - void repatch(CodeLocationDataLabelCompact dataLabelCompact, int32_t value) - { - MacroAssembler::repatchCompact(dataLabelCompact, value); - } - - void repatch(CodeLocationDataLabelPtr dataLabelPtr, void* value) - { - MacroAssembler::repatchPointer(dataLabelPtr, value); - } - - void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label) - { - relink(CodeLocationCall(CodePtr(returnAddress)), label); - } - - void relinkCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction) - { - relinkCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction)); - } - - void relinkCallerToFunction(ReturnAddressPtr returnAddress, FunctionPtr function) - { - relink(CodeLocationCall(CodePtr(returnAddress)), function); - } - - void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodeLocationLabel label) - { - relink(CodeLocationNearCall(CodePtr(returnAddress)), label); - } - - void relinkNearCallerToTrampoline(ReturnAddressPtr returnAddress, CodePtr newCalleeFunction) - { - relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction)); - } - - void replaceWithLoad(CodeLocationConvertibleLoad label) - { - MacroAssembler::replaceWithLoad(label); - } - - void replaceWithAddressComputation(CodeLocationConvertibleLoad label) - { - MacroAssembler::replaceWithAddressComputation(label); - } - - void setLoadInstructionIsActive(CodeLocationConvertibleLoad label, bool isActive) - { - if (isActive) - replaceWithLoad(label); - else - replaceWithAddressComputation(label); - } - - static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label) - { - return MacroAssembler::startOfBranchPtrWithPatchOnRegister(label); - } - - static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label) - { - return MacroAssembler::startOfPatchableBranchPtrWithPatchOnAddress(label); - } - - void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) - { - MacroAssembler::replaceWithJump(instructionStart, destination); - } - - // This is a *bit* of a silly API, since we currently always also repatch the - // immediate after calling this. 
But I'm fine with that, since this just feels - // less yucky. - void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::RegisterID reg, void* value) - { - MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(instructionStart, reg, value); - } - - void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, MacroAssembler::Address address, void* value) - { - MacroAssembler::revertJumpReplacementToPatchableBranchPtrWithPatch(instructionStart, address, value); - } - -private: - CodeBlock* m_codeBlock; -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - void* m_start; - size_t m_size; -#endif -}; - -} // namespace JSC - -#endif // ENABLE(ASSEMBLER) - -#endif // RepatchBuffer_h diff --git a/Source/JavaScriptCore/assembler/SH4Assembler.h b/Source/JavaScriptCore/assembler/SH4Assembler.h deleted file mode 100644 index d326279c5..000000000 --- a/Source/JavaScriptCore/assembler/SH4Assembler.h +++ /dev/null @@ -1,2225 +0,0 @@ -/* - * Copyright (C) 2013 Cisco Systems, Inc. All rights reserved. - * Copyright (C) 2009-2011 STMicroelectronics. All rights reserved. - * Copyright (C) 2008 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef SH4Assembler_h -#define SH4Assembler_h - -#if ENABLE(ASSEMBLER) && CPU(SH4) - -#include "AssemblerBuffer.h" -#include "AssemblerBufferWithConstantPool.h" -#include "JITCompilationEffort.h" -#include <limits.h> -#include <stdarg.h> -#include <stdint.h> -#include <stdio.h> -#include <wtf/Assertions.h> -#include <wtf/DataLog.h> -#include <wtf/Vector.h> - -#ifndef NDEBUG -#define SH4_ASSEMBLER_TRACING -#endif - -namespace JSC { -typedef uint16_t SH4Word; - -enum { - INVALID_OPCODE = 0xffff, - ADD_OPCODE = 0x300c, - ADDIMM_OPCODE = 0x7000, - ADDC_OPCODE = 0x300e, - ADDV_OPCODE = 0x300f, - AND_OPCODE = 0x2009, - ANDIMM_OPCODE = 0xc900, - DIV0_OPCODE = 0x2007, - DIV1_OPCODE = 0x3004, - BF_OPCODE = 0x8b00, - BFS_OPCODE = 0x8f00, - BRA_OPCODE = 0xa000, - BRAF_OPCODE = 0x0023, - NOP_OPCODE = 0x0009, - BSR_OPCODE = 0xb000, - RTS_OPCODE = 0x000b, - BT_OPCODE = 0x8900, - BTS_OPCODE = 0x8d00, - BSRF_OPCODE = 0x0003, - BRK_OPCODE = 0x003b, - FTRC_OPCODE = 0xf03d, - CMPEQ_OPCODE = 0x3000, - CMPEQIMM_OPCODE = 0x8800, - CMPGE_OPCODE = 0x3003, - CMPGT_OPCODE = 0x3007, - CMPHI_OPCODE = 0x3006, - CMPHS_OPCODE = 0x3002, - CMPPL_OPCODE = 0x4015, - CMPPZ_OPCODE = 0x4011, - CMPSTR_OPCODE = 0x200c, - DT_OPCODE = 0x4010, - FCMPEQ_OPCODE = 0xf004, - FCMPGT_OPCODE = 0xf005, - FMOV_OPCODE = 0xf00c, - FADD_OPCODE = 0xf000, - FMUL_OPCODE = 0xf002, - FSUB_OPCODE = 0xf001, - FDIV_OPCODE = 0xf003, - FNEG_OPCODE = 0xf04d, - JMP_OPCODE = 0x402b, - JSR_OPCODE = 0x400b, - LDSPR_OPCODE = 0x402a, - LDSLPR_OPCODE = 0x4026, - MOV_OPCODE = 0x6003, - MOVIMM_OPCODE = 0xe000, - MOVB_WRITE_RN_OPCODE = 0x2000, - MOVB_WRITE_RNDEC_OPCODE = 0x2004, - MOVB_WRITE_R0RN_OPCODE = 0x0004, - MOVB_WRITE_OFFGBR_OPCODE = 0xc000, - MOVB_WRITE_OFFRN_OPCODE = 0x8000, - MOVB_READ_RM_OPCODE = 0x6000, - MOVB_READ_RMINC_OPCODE = 0x6004, - MOVB_READ_R0RM_OPCODE = 0x000c, - MOVB_READ_OFFGBR_OPCODE = 0xc400, - MOVB_READ_OFFRM_OPCODE = 0x8400, - MOVL_WRITE_RN_OPCODE = 0x2002, - MOVL_WRITE_RNDEC_OPCODE = 0x2006, - MOVL_WRITE_R0RN_OPCODE = 0x0006, - MOVL_WRITE_OFFGBR_OPCODE = 0xc200, - MOVL_WRITE_OFFRN_OPCODE = 0x1000, - MOVL_READ_RM_OPCODE = 0x6002, - MOVL_READ_RMINC_OPCODE = 0x6006, - MOVL_READ_R0RM_OPCODE = 0x000e, - MOVL_READ_OFFGBR_OPCODE = 0xc600, - MOVL_READ_OFFPC_OPCODE = 0xd000, - MOVL_READ_OFFRM_OPCODE = 0x5000, - MOVW_WRITE_RN_OPCODE = 0x2001, - MOVW_WRITE_R0RN_OPCODE = 0x0005, - MOVW_READ_RM_OPCODE = 0x6001, - MOVW_READ_RMINC_OPCODE = 0x6005, - MOVW_READ_R0RM_OPCODE = 0x000d, - MOVW_READ_OFFRM_OPCODE = 0x8500, - MOVW_READ_OFFPC_OPCODE = 0x9000, - MOVA_READ_OFFPC_OPCODE = 0xc700, - MOVT_OPCODE = 0x0029, - MULL_OPCODE = 0x0007, - DMULL_L_OPCODE = 0x3005, - STSMACL_OPCODE = 0x001a, - STSMACH_OPCODE = 0x000a, - DMULSL_OPCODE = 0x300d, - NEG_OPCODE = 0x600b, - NEGC_OPCODE = 0x600a, - NOT_OPCODE = 0x6007, - OR_OPCODE = 0x200b, - ORIMM_OPCODE = 0xcb00, - ORBIMM_OPCODE = 0xcf00, - SETS_OPCODE = 0x0058, - SETT_OPCODE = 0x0018, - SHAD_OPCODE = 0x400c, - SHAL_OPCODE = 0x4020, - SHAR_OPCODE = 0x4021, - SHLD_OPCODE = 0x400d, - SHLL_OPCODE = 0x4000, - SHLL2_OPCODE = 0x4008, - SHLL8_OPCODE = 0x4018, - SHLL16_OPCODE = 0x4028, - SHLR_OPCODE = 0x4001, - SHLR2_OPCODE = 0x4009, - SHLR8_OPCODE = 0x4019, - SHLR16_OPCODE = 0x4029, - STSPR_OPCODE = 0x002a, - STSLPR_OPCODE = 0x4022, - FLOAT_OPCODE = 0xf02d, - SUB_OPCODE = 0x3008, - SUBC_OPCODE = 0x300a, - SUBV_OPCODE = 0x300b, - TST_OPCODE = 0x2008, - TSTIMM_OPCODE = 0xc800, - TSTB_OPCODE = 0xcc00, - EXTUB_OPCODE = 0x600c, - EXTUW_OPCODE = 0x600d, - XOR_OPCODE = 0x200a, - XORIMM_OPCODE = 0xca00, - XORB_OPCODE 
= 0xce00, - FMOVS_READ_RM_INC_OPCODE = 0xf009, - FMOVS_READ_RM_OPCODE = 0xf008, - FMOVS_READ_R0RM_OPCODE = 0xf006, - FMOVS_WRITE_RN_OPCODE = 0xf00a, - FMOVS_WRITE_RN_DEC_OPCODE = 0xf00b, - FMOVS_WRITE_R0RN_OPCODE = 0xf007, - FCNVDS_DRM_FPUL_OPCODE = 0xf0bd, - FCNVSD_FPUL_DRN_OPCODE = 0xf0ad, - LDS_RM_FPUL_OPCODE = 0x405a, - FLDS_FRM_FPUL_OPCODE = 0xf01d, - STS_FPUL_RN_OPCODE = 0x005a, - FSTS_FPUL_FRN_OPCODE = 0xF00d, - LDSFPSCR_OPCODE = 0x406a, - STSFPSCR_OPCODE = 0x006a, - LDSRMFPUL_OPCODE = 0x405a, - FSTSFPULFRN_OPCODE = 0xf00d, - FABS_OPCODE = 0xf05d, - FSQRT_OPCODE = 0xf06d, - FSCHG_OPCODE = 0xf3fd, - CLRT_OPCODE = 8, - SYNCO_OPCODE = 0x00ab, -}; - -namespace SH4Registers { -typedef enum { - r0, - r1, - r2, - r3, - r4, - r5, - r6, - r7, - r8, - r9, - r10, - r11, - r12, - r13, - r14, fp = r14, - r15, sp = r15, - pc, - pr, -} RegisterID; - -typedef enum { - fr0, dr0 = fr0, - fr1, - fr2, dr2 = fr2, - fr3, - fr4, dr4 = fr4, - fr5, - fr6, dr6 = fr6, - fr7, - fr8, dr8 = fr8, - fr9, - fr10, dr10 = fr10, - fr11, - fr12, dr12 = fr12, - fr13, - fr14, dr14 = fr14, - fr15, -} FPRegisterID; -} - -inline uint16_t getOpcodeGroup1(uint16_t opc, int rm, int rn) -{ - return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4)); -} - -inline uint16_t getOpcodeGroup2(uint16_t opc, int rm) -{ - return (opc | ((rm & 0xf) << 8)); -} - -inline uint16_t getOpcodeGroup3(uint16_t opc, int rm, int rn) -{ - return (opc | ((rm & 0xf) << 8) | (rn & 0xff)); -} - -inline uint16_t getOpcodeGroup4(uint16_t opc, int rm, int rn, int offset) -{ - return (opc | ((rm & 0xf) << 8) | ((rn & 0xf) << 4) | (offset & 0xf)); -} - -inline uint16_t getOpcodeGroup5(uint16_t opc, int rm) -{ - return (opc | (rm & 0xff)); -} - -inline uint16_t getOpcodeGroup6(uint16_t opc, int rm) -{ - return (opc | (rm & 0xfff)); -} - -inline uint16_t getOpcodeGroup7(uint16_t opc, int rm) -{ - return (opc | ((rm & 0x7) << 9)); -} - -inline uint16_t getOpcodeGroup8(uint16_t opc, int rm, int rn) -{ - return (opc | ((rm & 0x7) << 9) | ((rn & 0x7) << 5)); -} - -inline uint16_t getOpcodeGroup9(uint16_t opc, int rm, int rn) -{ - return (opc | ((rm & 0xf) << 8) | ((rn & 0x7) << 5)); -} - -inline uint16_t getOpcodeGroup10(uint16_t opc, int rm, int rn) -{ - return (opc | ((rm & 0x7) << 9) | ((rn & 0xf) << 4)); -} - -inline uint16_t getOpcodeGroup11(uint16_t opc, int rm, int rn) -{ - return (opc | ((rm & 0xf) << 4) | (rn & 0xf)); -} - -inline uint16_t getRn(uint16_t x) -{ - return ((x & 0xf00) >> 8); -} - -inline uint16_t getRm(uint16_t x) -{ - return ((x & 0xf0) >> 4); -} - -inline uint16_t getDisp(uint16_t x) -{ - return (x & 0xf); -} - -inline uint16_t getImm8(uint16_t x) -{ - return (x & 0xff); -} - -inline uint16_t getImm12(uint16_t x) -{ - return (x & 0xfff); -} - -inline uint16_t getDRn(uint16_t x) -{ - return ((x & 0xe00) >> 9); -} - -inline uint16_t getDRm(uint16_t x) -{ - return ((x & 0xe0) >> 5); -} - -class SH4Assembler { -public: - typedef SH4Registers::RegisterID RegisterID; - typedef SH4Registers::FPRegisterID FPRegisterID; - typedef AssemblerBufferWithConstantPool<512, 4, 2, SH4Assembler> SH4Buffer; - static const RegisterID scratchReg1 = SH4Registers::r3; - static const RegisterID scratchReg2 = SH4Registers::r11; - static const uint32_t maxInstructionSize = 16; - - static RegisterID firstRegister() { return SH4Registers::r0; } - static RegisterID lastRegister() { return SH4Registers::r15; } - - static FPRegisterID firstFPRegister() { return SH4Registers::dr0; } - static FPRegisterID lastFPRegister() { return SH4Registers::dr14; } - - enum { - 
padForAlign8 = 0x00, - padForAlign16 = 0x0009, - padForAlign32 = 0x00090009, - }; - - enum JumpType { - JumpFar, - JumpNear - }; - - SH4Assembler() - : m_claimscratchReg(0x0) - , m_indexOfLastWatchpoint(INT_MIN) - , m_indexOfTailOfLastWatchpoint(INT_MIN) - { - } - - SH4Buffer& buffer() { return m_buffer; } - - // SH4 condition codes - typedef enum { - EQ = 0x0, // Equal - NE = 0x1, // Not Equal - HS = 0x2, // Unsigned Greater Than equal - HI = 0x3, // Unsigned Greater Than - LS = 0x4, // Unsigned Lower or Same - LI = 0x5, // Unsigned Lower - GE = 0x6, // Greater or Equal - LT = 0x7, // Less Than - GT = 0x8, // Greater Than - LE = 0x9, // Less or Equal - OF = 0xa, // OverFlow - SI = 0xb, // Signed - NS = 0xc, // Not Signed - EQU= 0xd, // Equal or unordered(NaN) - NEU= 0xe, - GTU= 0xf, - GEU= 0x10, - LTU= 0x11, - LEU= 0x12, - } Condition; - - // Opaque label types -public: - bool isImmediate(int constant) - { - return ((constant <= 127) && (constant >= -128)); - } - - RegisterID claimScratch() - { - ASSERT((m_claimscratchReg != 0x3)); - - if (!(m_claimscratchReg & 0x1)) { - m_claimscratchReg = (m_claimscratchReg | 0x1); - return scratchReg1; - } - - m_claimscratchReg = (m_claimscratchReg | 0x2); - return scratchReg2; - } - - void releaseScratch(RegisterID scratchR) - { - if (scratchR == scratchReg1) - m_claimscratchReg = (m_claimscratchReg & 0x2); - else - m_claimscratchReg = (m_claimscratchReg & 0x1); - } - - // Stack operations - - void pushReg(RegisterID reg) - { - if (reg == SH4Registers::pr) { - oneShortOp(getOpcodeGroup2(STSLPR_OPCODE, SH4Registers::sp)); - return; - } - - oneShortOp(getOpcodeGroup1(MOVL_WRITE_RNDEC_OPCODE, SH4Registers::sp, reg)); - } - - void popReg(RegisterID reg) - { - if (reg == SH4Registers::pr) { - oneShortOp(getOpcodeGroup2(LDSLPR_OPCODE, SH4Registers::sp)); - return; - } - - oneShortOp(getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, reg, SH4Registers::sp)); - } - - void movt(RegisterID dst) - { - uint16_t opc = getOpcodeGroup2(MOVT_OPCODE, dst); - oneShortOp(opc); - } - - // Arithmetic operations - - void addlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(ADD_OPCODE, dst, src); - oneShortOp(opc); - } - - void addclRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(ADDC_OPCODE, dst, src); - oneShortOp(opc); - } - - void addvlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(ADDV_OPCODE, dst, src); - oneShortOp(opc); - } - - void addlImm8r(int imm8, RegisterID dst) - { - ASSERT((imm8 <= 127) && (imm8 >= -128)); - - uint16_t opc = getOpcodeGroup3(ADDIMM_OPCODE, dst, imm8); - oneShortOp(opc); - } - - void andlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(AND_OPCODE, dst, src); - oneShortOp(opc); - } - - void andlImm8r(int imm8, RegisterID dst) - { - ASSERT((imm8 <= 255) && (imm8 >= 0)); - ASSERT_UNUSED(dst, dst == SH4Registers::r0); - - uint16_t opc = getOpcodeGroup5(ANDIMM_OPCODE, imm8); - oneShortOp(opc); - } - - void div1lRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(DIV1_OPCODE, dst, src); - oneShortOp(opc); - } - - void div0lRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(DIV0_OPCODE, dst, src); - oneShortOp(opc); - } - - void notlReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(NOT_OPCODE, dst, src); - oneShortOp(opc); - } - - void orlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(OR_OPCODE, dst, src); - oneShortOp(opc); - } - - void orlImm8r(int 
imm8, RegisterID dst) - { - ASSERT((imm8 <= 255) && (imm8 >= 0)); - ASSERT_UNUSED(dst, dst == SH4Registers::r0); - - uint16_t opc = getOpcodeGroup5(ORIMM_OPCODE, imm8); - oneShortOp(opc); - } - - void sublRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(SUB_OPCODE, dst, src); - oneShortOp(opc); - } - - void subvlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(SUBV_OPCODE, dst, src); - oneShortOp(opc); - } - - void xorlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(XOR_OPCODE, dst, src); - oneShortOp(opc); - } - - void xorlImm8r(int imm8, RegisterID dst) - { - ASSERT((imm8 <= 255) && (imm8 >= 0)); - ASSERT_UNUSED(dst, dst == SH4Registers::r0); - - uint16_t opc = getOpcodeGroup5(XORIMM_OPCODE, imm8); - oneShortOp(opc); - } - - void shllImm8r(int imm, RegisterID dst) - { - switch (imm) { - case 1: - oneShortOp(getOpcodeGroup2(SHLL_OPCODE, dst)); - break; - case 2: - oneShortOp(getOpcodeGroup2(SHLL2_OPCODE, dst)); - break; - case 8: - oneShortOp(getOpcodeGroup2(SHLL8_OPCODE, dst)); - break; - case 16: - oneShortOp(getOpcodeGroup2(SHLL16_OPCODE, dst)); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - void neg(RegisterID dst, RegisterID src) - { - uint16_t opc = getOpcodeGroup1(NEG_OPCODE, dst, src); - oneShortOp(opc); - } - - void shldRegReg(RegisterID dst, RegisterID rShift) - { - oneShortOp(getOpcodeGroup1(SHLD_OPCODE, dst, rShift)); - } - - void shadRegReg(RegisterID dst, RegisterID rShift) - { - oneShortOp(getOpcodeGroup1(SHAD_OPCODE, dst, rShift)); - } - - void shlrImm8r(int imm, RegisterID dst) - { - switch (imm) { - case 1: - oneShortOp(getOpcodeGroup2(SHLR_OPCODE, dst)); - break; - case 2: - oneShortOp(getOpcodeGroup2(SHLR2_OPCODE, dst)); - break; - case 8: - oneShortOp(getOpcodeGroup2(SHLR8_OPCODE, dst)); - break; - case 16: - oneShortOp(getOpcodeGroup2(SHLR16_OPCODE, dst)); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - void shalImm8r(int imm, RegisterID dst) - { - switch (imm) { - case 1: - oneShortOp(getOpcodeGroup2(SHAL_OPCODE, dst)); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - void sharImm8r(int imm, RegisterID dst) - { - switch (imm) { - case 1: - oneShortOp(getOpcodeGroup2(SHAR_OPCODE, dst)); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - void imullRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MULL_OPCODE, dst, src); - oneShortOp(opc); - } - - void dmullRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(DMULL_L_OPCODE, dst, src); - oneShortOp(opc); - } - - void dmulslRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(DMULSL_OPCODE, dst, src); - oneShortOp(opc); - } - - void stsmacl(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(STSMACL_OPCODE, reg); - oneShortOp(opc); - } - - void stsmach(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(STSMACH_OPCODE, reg); - oneShortOp(opc); - } - - // Comparisons - - void cmplRegReg(RegisterID left, RegisterID right, Condition cond) - { - switch (cond) { - case NE: - oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left)); - break; - case GT: - oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, right, left)); - break; - case EQ: - oneShortOp(getOpcodeGroup1(CMPEQ_OPCODE, right, left)); - break; - case GE: - oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, right, left)); - break; - case HS: - oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, right, left)); - break; - case HI: - oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, right, left)); 
- break; - case LI: - oneShortOp(getOpcodeGroup1(CMPHI_OPCODE, left, right)); - break; - case LS: - oneShortOp(getOpcodeGroup1(CMPHS_OPCODE, left, right)); - break; - case LE: - oneShortOp(getOpcodeGroup1(CMPGE_OPCODE, left, right)); - break; - case LT: - oneShortOp(getOpcodeGroup1(CMPGT_OPCODE, left, right)); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - void cmppl(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(CMPPL_OPCODE, reg); - oneShortOp(opc); - } - - void cmppz(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(CMPPZ_OPCODE, reg); - oneShortOp(opc); - } - - void cmpEqImmR0(int imm, RegisterID dst) - { - ASSERT_UNUSED(dst, dst == SH4Registers::r0); - uint16_t opc = getOpcodeGroup5(CMPEQIMM_OPCODE, imm); - oneShortOp(opc); - } - - void testlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(TST_OPCODE, dst, src); - oneShortOp(opc); - } - - void testlImm8r(int imm, RegisterID dst) - { - ASSERT((imm <= 255) && (imm >= 0)); - ASSERT_UNUSED(dst, dst == SH4Registers::r0); - - uint16_t opc = getOpcodeGroup5(TSTIMM_OPCODE, imm); - oneShortOp(opc); - } - - void nop() - { - oneShortOp(NOP_OPCODE, false); - } - - void synco() - { - oneShortOp(SYNCO_OPCODE); - } - - void sett() - { - oneShortOp(SETT_OPCODE); - } - - void clrt() - { - oneShortOp(CLRT_OPCODE); - } - - void fschg() - { - oneShortOp(FSCHG_OPCODE); - } - - void bkpt() - { - oneShortOp(BRK_OPCODE, false); - } - - void branch(uint16_t opc, int label) - { - switch (opc) { - case BT_OPCODE: - ASSERT((label <= 127) && (label >= -128)); - oneShortOp(getOpcodeGroup5(BT_OPCODE, label)); - break; - case BRA_OPCODE: - ASSERT((label <= 2047) && (label >= -2048)); - oneShortOp(getOpcodeGroup6(BRA_OPCODE, label)); - break; - case BF_OPCODE: - ASSERT((label <= 127) && (label >= -128)); - oneShortOp(getOpcodeGroup5(BF_OPCODE, label)); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - void branch(uint16_t opc, RegisterID reg) - { - switch (opc) { - case BRAF_OPCODE: - oneShortOp(getOpcodeGroup2(BRAF_OPCODE, reg)); - break; - case JMP_OPCODE: - oneShortOp(getOpcodeGroup2(JMP_OPCODE, reg)); - break; - case JSR_OPCODE: - oneShortOp(getOpcodeGroup2(JSR_OPCODE, reg)); - break; - case BSRF_OPCODE: - oneShortOp(getOpcodeGroup2(BSRF_OPCODE, reg)); - break; - default: - RELEASE_ASSERT_NOT_REACHED(); - } - } - - void ldspr(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(LDSPR_OPCODE, reg); - oneShortOp(opc); - } - - void stspr(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(STSPR_OPCODE, reg); - oneShortOp(opc); - } - - void extub(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(EXTUB_OPCODE, dst, src); - oneShortOp(opc); - } - - void extuw(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(EXTUW_OPCODE, dst, src); - oneShortOp(opc); - } - - // float operations - - void ldsrmfpul(RegisterID src) - { - uint16_t opc = getOpcodeGroup2(LDS_RM_FPUL_OPCODE, src); - oneShortOp(opc); - } - - void fneg(FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup2(FNEG_OPCODE, dst); - oneShortOp(opc, true, false); - } - - void fsqrt(FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup2(FSQRT_OPCODE, dst); - oneShortOp(opc, true, false); - } - - void stsfpulReg(RegisterID src) - { - uint16_t opc = getOpcodeGroup2(STS_FPUL_RN_OPCODE, src); - oneShortOp(opc); - } - - void floatfpulfrn(FPRegisterID src) - { - uint16_t opc = getOpcodeGroup2(FLOAT_OPCODE, src); - oneShortOp(opc, true, false); - } - - void fmull(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = 
getOpcodeGroup1(FMUL_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void fmovsRegReg(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup1(FMOV_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void fmovsReadrm(RegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void fmovsWriterm(FPRegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void fmovsWriter0r(FPRegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_R0RN_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void fmovsReadr0r(RegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup1(FMOVS_READ_R0RM_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void fmovsReadrminc(RegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup1(FMOVS_READ_RM_INC_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void fmovsWriterndec(FPRegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(FMOVS_WRITE_RN_DEC_OPCODE, dst, src); - oneShortOp(opc, true, false); - } - - void ftrcRegfpul(FPRegisterID src) - { - uint16_t opc = getOpcodeGroup2(FTRC_OPCODE, src); - oneShortOp(opc, true, false); - } - - void fldsfpul(FPRegisterID src) - { - uint16_t opc = getOpcodeGroup2(FLDS_FRM_FPUL_OPCODE, src); - oneShortOp(opc); - } - - void fstsfpul(FPRegisterID src) - { - uint16_t opc = getOpcodeGroup2(FSTS_FPUL_FRN_OPCODE, src); - oneShortOp(opc); - } - - void ldsfpscr(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(LDSFPSCR_OPCODE, reg); - oneShortOp(opc); - } - - void stsfpscr(RegisterID reg) - { - uint16_t opc = getOpcodeGroup2(STSFPSCR_OPCODE, reg); - oneShortOp(opc); - } - - // double operations - - void dcnvds(FPRegisterID src) - { - uint16_t opc = getOpcodeGroup7(FCNVDS_DRM_FPUL_OPCODE, src >> 1); - oneShortOp(opc); - } - - void dcnvsd(FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup7(FCNVSD_FPUL_DRN_OPCODE, dst >> 1); - oneShortOp(opc); - } - - void dcmppeq(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup8(FCMPEQ_OPCODE, dst >> 1, src >> 1); - oneShortOp(opc); - } - - void dcmppgt(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup8(FCMPGT_OPCODE, dst >> 1, src >> 1); - oneShortOp(opc); - } - - void dmulRegReg(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup8(FMUL_OPCODE, dst >> 1, src >> 1); - oneShortOp(opc); - } - - void dsubRegReg(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup8(FSUB_OPCODE, dst >> 1, src >> 1); - oneShortOp(opc); - } - - void daddRegReg(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup8(FADD_OPCODE, dst >> 1, src >> 1); - oneShortOp(opc); - } - - void dmovRegReg(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup8(FMOV_OPCODE, dst >> 1, src >> 1); - oneShortOp(opc); - } - - void ddivRegReg(FPRegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup8(FDIV_OPCODE, dst >> 1, src >> 1); - oneShortOp(opc); - } - - void dabs(FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup7(FABS_OPCODE, dst >> 1); - oneShortOp(opc); - } - - void dsqrt(FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup7(FSQRT_OPCODE, dst >> 1); - oneShortOp(opc); - } - - void dneg(FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup7(FNEG_OPCODE, dst >> 1); - oneShortOp(opc); - } - - void fmovReadrm(RegisterID src, 
FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_OPCODE, dst >> 1, src); - oneShortOp(opc); - } - - void fmovWriterm(FPRegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_OPCODE, dst, src >> 1); - oneShortOp(opc); - } - - void fmovWriter0r(FPRegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_R0RN_OPCODE, dst, src >> 1); - oneShortOp(opc); - } - - void fmovReadr0r(RegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup10(FMOVS_READ_R0RM_OPCODE, dst >> 1, src); - oneShortOp(opc); - } - - void fmovReadrminc(RegisterID src, FPRegisterID dst) - { - uint16_t opc = getOpcodeGroup10(FMOVS_READ_RM_INC_OPCODE, dst >> 1, src); - oneShortOp(opc); - } - - void fmovWriterndec(FPRegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup9(FMOVS_WRITE_RN_DEC_OPCODE, dst, src >> 1); - oneShortOp(opc); - } - - void floatfpulDreg(FPRegisterID src) - { - uint16_t opc = getOpcodeGroup7(FLOAT_OPCODE, src >> 1); - oneShortOp(opc); - } - - void ftrcdrmfpul(FPRegisterID src) - { - uint16_t opc = getOpcodeGroup7(FTRC_OPCODE, src >> 1); - oneShortOp(opc); - } - - // Various move ops - - void movImm8(int imm8, RegisterID dst) - { - ASSERT((imm8 <= 127) && (imm8 >= -128)); - - uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, imm8); - oneShortOp(opc); - } - - void movlRegReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOV_OPCODE, dst, src); - oneShortOp(opc); - } - - void movwRegMem(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVW_WRITE_RN_OPCODE, dst, src); - oneShortOp(opc); - } - - void movwMemReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVW_READ_RM_OPCODE, dst, src); - oneShortOp(opc); - } - - void movwMemRegIn(RegisterID base, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVW_READ_RMINC_OPCODE, dst, base); - oneShortOp(opc); - } - - void movwPCReg(int offset, RegisterID base, RegisterID dst) - { - ASSERT_UNUSED(base, base == SH4Registers::pc); - ASSERT((offset <= 255) && (offset >= 0)); - - uint16_t opc = getOpcodeGroup3(MOVW_READ_OFFPC_OPCODE, dst, offset); - oneShortOp(opc); - } - - void movwMemReg(int offset, RegisterID base, RegisterID dst) - { - ASSERT_UNUSED(dst, dst == SH4Registers::r0); - - uint16_t opc = getOpcodeGroup11(MOVW_READ_OFFRM_OPCODE, base, offset); - oneShortOp(opc); - } - - void movwR0mr(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVW_READ_R0RM_OPCODE, dst, src); - oneShortOp(opc); - } - - void movwRegMemr0(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVW_WRITE_R0RN_OPCODE, dst, src); - oneShortOp(opc); - } - - void movlRegMem(RegisterID src, int offset, RegisterID base) - { - ASSERT((offset <= 15) && (offset >= 0)); - - if (!offset) { - oneShortOp(getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src)); - return; - } - - oneShortOp(getOpcodeGroup4(MOVL_WRITE_OFFRN_OPCODE, base, src, offset)); - } - - void movlRegMem(RegisterID src, RegisterID base) - { - uint16_t opc = getOpcodeGroup1(MOVL_WRITE_RN_OPCODE, base, src); - oneShortOp(opc); - } - - void movlMemReg(int offset, RegisterID base, RegisterID dst) - { - if (base == SH4Registers::pc) { - ASSERT((offset <= 255) && (offset >= 0)); - oneShortOp(getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, dst, offset)); - return; - } - - ASSERT((offset <= 15) && (offset >= 0)); - if (!offset) { - oneShortOp(getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base)); - return; - } - - oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, 
dst, base, offset)); - } - - void movlMemRegCompact(int offset, RegisterID base, RegisterID dst) - { - oneShortOp(getOpcodeGroup4(MOVL_READ_OFFRM_OPCODE, dst, base, offset)); - } - - void movbRegMem(RegisterID src, RegisterID base) - { - uint16_t opc = getOpcodeGroup1(MOVB_WRITE_RN_OPCODE, base, src); - oneShortOp(opc); - } - - void movbMemReg(int offset, RegisterID base, RegisterID dst) - { - ASSERT_UNUSED(dst, dst == SH4Registers::r0); - - uint16_t opc = getOpcodeGroup11(MOVB_READ_OFFRM_OPCODE, base, offset); - oneShortOp(opc); - } - - void movbR0mr(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVB_READ_R0RM_OPCODE, dst, src); - oneShortOp(opc); - } - - void movbMemReg(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVB_READ_RM_OPCODE, dst, src); - oneShortOp(opc); - } - - void movbMemRegIn(RegisterID base, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVB_READ_RMINC_OPCODE, dst, base); - oneShortOp(opc); - } - - void movbRegMemr0(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVB_WRITE_R0RN_OPCODE, dst, src); - oneShortOp(opc); - } - - void movlMemReg(RegisterID base, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVL_READ_RM_OPCODE, dst, base); - oneShortOp(opc); - } - - void movlMemRegIn(RegisterID base, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVL_READ_RMINC_OPCODE, dst, base); - oneShortOp(opc); - } - - void movlR0mr(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVL_READ_R0RM_OPCODE, dst, src); - oneShortOp(opc); - } - - void movlRegMemr0(RegisterID src, RegisterID dst) - { - uint16_t opc = getOpcodeGroup1(MOVL_WRITE_R0RN_OPCODE, dst, src); - oneShortOp(opc); - } - - void loadConstant(uint32_t constant, RegisterID dst) - { - if (((int)constant <= 0x7f) && ((int)constant >= -0x80)) { - movImm8(constant, dst); - return; - } - - uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0); - - m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t)); - printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize()); - m_buffer.putShortWithConstantInt(opc, constant, true); - } - - void loadConstantUnReusable(uint32_t constant, RegisterID dst, bool ensureSpace = false) - { - uint16_t opc = getOpcodeGroup3(MOVIMM_OPCODE, dst, 0); - - if (ensureSpace) - m_buffer.ensureSpace(maxInstructionSize, sizeof(uint32_t)); - - printInstr(getOpcodeGroup3(MOVIMM_OPCODE, dst, constant), m_buffer.codeSize()); - m_buffer.putShortWithConstantInt(opc, constant); - } - - // Flow control - - AssemblerLabel call() - { - RegisterID scr = claimScratch(); - m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t)); - loadConstantUnReusable(0x0, scr); - branch(JSR_OPCODE, scr); - nop(); - releaseScratch(scr); - return m_buffer.label(); - } - - AssemblerLabel call(RegisterID dst) - { - m_buffer.ensureSpace(maxInstructionSize + 2); - branch(JSR_OPCODE, dst); - nop(); - return m_buffer.label(); - } - - AssemblerLabel jmp() - { - RegisterID scr = claimScratch(); - m_buffer.ensureSpace(maxInstructionSize + 4, sizeof(uint32_t)); - loadConstantUnReusable(0x0, scr); - branch(BRAF_OPCODE, scr); - nop(); - releaseScratch(scr); - return m_buffer.label(); - } - - AssemblerLabel extraInstrForBranch(RegisterID dst) - { - loadConstantUnReusable(0x0, dst); - branch(BRAF_OPCODE, dst); - nop(); - return m_buffer.label(); - } - - AssemblerLabel jmp(RegisterID dst) - { - jmpReg(dst); - return m_buffer.label(); - } - - void jmpReg(RegisterID dst) - { - m_buffer.ensureSpace(maxInstructionSize + 2); - 
branch(JMP_OPCODE, dst); - nop(); - } - - AssemblerLabel jne() - { - branch(BF_OPCODE, 0); - return m_buffer.label(); - } - - AssemblerLabel je() - { - branch(BT_OPCODE, 0); - return m_buffer.label(); - } - - AssemblerLabel bra() - { - branch(BRA_OPCODE, 0); - return m_buffer.label(); - } - - void ret() - { - m_buffer.ensureSpace(maxInstructionSize + 2); - oneShortOp(RTS_OPCODE, false); - } - - AssemblerLabel labelIgnoringWatchpoints() - { - m_buffer.ensureSpaceForAnyInstruction(); - return m_buffer.label(); - } - - AssemblerLabel labelForWatchpoint() - { - m_buffer.ensureSpaceForAnyInstruction(); - AssemblerLabel result = m_buffer.label(); - if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint) - result = label(); - m_indexOfLastWatchpoint = result.m_offset; - m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize(); - return result; - } - - AssemblerLabel label() - { - AssemblerLabel result = labelIgnoringWatchpoints(); - while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) { - nop(); - result = labelIgnoringWatchpoints(); - } - return result; - } - - int sizeOfConstantPool() - { - return m_buffer.sizeOfConstantPool(); - } - - AssemblerLabel align(int alignment) - { - m_buffer.ensureSpace(maxInstructionSize + 2); - while (!m_buffer.isAligned(alignment)) { - nop(); - m_buffer.ensureSpace(maxInstructionSize + 2); - } - return label(); - } - - static void changePCrelativeAddress(int offset, uint16_t* instructionPtr, uint32_t newAddress) - { - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) &(~0x3)); - *reinterpret_cast<uint32_t*>(address) = newAddress; - } - - static uint32_t readPCrelativeAddress(int offset, uint16_t* instructionPtr) - { - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - uint32_t address = (offset << 2) + ((reinterpret_cast<uint32_t>(instructionPtr) + 4) &(~0x3)); - return *reinterpret_cast<uint32_t*>(address); - } - - static uint16_t* getInstructionPtr(void* code, int offset) - { - return reinterpret_cast<uint16_t*> (reinterpret_cast<uint32_t>(code) + offset); - } - - static void linkJump(void* code, AssemblerLabel from, void* to) - { - ASSERT(from.isSet()); - - uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset) - 3; - int offsetBits = (reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(code)) - from.m_offset; - - /* MOV #imm, reg => LDR reg - braf @reg braf @reg - nop nop - */ - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE); - changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits); - printInstr(*instructionPtr, from.m_offset + 2); - } - - static void linkCall(void* code, AssemblerLabel from, void* to) - { - uint16_t* instructionPtr = getInstructionPtr(code, from.m_offset); - instructionPtr -= 3; - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to)); - } - - static void linkPointer(void* code, AssemblerLabel where, void* value) - { - uint16_t* instructionPtr = getInstructionPtr(code, where.m_offset); - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(value)); - } - - static unsigned getCallReturnOffset(AssemblerLabel call) - { - ASSERT(call.isSet()); - return 
call.m_offset; - } - - static uint32_t* getLdrImmAddressOnPool(SH4Word* insn, uint32_t* constPool) - { - return (constPool + (*insn & 0xff)); - } - - static SH4Word patchConstantPoolLoad(SH4Word load, int value) - { - return ((load & ~0xff) | value); - } - - static SH4Buffer::TwoShorts placeConstantPoolBarrier(int offset) - { - ASSERT(((offset >> 1) <= 2047) && ((offset >> 1) >= -2048)); - - SH4Buffer::TwoShorts m_barrier; - m_barrier.high = (BRA_OPCODE | (offset >> 1)); - m_barrier.low = NOP_OPCODE; - printInstr(((BRA_OPCODE | (offset >> 1))), 0); - printInstr(NOP_OPCODE, 0); - return m_barrier; - } - - static void patchConstantPoolLoad(void* loadAddr, void* constPoolAddr) - { - SH4Word* instructionPtr = reinterpret_cast<SH4Word*>(loadAddr); - SH4Word instruction = *instructionPtr; - SH4Word index = instruction & 0xff; - - if ((instruction & 0xf000) != MOVIMM_OPCODE) - return; - - ASSERT((((reinterpret_cast<uint32_t>(constPoolAddr) - reinterpret_cast<uint32_t>(loadAddr)) + index * 4)) < 1024); - - int offset = reinterpret_cast<uint32_t>(constPoolAddr) + (index * 4) - ((reinterpret_cast<uint32_t>(instructionPtr) & ~0x03) + 4); - instruction &= 0x0f00; - instruction |= 0xd000; - offset &= 0x03ff; - instruction |= (offset >> 2); - *instructionPtr = instruction; - printInstr(instruction, reinterpret_cast<uint32_t>(loadAddr)); - } - - static void repatchPointer(void* where, void* value) - { - patchPointer(where, value); - } - - static void* readPointer(void* code) - { - return reinterpret_cast<void*>(readInt32(code)); - } - - static void repatchInt32(void* where, int32_t value) - { - uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where); - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, value); - } - - static void repatchCompact(void* where, int32_t value) - { - uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(where); - ASSERT(value >= 0); - ASSERT(value <= 60); - - // Handle the uncommon case where a flushConstantPool occured in movlMemRegCompact. 
- if ((instructionPtr[0] & 0xf000) == BRA_OPCODE) - instructionPtr += (instructionPtr[0] & 0x0fff) + 2; - - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFRM_OPCODE); - instructionPtr[0] = (instructionPtr[0] & 0xfff0) | (value >> 2); - cacheFlush(instructionPtr, sizeof(uint16_t)); - } - - static void relinkCall(void* from, void* to) - { - uint16_t* instructionPtr = reinterpret_cast<uint16_t*>(from); - instructionPtr -= 3; - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to)); - } - - static void relinkJump(void* from, void* to) - { - uint16_t* instructionPtr = reinterpret_cast<uint16_t*> (from); - instructionPtr -= 3; - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE); - changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, reinterpret_cast<uint32_t>(to) - reinterpret_cast<uint32_t>(from)); - } - - // Linking & patching - - static ptrdiff_t maxJumpReplacementSize() - { - return sizeof(SH4Word) * 6; - } - - static void replaceWithJump(void *instructionStart, void *to) - { - SH4Word* instruction = reinterpret_cast<SH4Word*>(instructionStart); - intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + 3 * sizeof(SH4Word)); - - if ((instruction[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE) { - // We have an entry in constant pool and we potentially replace a branchPtrWithPatch, so let's backup what would be the - // condition (CMP/xx and Bx opcodes) for later use in revertJumpReplacementToBranchPtrWithPatch before putting the jump. - instruction[4] = instruction[1]; - instruction[5] = instruction[2]; - instruction[1] = (BRAF_OPCODE | (instruction[0] & 0x0f00)); - instruction[2] = NOP_OPCODE; - cacheFlush(&instruction[1], 2 * sizeof(SH4Word)); - } else { - instruction[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, 1); - instruction[1] = getOpcodeGroup2(BRAF_OPCODE, SH4Registers::r13); - instruction[2] = NOP_OPCODE; - cacheFlush(instruction, 3 * sizeof(SH4Word)); - } - - changePCrelativeAddress(instruction[0] & 0x00ff, instruction, difference); - } - - static void revertJumpReplacementToBranchPtrWithPatch(void* instructionStart, RegisterID rd, int imm) - { - SH4Word *insn = reinterpret_cast<SH4Word*>(instructionStart); - ASSERT((insn[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - ASSERT((insn[0] & 0x00ff) != 1); - - insn[0] = getOpcodeGroup3(MOVL_READ_OFFPC_OPCODE, SH4Registers::r13, insn[0] & 0x00ff); - if ((insn[1] & 0xf0ff) == BRAF_OPCODE) { - insn[1] = (insn[4] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4); // Restore CMP/xx opcode. - insn[2] = insn[5]; - ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE)); - ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - insn[4] = (BRAF_OPCODE | (insn[3] & 0x0f00)); - insn[5] = NOP_OPCODE; - cacheFlush(insn, 6 * sizeof(SH4Word)); - } else { - // The branchPtrWithPatch has already been restored, so we just patch the immediate value and ASSERT all is as expected. 
- ASSERT((insn[1] & 0xf000) == 0x3000); - insn[1] = (insn[1] & 0xf00f) | (rd << 8) | (SH4Registers::r13 << 4); - cacheFlush(insn, 2 * sizeof(SH4Word)); - ASSERT(((insn[2] & 0xff00) == BT_OPCODE) || ((insn[2] & 0xff00) == BF_OPCODE)); - ASSERT((insn[3] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - ASSERT(insn[5] == NOP_OPCODE); - } - - changePCrelativeAddress(insn[0] & 0x00ff, insn, imm); - } - - void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type = JumpFar) - { - ASSERT(to.isSet()); - ASSERT(from.isSet()); - - uint16_t* instructionPtr = getInstructionPtr(data(), from.m_offset) - 1; - int offsetBits = (to.m_offset - from.m_offset); - - if (type == JumpNear) { - uint16_t instruction = instructionPtr[0]; - int offset = (offsetBits - 2); - ASSERT((((instruction == BT_OPCODE) || (instruction == BF_OPCODE)) && (offset >= -256) && (offset <= 254)) - || ((instruction == BRA_OPCODE) && (offset >= -4096) && (offset <= 4094))); - *instructionPtr++ = instruction | (offset >> 1); - printInstr(*instructionPtr, from.m_offset + 2); - return; - } - - /* MOV # imm, reg => LDR reg - braf @reg braf @reg - nop nop - */ - instructionPtr -= 2; - ASSERT((instructionPtr[1] & 0xf0ff) == BRAF_OPCODE); - - if ((instructionPtr[0] & 0xf000) == MOVIMM_OPCODE) { - uint32_t* addr = getLdrImmAddressOnPool(instructionPtr, m_buffer.poolAddress()); - *addr = offsetBits; - printInstr(*instructionPtr, from.m_offset + 2); - return; - } - - ASSERT((instructionPtr[0] & 0xf000) == MOVL_READ_OFFPC_OPCODE); - changePCrelativeAddress((*instructionPtr & 0xff), instructionPtr, offsetBits); - printInstr(*instructionPtr, from.m_offset + 2); - } - - static void* getRelocatedAddress(void* code, AssemblerLabel label) - { - return reinterpret_cast<void*>(reinterpret_cast<char*>(code) + label.m_offset); - } - - static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b) - { - return b.m_offset - a.m_offset; - } - - static void patchPointer(void* code, AssemblerLabel where, void* value) - { - patchPointer(reinterpret_cast<uint32_t*>(code) + where.m_offset, value); - } - - static void patchPointer(void* code, void* value) - { - patchInt32(code, reinterpret_cast<uint32_t>(value)); - } - - static void patchInt32(void* code, uint32_t value) - { - changePCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code), value); - } - - static uint32_t readInt32(void* code) - { - return readPCrelativeAddress((*(reinterpret_cast<uint16_t*>(code)) & 0xff), reinterpret_cast<uint16_t*>(code)); - } - - static void* readCallTarget(void* from) - { - uint16_t* instructionPtr = static_cast<uint16_t*>(from); - instructionPtr -= 3; - return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr)); - } - - static void cacheFlush(void* code, size_t size) - { -#if OS(LINUX) - // Flush each page separately, otherwise the whole flush will fail if an uncommited page is in the area. - unsigned currentPage = reinterpret_cast<unsigned>(code) & ~(pageSize() - 1); - unsigned lastPage = (reinterpret_cast<unsigned>(code) + size - 1) & ~(pageSize() - 1); - do { -#if defined CACHEFLUSH_D_L2 - syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I | CACHEFLUSH_D_L2); -#else - syscall(__NR_cacheflush, currentPage, pageSize(), CACHEFLUSH_D_WB | CACHEFLUSH_I); -#endif - currentPage += pageSize(); - } while (lastPage >= currentPage); -#else -#error "The cacheFlush support is missing on this platform." 
-#endif - } - - void prefix(uint16_t pre) - { - m_buffer.putByte(pre); - } - - void oneShortOp(uint16_t opcode, bool checksize = true, bool isDouble = true) - { - printInstr(opcode, m_buffer.codeSize(), isDouble); - if (checksize) - m_buffer.ensureSpace(maxInstructionSize); - m_buffer.putShortUnchecked(opcode); - } - - void ensureSpace(int space) - { - m_buffer.ensureSpace(space); - } - - void ensureSpace(int insnSpace, int constSpace) - { - m_buffer.ensureSpace(insnSpace, constSpace); - } - - // Administrative methods - - void* data() const { return m_buffer.data(); } - size_t codeSize() const { return m_buffer.codeSize(); } - - unsigned debugOffset() { return m_buffer.debugOffset(); } - -#ifdef SH4_ASSEMBLER_TRACING - static void printInstr(uint16_t opc, unsigned size, bool isdoubleInst = true) - { - if (!getenv("JavaScriptCoreDumpJIT")) - return; - - const char *format = 0; - printfStdoutInstr("offset: 0x%8.8x\t", size); - switch (opc) { - case BRK_OPCODE: - format = " BRK\n"; - break; - case NOP_OPCODE: - format = " NOP\n"; - break; - case RTS_OPCODE: - format =" *RTS\n"; - break; - case SETS_OPCODE: - format = " SETS\n"; - break; - case SETT_OPCODE: - format = " SETT\n"; - break; - case CLRT_OPCODE: - format = " CLRT\n"; - break; - case FSCHG_OPCODE: - format = " FSCHG\n"; - break; - } - if (format) { - printfStdoutInstr(format); - return; - } - switch (opc & 0xf0ff) { - case BRAF_OPCODE: - format = " *BRAF R%d\n"; - break; - case DT_OPCODE: - format = " DT R%d\n"; - break; - case CMPPL_OPCODE: - format = " CMP/PL R%d\n"; - break; - case CMPPZ_OPCODE: - format = " CMP/PZ R%d\n"; - break; - case JMP_OPCODE: - format = " *JMP @R%d\n"; - break; - case JSR_OPCODE: - format = " *JSR @R%d\n"; - break; - case LDSPR_OPCODE: - format = " LDS R%d, PR\n"; - break; - case LDSLPR_OPCODE: - format = " LDS.L @R%d+, PR\n"; - break; - case MOVT_OPCODE: - format = " MOVT R%d\n"; - break; - case SHAL_OPCODE: - format = " SHAL R%d\n"; - break; - case SHAR_OPCODE: - format = " SHAR R%d\n"; - break; - case SHLL_OPCODE: - format = " SHLL R%d\n"; - break; - case SHLL2_OPCODE: - format = " SHLL2 R%d\n"; - break; - case SHLL8_OPCODE: - format = " SHLL8 R%d\n"; - break; - case SHLL16_OPCODE: - format = " SHLL16 R%d\n"; - break; - case SHLR_OPCODE: - format = " SHLR R%d\n"; - break; - case SHLR2_OPCODE: - format = " SHLR2 R%d\n"; - break; - case SHLR8_OPCODE: - format = " SHLR8 R%d\n"; - break; - case SHLR16_OPCODE: - format = " SHLR16 R%d\n"; - break; - case STSPR_OPCODE: - format = " STS PR, R%d\n"; - break; - case STSLPR_OPCODE: - format = " STS.L PR, @-R%d\n"; - break; - case LDS_RM_FPUL_OPCODE: - format = " LDS R%d, FPUL\n"; - break; - case STS_FPUL_RN_OPCODE: - format = " STS FPUL, R%d \n"; - break; - case FLDS_FRM_FPUL_OPCODE: - format = " FLDS FR%d, FPUL\n"; - break; - case FSTS_FPUL_FRN_OPCODE: - format = " FSTS FPUL, R%d \n"; - break; - case LDSFPSCR_OPCODE: - format = " LDS R%d, FPSCR \n"; - break; - case STSFPSCR_OPCODE: - format = " STS FPSCR, R%d \n"; - break; - case STSMACL_OPCODE: - format = " STS MACL, R%d \n"; - break; - case STSMACH_OPCODE: - format = " STS MACH, R%d \n"; - break; - case BSRF_OPCODE: - format = " *BSRF R%d"; - break; - case FTRC_OPCODE: - format = " FTRC FR%d, FPUL\n"; - break; - } - if (format) { - printfStdoutInstr(format, getRn(opc)); - return; - } - switch (opc & 0xf0ff) { - case FNEG_OPCODE: - format = " FNEG DR%d\n"; - break; - case FLOAT_OPCODE: - format = " FLOAT DR%d\n"; - break; - case FTRC_OPCODE: - format = " FTRC FR%d, FPUL\n"; - break; - case FABS_OPCODE: - 
format = " FABS FR%d\n"; - break; - case FSQRT_OPCODE: - format = " FSQRT FR%d\n"; - break; - case FCNVDS_DRM_FPUL_OPCODE: - format = " FCNVDS FR%d, FPUL\n"; - break; - case FCNVSD_FPUL_DRN_OPCODE: - format = " FCNVSD FPUL, FR%d\n"; - break; - } - if (format) { - if (isdoubleInst) - printfStdoutInstr(format, getDRn(opc) << 1); - else - printfStdoutInstr(format, getRn(opc)); - return; - } - switch (opc & 0xf00f) { - case ADD_OPCODE: - format = " ADD R%d, R%d\n"; - break; - case ADDC_OPCODE: - format = " ADDC R%d, R%d\n"; - break; - case ADDV_OPCODE: - format = " ADDV R%d, R%d\n"; - break; - case AND_OPCODE: - format = " AND R%d, R%d\n"; - break; - case DIV1_OPCODE: - format = " DIV1 R%d, R%d\n"; - break; - case CMPEQ_OPCODE: - format = " CMP/EQ R%d, R%d\n"; - break; - case CMPGE_OPCODE: - format = " CMP/GE R%d, R%d\n"; - break; - case CMPGT_OPCODE: - format = " CMP/GT R%d, R%d\n"; - break; - case CMPHI_OPCODE: - format = " CMP/HI R%d, R%d\n"; - break; - case CMPHS_OPCODE: - format = " CMP/HS R%d, R%d\n"; - break; - case MOV_OPCODE: - format = " MOV R%d, R%d\n"; - break; - case MOVB_WRITE_RN_OPCODE: - format = " MOV.B R%d, @R%d\n"; - break; - case MOVB_WRITE_RNDEC_OPCODE: - format = " MOV.B R%d, @-R%d\n"; - break; - case MOVB_WRITE_R0RN_OPCODE: - format = " MOV.B R%d, @(R0, R%d)\n"; - break; - case MOVB_READ_RM_OPCODE: - format = " MOV.B @R%d, R%d\n"; - break; - case MOVB_READ_RMINC_OPCODE: - format = " MOV.B @R%d+, R%d\n"; - break; - case MOVB_READ_R0RM_OPCODE: - format = " MOV.B @(R0, R%d), R%d\n"; - break; - case MOVL_WRITE_RN_OPCODE: - format = " MOV.L R%d, @R%d\n"; - break; - case MOVL_WRITE_RNDEC_OPCODE: - format = " MOV.L R%d, @-R%d\n"; - break; - case MOVL_WRITE_R0RN_OPCODE: - format = " MOV.L R%d, @(R0, R%d)\n"; - break; - case MOVL_READ_RM_OPCODE: - format = " MOV.L @R%d, R%d\n"; - break; - case MOVL_READ_RMINC_OPCODE: - format = " MOV.L @R%d+, R%d\n"; - break; - case MOVL_READ_R0RM_OPCODE: - format = " MOV.L @(R0, R%d), R%d\n"; - break; - case MULL_OPCODE: - format = " MUL.L R%d, R%d\n"; - break; - case DMULL_L_OPCODE: - format = " DMULU.L R%d, R%d\n"; - break; - case DMULSL_OPCODE: - format = " DMULS.L R%d, R%d\n"; - break; - case NEG_OPCODE: - format = " NEG R%d, R%d\n"; - break; - case NEGC_OPCODE: - format = " NEGC R%d, R%d\n"; - break; - case NOT_OPCODE: - format = " NOT R%d, R%d\n"; - break; - case OR_OPCODE: - format = " OR R%d, R%d\n"; - break; - case SHAD_OPCODE: - format = " SHAD R%d, R%d\n"; - break; - case SHLD_OPCODE: - format = " SHLD R%d, R%d\n"; - break; - case SUB_OPCODE: - format = " SUB R%d, R%d\n"; - break; - case SUBC_OPCODE: - format = " SUBC R%d, R%d\n"; - break; - case SUBV_OPCODE: - format = " SUBV R%d, R%d\n"; - break; - case TST_OPCODE: - format = " TST R%d, R%d\n"; - break; - case XOR_OPCODE: - format = " XOR R%d, R%d\n";break; - case MOVW_WRITE_RN_OPCODE: - format = " MOV.W R%d, @R%d\n"; - break; - case MOVW_READ_RM_OPCODE: - format = " MOV.W @R%d, R%d\n"; - break; - case MOVW_READ_RMINC_OPCODE: - format = " MOV.W @R%d+, R%d\n"; - break; - case MOVW_READ_R0RM_OPCODE: - format = " MOV.W @(R0, R%d), R%d\n"; - break; - case MOVW_WRITE_R0RN_OPCODE: - format = " MOV.W R%d, @(R0, R%d)\n"; - break; - case EXTUB_OPCODE: - format = " EXTU.B R%d, R%d\n"; - break; - case EXTUW_OPCODE: - format = " EXTU.W R%d, R%d\n"; - break; - } - if (format) { - printfStdoutInstr(format, getRm(opc), getRn(opc)); - return; - } - switch (opc & 0xf00f) { - case FSUB_OPCODE: - format = " FSUB FR%d, FR%d\n"; - break; - case FADD_OPCODE: - format = " FADD FR%d, FR%d\n"; - break; - 
case FDIV_OPCODE: - format = " FDIV FR%d, FR%d\n"; - break; - case FMUL_OPCODE: - format = " DMULL FR%d, FR%d\n"; - break; - case FMOV_OPCODE: - format = " FMOV FR%d, FR%d\n"; - break; - case FCMPEQ_OPCODE: - format = " FCMP/EQ FR%d, FR%d\n"; - break; - case FCMPGT_OPCODE: - format = " FCMP/GT FR%d, FR%d\n"; - break; - } - if (format) { - if (isdoubleInst) - printfStdoutInstr(format, getDRm(opc) << 1, getDRn(opc) << 1); - else - printfStdoutInstr(format, getRm(opc), getRn(opc)); - return; - } - switch (opc & 0xf00f) { - case FMOVS_WRITE_RN_DEC_OPCODE: - format = " %s FR%d, @-R%d\n"; - break; - case FMOVS_WRITE_RN_OPCODE: - format = " %s FR%d, @R%d\n"; - break; - case FMOVS_WRITE_R0RN_OPCODE: - format = " %s FR%d, @(R0, R%d)\n"; - break; - } - if (format) { - if (isdoubleInst) - printfStdoutInstr(format, "FMOV", getDRm(opc) << 1, getDRn(opc)); - else - printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc)); - return; - } - switch (opc & 0xf00f) { - case FMOVS_READ_RM_OPCODE: - format = " %s @R%d, FR%d\n"; - break; - case FMOVS_READ_RM_INC_OPCODE: - format = " %s @R%d+, FR%d\n"; - break; - case FMOVS_READ_R0RM_OPCODE: - format = " %s @(R0, R%d), FR%d\n"; - break; - } - if (format) { - if (isdoubleInst) - printfStdoutInstr(format, "FMOV", getDRm(opc), getDRn(opc) << 1); - else - printfStdoutInstr(format, "FMOV.S", getRm(opc), getRn(opc)); - return; - } - switch (opc & 0xff00) { - case BF_OPCODE: - format = " BF %d\n"; - break; - case BFS_OPCODE: - format = " *BF/S %d\n"; - break; - case ANDIMM_OPCODE: - format = " AND #%d, R0\n"; - break; - case BT_OPCODE: - format = " BT %d\n"; - break; - case BTS_OPCODE: - format = " *BT/S %d\n"; - break; - case CMPEQIMM_OPCODE: - format = " CMP/EQ #%d, R0\n"; - break; - case MOVB_WRITE_OFFGBR_OPCODE: - format = " MOV.B R0, @(%d, GBR)\n"; - break; - case MOVB_READ_OFFGBR_OPCODE: - format = " MOV.B @(%d, GBR), R0\n"; - break; - case MOVL_WRITE_OFFGBR_OPCODE: - format = " MOV.L R0, @(%d, GBR)\n"; - break; - case MOVL_READ_OFFGBR_OPCODE: - format = " MOV.L @(%d, GBR), R0\n"; - break; - case MOVA_READ_OFFPC_OPCODE: - format = " MOVA @(%d, PC), R0\n"; - break; - case ORIMM_OPCODE: - format = " OR #%d, R0\n"; - break; - case ORBIMM_OPCODE: - format = " OR.B #%d, @(R0, GBR)\n"; - break; - case TSTIMM_OPCODE: - format = " TST #%d, R0\n"; - break; - case TSTB_OPCODE: - format = " TST.B %d, @(R0, GBR)\n"; - break; - case XORIMM_OPCODE: - format = " XOR #%d, R0\n"; - break; - case XORB_OPCODE: - format = " XOR.B %d, @(R0, GBR)\n"; - break; - } - if (format) { - printfStdoutInstr(format, getImm8(opc)); - return; - } - switch (opc & 0xff00) { - case MOVB_WRITE_OFFRN_OPCODE: - format = " MOV.B R0, @(%d, R%d)\n"; - break; - case MOVB_READ_OFFRM_OPCODE: - format = " MOV.B @(%d, R%d), R0\n"; - break; - } - if (format) { - printfStdoutInstr(format, getDisp(opc), getRm(opc)); - return; - } - switch (opc & 0xf000) { - case BRA_OPCODE: - format = " *BRA %d\n"; - break; - case BSR_OPCODE: - format = " *BSR %d\n"; - break; - } - if (format) { - printfStdoutInstr(format, getImm12(opc)); - return; - } - switch (opc & 0xf000) { - case MOVL_READ_OFFPC_OPCODE: - format = " MOV.L @(%d, PC), R%d\n"; - break; - case ADDIMM_OPCODE: - format = " ADD #%d, R%d\n"; - break; - case MOVIMM_OPCODE: - format = " MOV #%d, R%d\n"; - break; - case MOVW_READ_OFFPC_OPCODE: - format = " MOV.W @(%d, PC), R%d\n"; - break; - } - if (format) { - printfStdoutInstr(format, getImm8(opc), getRn(opc)); - return; - } - switch (opc & 0xf000) { - case MOVL_WRITE_OFFRN_OPCODE: - format = " MOV.L R%d, @(%d, 
R%d)\n"; - printfStdoutInstr(format, getRm(opc), getDisp(opc), getRn(opc)); - break; - case MOVL_READ_OFFRM_OPCODE: - format = " MOV.L @(%d, R%d), R%d\n"; - printfStdoutInstr(format, getDisp(opc), getRm(opc), getRn(opc)); - break; - } - } - - static void printfStdoutInstr(const char* format, ...) - { - if (getenv("JavaScriptCoreDumpJIT")) { - va_list args; - va_start(args, format); - vprintfStdoutInstr(format, args); - va_end(args); - } - } - - static void vprintfStdoutInstr(const char* format, va_list args) - { - if (getenv("JavaScriptCoreDumpJIT")) - WTF::dataLogFV(format, args); - } - - static void printBlockInstr(uint16_t* first, unsigned offset, int nbInstr) - { - printfStdoutInstr(">> repatch instructions after link\n"); - for (int i = 0; i <= nbInstr; i++) - printInstr(*(first + i), offset + i); - printfStdoutInstr(">> end repatch\n"); - } -#else - static void printInstr(uint16_t, unsigned, bool = true) { }; - static void printBlockInstr(uint16_t*, unsigned, int) { }; -#endif - - static void replaceWithLoad(void* instructionStart) - { - SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart); - - insPtr += 2; // skip MOV and ADD opcodes - - if (((*insPtr) & 0xf00f) != MOVL_READ_RM_OPCODE) { - *insPtr = MOVL_READ_RM_OPCODE | (*insPtr & 0x0ff0); - cacheFlush(insPtr, sizeof(SH4Word)); - } - } - - static void replaceWithAddressComputation(void* instructionStart) - { - SH4Word* insPtr = reinterpret_cast<SH4Word*>(instructionStart); - - insPtr += 2; // skip MOV and ADD opcodes - - if (((*insPtr) & 0xf00f) != MOV_OPCODE) { - *insPtr = MOV_OPCODE | (*insPtr & 0x0ff0); - cacheFlush(insPtr, sizeof(SH4Word)); - } - } - -private: - SH4Buffer m_buffer; - int m_claimscratchReg; - int m_indexOfLastWatchpoint; - int m_indexOfTailOfLastWatchpoint; -}; - -} // namespace JSC - -#endif // ENABLE(ASSEMBLER) && CPU(SH4) - -#endif // SH4Assembler_h diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h index 1a43e206c..fb3a169a1 100644 --- a/Source/JavaScriptCore/assembler/X86Assembler.h +++ b/Source/JavaScriptCore/assembler/X86Assembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2017 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,124 +23,108 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef X86Assembler_h -#define X86Assembler_h +#pragma once #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) #include "AssemblerBuffer.h" +#include "AssemblerCommon.h" #include "JITCompilationEffort.h" #include <limits.h> #include <stdint.h> #include <wtf/Assertions.h> #include <wtf/Vector.h> -#if USE(MASM_PROBE) -#include <xmmintrin.h> -#endif - namespace JSC { inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; } namespace X86Registers { - typedef enum { - eax, - ecx, - edx, - ebx, - esp, - ebp, - esi, - edi, -#if CPU(X86_64) - r8, - r9, - r10, - r11, - r12, - r13, - r14, - r15, -#endif - } RegisterID; +#define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + +// The following are defined as pairs of the following value: +// 1. type of the storage needed to save the register value by the JIT probe. +// 2. name of the register. 
+#define FOR_EACH_CPU_GPREGISTER(V) \ + V(void*, eax) \ + V(void*, ecx) \ + V(void*, edx) \ + V(void*, ebx) \ + V(void*, esp) \ + V(void*, ebp) \ + V(void*, esi) \ + V(void*, edi) \ + FOR_EACH_X86_64_CPU_GPREGISTER(V) + +#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, eip) \ + V(void*, eflags) \ + +// Note: the JITs only stores double values in the FP registers. +#define FOR_EACH_CPU_FPREGISTER(V) \ + V(double, xmm0) \ + V(double, xmm1) \ + V(double, xmm2) \ + V(double, xmm3) \ + V(double, xmm4) \ + V(double, xmm5) \ + V(double, xmm6) \ + V(double, xmm7) \ + FOR_EACH_X86_64_CPU_FPREGISTER(V) - typedef enum { - xmm0, - xmm1, - xmm2, - xmm3, - xmm4, - xmm5, - xmm6, - xmm7, +#if CPU(X86) -#if CPU(X86_64) - xmm8, - xmm9, - xmm10, - xmm11, - xmm12, - xmm13, - xmm14, - xmm15, -#endif - } XMMRegisterID; - -#if USE(MASM_PROBE) - #define FOR_EACH_CPU_REGISTER(V) \ - FOR_EACH_CPU_GPREGISTER(V) \ - FOR_EACH_CPU_SPECIAL_REGISTER(V) \ - FOR_EACH_CPU_FPREGISTER(V) - - #define FOR_EACH_CPU_GPREGISTER(V) \ - V(void*, eax) \ - V(void*, ebx) \ - V(void*, ecx) \ - V(void*, edx) \ - V(void*, esi) \ - V(void*, edi) \ - V(void*, ebp) \ - V(void*, esp) \ - FOR_EACH_X86_64_CPU_GPREGISTER(V) - - #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ - V(void*, eip) \ - V(void*, eflags) \ - - #define FOR_EACH_CPU_FPREGISTER(V) \ - V(__m128, xmm0) \ - V(__m128, xmm1) \ - V(__m128, xmm2) \ - V(__m128, xmm3) \ - V(__m128, xmm4) \ - V(__m128, xmm5) \ - V(__m128, xmm6) \ - V(__m128, xmm7) +#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add. +#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add. -#if CPU(X86) - #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add. #elif CPU(X86_64) - #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \ - V(void*, r8) \ - V(void*, r9) \ - V(void*, r10) \ - V(void*, r11) \ - V(void*, r12) \ - V(void*, r13) \ - V(void*, r14) \ - V(void*, r15) + +#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \ + V(void*, r8) \ + V(void*, r9) \ + V(void*, r10) \ + V(void*, r11) \ + V(void*, r12) \ + V(void*, r13) \ + V(void*, r14) \ + V(void*, r15) + +#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \ + V(double, xmm8) \ + V(double, xmm9) \ + V(double, xmm10) \ + V(double, xmm11) \ + V(double, xmm12) \ + V(double, xmm13) \ + V(double, xmm14) \ + V(double, xmm15) + #endif // CPU(X86_64) -#endif // USE(MASM_PROBE) -} + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER +} RegisterID; + +typedef enum { + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER +} XMMRegisterID; + +} // namespace X86Register class X86Assembler { public: typedef X86Registers::RegisterID RegisterID; - static RegisterID firstRegister() { return X86Registers::eax; } - static RegisterID lastRegister() + static constexpr RegisterID firstRegister() { return X86Registers::eax; } + static constexpr RegisterID lastRegister() { #if CPU(X86_64) return X86Registers::r15; @@ -152,8 +136,8 @@ public: typedef X86Registers::XMMRegisterID XMMRegisterID; typedef XMMRegisterID FPRegisterID; - static FPRegisterID firstFPRegister() { return X86Registers::xmm0; } - static FPRegisterID lastFPRegister() + static constexpr FPRegisterID firstFPRegister() { return X86Registers::xmm0; } + static constexpr FPRegisterID lastFPRegister() { #if CPU(X86_64) return X86Registers::xmm15; @@ -185,21 +169,43 @@ public: } Condition; private: + // OneByteOpcodeID defines the bytecode for 1 byte instruction. 
It also contains the prefixes + // for two bytes instructions. + // TwoByteOpcodeID, ThreeByteOpcodeID define the opcodes for the multibytes instructions. + // + // The encoding for each instruction can be found in the Intel Architecture Manual in the appendix + // "Opcode Map." + // + // Each opcode can have a suffix describing the type of argument. The full list of suffixes is + // in the "Key to Abbreviations" section of the "Opcode Map". + // The most common argument types are: + // -E: The argument is either a GPR or a memory address. + // -G: The argument is a GPR. + // -I: The argument is an immediate. + // The most common sizes are: + // -v: 32 or 64bit depending on the operand-size attribute. + // -z: 32bit in both 32bit and 64bit mode. Common for immediate values. typedef enum { + OP_ADD_EbGb = 0x00, OP_ADD_EvGv = 0x01, OP_ADD_GvEv = 0x03, + OP_ADD_EAXIv = 0x05, OP_OR_EvGv = 0x09, OP_OR_GvEv = 0x0B, + OP_OR_EAXIv = 0x0D, OP_2BYTE_ESCAPE = 0x0F, OP_AND_EvGv = 0x21, OP_AND_GvEv = 0x23, OP_SUB_EvGv = 0x29, OP_SUB_GvEv = 0x2B, + OP_SUB_EAXIv = 0x2D, PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E, OP_XOR_EvGv = 0x31, OP_XOR_GvEv = 0x33, + OP_XOR_EAXIv = 0x35, OP_CMP_EvGv = 0x39, OP_CMP_GvEv = 0x3B, + OP_CMP_EAXIv = 0x3D, #if CPU(X86_64) PRE_REX = 0x40, #endif @@ -224,9 +230,12 @@ private: OP_LEA = 0x8D, OP_GROUP1A_Ev = 0x8F, OP_NOP = 0x90, + OP_XCHG_EAX = 0x90, OP_CDQ = 0x99, OP_MOV_EAXOv = 0xA1, OP_MOV_OvEAX = 0xA3, + OP_TEST_ALIb = 0xA8, + OP_TEST_EAXIv = 0xA9, OP_MOV_EAXIv = 0xB8, OP_GROUP2_EvIb = 0xC1, OP_RET = 0xC3, @@ -235,9 +244,11 @@ private: OP_INT3 = 0xCC, OP_GROUP2_Ev1 = 0xD1, OP_GROUP2_EvCL = 0xD3, + OP_ESCAPE_D9 = 0xD9, OP_ESCAPE_DD = 0xDD, OP_CALL_rel32 = 0xE8, OP_JMP_rel32 = 0xE9, + PRE_LOCK = 0xF0, PRE_SSE_F2 = 0xF2, PRE_SSE_F3 = 0xF3, OP_HLT = 0xF4, @@ -248,29 +259,42 @@ private: } OneByteOpcodeID; typedef enum { + OP2_UD2 = 0xB, OP2_MOVSD_VsdWsd = 0x10, OP2_MOVSD_WsdVsd = 0x11, OP2_MOVSS_VsdWsd = 0x10, OP2_MOVSS_WsdVsd = 0x11, + OP2_MOVAPD_VpdWpd = 0x28, + OP2_MOVAPS_VpdWpd = 0x28, OP2_CVTSI2SD_VsdEd = 0x2A, OP2_CVTTSD2SI_GdWsd = 0x2C, + OP2_CVTTSS2SI_GdWsd = 0x2C, OP2_UCOMISD_VsdWsd = 0x2E, + OP2_3BYTE_ESCAPE_3A = 0x3A, + OP2_CMOVCC = 0x40, OP2_ADDSD_VsdWsd = 0x58, OP2_MULSD_VsdWsd = 0x59, OP2_CVTSD2SS_VsdWsd = 0x5A, OP2_CVTSS2SD_VsdWsd = 0x5A, OP2_SUBSD_VsdWsd = 0x5C, OP2_DIVSD_VsdWsd = 0x5E, + OP2_MOVMSKPD_VdEd = 0x50, OP2_SQRTSD_VsdWsd = 0x51, + OP2_ANDPS_VpdWpd = 0x54, OP2_ANDNPD_VpdWpd = 0x55, + OP2_ORPS_VpdWpd = 0x56, OP2_XORPD_VpdWpd = 0x57, OP2_MOVD_VdEd = 0x6E, OP2_MOVD_EdVd = 0x7E, OP2_JCC_rel32 = 0x80, OP_SETCC = 0x90, - OP2_3BYTE_ESCAPE = 0xAE, + OP2_3BYTE_ESCAPE_AE = 0xAE, OP2_IMUL_GvEv = 0xAF, OP2_MOVZX_GvEb = 0xB6, + OP2_BSF = 0xBC, + OP2_TZCNT = 0xBC, + OP2_BSR = 0xBD, + OP2_LZCNT = 0xBD, OP2_MOVSX_GvEb = 0xBE, OP2_MOVZX_GvEw = 0xB7, OP2_MOVSX_GvEw = 0xBF, @@ -281,9 +305,28 @@ private: } TwoByteOpcodeID; typedef enum { - OP3_MFENCE = 0xF0, + OP3_ROUNDSS_VssWssIb = 0x0A, + OP3_ROUNDSD_VsdWsdIb = 0x0B, + OP3_MFENCE = 0xF0, } ThreeByteOpcodeID; + struct VexPrefix { + enum : uint8_t { + TwoBytes = 0xC5, + ThreeBytes = 0xC4 + }; + }; + enum class VexImpliedBytes : uint8_t { + TwoBytesOp = 1, + ThreeBytesOp38 = 2, + ThreeBytesOp3A = 3 + }; + + TwoByteOpcodeID cmovcc(Condition cond) + { + return (TwoByteOpcodeID)(OP2_CMOVCC + cond); + } + TwoByteOpcodeID jccRel32(Condition cond) { return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond); @@ -317,6 +360,7 @@ private: GROUP3_OP_TEST = 0, GROUP3_OP_NOT = 2, GROUP3_OP_NEG = 3, + GROUP3_OP_DIV = 6, GROUP3_OP_IDIV = 7, 
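// (Editor's note: the GROUP* values above and below are not register numbers
//  but opcode extensions - for a group opcode such as OP_GROUP3_Ev (0xF7) the
//  ModRM reg field selects the operation, e.g. /0 TEST, /2 NOT, /3 NEG,
//  /6 DIV, /7 IDIV, which is why the formatter plants a GroupOpcodeID where a
//  register operand would normally be encoded.)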
GROUP5_OP_CALLN = 2, @@ -328,6 +372,7 @@ private: GROUP14_OP_PSLLQ = 6, GROUP14_OP_PSRLQ = 2, + ESCAPE_D9_FSTP_singleReal = 3, ESCAPE_DD_FSTP_doubleReal = 3, } GroupOpcodeID; @@ -407,13 +452,43 @@ public: m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset); } + void addl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp(OP_ADD_EvGv, src, base, index, scale, offset); + } + + void addb_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, offset); + } + + void addb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp8(OP_ADD_EbGb, src, base, index, scale, offset); + } + + void addw_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, offset); + } + + void addw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + m_formatter.oneByteOp8(OP_ADD_EvGv, src, base, index, scale, offset); + } + void addl_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_ADD_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); m_formatter.immediate32(imm); } } @@ -429,6 +504,53 @@ public: } } + void addl_im(int imm, int offset, RegisterID base, RegisterID index, int scale) + { + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate32(imm); + } + } + + void addb_im(int imm, int offset, RegisterID base) + { + m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, offset); + m_formatter.immediate8(imm); + } + + void addb_im(int imm, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp8(OP_GROUP1_EbIb, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate8(imm); + } + + void addw_im(int imm, int offset, RegisterID base) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset); + m_formatter.immediate16(imm); + } + } + + void addw_im(int imm, int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp8(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp8(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, index, scale, offset); + m_formatter.immediate16(imm); + } + } + #if CPU(X86_64) void addq_rr(RegisterID src, RegisterID dst) { @@ -440,13 +562,21 @@ public: m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset); } + void addq_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_ADD_EvGv, src, base, offset); + } + void addq_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst); m_formatter.immediate8(imm); } else { - 
m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_ADD_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst); m_formatter.immediate32(imm); } } @@ -552,6 +682,12 @@ public: } #endif // CPU(X86_64) + // Only used for testing purposes. + void illegalInstruction() + { + m_formatter.twoByteOp(OP2_UD2); + } + void inc_r(RegisterID dst) { m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst); @@ -562,6 +698,11 @@ public: { m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst); } + + void incq_m(int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset); + } #endif // CPU(X86_64) void negl_r(RegisterID dst) @@ -591,6 +732,18 @@ public: m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset); } +#if CPU(X86_64) + void notq_r(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, dst); + } + + void notq_m(int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset); + } +#endif + void orl_rr(RegisterID src, RegisterID dst) { m_formatter.oneByteOp(OP_OR_EvGv, src, dst); @@ -612,7 +765,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_OR_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); m_formatter.immediate32(imm); } } @@ -640,7 +796,10 @@ public: m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_OR_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst); m_formatter.immediate32(imm); } } @@ -683,7 +842,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_SUB_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); m_formatter.immediate32(imm); } } @@ -705,13 +867,37 @@ public: m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst); } + void subq_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.oneByteOp64(OP_SUB_GvEv, dst, base, offset); + } + + void subq_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_SUB_EvGv, src, base, offset); + } + void subq_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_SUB_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst); + m_formatter.immediate32(imm); + } + } + + void subq_im(int imm, int offset, RegisterID base) + { + if (CAN_SIGN_EXTEND_8_32(imm)) { + m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset); + m_formatter.immediate8(imm); + } else { + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset); m_formatter.immediate32(imm); } } @@ -760,7 +946,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); + if (dst == 
X86Registers::eax) + m_formatter.oneByteOp(OP_XOR_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); m_formatter.immediate32(imm); } } @@ -777,7 +966,10 @@ public: m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_XOR_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst); m_formatter.immediate32(imm); } } @@ -786,28 +978,100 @@ public: { m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset); } - - void rorq_i8r(int imm, RegisterID dst) + +#endif + + void lzcnt_rr(RegisterID src, RegisterID dst) { - if (imm == 1) - m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst); - else { - m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst); - m_formatter.immediate8(imm); - } + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_LZCNT, dst, src); + } + + void lzcnt_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_LZCNT, dst, base, offset); + } + +#if CPU(X86_64) + void lzcntq_rr(RegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_LZCNT, dst, src); } + void lzcntq_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_LZCNT, dst, base, offset); + } #endif - void sarl_i8r(int imm, RegisterID dst) + void bsr_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(OP2_BSR, dst, src); + } + + void bsr_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp(OP2_BSR, dst, base, offset); + } + +#if CPU(X86_64) + void bsrq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(OP2_BSR, dst, src); + } + + void bsrq_mr(int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp64(OP2_BSR, dst, base, offset); + } +#endif + + void tzcnt_rr(RegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_TZCNT, dst, src); + } + +#if CPU(X86_64) + void tzcntq_rr(RegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_TZCNT, dst, src); + } +#endif + + void bsf_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(OP2_BSF, dst, src); + } + +#if CPU(X86_64) + void bsfq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(OP2_BSF, dst, src); + } +#endif + +private: + template<GroupOpcodeID op> + void shiftInstruction32(int imm, RegisterID dst) { if (imm == 1) - m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst); + m_formatter.oneByteOp(OP_GROUP2_Ev1, op, dst); else { - m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst); + m_formatter.oneByteOp(OP_GROUP2_EvIb, op, dst); m_formatter.immediate8(imm); } } +public: + + void sarl_i8r(int imm, RegisterID dst) + { + shiftInstruction32<GROUP2_OP_SAR>(imm, dst); + } void sarl_CLr(RegisterID dst) { @@ -816,12 +1080,7 @@ public: void shrl_i8r(int imm, RegisterID dst) { - if (imm == 1) - m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst); - else { - m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst); - m_formatter.immediate8(imm); - } + shiftInstruction32<GROUP2_OP_SHR>(imm, dst); } void shrl_CLr(RegisterID dst) @@ -831,12 +1090,7 @@ public: void shll_i8r(int imm, RegisterID dst) { - if (imm == 1) - m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst); - else { - 
m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst); - m_formatter.immediate8(imm); - } + shiftInstruction32<GROUP2_OP_SHL>(imm, dst); } void shll_CLr(RegisterID dst) @@ -844,30 +1098,87 @@ public: m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst); } -#if CPU(X86_64) - void sarq_CLr(RegisterID dst) + void rorl_i8r(int imm, RegisterID dst) { - m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst); + shiftInstruction32<GROUP2_OP_ROR>(imm, dst); } - void sarq_i8r(int imm, RegisterID dst) + void rorl_CLr(RegisterID dst) + { + m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst); + } + + void roll_i8r(int imm, RegisterID dst) + { + shiftInstruction32<GROUP2_OP_ROL>(imm, dst); + } + + void roll_CLr(RegisterID dst) + { + m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst); + } + +#if CPU(X86_64) +private: + template<GroupOpcodeID op> + void shiftInstruction64(int imm, RegisterID dst) { if (imm == 1) - m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst); + m_formatter.oneByteOp64(OP_GROUP2_Ev1, op, dst); else { - m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst); + m_formatter.oneByteOp64(OP_GROUP2_EvIb, op, dst); m_formatter.immediate8(imm); } } +public: + void sarq_CLr(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst); + } + + void sarq_i8r(int imm, RegisterID dst) + { + shiftInstruction64<GROUP2_OP_SAR>(imm, dst); + } + + void shrq_i8r(int imm, RegisterID dst) + { + shiftInstruction64<GROUP2_OP_SHR>(imm, dst); + } + + void shrq_CLr(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst); + } void shlq_i8r(int imm, RegisterID dst) { - if (imm == 1) - m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst); - else { - m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst); - m_formatter.immediate8(imm); - } + shiftInstruction64<GROUP2_OP_SHL>(imm, dst); + } + + void shlq_CLr(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst); + } + + void rorq_i8r(int imm, RegisterID dst) + { + shiftInstruction64<GROUP2_OP_ROR>(imm, dst); + } + + void rorq_CLr(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROR, dst); + } + + void rolq_i8r(int imm, RegisterID dst) + { + shiftInstruction64<GROUP2_OP_ROL>(imm, dst); + } + + void rolq_CLr(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_ROL, dst); } #endif // CPU(X86_64) @@ -894,11 +1205,28 @@ public: m_formatter.immediate32(value); } + void divl_r(RegisterID dst) + { + m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_DIV, dst); + } + void idivl_r(RegisterID dst) { m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst); } +#if CPU(X86_64) + void divq_r(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_DIV, dst); + } + + void idivq_r(RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst); + } +#endif // CPU(X86_64) + // Comparisons: void cmpl_rr(RegisterID src, RegisterID dst) @@ -922,7 +1250,10 @@ public: m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_CMP_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); m_formatter.immediate32(imm); } } @@ -1008,7 +1339,10 @@ public: m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst); m_formatter.immediate8(imm); } else { - m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); + if (dst == 
X86Registers::eax) + m_formatter.oneByteOp64(OP_CMP_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst); m_formatter.immediate32(imm); } } @@ -1091,7 +1425,10 @@ public: void testl_i32r(int imm, RegisterID dst) { - m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_TEST_EAXIv); + else + m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); m_formatter.immediate32(imm); } @@ -1145,7 +1482,10 @@ public: void testq_i32r(int imm, RegisterID dst) { - m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_TEST_EAXIv); + else + m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst); m_formatter.immediate32(imm); } @@ -1170,7 +1510,10 @@ public: void testb_i8r(int imm, RegisterID dst) { - m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst); + if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_TEST_ALIb); + else + m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst); m_formatter.immediate8(imm); } @@ -1199,6 +1542,16 @@ public: setne_r(dst); } + void setnp_r(RegisterID dst) + { + m_formatter.twoByteOp8(setccOpcode(ConditionNP), (GroupOpcodeID)0, dst); + } + + void setp_r(RegisterID dst) + { + m_formatter.twoByteOp8(setccOpcode(ConditionP), (GroupOpcodeID)0, dst); + } + // Various move ops: void cdq() @@ -1206,6 +1559,18 @@ public: m_formatter.oneByteOp(OP_CDQ); } +#if CPU(X86_64) + void cqo() + { + m_formatter.oneByteOp64(OP_CDQ); + } +#endif + + void fstps(int offset, RegisterID base) + { + m_formatter.oneByteOp(OP_ESCAPE_D9, ESCAPE_D9_FSTP_singleReal, base, offset); + } + void fstpl(int offset, RegisterID base) { m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset); @@ -1213,13 +1578,33 @@ public: void xchgl_rr(RegisterID src, RegisterID dst) { - m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst); + if (src == X86Registers::eax) + m_formatter.oneByteOp(OP_XCHG_EAX, dst); + else if (dst == X86Registers::eax) + m_formatter.oneByteOp(OP_XCHG_EAX, src); + else + m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst); + } + + void xchgl_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp(OP_XCHG_EvGv, src, base, offset); } #if CPU(X86_64) void xchgq_rr(RegisterID src, RegisterID dst) { - m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst); + if (src == X86Registers::eax) + m_formatter.oneByteOp64(OP_XCHG_EAX, dst); + else if (dst == X86Registers::eax) + m_formatter.oneByteOp64(OP_XCHG_EAX, src); + else + m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst); + } + + void xchgq_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_XCHG_EvGv, src, base, offset); } #endif @@ -1330,7 +1715,16 @@ public: { m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset); } - + + void movw_rm(RegisterID src, int offset, RegisterID base) + { + m_formatter.prefix(PRE_OPERAND_SIZE); + + // FIXME: We often use oneByteOp8 for 16-bit operations. It's not clear that this is + // necessary. 
https://bugs.webkit.org/show_bug.cgi?id=153433 + m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, offset); + } + void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale) { m_formatter.prefix(PRE_OPERAND_SIZE); @@ -1411,6 +1805,12 @@ public: m_formatter.oneByteOp64(OP_MOV_EAXIv, dst); m_formatter.immediate64(imm); } + + void mov_i32r(int32_t imm, RegisterID dst) + { + m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, dst); + m_formatter.immediate32(imm); + } void movsxd_rr(RegisterID src, RegisterID dst) { @@ -1497,15 +1897,118 @@ public: m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src); } + void movsbl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp8(OP2_MOVSX_GvEb, dst, src); + } + + void movzwl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp8(OP2_MOVZX_GvEw, dst, src); + } + + void movswl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp8(OP2_MOVSX_GvEw, dst, src); + } + + void cmovl_rr(Condition cond, RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, src); + } + + void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, base, offset); + } + + void cmovl_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, base, index, scale, offset); + } + + void cmovel_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionE), dst, src); + } + + void cmovnel_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionNE), dst, src); + } + + void cmovpl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionP), dst, src); + } + + void cmovnpl_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(ConditionNP), dst, src); + } + +#if CPU(X86_64) + void cmovq_rr(Condition cond, RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(cond), dst, src); + } + + void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(cond), dst, base, offset); + } + + void cmovq_mr(Condition cond, int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(cond), dst, base, index, scale, offset); + } + + void cmoveq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionE), dst, src); + } + + void cmovneq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionNE), dst, src); + } + + void cmovpq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionP), dst, src); + } + + void cmovnpq_rr(RegisterID src, RegisterID dst) + { + m_formatter.twoByteOp64(cmovcc(ConditionNP), dst, src); + } +#else + void cmovl_mr(Condition cond, const void* addr, RegisterID dst) + { + m_formatter.twoByteOp(cmovcc(cond), dst, addr); + } +#endif + void leal_mr(int offset, RegisterID base, RegisterID dst) { m_formatter.oneByteOp(OP_LEA, dst, base, offset); } + + void leal_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + m_formatter.oneByteOp(OP_LEA, dst, base, index, scale, offset); + } + #if CPU(X86_64) void leaq_mr(int offset, RegisterID base, RegisterID dst) { m_formatter.oneByteOp64(OP_LEA, dst, base, offset); } + + void leaq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst) + { + m_formatter.oneByteOp64(OP_LEA, dst, base, index, scale, offset); + } #endif // Flow 
control: @@ -1547,6 +2050,11 @@ public: m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset); } + void jmp_m(int offset, RegisterID base, RegisterID index, int scale) + { + m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, index, scale, offset); + } + #if !CPU(X86_64) void jmp_m(const void* address) { @@ -1662,12 +2170,66 @@ public: m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src); } + void vaddsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); + } + void addsd_mr(int offset, RegisterID base, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset); } + void addsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset); + } + + void vaddsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); + } + + void vaddsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); + } + + void addss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void vaddss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); + } + + void addss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset); + } + + void addss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, dst, base, index, scale, offset); + } + + void vaddss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); + } + + void vaddss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); + } + #if !CPU(X86_64) void addsd_mr(const void* address, XMMRegisterID dst) { @@ -1682,12 +2244,36 @@ public: m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); } + void cvtsi2ss_rr(RegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); + } + #if CPU(X86_64) void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); } + + void cvtsi2ssq_rr(RegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src); + } + + void cvtsi2sdq_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, 
base, offset); + } + + void cvtsi2ssq_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset); + } #endif void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst) @@ -1696,6 +2282,12 @@ public: m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset); } + void cvtsi2ss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset); + } + #if !CPU(X86_64) void cvtsi2sd_mr(const void* address, XMMRegisterID dst) { @@ -1710,18 +2302,44 @@ public: m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src); } + void cvttss2si_rr(XMMRegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src); + } + +#if CPU(X86_64) + void cvttss2siq_rr(XMMRegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp64(OP2_CVTTSS2SI_GdWsd, dst, (RegisterID)src); + } +#endif + void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src); } + void cvtsd2ss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, base, offset); + } + void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F3); m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src); } - + + void cvtss2sd_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, base, offset); + } + #if CPU(X86_64) void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst) { @@ -1743,6 +2361,12 @@ public: } #if CPU(X86_64) + void movmskpd_rr(XMMRegisterID src, RegisterID dst) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src); + } + void movq_rr(XMMRegisterID src, RegisterID dst) { m_formatter.prefix(PRE_SSE_66); @@ -1756,6 +2380,17 @@ public: } #endif + void movapd_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.twoByteOp(OP2_MOVAPD_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + + void movaps_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_MOVAPS_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + void movsd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); @@ -1773,6 +2408,12 @@ public: m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset); } + + void movss_rm(XMMRegisterID src, int offset, RegisterID base) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset); + } void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale) { @@ -1791,7 +2432,13 @@ public: m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset); } - + + void movss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset); + } + void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F3); @@ -1817,12 +2464,66 @@ public: m_formatter.twoByteOp(OP2_MULSD_VsdWsd, 
(RegisterID)dst, (RegisterID)src); } + void vmulsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); + } + void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset); } + void mulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset); + } + + void vmulsd_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); + } + + void vmulsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); + } + + void mulss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void vmulss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigCommutativeTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); + } + + void mulss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset); + } + + void mulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_MULSD_VsdWsd, dst, base, index, scale, offset); + } + + void vmulss_mr(int offset, RegisterID base, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); + } + + void vmulss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); + } + void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst) { m_formatter.prefix(PRE_SSE_66); @@ -1856,12 +2557,66 @@ public: m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src); } + void vsubsd_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); + } + void subsd_mr(int offset, RegisterID base, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset); } + void subsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, dst, base, index, scale, offset); + } + + void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); + } + + void vsubsd_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F2, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); + } + + void 
subss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void vsubss_rr(XMMRegisterID a, XMMRegisterID b, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)a, (RegisterID)b); + } + + void subss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset); + } + + void subss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, dst, base, index, scale, offset); + } + + void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, base, offset); + } + + void vsubss_mr(XMMRegisterID b, int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst) + { + m_formatter.vexNdsLigWigTwoByteOp(PRE_SSE_F3, OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)b, offset, base, index, scale); + } + void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_66); @@ -1874,6 +2629,16 @@ public: m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset); } + void ucomiss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void ucomiss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset); + } + void divsd_rr(XMMRegisterID src, XMMRegisterID dst) { m_formatter.prefix(PRE_SSE_F2); @@ -1886,8 +2651,39 @@ public: m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset); } + void divss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void divss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset); + } + + void andps_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_ANDPS_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + + void orps_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_ORPS_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + + void xorps_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src); + } + void xorpd_rr(XMMRegisterID src, XMMRegisterID dst) { + if (src == dst) { + xorps_rr(src, dst); + return; + } m_formatter.prefix(PRE_SSE_66); m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src); } @@ -1903,7 +2699,60 @@ public: m_formatter.prefix(PRE_SSE_F2); m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src); } - + + void sqrtsd_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F2); + m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, base, offset); + } + + void sqrtss_rr(XMMRegisterID src, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src); + } + + void sqrtss_mr(int offset, RegisterID base, XMMRegisterID dst) + { + m_formatter.prefix(PRE_SSE_F3); + m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, 
(RegisterID)dst, base, offset); + } + + enum class RoundingType : uint8_t { + ToNearestWithTiesToEven = 0, + TowardNegativeInfiniti = 1, + TowardInfiniti = 2, + TowardZero = 3 + }; + + void roundss_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, (RegisterID)src); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + + void roundss_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSS_VssWssIb, (RegisterID)dst, base, offset); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + + void roundsd_rr(XMMRegisterID src, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, (RegisterID)src); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + + void roundsd_mr(int offset, RegisterID base, XMMRegisterID dst, RoundingType rounding) + { + m_formatter.prefix(PRE_SSE_66); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_3A, OP3_ROUNDSD_VsdWsdIb, (RegisterID)dst, base, offset); + m_formatter.immediate8(static_cast<uint8_t>(rounding)); + } + // Misc instructions: void int3() @@ -1921,9 +2770,14 @@ public: m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN); } + void lock() + { + m_formatter.prefix(PRE_LOCK); + } + void mfence() { - m_formatter.threeByteOp(OP3_MFENCE); + m_formatter.threeByteOp(OP2_3BYTE_ESCAPE_AE, OP3_MFENCE); } // Assembler admin methods: @@ -2010,6 +2864,11 @@ public: setRel32(from, to); } + static void relinkJumpToNop(void* from) + { + setInt32(from, 0); + } + static void relinkCall(void* from, void* to) { setRel32(from, to); @@ -2050,13 +2909,18 @@ public: { return 5; } + + static constexpr ptrdiff_t patchableJumpSize() + { + return 5; + } #if CPU(X86_64) static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst) { + const unsigned instructionSize = 10; // REX.W MOV IMM64 const int rexBytes = 1; const int opcodeBytes = 1; - ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize()); uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); ptr[0] = PRE_REX | (1 << 3) | (dst >> 3); ptr[1] = OP_MOV_EAXIv | (dst & 7); @@ -2066,11 +2930,33 @@ public: uint8_t asBytes[8]; } u; u.asWord = imm; - for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i) + for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i) + ptr[i] = u.asBytes[i - rexBytes - opcodeBytes]; + } + + static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst) + { + // We only revert jumps on inline caches, and inline caches always use the scratch register (r11). + // FIXME: If the above is ever false then we need to make this smarter with respect to emitting + // the REX byte. 
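// (Editor's note: the six bytes written below are REX.B - 0x41 when dst is
//  r11 - followed by 0xB8+rd, the short "mov r32, imm32" form, and the four
//  immediate bytes, i.e. the sequence 41 BB imm32 meaning movl $imm, %r11d.)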
+ ASSERT(dst == X86Registers::r11); + const unsigned instructionSize = 6; // REX MOV IMM32 + const int rexBytes = 1; + const int opcodeBytes = 1; + uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); + ptr[0] = PRE_REX | (dst >> 3); + ptr[1] = OP_MOV_EAXIv | (dst & 7); + + union { + uint32_t asWord; + uint8_t asBytes[4]; + } u; + u.asWord = imm; + for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i) ptr[i] = u.asBytes[i - rexBytes - opcodeBytes]; } #endif - + static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst) { const int opcodeBytes = 1; @@ -2165,10 +3051,50 @@ public: { m_formatter.oneByteOp(OP_NOP); } - - static void fillNops(void* base, size_t size) + + static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory) { + UNUSED_PARAM(isCopyingToExecutableMemory); +#if CPU(X86_64) + static const uint8_t nops[10][10] = { + // nop + {0x90}, + // xchg %ax,%ax + {0x66, 0x90}, + // nopl (%[re]ax) + {0x0f, 0x1f, 0x00}, + // nopl 8(%[re]ax) + {0x0f, 0x1f, 0x40, 0x08}, + // nopl 8(%[re]ax,%[re]ax,1) + {0x0f, 0x1f, 0x44, 0x00, 0x08}, + // nopw 8(%[re]ax,%[re]ax,1) + {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08}, + // nopl 512(%[re]ax) + {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00}, + // nopl 512(%[re]ax,%[re]ax,1) + {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, + // nopw 512(%[re]ax,%[re]ax,1) + {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, + // nopw %cs:512(%[re]ax,%[re]ax,1) + {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00} + }; + + uint8_t* where = reinterpret_cast<uint8_t*>(base); + while (size) { + unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15)); + unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10; + for (unsigned i = 0; i != numPrefixes; ++i) + *where++ = 0x66; + + unsigned nopRest = nopSize - numPrefixes; + for (unsigned i = 0; i != nopRest; ++i) + *where++ = nops[nopRest-1][i]; + + size -= nopSize; + } +#else memset(base, OP_NOP, size); +#endif } // This is a no-op on x86 @@ -2200,16 +3126,14 @@ private: } class X86InstructionFormatter { - static const int maxInstructionSize = 16; public: - enum ModRmMode { - ModRmMemoryNoDisp, - ModRmMemoryDisp8, - ModRmMemoryDisp32, - ModRmRegister, + ModRmMemoryNoDisp = 0, + ModRmMemoryDisp8 = 1 << 6, + ModRmMemoryDisp32 = 2 << 6, + ModRmRegister = 3 << 6, }; // Legacy prefix bytes: @@ -2221,6 +3145,260 @@ private: m_buffer.putByte(pre); } +#if CPU(X86_64) + // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed). + static bool byteRegRequiresRex(int reg) + { + static_assert(X86Registers::esp == 4, "Necessary condition for OR-masking"); + return (reg >= X86Registers::esp); + } + static bool byteRegRequiresRex(int a, int b) + { + return byteRegRequiresRex(a | b); + } + + // Registers r8 & above require a REX prefixe. 
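// (Editor's note: the REX prefix emitted by emitRex() further below has the
//  layout 0100WRXB - W selects 64-bit operand size, while R, X and B extend
//  the ModRM reg field, the SIB index and the ModRM rm / SIB base to four
//  bits. That is why r8..r15, and byte access to spl/bpl/sil/dil, need the
//  prefix.)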
+ static bool regRequiresRex(int reg) + { + static_assert(X86Registers::r8 == 8, "Necessary condition for OR-masking"); + return (reg >= X86Registers::r8); + } + static bool regRequiresRex(int a, int b) + { + return regRequiresRex(a | b); + } + static bool regRequiresRex(int a, int b, int c) + { + return regRequiresRex(a | b | c); + } +#else + static bool byteRegRequiresRex(int) { return false; } + static bool byteRegRequiresRex(int, int) { return false; } + static bool regRequiresRex(int) { return false; } + static bool regRequiresRex(int, int) { return false; } + static bool regRequiresRex(int, int, int) { return false; } +#endif + + class SingleInstructionBufferWriter : public AssemblerBuffer::LocalWriter { + public: + SingleInstructionBufferWriter(AssemblerBuffer& buffer) + : AssemblerBuffer::LocalWriter(buffer, maxInstructionSize) + { + } + + // Internals; ModRm and REX formatters. + + static constexpr RegisterID noBase = X86Registers::ebp; + static constexpr RegisterID hasSib = X86Registers::esp; + static constexpr RegisterID noIndex = X86Registers::esp; + +#if CPU(X86_64) + static constexpr RegisterID noBase2 = X86Registers::r13; + static constexpr RegisterID hasSib2 = X86Registers::r12; + + // Format a REX prefix byte. + ALWAYS_INLINE void emitRex(bool w, int r, int x, int b) + { + ASSERT(r >= 0); + ASSERT(x >= 0); + ASSERT(b >= 0); + putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3)); + } + + // Used to plant a REX byte with REX.w set (for 64-bit operations). + ALWAYS_INLINE void emitRexW(int r, int x, int b) + { + emitRex(true, r, x, b); + } + + // Used for operations with byte operands - use byteRegRequiresRex() to check register operands, + // regRequiresRex() to check other registers (i.e. address base & index). + ALWAYS_INLINE void emitRexIf(bool condition, int r, int x, int b) + { + if (condition) + emitRex(false, r, x, b); + } + + // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above). + ALWAYS_INLINE void emitRexIfNeeded(int r, int x, int b) + { + emitRexIf(regRequiresRex(r, x, b), r, x, b); + } +#else + // No REX prefix bytes on 32-bit x86. + ALWAYS_INLINE void emitRexIf(bool, int, int, int) { } + ALWAYS_INLINE void emitRexIfNeeded(int, int, int) { } +#endif + + ALWAYS_INLINE void putModRm(ModRmMode mode, int reg, RegisterID rm) + { + putByteUnchecked(mode | ((reg & 7) << 3) | (rm & 7)); + } + + ALWAYS_INLINE void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale) + { + ASSERT(mode != ModRmRegister); + + putModRm(mode, reg, hasSib); + putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7)); + } + + ALWAYS_INLINE void registerModRM(int reg, RegisterID rm) + { + putModRm(ModRmRegister, reg, rm); + } + + ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, int offset) + { + // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. +#if CPU(X86_64) + if ((base == hasSib) || (base == hasSib2)) { +#else + if (base == hasSib) { +#endif + if (!offset) // No need to check if the base is noBase, since we know it is hasSib! 
+ putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0); + else if (CAN_SIGN_EXTEND_8_32(offset)) { + putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0); + putByteUnchecked(offset); + } else { + putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0); + putIntUnchecked(offset); + } + } else { +#if CPU(X86_64) + if (!offset && (base != noBase) && (base != noBase2)) +#else + if (!offset && (base != noBase)) +#endif + putModRm(ModRmMemoryNoDisp, reg, base); + else if (CAN_SIGN_EXTEND_8_32(offset)) { + putModRm(ModRmMemoryDisp8, reg, base); + putByteUnchecked(offset); + } else { + putModRm(ModRmMemoryDisp32, reg, base); + putIntUnchecked(offset); + } + } + } + + ALWAYS_INLINE void memoryModRM_disp8(int reg, RegisterID base, int offset) + { + // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. + ASSERT(CAN_SIGN_EXTEND_8_32(offset)); +#if CPU(X86_64) + if ((base == hasSib) || (base == hasSib2)) { +#else + if (base == hasSib) { +#endif + putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0); + putByteUnchecked(offset); + } else { + putModRm(ModRmMemoryDisp8, reg, base); + putByteUnchecked(offset); + } + } + + ALWAYS_INLINE void memoryModRM_disp32(int reg, RegisterID base, int offset) + { + // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. +#if CPU(X86_64) + if ((base == hasSib) || (base == hasSib2)) { +#else + if (base == hasSib) { +#endif + putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0); + putIntUnchecked(offset); + } else { + putModRm(ModRmMemoryDisp32, reg, base); + putIntUnchecked(offset); + } + } + + ALWAYS_INLINE void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset) + { + ASSERT(index != noIndex); + +#if CPU(X86_64) + if (!offset && (base != noBase) && (base != noBase2)) +#else + if (!offset && (base != noBase)) +#endif + putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale); + else if (CAN_SIGN_EXTEND_8_32(offset)) { + putModRmSib(ModRmMemoryDisp8, reg, base, index, scale); + putByteUnchecked(offset); + } else { + putModRmSib(ModRmMemoryDisp32, reg, base, index, scale); + putIntUnchecked(offset); + } + } + +#if !CPU(X86_64) + ALWAYS_INLINE void memoryModRM(int reg, const void* address) + { + // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32! 
+ putModRm(ModRmMemoryNoDisp, reg, noBase); + putIntUnchecked(reinterpret_cast<int32_t>(address)); + } +#endif + ALWAYS_INLINE void twoBytesVex(OneByteOpcodeID simdPrefix, RegisterID inOpReg, RegisterID r) + { + putByteUnchecked(VexPrefix::TwoBytes); + + uint8_t secondByte = vexEncodeSimdPrefix(simdPrefix); + secondByte |= (~inOpReg & 0xf) << 3; + secondByte |= !regRequiresRex(r) << 7; + putByteUnchecked(secondByte); + } + + ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID x, RegisterID b) + { + putByteUnchecked(VexPrefix::ThreeBytes); + + uint8_t secondByte = static_cast<uint8_t>(impliedBytes); + secondByte |= !regRequiresRex(r) << 7; + secondByte |= !regRequiresRex(x) << 6; + secondByte |= !regRequiresRex(b) << 5; + putByteUnchecked(secondByte); + + uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix); + thirdByte |= (~inOpReg & 0xf) << 3; + putByteUnchecked(thirdByte); + } + + ALWAYS_INLINE void threeBytesVexNds(OneByteOpcodeID simdPrefix, VexImpliedBytes impliedBytes, RegisterID r, RegisterID inOpReg, RegisterID b) + { + putByteUnchecked(VexPrefix::ThreeBytes); + + uint8_t secondByte = static_cast<uint8_t>(impliedBytes); + secondByte |= !regRequiresRex(r) << 7; + secondByte |= 1 << 6; // REX.X + secondByte |= !regRequiresRex(b) << 5; + putByteUnchecked(secondByte); + + uint8_t thirdByte = vexEncodeSimdPrefix(simdPrefix); + thirdByte |= (~inOpReg & 0xf) << 3; + putByteUnchecked(thirdByte); + } + private: + uint8_t vexEncodeSimdPrefix(OneByteOpcodeID simdPrefix) + { + switch (simdPrefix) { + case 0x66: + return 1; + case 0xF3: + return 2; + case 0xF2: + return 3; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + return 0; + } + + }; + // Word-sized operands / no operand instruction formatters. 
// // In addition to the opcode, the following operand permutations are supported: @@ -2237,116 +3415,176 @@ private: void oneByteOp(OneByteOpcodeID opcode) { - m_buffer.ensureSpace(maxInstructionSize); - m_buffer.putByteUnchecked(opcode); + SingleInstructionBufferWriter writer(m_buffer); + writer.putByteUnchecked(opcode); } void oneByteOp(OneByteOpcodeID opcode, RegisterID reg) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(0, 0, reg); - m_buffer.putByteUnchecked(opcode + (reg & 7)); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(0, 0, reg); + writer.putByteUnchecked(opcode + (reg & 7)); } void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, 0, rm); - m_buffer.putByteUnchecked(opcode); - registerModRM(reg, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, rm); + writer.putByteUnchecked(opcode); + writer.registerModRM(reg, rm); } void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, 0, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, offset); } void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, 0, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM_disp32(reg, base, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM_disp32(reg, base, offset); } void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, 0, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM_disp8(reg, base, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM_disp8(reg, base, offset); } void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, index, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, index, scale, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, index, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, index, scale, offset); } #if !CPU(X86_64) void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address) { - m_buffer.ensureSpace(maxInstructionSize); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, address); + SingleInstructionBufferWriter writer(m_buffer); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, address); } #endif void twoByteOp(TwoByteOpcodeID opcode) { - m_buffer.ensureSpace(maxInstructionSize); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); + SingleInstructionBufferWriter writer(m_buffer); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); } void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, 0, rm); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); - 
registerModRM(reg, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, rm); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.registerModRM(reg, rm); } void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, 0, base); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, base); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, offset); } void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIfNeeded(reg, index, base); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, index, scale, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, index, base); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, index, scale, offset); } #if !CPU(X86_64) void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address) { - m_buffer.ensureSpace(maxInstructionSize); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, address); + SingleInstructionBufferWriter writer(m_buffer); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, address); } #endif + void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b) + { + SingleInstructionBufferWriter writer(m_buffer); + if (regRequiresRex(b)) + writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, b); + else + writer.twoBytesVex(simdPrefix, a, dest); + writer.putByteUnchecked(opcode); + writer.registerModRM(dest, b); + } + + void vexNdsLigWigCommutativeTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID b) + { + // Since this is a commutative operation, we can try switching the arguments. 
+ if (regRequiresRex(b)) + std::swap(a, b); + vexNdsLigWigTwoByteOp(simdPrefix, opcode, dest, a, b); + } + + void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, RegisterID base, int offset) + { + SingleInstructionBufferWriter writer(m_buffer); + if (regRequiresRex(base)) + writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, base); + else + writer.twoBytesVex(simdPrefix, a, dest); + writer.putByteUnchecked(opcode); + writer.memoryModRM(dest, base, offset); + } - void threeByteOp(ThreeByteOpcodeID opcode) + void vexNdsLigWigTwoByteOp(OneByteOpcodeID simdPrefix, TwoByteOpcodeID opcode, RegisterID dest, RegisterID a, int offset, RegisterID base, RegisterID index, int scale) { - m_buffer.ensureSpace(maxInstructionSize); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(OP2_3BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); + SingleInstructionBufferWriter writer(m_buffer); + if (regRequiresRex(base, index)) + writer.threeBytesVexNds(simdPrefix, VexImpliedBytes::TwoBytesOp, dest, a, index, base); + else + writer.twoBytesVex(simdPrefix, a, dest); + writer.putByteUnchecked(opcode); + writer.memoryModRM(dest, base, index, scale, offset); + } + + void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode) + { + SingleInstructionBufferWriter writer(m_buffer); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(twoBytePrefix); + writer.putByteUnchecked(opcode); + } + + void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID rm) + { + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, rm); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(twoBytePrefix); + writer.putByteUnchecked(opcode); + writer.registerModRM(reg, rm); + } + + void threeByteOp(TwoByteOpcodeID twoBytePrefix, ThreeByteOpcodeID opcode, int reg, RegisterID base, int displacement) + { + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIfNeeded(reg, 0, base); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(twoBytePrefix); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, displacement); } #if CPU(X86_64) @@ -2358,65 +3596,83 @@ private: void oneByteOp64(OneByteOpcodeID opcode) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(0, 0, 0); - m_buffer.putByteUnchecked(opcode); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(0, 0, 0); + writer.putByteUnchecked(opcode); } void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(0, 0, reg); - m_buffer.putByteUnchecked(opcode + (reg & 7)); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(0, 0, reg); + writer.putByteUnchecked(opcode + (reg & 7)); } void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(reg, 0, rm); - m_buffer.putByteUnchecked(opcode); - registerModRM(reg, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, 0, rm); + writer.putByteUnchecked(opcode); + writer.registerModRM(reg, rm); } void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(reg, 0, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, 0, base); + writer.putByteUnchecked(opcode); + 
writer.memoryModRM(reg, base, offset); } void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(reg, 0, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM_disp32(reg, base, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, 0, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM_disp32(reg, base, offset); } void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(reg, 0, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM_disp8(reg, base, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, 0, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM_disp8(reg, base, offset); } void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(reg, index, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, index, scale, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, index, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, index, scale, offset); } void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexW(reg, 0, rm); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); - registerModRM(reg, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, 0, rm); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.registerModRM(reg, rm); + } + + void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset) + { + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, 0, base); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, offset); + } + + void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) + { + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexW(reg, index, base); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, index, scale, offset); } #endif @@ -2447,52 +3703,52 @@ private: void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIf(byteRegRequiresRex(rm), 0, 0, rm); - m_buffer.putByteUnchecked(opcode); - registerModRM(groupOp, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm); + writer.putByteUnchecked(opcode); + writer.registerModRM(groupOp, rm); } void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm); - m_buffer.putByteUnchecked(opcode); - registerModRM(reg, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm); + writer.putByteUnchecked(opcode); + writer.registerModRM(reg, rm); } void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(base), reg, 0, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, offset); + 
SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIf(byteRegRequiresRex(reg, base), reg, 0, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, offset); } void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base); - m_buffer.putByteUnchecked(opcode); - memoryModRM(reg, base, index, scale, offset); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index, base), reg, index, base); + writer.putByteUnchecked(opcode); + writer.memoryModRM(reg, base, index, scale, offset); } void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); - registerModRM(reg, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIf(byteRegRequiresRex(reg, rm), reg, 0, rm); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.registerModRM(reg, rm); } void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm) { - m_buffer.ensureSpace(maxInstructionSize); - emitRexIf(byteRegRequiresRex(rm), 0, 0, rm); - m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE); - m_buffer.putByteUnchecked(opcode); - registerModRM(groupOp, rm); + SingleInstructionBufferWriter writer(m_buffer); + writer.emitRexIf(byteRegRequiresRex(rm), 0, 0, rm); + writer.putByteUnchecked(OP_2BYTE_ESCAPE); + writer.putByteUnchecked(opcode); + writer.registerModRM(groupOp, rm); } // Immediates: @@ -2535,177 +3791,6 @@ private: unsigned debugOffset() { return m_buffer.debugOffset(); } - private: - - // Internals; ModRm and REX formatters. - - static const RegisterID noBase = X86Registers::ebp; - static const RegisterID hasSib = X86Registers::esp; - static const RegisterID noIndex = X86Registers::esp; -#if CPU(X86_64) - static const RegisterID noBase2 = X86Registers::r13; - static const RegisterID hasSib2 = X86Registers::r12; - - // Registers r8 & above require a REX prefixe. - inline bool regRequiresRex(int reg) - { - return (reg >= X86Registers::r8); - } - - // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed). - inline bool byteRegRequiresRex(int reg) - { - return (reg >= X86Registers::esp); - } - - // Format a REX prefix byte. - inline void emitRex(bool w, int r, int x, int b) - { - ASSERT(r >= 0); - ASSERT(x >= 0); - ASSERT(b >= 0); - m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3)); - } - - // Used to plant a REX byte with REX.w set (for 64-bit operations). - inline void emitRexW(int r, int x, int b) - { - emitRex(true, r, x, b); - } - - // Used for operations with byte operands - use byteRegRequiresRex() to check register operands, - // regRequiresRex() to check other registers (i.e. address base & index). - inline void emitRexIf(bool condition, int r, int x, int b) - { - if (condition) emitRex(false, r, x, b); - } - - // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above). - inline void emitRexIfNeeded(int r, int x, int b) - { - emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b); - } -#else - // No REX prefix bytes on 32-bit x86. 
- inline bool regRequiresRex(int) { return false; } - inline bool byteRegRequiresRex(int) { return false; } - inline void emitRexIf(bool, int, int, int) {} - inline void emitRexIfNeeded(int, int, int) {} -#endif - - void putModRm(ModRmMode mode, int reg, RegisterID rm) - { - m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7)); - } - - void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale) - { - ASSERT(mode != ModRmRegister); - - putModRm(mode, reg, hasSib); - m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7)); - } - - void registerModRM(int reg, RegisterID rm) - { - putModRm(ModRmRegister, reg, rm); - } - - void memoryModRM(int reg, RegisterID base, int offset) - { - // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. -#if CPU(X86_64) - if ((base == hasSib) || (base == hasSib2)) { -#else - if (base == hasSib) { -#endif - if (!offset) // No need to check if the base is noBase, since we know it is hasSib! - putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0); - else if (CAN_SIGN_EXTEND_8_32(offset)) { - putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0); - m_buffer.putByteUnchecked(offset); - } else { - putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0); - m_buffer.putIntUnchecked(offset); - } - } else { -#if CPU(X86_64) - if (!offset && (base != noBase) && (base != noBase2)) -#else - if (!offset && (base != noBase)) -#endif - putModRm(ModRmMemoryNoDisp, reg, base); - else if (CAN_SIGN_EXTEND_8_32(offset)) { - putModRm(ModRmMemoryDisp8, reg, base); - m_buffer.putByteUnchecked(offset); - } else { - putModRm(ModRmMemoryDisp32, reg, base); - m_buffer.putIntUnchecked(offset); - } - } - } - - void memoryModRM_disp8(int reg, RegisterID base, int offset) - { - // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. - ASSERT(CAN_SIGN_EXTEND_8_32(offset)); -#if CPU(X86_64) - if ((base == hasSib) || (base == hasSib2)) { -#else - if (base == hasSib) { -#endif - putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0); - m_buffer.putByteUnchecked(offset); - } else { - putModRm(ModRmMemoryDisp8, reg, base); - m_buffer.putByteUnchecked(offset); - } - } - - void memoryModRM_disp32(int reg, RegisterID base, int offset) - { - // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there. -#if CPU(X86_64) - if ((base == hasSib) || (base == hasSib2)) { -#else - if (base == hasSib) { -#endif - putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0); - m_buffer.putIntUnchecked(offset); - } else { - putModRm(ModRmMemoryDisp32, reg, base); - m_buffer.putIntUnchecked(offset); - } - } - - void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset) - { - ASSERT(index != noIndex); - -#if CPU(X86_64) - if (!offset && (base != noBase) && (base != noBase2)) -#else - if (!offset && (base != noBase)) -#endif - putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale); - else if (CAN_SIGN_EXTEND_8_32(offset)) { - putModRmSib(ModRmMemoryDisp8, reg, base, index, scale); - m_buffer.putByteUnchecked(offset); - } else { - putModRmSib(ModRmMemoryDisp32, reg, base, index, scale); - m_buffer.putIntUnchecked(offset); - } - } - -#if !CPU(X86_64) - void memoryModRM(int reg, const void* address) - { - // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32! 
-            putModRm(ModRmMemoryNoDisp, reg, noBase);
-            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
-        }
-#endif
-
-    public:
         AssemblerBuffer m_buffer;
     } m_formatter;
@@ -2716,5 +3801,3 @@ private:
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER) && CPU(X86)
-
-#endif // X86Assembler_h
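
One functional change in this patch is fillNops(): on X86_64 it now pads with the longest recommended multi-byte NOP that fits, stretching toward the 15-byte instruction limit with 0x66 prefixes, instead of emitting a run of single-byte 0x90s. A rough standalone sketch of that strategy follows (truncated table and illustrative helper name; this is not the WebKit code itself):

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Sketch of the multi-byte NOP padding used by the new fillNops(): emit the
// longest recommended NOP encoding that fits, and stretch it toward the
// 15-byte x86 instruction limit with redundant 0x66 operand-size prefixes.
// The table is truncated to four entries here; WebKit's goes up to ten bytes.
static void fillNopsSketch(uint8_t* where, size_t size)
{
    static const uint8_t nops[4][4] = {
        { 0x90 },                   // nop
        { 0x66, 0x90 },             // xchg %ax,%ax
        { 0x0f, 0x1f, 0x00 },       // nopl (%rax)
        { 0x0f, 0x1f, 0x40, 0x08 }, // nopl 8(%rax)
    };
    const size_t longestTableNop = 4;
    const size_t longestInstruction = 15;

    while (size) {
        size_t nopSize = std::min(size, longestInstruction);
        size_t numPrefixes = nopSize <= longestTableNop ? 0 : nopSize - longestTableNop;
        for (size_t i = 0; i < numPrefixes; ++i)
            *where++ = 0x66;        // redundant prefixes keep the NOP a single instruction

        size_t rest = nopSize - numPrefixes;
        for (size_t i = 0; i < rest; ++i)
            *where++ = nops[rest - 1][i];

        size -= nopSize;
    }
}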
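
The bulk of the refactor funnels every instruction through SingleInstructionBufferWriter, whose emitRex*() and putModRm()/registerModRM() helpers build the REX prefix and ModRM byte. As a rough standalone illustration of what those two bytes carry (simplified; the function name and example below are mine, not part of the patch):

#include <cstdint>
#include <vector>

// Sketch of how a REX prefix and ModRM byte combine for a register-to-register
// instruction. Register numbers follow the hardware encoding (rax=0 ... r15=15);
// the low three bits of each register go into the ModRM byte, the fourth bit
// rides in the REX prefix (REX.R for the reg field, REX.B for the rm field).
static void emitRegReg(std::vector<uint8_t>& out, uint8_t opcode, unsigned reg, unsigned rm, bool is64Bit)
{
    const uint8_t PRE_REX = 0x40;
    if (is64Bit || reg >= 8 || rm >= 8)
        out.push_back(PRE_REX | (is64Bit << 3) | ((reg >> 3) << 2) | (rm >> 3));
    out.push_back(opcode);
    out.push_back(0xC0 | ((reg & 7) << 3) | (rm & 7)); // ModRM, register-direct mode
}

// Example: 'mov %rax, %r11' (opcode 0x89 /r) encodes as 49 89 c3 -
// REX.W+B, then the opcode, then ModRM with reg=rax(0) and rm=r11 (low bits 3, high bit in REX.B).
// std::vector<uint8_t> code;
// emitRegReg(code, 0x89, 0 /* rax */, 11 /* r11 */, true);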