path: root/Source/JavaScriptCore/assembler/ARMv7Assembler.h
author     Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/assembler/ARMv7Assembler.h
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/assembler/ARMv7Assembler.h')
-rw-r--r--  Source/JavaScriptCore/assembler/ARMv7Assembler.h  647
1 file changed, 371 insertions(+), 276 deletions(-)
diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
index 5257f32a8..86218ea72 100644
--- a/Source/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
@@ -24,12 +24,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ARMAssembler_h
-#define ARMAssembler_h
+#pragma once
#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
#include "AssemblerBuffer.h"
+#include "AssemblerCommon.h"
#include <limits.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
@@ -38,23 +38,83 @@
namespace JSC {
namespace ARMRegisters {
+
+ #define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+ // The following are defined as pairs of the following values:
+ // 1. type of the storage needed to save the register value by the JIT probe.
+ // 2. name of the register.
+ #define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, r0) \
+ V(void*, r1) \
+ V(void*, r2) \
+ V(void*, r3) \
+ V(void*, r4) \
+ V(void*, r5) \
+ V(void*, r6) \
+ V(void*, r7) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, r11) \
+ V(void*, ip) \
+ V(void*, sp) \
+ V(void*, lr) \
+ V(void*, pc)
+
+ #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, apsr) \
+ V(void*, fpscr) \
+
+ #define FOR_EACH_CPU_FPREGISTER(V) \
+ V(double, d0) \
+ V(double, d1) \
+ V(double, d2) \
+ V(double, d3) \
+ V(double, d4) \
+ V(double, d5) \
+ V(double, d6) \
+ V(double, d7) \
+ V(double, d8) \
+ V(double, d9) \
+ V(double, d10) \
+ V(double, d11) \
+ V(double, d12) \
+ V(double, d13) \
+ V(double, d14) \
+ V(double, d15) \
+ V(double, d16) \
+ V(double, d17) \
+ V(double, d18) \
+ V(double, d19) \
+ V(double, d20) \
+ V(double, d21) \
+ V(double, d22) \
+ V(double, d23) \
+ V(double, d24) \
+ V(double, d25) \
+ V(double, d26) \
+ V(double, d27) \
+ V(double, d28) \
+ V(double, d29) \
+ V(double, d30) \
+ V(double, d31)
+
typedef enum {
- r0,
- r1,
- r2,
- r3,
- r4,
- r5,
- r6,
- r7, fp = r7, // frame pointer
- r8,
- r9, sb = r9, // static base
- r10, sl = r10, // stack limit
- r11,
- r12, ip = r12,
- r13, sp = r13,
- r14, lr = r14,
- r15, pc = r15,
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+
+ fp = r7, // frame pointer
+ sb = r9, // static base
+ sl = r10, // stack limit
+ r12 = ip,
+ r13 = sp,
+ r14 = lr,
+ r15 = pc
} RegisterID;
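// Illustrative sketch (not part of this patch): the (storage type, name)
// pairs in FOR_EACH_CPU_GPREGISTER / FOR_EACH_CPU_FPREGISTER are X-macros,
// so a consumer such as the JIT probe could stamp out matching storage.
// The struct and macro names below are hypothetical:
//
//     struct ProbeCPUState {
//     #define DECLARE_STORAGE(type, name) type name;
//         FOR_EACH_CPU_REGISTER(DECLARE_STORAGE)
//     #undef DECLARE_STORAGE
//     };
//
//     // e.g. sizeof(ProbeCPUState::d0) == sizeof(double)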
typedef enum {
@@ -93,38 +153,9 @@ namespace ARMRegisters {
} FPSingleRegisterID;
typedef enum {
- d0,
- d1,
- d2,
- d3,
- d4,
- d5,
- d6,
- d7,
- d8,
- d9,
- d10,
- d11,
- d12,
- d13,
- d14,
- d15,
- d16,
- d17,
- d18,
- d19,
- d20,
- d21,
- d22,
- d23,
- d24,
- d25,
- d26,
- d27,
- d28,
- d29,
- d30,
- d31,
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
} FPDoubleRegisterID;
typedef enum {
@@ -174,77 +205,7 @@ namespace ARMRegisters {
return (FPDoubleRegisterID)(reg >> 1);
}
-#if USE(MASM_PROBE)
- #define FOR_EACH_CPU_REGISTER(V) \
- FOR_EACH_CPU_GPREGISTER(V) \
- FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- FOR_EACH_CPU_FPREGISTER(V)
-
- #define FOR_EACH_CPU_GPREGISTER(V) \
- V(void*, r0) \
- V(void*, r1) \
- V(void*, r2) \
- V(void*, r3) \
- V(void*, r4) \
- V(void*, r5) \
- V(void*, r6) \
- V(void*, r7) \
- V(void*, r8) \
- V(void*, r9) \
- V(void*, r10) \
- V(void*, r11) \
- V(void*, ip) \
- V(void*, sp) \
- V(void*, lr) \
- V(void*, pc)
-
- #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- V(void*, apsr) \
- V(void*, fpscr) \
-
- #define FOR_EACH_CPU_FPREGISTER(V) \
- V(double, d0) \
- V(double, d1) \
- V(double, d2) \
- V(double, d3) \
- V(double, d4) \
- V(double, d5) \
- V(double, d6) \
- V(double, d7) \
- V(double, d8) \
- V(double, d9) \
- V(double, d10) \
- V(double, d11) \
- V(double, d12) \
- V(double, d13) \
- V(double, d14) \
- V(double, d15) \
- FOR_EACH_CPU_FPREGISTER_EXTENSION(V)
-
-#if CPU(APPLE_ARMV7S)
- #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) \
- V(double, d16) \
- V(double, d17) \
- V(double, d18) \
- V(double, d19) \
- V(double, d20) \
- V(double, d21) \
- V(double, d22) \
- V(double, d23) \
- V(double, d24) \
- V(double, d25) \
- V(double, d26) \
- V(double, d27) \
- V(double, d28) \
- V(double, d29) \
- V(double, d30) \
- V(double, d31)
-#else
- #define FOR_EACH_CPU_FPREGISTER_EXTENSION(V) // Nothing to add.
-#endif // CPU(APPLE_ARMV7S)
-
-#endif // USE(MASM_PROBE)
-}
+} // namespace ARMRegisters
class ARMv7Assembler;
class ARMThumbImmediate {
@@ -492,11 +453,11 @@ public:
typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID;
typedef FPDoubleRegisterID FPRegisterID;
- static RegisterID firstRegister() { return ARMRegisters::r0; }
- static RegisterID lastRegister() { return ARMRegisters::r13; }
-
- static FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
- static FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
+ static constexpr RegisterID firstRegister() { return ARMRegisters::r0; }
+ static constexpr RegisterID lastRegister() { return ARMRegisters::r13; }
+
+ static constexpr FPRegisterID firstFPRegister() { return ARMRegisters::d0; }
+ static constexpr FPRegisterID lastFPRegister() { return ARMRegisters::d31; }
// (HS, LO, HI, LS) -> (AE, B, A, BE)
// (VS, VC) -> (O, NO)
@@ -583,6 +544,8 @@ public:
{
}
+ AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
+
private:
// ARMv7, Appx-A.6.3
@@ -646,6 +609,8 @@ private:
OP_ADD_SP_imm_T1 = 0xA800,
OP_ADD_SP_imm_T2 = 0xB000,
OP_SUB_SP_imm_T1 = 0xB080,
+ OP_PUSH_T1 = 0xB400,
+ OP_POP_T1 = 0xBC00,
OP_BKPT = 0xBE00,
OP_IT = 0xBF00,
OP_NOP_T1 = 0xBF00,
@@ -654,6 +619,8 @@ private:
typedef enum {
OP_B_T1 = 0xD000,
OP_B_T2 = 0xE000,
+ OP_POP_T2 = 0xE8BD,
+ OP_PUSH_T2 = 0xE92D,
OP_AND_reg_T2 = 0xEA00,
OP_TST_reg_T2 = 0xEA10,
OP_ORR_reg_T2 = 0xEA40,
@@ -714,7 +681,7 @@ private:
OP_MOVT = 0xF2C0,
OP_UBFX_T1 = 0xF3C0,
OP_NOP_T2a = 0xF3AF,
- OP_DMB_SY_T2a = 0xF3BF,
+ OP_DMB_T1a = 0xF3BF,
OP_STRB_imm_T3 = 0xF800,
OP_STRB_reg_T2 = 0xF800,
OP_LDRB_imm_T3 = 0xF810,
@@ -741,39 +708,40 @@ private:
OP_ROR_reg_T2 = 0xFA60,
OP_CLZ = 0xFAB0,
OP_SMULL_T1 = 0xFB80,
-#if CPU(APPLE_ARMV7S)
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
OP_SDIV_T1 = 0xFB90,
OP_UDIV_T1 = 0xFBB0,
#endif
} OpcodeID1;
typedef enum {
- OP_VADD_T2b = 0x0A00,
- OP_VDIVb = 0x0A00,
- OP_FLDSb = 0x0A00,
- OP_VLDRb = 0x0A00,
- OP_VMOV_IMM_T2b = 0x0A00,
- OP_VMOV_T2b = 0x0A40,
- OP_VMUL_T2b = 0x0A00,
- OP_FSTSb = 0x0A00,
- OP_VSTRb = 0x0A00,
- OP_VMOV_StoCb = 0x0A10,
- OP_VMOV_CtoSb = 0x0A10,
- OP_VMOV_DtoCb = 0x0A10,
- OP_VMOV_CtoDb = 0x0A10,
- OP_VMRSb = 0x0A10,
- OP_VABS_T2b = 0x0A40,
- OP_VCMPb = 0x0A40,
- OP_VCVT_FPIVFPb = 0x0A40,
- OP_VNEG_T2b = 0x0A40,
- OP_VSUB_T2b = 0x0A40,
- OP_VSQRT_T1b = 0x0A40,
- OP_VCVTSD_T1b = 0x0A40,
- OP_VCVTDS_T1b = 0x0A40,
- OP_NOP_T2b = 0x8000,
- OP_DMB_SY_T2b = 0x8F5F,
- OP_B_T3b = 0x8000,
- OP_B_T4b = 0x9000,
+ OP_VADD_T2b = 0x0A00,
+ OP_VDIVb = 0x0A00,
+ OP_FLDSb = 0x0A00,
+ OP_VLDRb = 0x0A00,
+ OP_VMOV_IMM_T2b = 0x0A00,
+ OP_VMOV_T2b = 0x0A40,
+ OP_VMUL_T2b = 0x0A00,
+ OP_FSTSb = 0x0A00,
+ OP_VSTRb = 0x0A00,
+ OP_VMOV_StoCb = 0x0A10,
+ OP_VMOV_CtoSb = 0x0A10,
+ OP_VMOV_DtoCb = 0x0A10,
+ OP_VMOV_CtoDb = 0x0A10,
+ OP_VMRSb = 0x0A10,
+ OP_VABS_T2b = 0x0A40,
+ OP_VCMPb = 0x0A40,
+ OP_VCVT_FPIVFPb = 0x0A40,
+ OP_VNEG_T2b = 0x0A40,
+ OP_VSUB_T2b = 0x0A40,
+ OP_VSQRT_T1b = 0x0A40,
+ OP_VCVTSD_T1b = 0x0A40,
+ OP_VCVTDS_T1b = 0x0A40,
+ OP_NOP_T2b = 0x8000,
+ OP_DMB_SY_T1b = 0x8F5F,
+ OP_DMB_ISHST_T1b = 0x8F5A,
+ OP_B_T3b = 0x8000,
+ OP_B_T4b = 0x9000,
} OpcodeID2;
struct FourFours {
@@ -799,11 +767,11 @@ private:
class ARMInstructionFormatter;
// false means else!
- bool ifThenElseConditionBit(Condition condition, bool isIf)
+ static bool ifThenElseConditionBit(Condition condition, bool isIf)
{
return isIf ? (condition & 1) : !(condition & 1);
}
- uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
+ static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| (ifThenElseConditionBit(condition, inst3if) << 2)
@@ -812,7 +780,7 @@ private:
ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
return (condition << 4) | mask;
}
- uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
+ static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| (ifThenElseConditionBit(condition, inst3if) << 2)
@@ -820,7 +788,7 @@ private:
ASSERT((condition != ConditionAL) || !(mask & (mask - 1)));
return (condition << 4) | mask;
}
- uint8_t ifThenElse(Condition condition, bool inst2if)
+ static uint8_t ifThenElse(Condition condition, bool inst2if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| 4;
@@ -828,7 +796,7 @@ private:
return (condition << 4) | mask;
}
- uint8_t ifThenElse(Condition condition)
+ static uint8_t ifThenElse(Condition condition)
{
int mask = 8;
return (condition << 4) | mask;
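// Illustrative sketch (not part of this patch): the returned byte is the low
// byte of a Thumb-2 IT instruction, with firstcond in bits 7..4 and the
// then/else mask in bits 3..0. For example, assuming ConditionEQ == 0:
//
//     ifThenElse(ConditionEQ)        == 0x08   // OP_IT | 0x08 -> IT  EQ
//     ifThenElse(ConditionEQ, true)  == 0x04   // OP_IT | 0x04 -> ITT EQ
//     ifThenElse(ConditionEQ, false) == 0x0c   // OP_IT | 0x0c -> ITE EQ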
@@ -855,7 +823,7 @@ public:
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isValid());
- if (rn == ARMRegisters::sp) {
+ if (rn == ARMRegisters::sp && imm.isUInt16()) {
ASSERT(!(imm.getUInt16() & 3));
if (!(rd & 8) && imm.isUInt10()) {
m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast<uint8_t>(imm.getUInt10() >> 2));
@@ -894,6 +862,11 @@ public:
// NOTE: In an IT block, add doesn't modify the flags register.
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm)
{
+ if (rd == ARMRegisters::sp) {
+ mov(rd, rn);
+ rn = rd;
+ }
+
if (rd == rn)
m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd);
else if (rd == rm)
@@ -1183,9 +1156,10 @@ public:
{
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
ASSERT(imm.isUInt12());
+ ASSERT(!(imm.getUInt12() & 1));
if (!((rt | rn) & 8) && imm.isUInt6())
- m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
else
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12());
}
@@ -1353,11 +1327,14 @@ public:
uint16_t* address = static_cast<uint16_t*>(instructionStart);
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(imm >> 16));
- address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
- address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16);
- address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
- address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16);
- address[4] = OP_CMP_reg_T2 | left;
+ uint16_t instruction[] = {
+ twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16),
+ twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16),
+ twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16),
+ twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16),
+ static_cast<uint16_t>(OP_CMP_reg_T2 | left)
+ };
+ performJITMemcpy(address, instruction, sizeof(uint16_t) * 5);
cacheFlush(address, sizeof(uint16_t) * 5);
}
#else
@@ -1368,8 +1345,11 @@ public:
ASSERT(!BadReg(rd));
uint16_t* address = static_cast<uint16_t*>(instructionStart);
- address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm);
- address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm);
+ uint16_t instruction[] = {
+ twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm),
+ twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm)
+ };
+ performJITMemcpy(address, instruction, sizeof(uint16_t) * 2);
cacheFlush(address, sizeof(uint16_t) * 2);
}
#endif
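// Illustrative sketch (not part of this patch): the pattern used throughout
// this change is to stage the new halfwords in a local buffer and hand them
// to performJITMemcpy() rather than storing through the code pointer, e.g.:
//
//     uint16_t staged[2];                 // hypothetical example values
//     staged[0] = firstHalfword;
//     staged[1] = secondHalfword;
//     performJITMemcpy(where, staged, sizeof(staged));
//     cacheFlush(where, sizeof(staged));
//
// performJITMemcpy is assumed here to be WebKit's hook for writing into JIT
// memory (possibly via a separate writable mapping); on simple configurations
// it may just forward to memcpy.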
@@ -1488,9 +1468,49 @@ public:
m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
-#if CPU(APPLE_ARMV7S)
+ ALWAYS_INLINE void pop(RegisterID dest)
+ {
+ if (dest < ARMRegisters::r8)
+ m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest);
+ else {
+ // Load postindexed with writeback.
+ ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+ }
+ }
+
+ ALWAYS_INLINE void pop(uint32_t registerList)
+ {
+ ASSERT(WTF::bitCount(registerList) > 1);
+ ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList));
+ ASSERT(!((1 << ARMRegisters::sp) & registerList));
+ m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList);
+ }
+
+ ALWAYS_INLINE void push(RegisterID src)
+ {
+ if (src < ARMRegisters::r8)
+ m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src);
+ else if (src == ARMRegisters::lr)
+ m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100);
+ else {
+ // Store preindexed with writeback.
+ str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+ }
+ }
+
+ ALWAYS_INLINE void push(uint32_t registerList)
+ {
+ ASSERT(WTF::bitCount(registerList) > 1);
+ ASSERT(!((1 << ARMRegisters::pc) & registerList));
+ ASSERT(!((1 << ARMRegisters::sp) & registerList));
+ m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList);
+ }
+
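// Illustrative sketch (not part of this patch): the encoding picked by
// push()/pop() depends on the register(s) involved:
//
//     push(ARMRegisters::r4);   // 16-bit PUSH {r4}     (OP_PUSH_T1, bit 4)
//     push(ARMRegisters::lr);   // 16-bit PUSH {lr}     (OP_PUSH_T1, bit 8)
//     push(ARMRegisters::r10);  // STR r10, [sp, #-4]!  (no T1 list bit for r10)
//     pop((1u << ARMRegisters::r4) | (1u << ARMRegisters::r5)); // 32-bit POP.W {r4, r5}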
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
+ template<int datasize>
ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm)
{
+ static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s");
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
@@ -1635,8 +1655,8 @@ public:
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isUInt12());
- if (!((rt | rn) & 8) && imm.isUInt7())
- m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt7() >> 2, rn, rt);
+ if (!((rt | rn) & 8) && imm.isUInt6())
+ m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt);
else
m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12());
}
@@ -1834,7 +1854,7 @@ public:
m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f);
}
-#if CPU(APPLE_ARMV7S)
+#if HAVE(ARM_IDIV_INSTRUCTIONS)
ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm)
{
ASSERT(!BadReg(rd));
@@ -1984,9 +2004,51 @@ public:
m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b);
}
+ static constexpr int16_t nopPseudo16()
+ {
+ return OP_NOP_T1;
+ }
+
+ static constexpr int32_t nopPseudo32()
+ {
+ return OP_NOP_T2a | (OP_NOP_T2b << 16);
+ }
+
+ static void fillNops(void* base, size_t size, bool isCopyingToExecutableMemory)
+ {
+ RELEASE_ASSERT(!(size % sizeof(int16_t)));
+
+ char* ptr = static_cast<char*>(base);
+ const size_t num32s = size / sizeof(int32_t);
+ for (size_t i = 0; i < num32s; i++) {
+ const int32_t insn = nopPseudo32();
+ if (isCopyingToExecutableMemory)
+ performJITMemcpy(ptr, &insn, sizeof(int32_t));
+ else
+ memcpy(ptr, &insn, sizeof(int32_t));
+ ptr += sizeof(int32_t);
+ }
+
+ const size_t num16s = (size % sizeof(int32_t)) / sizeof(int16_t);
+ ASSERT(num16s == 0 || num16s == 1);
+ ASSERT(num16s * sizeof(int16_t) + num32s * sizeof(int32_t) == size);
+ if (num16s) {
+ const int16_t insn = nopPseudo16();
+ if (isCopyingToExecutableMemory)
+ performJITMemcpy(ptr, &insn, sizeof(int16_t));
+ else
+ memcpy(ptr, &insn, sizeof(int16_t));
+ }
+ }
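// Illustrative sketch (not part of this patch): fillNops() emits as many
// 4-byte NOP.W encodings as fit and at most one trailing 2-byte NOP, e.g.
//
//     fillNops(base, 10, false);   // NOP.W, NOP.W, NOP  (4 + 4 + 2 bytes)
//
// so the only precondition is that size is a multiple of sizeof(int16_t).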
+
void dmbSY()
{
- m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b);
+ m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_SY_T1b);
+ }
+
+ void dmbISHST()
+ {
+ m_formatter.twoWordOp16Op16(OP_DMB_T1a, OP_DMB_ISHST_T1b);
}
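// Note (not part of this patch): both barriers share OP_DMB_T1a as their
// first halfword; the barrier option sits in the low nibble of the second
// halfword:
//
//     OP_DMB_SY_T1b    == 0x8F5F   // option 0b1111: full-system barrier, loads and stores
//     OP_DMB_ISHST_T1b == 0x8F5A   // option 0b1010: inner-shareable domain, stores only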
AssemblerLabel labelIgnoringWatchpoints()
@@ -2036,14 +2098,7 @@ public:
return b.m_offset - a.m_offset;
}
- int executableOffsetFor(int location)
- {
- if (!location)
- return 0;
- return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
- }
-
- int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
// Assembler admin methods:
@@ -2052,7 +2107,7 @@ public:
return a.from() < b.from();
}
- bool canCompact(JumpType jumpType)
+ static bool canCompact(JumpType jumpType)
{
// The following cannot be compacted:
// JumpFixed: represents custom jump sequence
@@ -2061,7 +2116,7 @@ public:
return (jumpType == JumpNoCondition) || (jumpType == JumpCondition);
}
- JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
{
if (jumpType == JumpFixed)
return LinkInvalid;
@@ -2105,51 +2160,43 @@ public:
return LinkConditionalBX;
}
- JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
{
JumpLinkType linkType = computeJumpType(record.type(), from, to);
record.setLinkType(linkType);
return linkType;
}
- void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
- {
- int32_t ptr = regionStart / sizeof(int32_t);
- const int32_t end = regionEnd / sizeof(int32_t);
- int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
- while (ptr < end)
- offsets[ptr++] = offset;
- }
-
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
{
std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
return m_jumpsToLink;
}
- void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction8, uint8_t* to)
{
+ const uint16_t* fromInstruction = reinterpret_cast_ptr<const uint16_t*>(fromInstruction8);
switch (record.linkType()) {
case LinkJumpT1:
- linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ linkJumpT1(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
break;
case LinkJumpT2:
- linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), to);
+ linkJumpT2(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
break;
case LinkJumpT3:
- linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ linkJumpT3(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
break;
case LinkJumpT4:
- linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), to);
+ linkJumpT4(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
break;
case LinkConditionalJumpT4:
- linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
break;
case LinkConditionalBX:
- linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), to);
+ linkConditionalBX(record.condition(), reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
break;
case LinkBX:
- linkBX(reinterpret_cast_ptr<uint16_t*>(from), to);
+ linkBX(reinterpret_cast_ptr<uint16_t*>(from), fromInstruction, to);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
@@ -2186,7 +2233,7 @@ public:
ASSERT(from.isSet());
uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
- linkJumpAbsolute(location, to);
+ linkJumpAbsolute(location, location, to);
}
static void linkCall(void* code, AssemblerLabel from, void* to)
@@ -2202,15 +2249,24 @@ public:
setPointer(reinterpret_cast<char*>(code) + where.m_offset, value, false);
}
+ // The static relink and replace methods can use |from| for both the
+ // writable and the executable address when patching calls and jumps,
+ // as they're modifying existing (linked) code, so the address being
+ // provided is correct for relative address computation.
static void relinkJump(void* from, void* to)
{
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
- linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
+ linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), reinterpret_cast<uint16_t*>(from), to);
cacheFlush(reinterpret_cast<uint16_t*>(from) - 5, 5 * sizeof(uint16_t));
}
+
+ static void relinkJumpToNop(void* from)
+ {
+ relinkJump(from, from);
+ }
static void relinkCall(void* from, void* to)
{
@@ -2246,8 +2302,9 @@ public:
offset |= (1 << 11);
uint16_t* location = reinterpret_cast<uint16_t*>(where);
- location[1] &= ~((1 << 12) - 1);
- location[1] |= offset;
+ uint16_t instruction = location[1] & ~((1 << 12) - 1);
+ instruction |= offset;
+ performJITMemcpy(location + 1, &instruction, sizeof(uint16_t));
cacheFlush(location, sizeof(uint16_t) * 2);
}
@@ -2271,16 +2328,16 @@ public:
#if OS(LINUX)
if (canBeJumpT4(reinterpret_cast<uint16_t*>(instructionStart), to)) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
- linkJumpT4(ptr, to);
+ linkJumpT4(ptr, ptr, to);
cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
} else {
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 5;
- linkBX(ptr, to);
+ linkBX(ptr, ptr, to);
cacheFlush(ptr - 5, sizeof(uint16_t) * 5);
}
#else
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart) + 2;
- linkJumpT4(ptr, to);
+ linkJumpT4(ptr, ptr, to);
cacheFlush(ptr - 2, sizeof(uint16_t) * 2);
#endif
}
@@ -2293,6 +2350,11 @@ public:
return 4;
#endif
}
+
+ static constexpr ptrdiff_t patchableJumpSize()
+ {
+ return 10;
+ }
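// Note (not part of this patch): 10 bytes appears to correspond to the
// worst-case patchable sequence written by linkBX() below, i.e. five 16-bit
// halfwords:
//
//     static_assert(patchableJumpSize() == 5 * static_cast<ptrdiff_t>(sizeof(uint16_t)), "MOVW/MOVT/BX ip");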
static void replaceWithLoad(void* instructionStart)
{
@@ -2301,14 +2363,17 @@ public:
switch (ptr[0] & 0xFFF0) {
case OP_LDR_imm_T3:
break;
- case OP_ADD_imm_T3:
+ case OP_ADD_imm_T3: {
ASSERT(!(ptr[1] & 0xF000));
- ptr[0] &= 0x000F;
- ptr[0] |= OP_LDR_imm_T3;
- ptr[1] |= (ptr[1] & 0x0F00) << 4;
- ptr[1] &= 0xF0FF;
+ uint16_t instructions[2];
+ instructions[0] = ptr[0] & 0x000F;
+ instructions[0] |= OP_LDR_imm_T3;
+ instructions[1] = ptr[1] | (ptr[1] & 0x0F00) << 4;
+ instructions[1] &= 0xF0FF;
+ performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
cacheFlush(ptr, sizeof(uint16_t) * 2);
break;
+ }
default:
RELEASE_ASSERT_NOT_REACHED();
}
@@ -2319,14 +2384,17 @@ public:
ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1));
uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart);
switch (ptr[0] & 0xFFF0) {
- case OP_LDR_imm_T3:
+ case OP_LDR_imm_T3: {
ASSERT(!(ptr[1] & 0x0F00));
- ptr[0] &= 0x000F;
- ptr[0] |= OP_ADD_imm_T3;
- ptr[1] |= (ptr[1] & 0xF000) >> 4;
- ptr[1] &= 0x0FFF;
+ uint16_t instructions[2];
+ instructions[0] = ptr[0] & 0x000F;
+ instructions[0] |= OP_ADD_imm_T3;
+ instructions[1] = ptr[1] | (ptr[1] & 0xF000) >> 4;
+ instructions[1] &= 0x0FFF;
+ performJITMemcpy(ptr, instructions, sizeof(uint16_t) * 2);
cacheFlush(ptr, sizeof(uint16_t) * 2);
break;
+ }
case OP_ADD_imm_T3:
break;
default:
@@ -2375,8 +2443,6 @@ public:
linuxPageFlush(current, current + page);
linuxPageFlush(current, end);
-#elif OS(WINCE)
- CacheRangeFlush(code, size, CACHE_SYNC_ALL);
#else
#error "The cacheFlush support is missing on this platform."
#endif
@@ -2456,11 +2522,13 @@ private:
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
- location[-4] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
- location[-3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
- location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
- location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+ uint16_t instructions[4];
+ instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-3] >> 8) & 0xf, lo16);
+ instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16);
+ performJITMemcpy(location - 4, instructions, 4 * sizeof(uint16_t));
if (flush)
cacheFlush(location - 4, 4 * sizeof(uint16_t));
}
@@ -2488,8 +2556,10 @@ private:
ASSERT(imm.isValid());
ASSERT(imm.isUInt7());
uint16_t* location = reinterpret_cast<uint16_t*>(code);
- location[0] &= ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
- location[0] |= (imm.getUInt7() >> 2) << 6;
+ uint16_t instruction;
+ instruction = location[0] & ~((static_cast<uint16_t>(0x7f) >> 2) << 6);
+ instruction |= (imm.getUInt7() >> 2) << 6;
+ performJITMemcpy(location, &instruction, sizeof(uint16_t));
cacheFlush(location, sizeof(uint16_t));
}
@@ -2498,39 +2568,39 @@ private:
setInt32(code, reinterpret_cast<uint32_t>(value), flush);
}
- static bool isB(void* address)
+ static bool isB(const void* address)
{
- uint16_t* instruction = static_cast<uint16_t*>(address);
+ const uint16_t* instruction = static_cast<const uint16_t*>(address);
return ((instruction[0] & 0xf800) == OP_B_T4a) && ((instruction[1] & 0xd000) == OP_B_T4b);
}
- static bool isBX(void* address)
+ static bool isBX(const void* address)
{
- uint16_t* instruction = static_cast<uint16_t*>(address);
+ const uint16_t* instruction = static_cast<const uint16_t*>(address);
return (instruction[0] & 0xff87) == OP_BX;
}
- static bool isMOV_imm_T3(void* address)
+ static bool isMOV_imm_T3(const void* address)
{
- uint16_t* instruction = static_cast<uint16_t*>(address);
+ const uint16_t* instruction = static_cast<const uint16_t*>(address);
return ((instruction[0] & 0xFBF0) == OP_MOV_imm_T3) && ((instruction[1] & 0x8000) == 0);
}
- static bool isMOVT(void* address)
+ static bool isMOVT(const void* address)
{
- uint16_t* instruction = static_cast<uint16_t*>(address);
+ const uint16_t* instruction = static_cast<const uint16_t*>(address);
return ((instruction[0] & 0xFBF0) == OP_MOVT) && ((instruction[1] & 0x8000) == 0);
}
- static bool isNOP_T1(void* address)
+ static bool isNOP_T1(const void* address)
{
- uint16_t* instruction = static_cast<uint16_t*>(address);
+ const uint16_t* instruction = static_cast<const uint16_t*>(address);
return instruction[0] == OP_NOP_T1;
}
- static bool isNOP_T2(void* address)
+ static bool isNOP_T2(const void* address)
{
- uint16_t* instruction = static_cast<uint16_t*>(address);
+ const uint16_t* instruction = static_cast<const uint16_t*>(address);
return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
}
@@ -2578,7 +2648,7 @@ private:
return ((relative << 7) >> 7) == relative;
}
- void linkJumpT1(Condition cond, uint16_t* instruction, void* target)
+ static void linkJumpT1(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2593,10 +2663,11 @@ private:
// All branch offsets should be an even distance.
ASSERT(!(relative & 1));
- instruction[-1] = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+ uint16_t newInstruction = OP_B_T1 | ((cond & 0xf) << 8) | ((relative & 0x1fe) >> 1);
+ performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
}
- static void linkJumpT2(uint16_t* instruction, void* target)
+ static void linkJumpT2(uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2611,10 +2682,11 @@ private:
// All branch offsets should be an even distance.
ASSERT(!(relative & 1));
- instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1);
+ uint16_t newInstruction = OP_B_T2 | ((relative & 0xffe) >> 1);
+ performJITMemcpy(writeTarget - 1, &newInstruction, sizeof(uint16_t));
}
- void linkJumpT3(Condition cond, uint16_t* instruction, void* target)
+ static void linkJumpT3(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2625,11 +2697,13 @@ private:
// All branch offsets should be an even distance.
ASSERT(!(relative & 1));
- instruction[-2] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
- instruction[-1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+ uint16_t instructions[2];
+ instructions[0] = OP_B_T3a | ((relative & 0x100000) >> 10) | ((cond & 0xf) << 6) | ((relative & 0x3f000) >> 12);
+ instructions[1] = OP_B_T3b | ((relative & 0x80000) >> 8) | ((relative & 0x40000) >> 5) | ((relative & 0xffe) >> 1);
+ performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
}
- static void linkJumpT4(uint16_t* instruction, void* target)
+ static void linkJumpT4(uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2643,47 +2717,55 @@ private:
// All branch offsets should be an even distance.
ASSERT(!(relative & 1));
- instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
- instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ uint16_t instructions[2];
+ instructions[0] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ instructions[1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ performJITMemcpy(writeTarget - 2, instructions, 2 * sizeof(uint16_t));
}
- void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target)
+ static void linkConditionalJumpT4(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
- instruction[-3] = ifThenElse(cond) | OP_IT;
- linkJumpT4(instruction, target);
+ uint16_t newInstruction = ifThenElse(cond) | OP_IT;
+ performJITMemcpy(writeTarget - 3, &newInstruction, sizeof(uint16_t));
+ linkJumpT4(writeTarget, instruction, target);
}
- static void linkBX(uint16_t* instruction, void* target)
+ static void linkBX(uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
- ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT_UNUSED(instruction, !(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(writeTarget) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
- instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
- instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
- instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
- instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
- instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ uint16_t instructions[5];
+ instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+
+ performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
}
- void linkConditionalBX(Condition cond, uint16_t* instruction, void* target)
+ static void linkConditionalBX(Condition cond, uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
- linkBX(instruction, target);
- instruction[-6] = ifThenElse(cond, true, true) | OP_IT;
+ linkBX(writeTarget, instruction, target);
+ uint16_t newInstruction = ifThenElse(cond, true, true) | OP_IT;
+ performJITMemcpy(writeTarget - 6, &newInstruction, sizeof(uint16_t));
}
- static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ static void linkJumpAbsolute(uint16_t* writeTarget, const uint16_t* instruction, void* target)
{
// FIXME: this should be up in the MacroAssembler layer. :-(
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
@@ -2691,26 +2773,31 @@ private:
ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
|| (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
-
+
if (canBeJumpT4(instruction, target)) {
// There may be a better way to fix this, but right now put the NOPs first, since in the
// case of a conditional branch this will be coming after an ITTT predicating *three*
// instructions! Looking backwards to modify the ITTT to an IT is not easy, due to
// variable width encoding - the previous instruction might *look* like an ITTT but
// actually be the second half of a 2-word op.
- instruction[-5] = OP_NOP_T1;
- instruction[-4] = OP_NOP_T2a;
- instruction[-3] = OP_NOP_T2b;
- linkJumpT4(instruction, target);
+ uint16_t instructions[3];
+ instructions[0] = OP_NOP_T1;
+ instructions[1] = OP_NOP_T2a;
+ instructions[2] = OP_NOP_T2b;
+ performJITMemcpy(writeTarget - 5, instructions, 3 * sizeof(uint16_t));
+ linkJumpT4(writeTarget, instruction, target);
} else {
const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
- instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
- instruction[-4] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
- instruction[-3] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
- instruction[-2] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
- instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+
+ uint16_t instructions[5];
+ instructions[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
+ instructions[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, lo16);
+ instructions[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16);
+ instructions[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(JUMP_TEMPORARY_REGISTER, hi16);
+ instructions[4] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3);
+ performJITMemcpy(writeTarget - 5, instructions, 5 * sizeof(uint16_t));
}
}
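// Illustrative sketch (not part of this patch): linkJumpAbsolute() thus
// rewrites the 5-halfword slot into one of two shapes, depending on whether
// the target is within B.W range:
//
//     NOP ; NOP.W ; B.W target                    // near target: pad first, branch last
//     MOVW ip, #lo16 ; MOVT ip, #hi16 ; BX ip     // far target: materialize the address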
@@ -2753,6 +2840,11 @@ private:
m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3);
}
+ ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm)
+ {
+ m_buffer.putShort(op | imm);
+ }
+
ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm)
{
m_buffer.putShort(op | imm);
@@ -2791,6 +2883,12 @@ private:
m_buffer.putShort(op2);
}
+ ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
+ {
+ m_buffer.putShort(op1);
+ m_buffer.putShort(imm);
+ }
+
ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
{
ARMThumbImmediate newImm = imm;
@@ -2851,7 +2949,6 @@ private:
unsigned debugOffset() { return m_buffer.debugOffset(); }
- private:
AssemblerBuffer m_buffer;
} m_formatter;
@@ -2863,5 +2960,3 @@ private:
} // namespace JSC
#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
-
-#endif // ARMAssembler_h