author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/assembler/MacroAssemblerARM64.h')
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssemblerARM64.h | 1545
 1 file changed, 1383 insertions(+), 162 deletions(-)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
index a128923fc..f4cdd36c0 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,20 +23,32 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MacroAssemblerARM64_h
-#define MacroAssemblerARM64_h
+#pragma once
#if ENABLE(ASSEMBLER)
#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>
+#include <wtf/Optional.h>
namespace JSC {
-class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
+class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler, MacroAssemblerARM64> {
+public:
+ static const unsigned numGPRs = 32;
+ static const unsigned numFPRs = 32;
+
static const RegisterID dataTempRegister = ARM64Registers::ip0;
static const RegisterID memoryTempRegister = ARM64Registers::ip1;
+
+ RegisterID scratchRegister()
+ {
+ RELEASE_ASSERT(m_allowScratchRegister);
+ return getCachedDataTempRegisterIDAndInvalidate();
+ }
+
+private:
static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
static const intptr_t maskHalfWord0 = 0xffffl;
@@ -64,13 +76,11 @@ public:
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
void* unlinkedCode() { return m_assembler.unlinkedCode(); }
- bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
- JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
- JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
- void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
- int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
- void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
- int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
+ static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+ static void link(LinkRecord& record, uint8_t* from, const uint8_t* fromInstruction, uint8_t* to) { return ARM64Assembler::link(record, from, fromInstruction, to); }
static const Scale ScalePtr = TimesEight;
@@ -130,10 +140,15 @@ public:
// FIXME: Get reasonable implementations for these
static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
- static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
// Integer operations:
+ void add32(RegisterID a, RegisterID b, RegisterID dest)
+ {
+ ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
+ m_assembler.add<32>(dest, a, b);
+ }
+
void add32(RegisterID src, RegisterID dest)
{
m_assembler.add<32>(dest, dest, src);
@@ -150,7 +165,10 @@ public:
m_assembler.add<32>(dest, src, UInt12(imm.m_value));
else if (isUInt12(-imm.m_value))
m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
- else {
+ else if (src != dest) {
+ move(imm, dest);
+ add32(src, dest);
+ } else {
move(imm, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.add<32>(dest, src, dataTempRegister);
}
@@ -199,9 +217,20 @@ public:
add32(dataTempRegister, dest);
}
+ void add64(RegisterID a, RegisterID b, RegisterID dest)
+ {
+ ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
+ if (b == ARM64Registers::sp)
+ std::swap(a, b);
+ m_assembler.add<64>(dest, a, b);
+ }
+
void add64(RegisterID src, RegisterID dest)
{
- m_assembler.add<64>(dest, dest, src);
+ if (src == ARM64Registers::sp)
+ m_assembler.add<64>(dest, src, dest);
+ else
+ m_assembler.add<64>(dest, dest, src);
}
void add64(TrustedImm32 imm, RegisterID dest)
@@ -288,6 +317,11 @@ public:
store64(dataTempRegister, address.m_ptr);
}
+ void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
void add64(Address src, RegisterID dest)
{
load64(src, getCachedDataTempRegisterIDAndInvalidate());
@@ -334,6 +368,24 @@ public:
and32(dataTempRegister, dest);
}
+ void and64(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ m_assembler.and_<64>(dest, src1, src2);
+ }
+
+ void and64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.and_<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.and_<64>(dest, src, dataTempRegister);
+ }
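
Most immediate-operand helpers in this patch share one shape: try to encode the constant as an ARM64 logical immediate and use it directly, otherwise materialize it in the data scratch register. A rough, self-contained model of the encodability test (not the encoder WebKit actually uses):

    #include <cassert>
    #include <cstdint>

    static bool isMask(uint64_t v) { return (v & (v + 1)) == 0; }               // 0...01...1
    static bool isShiftedMask(uint64_t v) { return v && isMask((v - 1) | v); }  // 0..01..10..0

    // Rough model of LogicalImmediate::create64(imm).isValid(): a value is
    // encodable when it is a rotation of a single run of ones, repeated
    // across 2/4/8/16/32/64-bit elements.
    static bool isEncodableLogicalImmediate64(uint64_t x)
    {
        if (x == 0 || x == ~0ULL)
            return false; // all-zeros and all-ones have no encoding
        int size = 64;
        while (size > 2) { // shrink to the smallest repeating element
            int half = size / 2;
            uint64_t mask = (1ULL << half) - 1;
            if ((x & mask) != ((x >> half) & mask))
                break;
            size = half;
            x &= mask;
        }
        uint64_t emask = (size == 64) ? ~0ULL : ((1ULL << size) - 1);
        // The ones must form one circularly contiguous run within the element.
        return isShiftedMask(x) || isShiftedMask(~x & emask);
    }

    int main()
    {
        assert(isEncodableLogicalImmediate64(0x00FF00FF00FF00FFULL)); // repeated run
        assert(isEncodableLogicalImmediate64(0xC000000000000003ULL)); // wrapped run
        assert(!isEncodableLogicalImmediate64(0x5ULL)); // two runs: scratch fallback
        return 0;
    }
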
+
void and64(RegisterID src, RegisterID dest)
{
m_assembler.and_<64>(dest, dest, src);
@@ -370,6 +422,31 @@ public:
m_assembler.clz<32>(dest, src);
}
+ void countLeadingZeros64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.clz<64>(dest, src);
+ }
+
+ void countTrailingZeros32(RegisterID src, RegisterID dest)
+ {
+ // ARM does not have a count-trailing-zeros instruction, only a count-leading-zeros.
+ m_assembler.rbit<32>(dest, src);
+ m_assembler.clz<32>(dest, dest);
+ }
+
+ void countTrailingZeros64(RegisterID src, RegisterID dest)
+ {
+ // ARM does not have a count-trailing-zeros instruction, only a count-leading-zeros.
+ m_assembler.rbit<64>(dest, src);
+ m_assembler.clz<64>(dest, dest);
+ }
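
The rbit+clz pair above is the standard AArch64 idiom for counting trailing zeros. A minimal C++ model of the lowering (hypothetical helper names, not part of this patch):

    #include <cassert>
    #include <cstdint>

    // Reversing the bits turns trailing zeros into leading zeros,
    // which clz can then count.
    static uint32_t rbit32(uint32_t x)
    {
        uint32_t r = 0;
        for (int i = 0; i < 32; ++i)
            r |= ((x >> i) & 1u) << (31 - i);
        return r;
    }

    static int clz32(uint32_t x) // AArch64 CLZ: defined as 32 when x == 0
    {
        int n = 0;
        for (uint32_t bit = 0x80000000u; bit && !(x & bit); bit >>= 1)
            ++n;
        return n;
    }

    int main()
    {
        assert(clz32(rbit32(0x8)) == 3); // countTrailingZeros32(0x8) == 3
        assert(clz32(rbit32(0)) == 32);  // zero input yields the full width
        return 0;
    }
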
+
+ // Only used for testing purposes.
+ void illegalInstruction()
+ {
+ m_assembler.illegalInstruction();
+ }
+
void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
{
m_assembler.lsl<32>(dest, src, shiftAmount);
@@ -409,21 +486,81 @@ public:
{
lshift64(dest, imm, dest);
}
+
+ void mul32(RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.mul<32>(dest, left, right);
+ }
void mul32(RegisterID src, RegisterID dest)
{
m_assembler.mul<32>(dest, dest, src);
}
-
+
+ void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ {
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.mul<32>(dest, src, dataTempRegister);
+ }
+
void mul64(RegisterID src, RegisterID dest)
{
m_assembler.mul<64>(dest, dest, src);
}
- void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+ void mul64(RegisterID left, RegisterID right, RegisterID dest)
{
- move(imm, getCachedDataTempRegisterIDAndInvalidate());
- m_assembler.mul<32>(dest, src, dataTempRegister);
+ m_assembler.mul<64>(dest, left, right);
+ }
+
+ void multiplyAdd32(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
+ {
+ m_assembler.madd<32>(dest, mulLeft, mulRight, summand);
+ }
+
+ void multiplySub32(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
+ {
+ m_assembler.msub<32>(dest, mulLeft, mulRight, minuend);
+ }
+
+ void multiplyNeg32(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
+ {
+ m_assembler.msub<32>(dest, mulLeft, mulRight, ARM64Registers::zr);
+ }
+
+ void multiplyAdd64(RegisterID mulLeft, RegisterID mulRight, RegisterID summand, RegisterID dest)
+ {
+ m_assembler.madd<64>(dest, mulLeft, mulRight, summand);
+ }
+
+ void multiplySub64(RegisterID mulLeft, RegisterID mulRight, RegisterID minuend, RegisterID dest)
+ {
+ m_assembler.msub<64>(dest, mulLeft, mulRight, minuend);
+ }
+
+ void multiplyNeg64(RegisterID mulLeft, RegisterID mulRight, RegisterID dest)
+ {
+ m_assembler.msub<64>(dest, mulLeft, mulRight, ARM64Registers::zr);
+ }
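
The multiplyNeg32/multiplyNeg64 helpers above lean on msub's addend operand: with the zero register as the addend, msub produces a negated product in a single instruction. A small model, with madd64/msub64 as stand-in names:

    #include <cassert>
    #include <cstdint>

    // Models MADD/MSUB: madd computes acc + a * b, msub computes acc - a * b.
    // Passing zr (always zero) as acc makes msub yield -(a * b).
    static int64_t madd64(int64_t a, int64_t b, int64_t acc) { return acc + a * b; }
    static int64_t msub64(int64_t a, int64_t b, int64_t acc) { return acc - a * b; }

    int main()
    {
        assert(msub64(6, 7, 0) == -42); // multiplyNeg64: msub dest, a, b, zr
        assert(madd64(6, 7, 5) == 47);  // multiplyAdd64: madd dest, a, b, acc
        return 0;
    }
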
+
+ void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
+ {
+ m_assembler.sdiv<32>(dest, dividend, divisor);
+ }
+
+ void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
+ {
+ m_assembler.sdiv<64>(dest, dividend, divisor);
+ }
+
+ void uDiv32(RegisterID dividend, RegisterID divisor, RegisterID dest)
+ {
+ m_assembler.udiv<32>(dest, dividend, divisor);
+ }
+
+ void uDiv64(RegisterID dividend, RegisterID divisor, RegisterID dest)
+ {
+ m_assembler.udiv<64>(dest, dividend, divisor);
}
void neg32(RegisterID dest)
@@ -460,6 +597,7 @@ public:
return;
}
+ ASSERT(src != dataTempRegister);
move(imm, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.orr<32>(dest, src, dataTempRegister);
}
@@ -471,6 +609,27 @@ public:
store32(dataTempRegister, address.m_ptr);
}
+ void or32(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
+ if (logicalImm.isValid()) {
+ load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
+ store32(dataTempRegister, address.m_ptr);
+ } else {
+ load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+ or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
+ store32(dataTempRegister, address.m_ptr);
+ }
+ }
+
+ void or32(TrustedImm32 imm, Address address)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+ or32(imm, dataTempRegister, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
void or64(RegisterID src, RegisterID dest)
{
or64(dest, src, dest);
@@ -491,14 +650,27 @@ public:
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
if (logicalImm.isValid()) {
- m_assembler.orr<64>(dest, dest, logicalImm);
+ m_assembler.orr<64>(dest, src, logicalImm);
return;
}
signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.orr<64>(dest, src, dataTempRegister);
}
-
+
+ void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.orr<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.orr<64>(dest, src, dataTempRegister);
+ }
+
void or64(TrustedImm64 imm, RegisterID dest)
{
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
@@ -512,9 +684,34 @@ public:
m_assembler.orr<64>(dest, dest, dataTempRegister);
}
+ void rotateRight32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.ror<32>(dest, src, imm.m_value & 31);
+ }
+
+ void rotateRight32(TrustedImm32 imm, RegisterID srcDst)
+ {
+ rotateRight32(srcDst, imm, srcDst);
+ }
+
+ void rotateRight32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.ror<32>(dest, src, shiftAmount);
+ }
+
+ void rotateRight64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.ror<64>(dest, src, imm.m_value & 63);
+ }
+
void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
{
- m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
+ rotateRight64(srcDst, imm, srcDst);
+ }
+
+ void rotateRight64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.ror<64>(dest, src, shiftAmount);
}
void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
@@ -539,12 +736,12 @@ public:
void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
{
- m_assembler.lsr<64>(dest, src, shiftAmount);
+ m_assembler.asr<64>(dest, src, shiftAmount);
}
void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
- m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
+ m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
}
void rshift64(RegisterID shiftAmount, RegisterID dest)
@@ -562,6 +759,11 @@ public:
m_assembler.sub<32>(dest, dest, src);
}
+ void sub32(RegisterID left, RegisterID right, RegisterID dest)
+ {
+ m_assembler.sub<32>(dest, left, right);
+ }
+
void sub32(TrustedImm32 imm, RegisterID dest)
{
if (isUInt12(imm.m_value)) {
@@ -624,6 +826,11 @@ public:
{
m_assembler.sub<64>(dest, dest, src);
}
+
+ void sub64(RegisterID a, RegisterID b, RegisterID dest)
+ {
+ m_assembler.sub<64>(dest, a, b);
+ }
void sub64(TrustedImm32 imm, RegisterID dest)
{
@@ -677,6 +884,26 @@ public:
urshift32(dest, imm, dest);
}
+ void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.lsr<64>(dest, src, shiftAmount);
+ }
+
+ void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
+ }
+
+ void urshift64(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift64(dest, shiftAmount, dest);
+ }
+
+ void urshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ urshift64(dest, imm, dest);
+ }
+
void xor32(RegisterID src, RegisterID dest)
{
xor32(dest, src, dest);
@@ -700,7 +927,7 @@ public:
LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
if (logicalImm.isValid()) {
- m_assembler.eor<32>(dest, dest, logicalImm);
+ m_assembler.eor<32>(dest, src, logicalImm);
return;
}
@@ -731,6 +958,23 @@ public:
xor64(imm, dest, dest);
}
+ void xor64(TrustedImm64 imm, RegisterID src, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn<64>(dest, src);
+ else {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(imm.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.eor<64>(dest, src, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.eor<64>(dest, src, dataTempRegister);
+ }
+ }
+
void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
if (imm.m_value == -1)
@@ -739,7 +983,7 @@ public:
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
if (logicalImm.isValid()) {
- m_assembler.eor<64>(dest, dest, logicalImm);
+ m_assembler.eor<64>(dest, src, logicalImm);
return;
}
@@ -748,6 +992,20 @@ public:
}
}
+ void not32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mvn<32>(dest, src);
+ }
+
+ void not64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mvn<64>(dest, src);
+ }
+
+ void not64(RegisterID srcDst)
+ {
+ m_assembler.mvn<64>(srcDst, srcDst);
+ }
// Memory access operations:
@@ -777,6 +1035,11 @@ public:
load<64>(address, dest);
}
+ void load64(RegisterID src, PostIndex simm, RegisterID dest)
+ {
+ m_assembler.ldr<64>(dest, src, simm);
+ }
+
DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
{
DataLabel32 label(this);
@@ -793,6 +1056,38 @@ public:
return label;
}
+ void loadPair64(RegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ loadPair64(src, TrustedImm32(0), dest1, dest2);
+ }
+
+ void loadPair64(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.ldp<64>(dest1, dest2, src, offset.m_value);
+ }
+
+ void loadPair64WithNonTemporalAccess(RegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ loadPair64WithNonTemporalAccess(src, TrustedImm32(0), dest1, dest2);
+ }
+
+ void loadPair64WithNonTemporalAccess(RegisterID src, TrustedImm32 offset, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.ldnp<64>(dest1, dest2, src, offset.m_value);
+ }
+
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), dataTempRegister);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm64(misc), memoryTempRegister);
+ abortWithReason(reason);
+ }
+
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result(this);
@@ -874,16 +1169,35 @@ public:
load16(address, dest);
}
- void load16Signed(BaseIndex address, RegisterID dest)
+ void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+ {
+ if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
{
if (!address.offset && (!address.scale || address.scale == 1)) {
- m_assembler.ldrsh<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
return;
}
signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
- m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);
+ m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void zeroExtend16To32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.uxth<32>(dest, src);
+ }
+
+ void signExtend16To32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sxth<32>(dest, src);
}
void load8(ImplicitAddress address, RegisterID dest)
@@ -909,22 +1223,54 @@ public:
void load8(const void* address, RegisterID dest)
{
- moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+ moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
if (dest == memoryTempRegister)
- m_cachedMemoryTempRegister.invalidate();
+ cachedMemoryTempRegister().invalidate();
}
- void load8Signed(BaseIndex address, RegisterID dest)
+ void load8(RegisterID src, PostIndex simm, RegisterID dest)
+ {
+ m_assembler.ldrb(dest, src, simm);
+ }
+
+ void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
+ {
+ if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
{
if (!address.offset && !address.scale) {
- m_assembler.ldrsb<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
+ m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
return;
}
signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
- m_assembler.ldrsb<64>(dest, address.base, memoryTempRegister);
+ m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
+ }
+
+ void load8SignedExtendTo32(const void* address, RegisterID dest)
+ {
+ moveToCachedReg(TrustedImmPtr(address), cachedMemoryTempRegister());
+ m_assembler.ldrsb<32>(dest, memoryTempRegister, ARM64Registers::zr);
+ if (dest == memoryTempRegister)
+ cachedMemoryTempRegister().invalidate();
+ }
+
+ void zeroExtend8To32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.uxtb<32>(dest, src);
+ }
+
+ void signExtend8To32(RegisterID src, RegisterID dest)
+ {
+ m_assembler.sxtb<32>(dest, src);
}
void store64(RegisterID src, ImplicitAddress address)
@@ -953,6 +1299,11 @@ public:
store<64>(src, address);
}
+ void store64(TrustedImm32 imm, ImplicitAddress address)
+ {
+ store64(TrustedImm64(imm.m_value), address);
+ }
+
void store64(TrustedImm64 imm, ImplicitAddress address)
{
if (!imm.m_value) {
@@ -960,7 +1311,7 @@ public:
return;
}
- moveToCachedReg(imm, m_dataMemoryTempRegister);
+ moveToCachedReg(imm, dataMemoryTempRegister());
store64(dataTempRegister, address);
}
@@ -971,9 +1322,14 @@ public:
return;
}
- moveToCachedReg(imm, m_dataMemoryTempRegister);
+ moveToCachedReg(imm, dataMemoryTempRegister());
store64(dataTempRegister, address);
}
+
+ void store64(RegisterID src, RegisterID dest, PostIndex simm)
+ {
+ m_assembler.str<64>(src, dest, simm);
+ }
DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
{
@@ -983,6 +1339,26 @@ public:
return label;
}
+ void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ storePair64(src1, src2, dest, TrustedImm32(0));
+ }
+
+ void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
+ {
+ m_assembler.stp<64>(src1, src2, dest, offset.m_value);
+ }
+
+ void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ storePair64WithNonTemporalAccess(src1, src2, dest, TrustedImm32(0));
+ }
+
+ void storePair64WithNonTemporalAccess(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
+ {
+ m_assembler.stnp<64>(src1, src2, dest, offset.m_value);
+ }
+
void store32(RegisterID src, ImplicitAddress address)
{
if (tryStoreWithOffset<32>(src, address.base, address.offset))
@@ -1016,7 +1392,7 @@ public:
return;
}
- moveToCachedReg(imm, m_dataMemoryTempRegister);
+ moveToCachedReg(imm, dataMemoryTempRegister());
store32(dataTempRegister, address);
}
@@ -1027,7 +1403,7 @@ public:
return;
}
- moveToCachedReg(imm, m_dataMemoryTempRegister);
+ moveToCachedReg(imm, dataMemoryTempRegister());
store32(dataTempRegister, address);
}
@@ -1038,10 +1414,20 @@ public:
return;
}
- moveToCachedReg(imm, m_dataMemoryTempRegister);
+ moveToCachedReg(imm, dataMemoryTempRegister());
store32(dataTempRegister, address);
}
+ void storeZero32(ImplicitAddress address)
+ {
+ store32(ARM64Registers::zr, address);
+ }
+
+ void storeZero32(BaseIndex address)
+ {
+ store32(ARM64Registers::zr, address);
+ }
+
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
DataLabel32 label(this);
@@ -1050,6 +1436,15 @@ public:
return label;
}
+ void store16(RegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<16>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.strh(src, address.base, memoryTempRegister);
+ }
+
void store16(RegisterID src, BaseIndex address)
{
if (!address.offset && (!address.scale || address.scale == 1)) {
@@ -1080,17 +1475,43 @@ public:
m_assembler.strb(src, memoryTempRegister, 0);
}
+ void store8(RegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<8>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.strb(src, address.base, memoryTempRegister);
+ }
+
void store8(TrustedImm32 imm, void* address)
{
- if (!imm.m_value) {
+ TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+ if (!imm8.m_value) {
store8(ARM64Registers::zr, address);
return;
}
- move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ move(imm8, getCachedDataTempRegisterIDAndInvalidate());
store8(dataTempRegister, address);
}
+ void store8(TrustedImm32 imm, ImplicitAddress address)
+ {
+ TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+ if (!imm8.m_value) {
+ store8(ARM64Registers::zr, address);
+ return;
+ }
+
+ move(imm8, getCachedDataTempRegisterIDAndInvalidate());
+ store8(dataTempRegister, address);
+ }
+
+ void store8(RegisterID src, RegisterID dest, PostIndex simm)
+ {
+ m_assembler.strb(src, dest, simm);
+ }
// Floating-point operations:
@@ -1098,6 +1519,7 @@ public:
static bool supportsFloatingPointTruncate() { return true; }
static bool supportsFloatingPointSqrt() { return true; }
static bool supportsFloatingPointAbs() { return true; }
+ static bool supportsFloatingPointRounding() { return true; }
enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
@@ -1106,6 +1528,11 @@ public:
m_assembler.fabs<64>(dest, src);
}
+ void absFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fabs<32>(dest, src);
+ }
+
void addDouble(FPRegisterID src, FPRegisterID dest)
{
addDouble(dest, src, dest);
@@ -1124,20 +1551,56 @@ public:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(address.m_ptr, fpTempRegister);
+ loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
addDouble(fpTempRegister, dest);
}
+ void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fadd<32>(dest, op1, op2);
+ }
+
void ceilDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.frintp<64>(dest, src);
}
+ void ceilFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintp<32>(dest, src);
+ }
+
void floorDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.frintm<64>(dest, src);
}
+ void floorFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintm<32>(dest, src);
+ }
+
+ void roundTowardNearestIntDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintn<64>(dest, src);
+ }
+
+ void roundTowardNearestIntFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintn<32>(dest, src);
+ }
+
+ void roundTowardZeroDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintz<64>(dest, src);
+ }
+
+ void roundTowardZeroFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.frintz<32>(dest, src);
+ }
+
+
// Converts 'src' to an integer, and places the result in 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
@@ -1150,32 +1613,26 @@ public:
m_assembler.scvtf<64, 32>(fpTempRegister, dest);
failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
- // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
- if (negZeroCheck)
- failureCases.append(branchTest32(Zero, dest));
+ // Test for negative zero.
+ if (negZeroCheck) {
+ Jump valueIsNonZero = branchTest32(NonZero, dest);
+ RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
+ m_assembler.fmov<64>(scratch, src);
+ failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
+ valueIsNonZero.link(this);
+ }
}
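
The rewritten negZeroCheck no longer treats any zero integer result as a failure; it confirms -0.0 specifically by moving the double's raw bits into a scratch GPR and branching on bit 63. A short model of that sign-bit test:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // -0.0 truncates to integer 0, so a zero result alone is ambiguous.
    // Bit 63 of the raw double is the sign bit: set for -0.0, clear for +0.0.
    static bool signBitSet(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits); // the fmov<64> scratch, src
        return bits >> 63;                   // the test-bit-and-branch on bit 63
    }

    int main()
    {
        assert(signBitSet(-0.0) && !signBitSet(0.0)); // only -0.0 fails the check
        return 0;
    }
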
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
m_assembler.fcmp<64>(left, right);
+ return jumpAfterFloatingPointCompare(cond);
+ }
- if (cond == DoubleNotEqual) {
- // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
- Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
- Jump result = makeBranch(ARM64Assembler::ConditionNE);
- unordered.link(this);
- return result;
- }
- if (cond == DoubleEqualOrUnordered) {
- Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
- Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
- unordered.link(this);
- // We get here if either unordered or equal.
- Jump result = jump();
- notEqual.link(this);
- return result;
- }
- return makeBranch(cond);
+ Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
+ {
+ m_assembler.fcmp<32>(left, right);
+ return jumpAfterFloatingPointCompare(cond);
}
Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
@@ -1204,7 +1661,7 @@ public:
// Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
zeroExtend32ToPtr(dataTempRegister, dest);
- // Check thlow 32-bits sign extend to be equal to the full value.
+ // Check that the low 32 bits sign-extend to the full 64-bit value.
m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
}
@@ -1241,12 +1698,32 @@ public:
load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
convertInt32ToDouble(dataTempRegister, dest);
}
+
+ void convertInt32ToFloat(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.scvtf<32, 32>(dest, src);
+ }
void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.scvtf<64, 64>(dest, src);
}
-
+
+ void convertInt64ToFloat(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.scvtf<32, 64>(dest, src);
+ }
+
+ void convertUInt64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ucvtf<64, 64>(dest, src);
+ }
+
+ void convertUInt64ToFloat(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.ucvtf<32, 64>(dest, src);
+ }
+
void divDouble(FPRegisterID src, FPRegisterID dest)
{
divDouble(dest, src, dest);
@@ -1257,6 +1734,11 @@ public:
m_assembler.fdiv<64>(dest, op1, op2);
}
+ void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fdiv<32>(dest, op1, op2);
+ }
+
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
if (tryLoadWithOffset<64>(dest, address.base, address.offset))
@@ -1278,12 +1760,21 @@ public:
m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
}
- void loadDouble(const void* address, FPRegisterID dest)
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
{
- moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+ moveToCachedReg(address, cachedMemoryTempRegister());
m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
}
+ void loadFloat(ImplicitAddress address, FPRegisterID dest)
+ {
+ if (tryLoadWithOffset<32>(dest, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
+ }
+
void loadFloat(BaseIndex address, FPRegisterID dest)
{
if (!address.offset && (!address.scale || address.scale == 2)) {
@@ -1301,16 +1792,130 @@ public:
m_assembler.fmov<64>(dest, src);
}
+ void moveZeroToDouble(FPRegisterID reg)
+ {
+ m_assembler.fmov<64>(reg, ARM64Registers::zr);
+ }
+
void moveDoubleTo64(FPRegisterID src, RegisterID dest)
{
m_assembler.fmov<64>(dest, src);
}
+ void moveFloatTo32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fmov<32>(dest, src);
+ }
+
void move64ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.fmov<64>(dest, src);
}
+ void move32ToFloat(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fmov<32>(dest, src);
+ }
+
+ void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+ {
+ m_assembler.fcmp<64>(left, right);
+ moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
+ }
+
+ void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ m_assembler.fcmp<64>(left, right);
+ moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+ }
+
+ void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
+ {
+ m_assembler.fcmp<32>(left, right);
+ moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
+ }
+
+ void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ m_assembler.fcmp<32>(left, right);
+ moveConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+ }
+
+ template<int datasize>
+ void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
+ {
+ if (cond == DoubleNotEqual) {
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ return;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ // If the compare is unordered, src is copied to dest and the
+ // next csel has all arguments equal to src.
+ // If the compare is ordered, dest is unchanged and EQ decides
+ // what value to set.
+ m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS);
+ m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ);
+ return;
+ }
+ m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
+ }
+
+ template<int datasize>
+ void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ if (cond == DoubleNotEqual) {
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ return;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ // If the compare is unordered, thenCase is copied to elseCase and the
+ // next csel has all arguments equal to thenCase.
+ // If the compare is ordered, dest is unchanged and EQ decides
+ // what value to set.
+ m_assembler.csel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
+ m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
+ return;
+ }
+ m_assembler.csel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
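
Both DoubleEqualOrUnordered select sequences above fold the unordered case into the else operand before the final EQ select. A small model of the two-csel logic, assuming fcmp leaves V set (and Z clear) on an unordered compare and Z set on equality:

    #include <cassert>

    // If unordered, the first csel copies the then-value into the else slot,
    // so the final EQ select returns it regardless of the Z flag.
    static int selectEqualOrUnordered(bool unordered, bool equal, int thenCase, int elseCase)
    {
        int elseSlot = unordered ? thenCase : elseCase; // csel ..., VS
        return equal ? thenCase : elseSlot;             // csel ..., EQ
    }

    int main()
    {
        assert(selectEqualOrUnordered(true, false, 1, 2) == 1);  // unordered: then
        assert(selectEqualOrUnordered(false, true, 1, 2) == 1);  // equal: then
        assert(selectEqualOrUnordered(false, false, 1, 2) == 2); // ordered, unequal
        return 0;
    }
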
+
+ template<int datasize>
+ void moveDoubleConditionallyAfterFloatingPointCompare(DoubleCondition cond, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ if (cond == DoubleNotEqual) {
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ return;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ // If the compare is unordered, thenCase is copied to elseCase and the
+ // next csel has all arguments equal to thenCase.
+ // If the compare is ordered, dest is unchanged and EQ decides
+ // what value to set.
+ m_assembler.fcsel<datasize>(elseCase, thenCase, elseCase, ARM64Assembler::ConditionVS);
+ m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Assembler::ConditionEQ);
+ return;
+ }
+ m_assembler.fcsel<datasize>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ m_assembler.fcmp<64>(left, right);
+ moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+ }
+
+ void moveDoubleConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ m_assembler.fcmp<32>(left, right);
+ moveDoubleConditionallyAfterFloatingPointCompare<64>(cond, thenCase, elseCase, dest);
+ }
+
void mulDouble(FPRegisterID src, FPRegisterID dest)
{
mulDouble(dest, src, dest);
@@ -1327,16 +1932,51 @@ public:
mulDouble(fpTempRegister, dest);
}
+ void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fmul<32>(dest, op1, op2);
+ }
+
+ void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vand<64>(dest, op1, op2);
+ }
+
+ void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ andDouble(op1, op2, dest);
+ }
+
+ void orDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vorr<64>(dest, op1, op2);
+ }
+
+ void orFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ orDouble(op1, op2, dest);
+ }
+
void negateDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.fneg<64>(dest, src);
}
+ void negateFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fneg<32>(dest, src);
+ }
+
void sqrtDouble(FPRegisterID src, FPRegisterID dest)
{
m_assembler.fsqrt<64>(dest, src);
}
+ void sqrtFloat(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.fsqrt<32>(dest, src);
+ }
+
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
if (tryStoreWithOffset<64>(src, address.base, address.offset))
@@ -1346,9 +1986,9 @@ public:
m_assembler.str<64>(src, address.base, memoryTempRegister);
}
- void storeDouble(FPRegisterID src, const void* address)
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
{
- moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+ moveToCachedReg(address, cachedMemoryTempRegister());
m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
}
@@ -1363,6 +2003,15 @@ public:
m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
m_assembler.str<64>(src, address.base, memoryTempRegister);
}
+
+ void storeFloat(FPRegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<32>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<32>(src, address.base, memoryTempRegister);
+ }
void storeFloat(FPRegisterID src, BaseIndex address)
{
@@ -1392,6 +2041,11 @@ public:
subDouble(fpTempRegister, dest);
}
+ void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.fsub<32>(dest, op1, op2);
+ }
+
// Result is undefined if the value is outside of the integer range.
void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
{
@@ -1403,6 +2057,45 @@ public:
m_assembler.fcvtzu<32, 64>(dest, src);
}
+ void truncateDoubleToInt64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzs<64, 64>(dest, src);
+ }
+
+ void truncateDoubleToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
+ {
+ truncateDoubleToUint64(src, dest);
+ }
+
+ void truncateDoubleToUint64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzu<64, 64>(dest, src);
+ }
+
+ void truncateFloatToInt32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzs<32, 32>(dest, src);
+ }
+
+ void truncateFloatToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzu<32, 32>(dest, src);
+ }
+
+ void truncateFloatToInt64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzs<64, 32>(dest, src);
+ }
+
+ void truncateFloatToUint64(FPRegisterID src, RegisterID dest, FPRegisterID, FPRegisterID)
+ {
+ truncateFloatToUint64(src, dest);
+ }
+
+ void truncateFloatToUint64(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.fcvtzu<64, 32>(dest, src);
+ }
// Stack manipulation operations:
//
@@ -1437,6 +2130,16 @@ public:
CRASH();
}
+ void popPair(RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
+ }
+
+ void pushPair(RegisterID src1, RegisterID src2)
+ {
+ m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
+ }
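
pushPair and popPair move two registers in one stp/ldp with pre-/post-indexed addressing, keeping the stack pointer 16-byte aligned as AArch64 requires. A sketch of the two addressing modes, with the stack modeled as an array of 8-byte slots:

    #include <cassert>
    #include <cstdint>

    struct Stack { uint64_t mem[4] = {}; int sp = 4; }; // sp counts 8-byte slots

    static void pushPair(Stack& s, uint64_t a, uint64_t b)
    {
        s.sp -= 2;            // PairPreIndex(-16): adjust sp first...
        s.mem[s.sp] = a;      // ...then store both registers at [sp]
        s.mem[s.sp + 1] = b;
    }

    static void popPair(Stack& s, uint64_t& a, uint64_t& b)
    {
        a = s.mem[s.sp];      // PairPostIndex(16): load both registers...
        b = s.mem[s.sp + 1];
        s.sp += 2;            // ...then adjust sp
    }

    int main()
    {
        Stack s;
        uint64_t a = 0, b = 0;
        pushPair(s, 1, 2);
        popPair(s, a, b);
        assert(a == 1 && b == 2 && s.sp == 4); // balanced pair restores sp
        return 0;
    }
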
+
void popToRestore(RegisterID dest)
{
m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
@@ -1446,6 +2149,15 @@ public:
{
m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
}
+
+ void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+ {
+ RegisterID reg = dataTempRegister;
+ pushPair(reg, reg);
+ move(imm, reg);
+ store64(reg, stackPointerRegister);
+ load64(Address(stackPointerRegister, 8), reg);
+ }
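
pushToSaveImmediateWithoutTouchingRegisters is the subtle one: it pushes the scratch register twice, overwrites the lower slot with the immediate, then reloads the scratch from the upper slot, so no register is clobbered from the caller's point of view. A step-by-step simulation of that trace:

    #include <cassert>
    #include <cstdint>

    // Slot 0 stands in for [sp], slot 1 for [sp + 8] after the push.
    int main()
    {
        uint64_t reg = 0xDEADBEEF;      // live value that must survive
        const uint64_t savedReg = reg;
        uint64_t stack[2];

        stack[0] = reg; stack[1] = reg; // pushPair(reg, reg)
        reg = 42;                       // move(imm, reg)
        stack[0] = reg;                 // store64(reg, [sp]): the pushed imm
        reg = stack[1];                 // load64([sp + 8], reg): restore reg

        assert(reg == savedReg);        // reg unchanged from caller's view
        assert(stack[0] == 42);         // immediate now sits on the stack
        return 0;
    }
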
void pushToSave(Address address)
{
@@ -1471,6 +2183,7 @@ public:
storeDouble(src, stackPointerRegister);
}
+ static ptrdiff_t pushToSaveByteOffset() { return 16; }
// Register move operations:
@@ -1501,6 +2214,11 @@ public:
move(reg2, reg1);
move(dataTempRegister, reg2);
}
+
+ void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
+ {
+ move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
+ }
void signExtend32ToPtr(RegisterID src, RegisterID dest)
{
@@ -1512,6 +2230,169 @@ public:
m_assembler.uxtw(dest, src);
}
+ void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+ {
+ m_assembler.cmp<32>(left, right);
+ m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
+ }
+
+ void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ m_assembler.cmp<32>(left, right);
+ m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+ moveConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+ return;
+ }
+ }
+
+ if (isUInt12(right.m_value))
+ m_assembler.cmp<32>(left, UInt12(right.m_value));
+ else if (isUInt12(-right.m_value))
+ m_assembler.cmn<32>(left, UInt12(-right.m_value));
+ else {
+ moveToCachedReg(right, dataMemoryTempRegister());
+ m_assembler.cmp<32>(left, dataTempRegister);
+ }
+ m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
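
The cmp/cmn split above recurs throughout the branch and select helpers: ARM64 compare immediates are 12-bit unsigned, so a small negative constant is compared via cmn (compare-negated) instead. A tiny model of the fold:

    #include <cassert>
    #include <cstdint>

    // When 'right' does not fit the 12-bit immediate field but '-right'
    // does, the code emits CMN: subtracting a negative is adding its
    // magnitude, so the flags come out the same.
    static bool isUInt12(int32_t v) { return !(v & ~0xFFF); }

    int main()
    {
        int32_t left = 7, right = -100;
        assert(!isUInt12(right) && isUInt12(-right)); // takes the CMN path
        assert(left - right == left + (-right));      // same result, same flags
        return 0;
    }
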
+
+ void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
+ {
+ m_assembler.cmp<64>(left, right);
+ m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
+ }
+
+ void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ m_assembler.cmp<64>(left, right);
+ m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+ moveConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+ return;
+ }
+ }
+
+ if (isUInt12(right.m_value))
+ m_assembler.cmp<64>(left, UInt12(right.m_value));
+ else if (isUInt12(-right.m_value))
+ m_assembler.cmn<64>(left, UInt12(-right.m_value));
+ else {
+ moveToCachedReg(right, dataMemoryTempRegister());
+ m_assembler.cmp<64>(left, dataTempRegister);
+ }
+ m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+ {
+ m_assembler.tst<32>(testReg, mask);
+ m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
+ }
+
+ void moveConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ m_assembler.tst<32>(left, right);
+ m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ test32(left, right);
+ m_assembler.csel<32>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
+ {
+ m_assembler.tst<64>(testReg, mask);
+ m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
+ }
+
+ void moveConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, RegisterID thenCase, RegisterID elseCase, RegisterID dest)
+ {
+ m_assembler.tst<64>(left, right);
+ m_assembler.csel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ m_assembler.cmp<32>(left, right);
+ m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionally32(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+ moveDoubleConditionallyTest32(*resultCondition, left, left, thenCase, elseCase, dest);
+ return;
+ }
+ }
+
+ if (isUInt12(right.m_value))
+ m_assembler.cmp<32>(left, UInt12(right.m_value));
+ else if (isUInt12(-right.m_value))
+ m_assembler.cmn<32>(left, UInt12(-right.m_value));
+ else {
+ moveToCachedReg(right, dataMemoryTempRegister());
+ m_assembler.cmp<32>(left, dataTempRegister);
+ }
+ m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ m_assembler.cmp<64>(left, right);
+ m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionally64(RelationalCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+ moveDoubleConditionallyTest64(*resultCondition, left, left, thenCase, elseCase, dest);
+ return;
+ }
+ }
+
+ if (isUInt12(right.m_value))
+ m_assembler.cmp<64>(left, UInt12(right.m_value));
+ else if (isUInt12(-right.m_value))
+ m_assembler.cmn<64>(left, UInt12(-right.m_value));
+ else {
+ moveToCachedReg(right, dataMemoryTempRegister());
+ m_assembler.cmp<64>(left, dataTempRegister);
+ }
+ m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ m_assembler.tst<32>(left, right);
+ m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionallyTest32(ResultCondition cond, RegisterID left, TrustedImm32 right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ test32(left, right);
+ m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
+
+ void moveDoubleConditionallyTest64(ResultCondition cond, RegisterID left, RegisterID right, FPRegisterID thenCase, FPRegisterID elseCase, FPRegisterID dest)
+ {
+ m_assembler.tst<64>(left, right);
+ m_assembler.fcsel<64>(dest, thenCase, elseCase, ARM64Condition(cond));
+ }
// Forwards / external control flow operations:
//
@@ -1539,12 +2420,17 @@ public:
Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
{
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+ return branchTest32(*resultCondition, left, left);
+ }
+
if (isUInt12(right.m_value))
m_assembler.cmp<32>(left, UInt12(right.m_value));
else if (isUInt12(-right.m_value))
m_assembler.cmn<32>(left, UInt12(-right.m_value));
else {
- moveToCachedReg(right, m_dataMemoryTempRegister);
+ moveToCachedReg(right, dataMemoryTempRegister());
m_assembler.cmp<32>(left, dataTempRegister);
}
return Jump(makeBranch(cond));
@@ -1588,19 +2474,52 @@ public:
Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
{
+ if (right == ARM64Registers::sp) {
+ if (cond == Equal && left != ARM64Registers::sp) {
+ // CMP can only use SP as its left argument. Since we are testing for
+ // equality, the order of the operands does not matter here.
+ std::swap(left, right);
+ } else {
+ move(right, getCachedDataTempRegisterIDAndInvalidate());
+ right = dataTempRegister;
+ }
+ }
m_assembler.cmp<64>(left, right);
return Jump(makeBranch(cond));
}
+ Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm32 right)
+ {
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+ return branchTest64(*resultCondition, left, left);
+ }
+
+ if (isUInt12(right.m_value))
+ m_assembler.cmp<64>(left, UInt12(right.m_value));
+ else if (isUInt12(-right.m_value))
+ m_assembler.cmn<64>(left, UInt12(-right.m_value));
+ else {
+ moveToCachedReg(right, dataMemoryTempRegister());
+ m_assembler.cmp<64>(left, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
{
intptr_t immediate = right.m_value;
+ if (!immediate) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond))
+ return branchTest64(*resultCondition, left, left);
+ }
+
if (isUInt12(immediate))
m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
else if (isUInt12(-immediate))
m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
else {
- moveToCachedReg(right, m_dataMemoryTempRegister);
+ moveToCachedReg(right, dataMemoryTempRegister());
m_assembler.cmp<64>(left, dataTempRegister);
}
return Jump(makeBranch(cond));
@@ -1630,33 +2549,62 @@ public:
return branch64(cond, memoryTempRegister, right);
}
+ Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+ {
+ load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch64(cond, memoryTempRegister, right);
+ }
+
Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
{
- ASSERT(!(0xffffff00 & right.m_value));
- load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
- return branch32(cond, memoryTempRegister, right);
+ TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right8);
}
Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
- ASSERT(!(0xffffff00 & right.m_value));
- load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
- return branch32(cond, memoryTempRegister, right);
+ TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right8);
}
Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
{
- ASSERT(!(0xffffff00 & right.m_value));
- load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
- return branch32(cond, memoryTempRegister, right);
+ TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right8);
}
Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
{
+ if (reg == mask && (cond == Zero || cond == NonZero))
+ return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
m_assembler.tst<32>(reg, mask);
return Jump(makeBranch(cond));
}
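
branchTest32/branchTest64 pick up two fast paths in this patch: testing a register against itself lowers Zero/NonZero to a single cbz/cbnz, and (in the immediate variants below) a one-bit mask lowers to tbz/tbnz on that bit. Models of the predicates guarding those paths (the real ones live in WTF):

    #include <cassert>
    #include <cstdint>

    // A power of two has exactly one bit set.
    static bool hasOneBitSet(uint64_t mask) { return mask && !(mask & (mask - 1)); }

    static int getLSBSet(uint64_t mask) // bit index TBZ/TBNZ will test
    {
        int bit = 0;
        while (!(mask & 1)) { mask >>= 1; ++bit; }
        return bit;
    }

    int main()
    {
        assert(hasOneBitSet(0x40) && getLSBSet(0x40) == 6); // tbz reg, #6
        assert(!hasOneBitSet(0x41));                        // falls back to tst
        return 0;
    }
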
+ void test32(RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.tst<32>(reg, reg);
+ else {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+
+ if (logicalImm.isValid())
+ m_assembler.tst<32>(reg, logicalImm);
+ else {
+ move(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<32>(reg, dataTempRegister);
+ }
+ }
+ }
+
+ Jump branch(ResultCondition cond)
+ {
+ return Jump(makeBranch(cond));
+ }
+
Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
if (mask.m_value == -1) {
@@ -1666,13 +2614,10 @@ public:
} else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
else {
- if ((cond == Zero) || (cond == NonZero)) {
- LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
-
- if (logicalImm.isValid()) {
- m_assembler.tst<32>(reg, logicalImm);
- return Jump(makeBranch(cond));
- }
+ LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+ if (logicalImm.isValid()) {
+ m_assembler.tst<32>(reg, logicalImm);
+ return Jump(makeBranch(cond));
}
move(mask, getCachedDataTempRegisterIDAndInvalidate());
@@ -1695,6 +2640,8 @@ public:
Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
{
+ if (reg == mask && (cond == Zero || cond == NonZero))
+ return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
m_assembler.tst<64>(reg, mask);
return Jump(makeBranch(cond));
}
@@ -1708,13 +2655,11 @@ public:
} else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
else {
- if ((cond == Zero) || (cond == NonZero)) {
- LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+ LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
- if (logicalImm.isValid()) {
- m_assembler.tst<64>(reg, logicalImm);
- return Jump(makeBranch(cond));
- }
+ if (logicalImm.isValid()) {
+ m_assembler.tst<64>(reg, logicalImm);
+ return Jump(makeBranch(cond));
}
signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
@@ -1723,6 +2668,28 @@ public:
return Jump(makeBranch(cond));
}
+ Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
+ {
+ if (mask.m_value == -1) {
+ if ((cond == Zero) || (cond == NonZero))
+ return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
+ m_assembler.tst<64>(reg, reg);
+ } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
+ return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
+ else {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.tst<64>(reg, logicalImm);
+ return Jump(makeBranch(cond));
+ }
+
+ move(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<64>(reg, dataTempRegister);
+ }
+ return Jump(makeBranch(cond));
+ }
+
Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
{
load64(address, getCachedDataTempRegisterIDAndInvalidate());
@@ -1749,27 +2716,36 @@ public:
Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
{
- load8(address, getCachedDataTempRegisterIDAndInvalidate());
- return branchTest32(cond, dataTempRegister, mask);
+ TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest32(cond, dataTempRegister, mask8);
}
Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
- load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
- return branchTest32(cond, dataTempRegister, mask);
+ TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest32(cond, dataTempRegister, mask8);
}
Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
+ TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
- m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
- return branchTest32(cond, dataTempRegister, mask);
+
+ if (MacroAssemblerHelpers::isUnsigned<MacroAssemblerARM64>(cond))
+ m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
+ else
+ m_assembler.ldrsb<32>(dataTempRegister, address.base, dataTempRegister);
+
+ return branchTest32(cond, dataTempRegister, mask8);
}
Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
{
- load8(address, getCachedDataTempRegisterIDAndInvalidate());
- return branchTest32(cond, dataTempRegister, mask);
+ TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest32(cond, dataTempRegister, mask8);
}
Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
@@ -1875,7 +2851,14 @@ public:
return branchAdd64(cond, dest, imm, dest);
}
- Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ Jump branchAdd64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT(isUInt12(imm.m_value));
+ m_assembler.add<64, S>(dest, dest, UInt12(imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
{
ASSERT(cond != Signed);
@@ -1886,14 +2869,19 @@ public:
// This is a signed multiply of two 32-bit values, producing a 64-bit result.
m_assembler.smull(dest, src1, src2);
- // Copy bits 63..32 of the result to bits 31..0 of dataTempRegister.
- m_assembler.asr<64>(getCachedDataTempRegisterIDAndInvalidate(), dest, 32);
- // Splat bit 31 of the result to bits 31..0 of memoryTempRegister.
- m_assembler.asr<32>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 31);
+ // Copy bits 63..32 of the result to bits 31..0 of scratch1.
+ m_assembler.asr<64>(scratch1, dest, 32);
+ // Splat bit 31 of the result to bits 31..0 of scratch2.
+ m_assembler.asr<32>(scratch2, dest, 31);
// After a mul32 the top 32 bits of the register should be clear.
zeroExtend32ToPtr(dest, dest);
// Check that bits 31..63 of the original result were all equal.
- return branch32(NotEqual, memoryTempRegister, dataTempRegister);
+ return branch32(NotEqual, scratch2, scratch1);
+ }
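+
+ // A minimal C++ model of the check above (illustrative; the helper name is
+ // hypothetical): smull is exact in 64 bits, so the multiply overflowed 32 bits
+ // iff the high half disagrees with the sign splat of bit 31.
+ //
+ //     static bool mul32Overflowed(int32_t a, int32_t b)
+ //     {
+ //         int64_t full = static_cast<int64_t>(a) * b;         // what smull computes
+ //         int64_t high = full >> 32;                          // asr<64>(scratch1, dest, 32)
+ //         int64_t splat = static_cast<int32_t>(full) >> 31;   // asr<32>(scratch2, dest, 31)
+ //         return high != splat;                               // branch32(NotEqual, scratch2, scratch1)
+ //     }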
+
+ Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
}
Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
@@ -1901,13 +2889,13 @@ public:
return branchMul32(cond, dest, src, dest);
}
- Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+ Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
{
move(imm, getCachedDataTempRegisterIDAndInvalidate());
return branchMul32(cond, dataTempRegister, src, dest);
}
- Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
{
ASSERT(cond != Signed);
@@ -1917,12 +2905,17 @@ public:
if (cond != Overflow)
return branchTest64(cond, dest);
- // Compute bits 127..64 of the result into dataTempRegister.
- m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
- // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
- m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
+ // Compute bits 127..64 of the result into scratch1.
+ m_assembler.smulh(scratch1, src1, src2);
+ // Splat bit 63 of the result to bits 63..0 of scratch2.
+ m_assembler.asr<64>(scratch2, dest, 63);
// Check that bits 63..127 of the original result were all equal.
- return branch64(NotEqual, memoryTempRegister, dataTempRegister);
+ return branch64(NotEqual, scratch2, scratch1);
+ }
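+
+ // Same idea in 64 bits (illustrative): smulh yields bits 127..64 of the exact
+ // product, so overflow occurred iff they differ from asr(dest, 63), the sign
+ // splat of bit 63. E.g. (1ll << 62) * 4: dest truncates to 0 (splat 0) while
+ // smulh gives 1, so the branch is taken.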
+
+ Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
}
Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
@@ -2010,6 +3003,13 @@ public:
return branchSub64(cond, dest, imm, dest);
}
+ Jump branchSub64(RelationalCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ ASSERT(isUInt12(imm.m_value));
+ m_assembler.sub<64, S>(dest, dest, UInt12(imm.m_value));
+ return Jump(makeBranch(cond));
+ }
+
// Jumps, calls, returns
@@ -2054,6 +3054,12 @@ public:
load64(address, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.br(dataTempRegister);
}
+
+ void jump(BaseIndex address)
+ {
+ load64(address, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.br(dataTempRegister);
+ }
void jump(AbsoluteAddress address)
{
@@ -2074,6 +3080,13 @@ public:
return Call(m_assembler.label(), Call::LinkableNear);
}
+ ALWAYS_INLINE Call nearTailCall()
+ {
+ AssemblerLabel label = m_assembler.label();
+ m_assembler.b();
+ return Call(label, Call::LinkableNearTail);
+ }
+
ALWAYS_INLINE void ret()
{
m_assembler.ret();
@@ -2108,8 +3121,21 @@ public:
void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
- move(right, getCachedDataTempRegisterIDAndInvalidate());
- m_assembler.cmp<32>(left, dataTempRegister);
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+ test32(*resultCondition, left, left, dest);
+ return;
+ }
+ }
+
+ if (isUInt12(right.m_value))
+ m_assembler.cmp<32>(left, UInt12(right.m_value));
+ else if (isUInt12(-right.m_value))
+ m_assembler.cmn<32>(left, UInt12(-right.m_value));
+ else {
+ move(right, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.cmp<32>(left, dataTempRegister);
+ }
m_assembler.cset<32>(dest, ARM64Condition(cond));
}
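+
+ // Illustrative lowerings of the immediate cases above (hypothetical register
+ // names; w16 is dataTempRegister):
+ //   compare32(Equal, r, TrustedImm32(0), d)       -> tst wr, wr; cset wd, eq
+ //   compare32(Below, r, TrustedImm32(10), d)      -> cmp wr, #10; cset wd, lo
+ //   compare32(Equal, r, TrustedImm32(-5), d)      -> cmn wr, #5; cset wd, eq
+ //   compare32(Equal, r, TrustedImm32(1 << 20), d) -> mov w16, #0x100000; cmp wr, w16; cset wd, eq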
@@ -2121,6 +3147,13 @@ public:
void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
+ if (!right.m_value) {
+ if (auto resultCondition = commuteCompareToZeroIntoTest(cond)) {
+ test64(*resultCondition, left, left, dest);
+ return;
+ }
+ }
+
signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.cmp<64>(left, dataTempRegister);
m_assembler.cset<32>(dest, ARM64Condition(cond));
@@ -2128,32 +3161,35 @@ public:
void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
{
- load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
- move(right, getCachedDataTempRegisterIDAndInvalidate());
+ TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, left, getCachedMemoryTempRegisterIDAndInvalidate());
+ move(right8, getCachedDataTempRegisterIDAndInvalidate());
compare32(cond, memoryTempRegister, dataTempRegister, dest);
}
-
+
+ void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
+ {
+ m_assembler.tst<32>(src, mask);
+ m_assembler.cset<32>(dest, ARM64Condition(cond));
+ }
+
void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
{
- if (mask.m_value == -1)
- m_assembler.tst<32>(src, src);
- else {
- signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
- m_assembler.tst<32>(src, dataTempRegister);
- }
+ test32(src, mask);
m_assembler.cset<32>(dest, ARM64Condition(cond));
}
void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
{
- load32(address, getCachedDataTempRegisterIDAndInvalidate());
- test32(cond, dataTempRegister, mask, dest);
+ load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
+ test32(cond, memoryTempRegister, mask, dest);
}
void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
{
- load8(address, getCachedDataTempRegisterIDAndInvalidate());
- test32(cond, dataTempRegister, mask, dest);
+ TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+ MacroAssemblerHelpers::load8OnCondition(*this, cond, address, getCachedMemoryTempRegisterIDAndInvalidate());
+ test32(cond, memoryTempRegister, mask8, dest);
}
void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
@@ -2173,6 +3209,10 @@ public:
m_assembler.cset<32>(dest, ARM64Condition(cond));
}
+ void setCarry(RegisterID dest)
+ {
+ m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS);
+ }
// Patchable operations
@@ -2204,10 +3244,17 @@ public:
return branch64(cond, left, dataTempRegister);
}
- PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ dataLabel = DataLabel32(this);
+ moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+ return branch32(cond, left, dataTempRegister);
+ }
+
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
{
m_makeJumpPatchable = true;
- Jump result = branch32(cond, left, TrustedImm32(right));
+ Jump result = branch64(cond, left, TrustedImm64(right));
m_makeJumpPatchable = false;
return PatchableJump(result);
}
@@ -2228,6 +3275,30 @@ public:
return PatchableJump(result);
}
+ PatchableJump patchableBranch32(RelationalCondition cond, Address left, TrustedImm32 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, left, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch64(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch64(cond, left, right);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
{
m_makeJumpPatchable = true;
@@ -2236,6 +3307,14 @@ public:
return PatchableJump(result);
}
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
PatchableJump patchableJump()
{
m_makeJumpPatchable = true;
@@ -2274,11 +3353,26 @@ public:
m_assembler.nop();
}
+ // We take memoryFence to mean acqrel; the dmb ish emitted below has acqrel semantics on ARM64.
void memoryFence()
{
- m_assembler.dmbSY();
+ m_assembler.dmbISH();
}
+ // We take this to mean that it prevents motion of normal stores. That's a store fence on ARM64 (hence the "ST").
+ void storeFence()
+ {
+ m_assembler.dmbISHST();
+ }
+
+ // We take this to mean that it prevents motion of normal loads. Ideally we'd have expressed this
+ // using dependencies or half fences, but there are cases where this is as good as it gets. The only
+ // way to get a standalone load fence instruction on ARM is to use the ISH fence, which is just like
+ // the memoryFence().
+ void loadFence()
+ {
+ m_assembler.dmbISH();
+ }
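+
+ // Hypothetical pairing of the fences above: a publisher stores a payload,
+ // issues storeFence() (dmb ishst), then stores the pointer to it; a consumer
+ // loads the pointer, issues loadFence() (dmb ish), then loads the payload.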
// Misc helper functions.
@@ -2288,6 +3382,23 @@ public:
return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
}
+ static std::optional<ResultCondition> commuteCompareToZeroIntoTest(RelationalCondition cond)
+ {
+ switch (cond) {
+ case Equal:
+ return Zero;
+ case NotEqual:
+ return NonZero;
+ case LessThan:
+ return Signed;
+ case GreaterThanOrEqual:
+ return PositiveOrZero;
+ default:
+ return std::nullopt;
+ }
+ }
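+
+ // Rationale: against zero, Equal/NotEqual reduce to the Z flag of "tst x, x"
+ // and LessThan/GreaterThanOrEqual reduce to its N flag (the sign bit), so the
+ // compare can become a cheaper test, e.g. "x < 0" becomes test(Signed, x, x).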
+
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
@@ -2303,9 +3414,20 @@ public:
return ARM64Assembler::maxJumpReplacementSize();
}
- RegisterID scratchRegisterForBlinding() { return getCachedDataTempRegisterIDAndInvalidate(); }
+ static ptrdiff_t patchableJumpSize()
+ {
+ return ARM64Assembler::patchableJumpSize();
+ }
+
+ RegisterID scratchRegisterForBlinding()
+ {
+ // We *do not* have a scratch register for blinding.
+ RELEASE_ASSERT_NOT_REACHED();
+ return getCachedDataTempRegisterIDAndInvalidate();
+ }
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
@@ -2318,6 +3440,12 @@ public:
return CodeLocationLabel();
}
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
{
reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
@@ -2328,6 +3456,25 @@ public:
UNREACHABLE_FOR_PLATFORM();
}
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+ {
+ ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+ }
+
+ static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ {
+ ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+ }
+
+#if ENABLE(MASM_PROBE)
+ void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)
+
protected:
ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
{
@@ -2381,8 +3528,26 @@ protected:
}
private:
- ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate() { return m_dataMemoryTempRegister.registerIDInvalidate(); }
- ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate() { return m_cachedMemoryTempRegister.registerIDInvalidate(); }
+ ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate()
+ {
+ RELEASE_ASSERT(m_allowScratchRegister);
+ return dataMemoryTempRegister().registerIDInvalidate();
+ }
+ ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate()
+ {
+ RELEASE_ASSERT(m_allowScratchRegister);
+ return cachedMemoryTempRegister().registerIDInvalidate();
+ }
+ ALWAYS_INLINE CachedTempRegister& dataMemoryTempRegister()
+ {
+ RELEASE_ASSERT(m_allowScratchRegister);
+ return m_dataMemoryTempRegister;
+ }
+ ALWAYS_INLINE CachedTempRegister& cachedMemoryTempRegister()
+ {
+ RELEASE_ASSERT(m_allowScratchRegister);
+ return m_cachedMemoryTempRegister;
+ }
ALWAYS_INLINE bool isInIntRange(intptr_t value)
{
@@ -2461,6 +3626,18 @@ private:
}
template<int datasize>
+ ALWAYS_INLINE void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
+ {
+ loadUnsignedImmediate<datasize>(rt, rn, pimm);
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
+ {
+ loadUnscaledImmediate<datasize>(rt, rn, simm);
+ }
+
+ template<int datasize>
ALWAYS_INLINE void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
{
m_assembler.str<datasize>(rt, rn, pimm);
@@ -2498,21 +3675,16 @@ private:
}
}
- void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
- {
- move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
- }
-
template<int datasize>
ALWAYS_INLINE void load(const void* address, RegisterID dest)
{
intptr_t currentRegisterContents;
- if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+ if (cachedMemoryTempRegister().value(currentRegisterContents)) {
intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
intptr_t addressDelta = addressAsInt - currentRegisterContents;
if (dest == memoryTempRegister)
- m_cachedMemoryTempRegister.invalidate();
+ cachedMemoryTempRegister().invalidate();
if (isInIntRange(addressDelta)) {
if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
@@ -2528,7 +3700,7 @@ private:
if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
- m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
return;
}
@@ -2536,17 +3708,18 @@ private:
move(TrustedImmPtr(address), memoryTempRegister);
if (dest == memoryTempRegister)
- m_cachedMemoryTempRegister.invalidate();
+ cachedMemoryTempRegister().invalidate();
else
- m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
}
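+
+ // Effect of the caching above (illustrative, hypothetical addresses): a second
+ // absolute load near the first reuses memoryTempRegister (x17), e.g.
+ //   load32(ptrA, r0);     // mov x17, #ptrA...; ldr w0, [x17, xzr]
+ //   load32(ptrA + 8, r1); // delta fits a 9-bit simm: ldur w1, [x17, #8]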
template<int datasize>
ALWAYS_INLINE void store(RegisterID src, const void* address)
{
+ ASSERT(src != memoryTempRegister);
intptr_t currentRegisterContents;
- if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
+ if (cachedMemoryTempRegister().value(currentRegisterContents)) {
intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
intptr_t addressDelta = addressAsInt - currentRegisterContents;
@@ -2564,14 +3737,14 @@ private:
if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
- m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
return;
}
}
move(TrustedImmPtr(address), memoryTempRegister);
- m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
+ cachedMemoryTempRegister().setValue(reinterpret_cast<intptr_t>(address));
m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
}
@@ -2648,6 +3821,20 @@ private:
}
template<int datasize>
+ ALWAYS_INLINE bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
+ {
+ if (ARM64Assembler::canEncodeSImmOffset(offset)) {
+ loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset);
+ return true;
+ }
+ if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
+ loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
+ return true;
+ }
+ return false;
+ }
+
+ template<int datasize>
ALWAYS_INLINE bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
{
if (ARM64Assembler::canEncodeSImmOffset(offset)) {
@@ -2689,25 +3876,37 @@ private:
return false;
}
- friend class LinkBuffer;
- friend class RepatchBuffer;
-
- static void linkCall(void* code, Call call, FunctionPtr function)
+ Jump jumpAfterFloatingPointCompare(DoubleCondition cond)
{
- if (call.isFlagSet(Call::Near))
- ARM64Assembler::linkCall(code, call.m_label, function.value());
- else
- ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+ if (cond == DoubleNotEqual) {
+ // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ Jump result = makeBranch(ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ return result;
+ }
+ if (cond == DoubleEqualOrUnordered) {
+ Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
+ Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
+ unordered.link(this);
+ // We get here if either unordered or equal.
+ Jump result = jump();
+ notEqual.link(this);
+ return result;
+ }
+ return makeBranch(cond);
}
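+
+ // Why the VS guard above: on ARM64, fcmp against a NaN sets the V flag, and a
+ // bare b.ne would also take the unordered case. The extra branch either skips
+ // the b.ne (DoubleNotEqual) or turns unordered into the taken path
+ // (DoubleEqualOrUnordered).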
- static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
- {
- ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
- }
+ friend class LinkBuffer;
- static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+ static void linkCall(void* code, Call call, FunctionPtr function)
{
- ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
+ if (!call.isFlagSet(Call::Near))
+ ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
+ else if (call.isFlagSet(Call::Tail))
+ ARM64Assembler::linkJump(code, call.m_label, function.value());
+ else
+ ARM64Assembler::linkCall(code, call.m_label, function.value());
}
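+
+ // Note the dispatch above: a near tail call is flagged both Near and Tail, so
+ // it is linked as a plain branch (b) via linkJump rather than as a call (bl).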
CachedTempRegister m_dataMemoryTempRegister;
@@ -2729,6 +3928,18 @@ ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt,
}
template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.ldrsb<64>(rt, rn, pimm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
+{
+ m_assembler.ldrsh<64>(rt, rn, pimm);
+}
+
+template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
m_assembler.ldurb(rt, rn, simm);
@@ -2741,6 +3952,18 @@ ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt,
}
template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.ldursb<64>(rt, rn, simm);
+}
+
+template<>
+ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
+{
+ m_assembler.ldursh<64>(rt, rn, simm);
+}
+
+template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
m_assembler.strb(rt, rn, pimm);
@@ -2767,5 +3990,3 @@ ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt
} // namespace JSC
#endif // ENABLE(ASSEMBLER)
-
-#endif // MacroAssemblerARM64_h