author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/assembler/MacroAssembler.h
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
download  WebKitGtk-tarball-master.tar.gz
Diffstat (limited to 'Source/JavaScriptCore/assembler/MacroAssembler.h')
-rw-r--r--  Source/JavaScriptCore/assembler/MacroAssembler.h  355
1 file changed, 289 insertions(+), 66 deletions(-)
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index 4a43eb625..b6aba874d 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,13 +23,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef MacroAssembler_h
-#define MacroAssembler_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(ASSEMBLER)
+#include "JSCJSValue.h"
+
#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };
@@ -56,78 +55,53 @@ namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };
-#elif CPU(SH4)
-#include "MacroAssemblerSH4.h"
-namespace JSC {
-typedef MacroAssemblerSH4 MacroAssemblerBase;
-};
-
#else
#error "The MacroAssembler is not supported on this platform."
#endif
+#include "MacroAssemblerHelpers.h"
+
namespace JSC {
class MacroAssembler : public MacroAssemblerBase {
public:
- static bool isStackRelated(RegisterID reg)
- {
- return reg == stackPointerRegister || reg == framePointerRegister;
- }
-
- static RegisterID firstRealRegister()
- {
- RegisterID firstRegister = MacroAssembler::firstRegister();
- while (MacroAssembler::isStackRelated(firstRegister))
- firstRegister = static_cast<RegisterID>(firstRegister + 1);
- return firstRegister;
- }
-
- static RegisterID nextRegister(RegisterID reg)
- {
- RegisterID result = static_cast<RegisterID>(reg + 1);
- while (MacroAssembler::isStackRelated(result))
- result = static_cast<RegisterID>(result + 1);
- return result;
- }
-
- static RegisterID secondRealRegister()
+ static constexpr RegisterID nextRegister(RegisterID reg)
{
- return nextRegister(firstRealRegister());
+ return static_cast<RegisterID>(reg + 1);
}
- static FPRegisterID nextFPRegister(FPRegisterID reg)
+ static constexpr FPRegisterID nextFPRegister(FPRegisterID reg)
{
return static_cast<FPRegisterID>(reg + 1);
}
- static unsigned numberOfRegisters()
+ static constexpr unsigned numberOfRegisters()
{
return lastRegister() - firstRegister() + 1;
}
- static unsigned registerIndex(RegisterID reg)
+ static constexpr unsigned registerIndex(RegisterID reg)
{
return reg - firstRegister();
}
- static unsigned numberOfFPRegisters()
+ static constexpr unsigned numberOfFPRegisters()
{
return lastFPRegister() - firstFPRegister() + 1;
}
- static unsigned fpRegisterIndex(FPRegisterID reg)
+ static constexpr unsigned fpRegisterIndex(FPRegisterID reg)
{
return reg - firstFPRegister();
}
- static unsigned registerIndex(FPRegisterID reg)
+ static constexpr unsigned registerIndex(FPRegisterID reg)
{
return fpRegisterIndex(reg) + numberOfRegisters();
}
- static unsigned totalNumberOfRegisters()
+ static constexpr unsigned totalNumberOfRegisters()
{
return numberOfRegisters() + numberOfFPRegisters();
}
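
The constexpr conversion above lets register counts and indices feed compile-time contexts such as statically sized spill tables. A minimal standalone sketch of the pattern, using a stand-in enum rather than WebKit's real RegisterID:

    // Stand-in enum; WebKit's RegisterID comes from the per-CPU assembler.
    enum RegisterID { eax, ecx, edx, ebx, esp, ebp, esi, edi };
    static constexpr RegisterID firstRegister() { return eax; }
    static constexpr RegisterID lastRegister() { return edi; }
    static constexpr unsigned numberOfRegisters()
    {
        return lastRegister() - firstRegister() + 1;
    }
    // Because the helpers are constexpr, array bounds and static_asserts
    // can consume them directly:
    static_assert(numberOfRegisters() == 8, "evaluated at compile time");
    static int spillSlots[numberOfRegisters()];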
@@ -135,14 +109,16 @@ public:
using MacroAssemblerBase::pop;
using MacroAssemblerBase::jump;
using MacroAssemblerBase::branch32;
+ using MacroAssemblerBase::compare32;
using MacroAssemblerBase::move;
using MacroAssemblerBase::add32;
+ using MacroAssemblerBase::mul32;
using MacroAssemblerBase::and32;
using MacroAssemblerBase::branchAdd32;
using MacroAssemblerBase::branchMul32;
-#if CPU(X86_64)
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL) || CPU(X86_64)
using MacroAssemblerBase::branchPtr;
-#endif // CPU(X86_64)
+#endif
using MacroAssemblerBase::branchSub32;
using MacroAssemblerBase::lshift32;
using MacroAssemblerBase::or32;
@@ -160,7 +136,6 @@ public:
static const double twoToThe32; // This is super useful for some double code.
// Utilities used by the DFG JIT.
-#if ENABLE(DFG_JIT)
using MacroAssemblerBase::invert;
static DoubleCondition invert(DoubleCondition cond)
@@ -190,10 +165,9 @@ public:
return DoubleGreaterThanOrEqual;
case DoubleLessThanOrEqualOrUnordered:
return DoubleGreaterThan;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return DoubleEqual; // make compiler happy
}
+ RELEASE_ASSERT_NOT_REACHED();
+ return DoubleEqual; // make compiler happy
}
static bool isInvertible(ResultCondition cond)
@@ -201,6 +175,8 @@ public:
switch (cond) {
case Zero:
case NonZero:
+ case Signed:
+ case PositiveOrZero:
return true;
default:
return false;
@@ -214,14 +190,65 @@ public:
return NonZero;
case NonZero:
return Zero;
+ case Signed:
+ return PositiveOrZero;
+ case PositiveOrZero:
+ return Signed;
default:
RELEASE_ASSERT_NOT_REACHED();
return Zero; // Make compiler happy for release builds.
}
}
-#endif
- // Platform agnostic onvenience functions,
+ static RelationalCondition flip(RelationalCondition cond)
+ {
+ switch (cond) {
+ case Equal:
+ case NotEqual:
+ return cond;
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ case GreaterThan:
+ return LessThan;
+ case GreaterThanOrEqual:
+ return LessThanOrEqual;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return Equal;
+ }
+
+ static bool isSigned(RelationalCondition cond)
+ {
+ return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
+ }
+
+ static bool isUnsigned(RelationalCondition cond)
+ {
+ return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
+ }
+
+ static bool isSigned(ResultCondition cond)
+ {
+ return MacroAssemblerHelpers::isSigned<MacroAssembler>(cond);
+ }
+
+ static bool isUnsigned(ResultCondition cond)
+ {
+ return MacroAssemblerHelpers::isUnsigned<MacroAssembler>(cond);
+ }
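
Note the difference between the new flip() and the existing invert(): flip() swaps the operand order while preserving the predicate's truth value (a < b is the same test as b > a), whereas invert() negates it (a >= b). A standalone sketch of the identities the switch above encodes:

    #include <cassert>
    int main()
    {
        int a = 1, b = 2;
        assert((a < b) == (b > a));   // flip: operands swapped, condition mirrored
        assert((a < b) == !(a >= b)); // invert: same operands, condition negated
    }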
+
+ // Platform agnostic convenience functions,
// described in terms of other macro assembly methods.
void pop()
{
@@ -258,6 +285,10 @@ public:
{
push(src);
}
+ void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+ {
+ push(imm);
+ }
void popToRestore(RegisterID dest)
{
pop(dest);
@@ -272,6 +303,8 @@ public:
loadDouble(stackPointerRegister, dest);
addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
}
+
+ static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)
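
pushToSave()/popToRestore() spill a value around a clobbering operation, and the new pushToSaveImmediateWithoutTouchingRegisters() and pushToSaveByteOffset() let callers do so without consuming a scratch register while still knowing how far the stack pointer moved. A hedged usage sketch, assuming a non-ARM64 target and JSC's usual GPRInfo temporaries; the surrounding jit emitter is hypothetical:

    // Hypothetical emitter fragment. On non-ARM64 targets the saved word is
    // at the top of the stack and pushToSaveByteOffset() == sizeof(void*).
    jit.pushToSave(GPRInfo::regT0);
    // ... emit code that clobbers regT0 ...
    jit.loadPtr(MacroAssembler::Address(MacroAssembler::stackPointerRegister, 0),
        GPRInfo::regT1);                    // peek at the saved copy
    jit.popToRestore(GPRInfo::regT0);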
#if CPU(X86_64) || CPU(ARM64)
@@ -316,6 +349,11 @@ public:
branchPtr(cond, op1, imm).linkTo(target, this);
}
+ Jump branch32(RelationalCondition cond, RegisterID left, AbsoluteAddress right)
+ {
+ return branch32(flip(cond), right, left);
+ }
+
void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
{
branch32(cond, op1, op2).linkTo(target, this);
@@ -346,6 +384,11 @@ public:
return branch32(commute(cond), right, left);
}
+ void compare32(RelationalCondition cond, Imm32 left, RegisterID right, RegisterID dest)
+ {
+ compare32(commute(cond), right, left, dest);
+ }
+
void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
{
branchTestPtr(cond, reg).linkTo(target, this);
@@ -362,6 +405,11 @@ public:
return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
}
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
+ }
+
#if !CPU(ARM_TRADITIONAL)
PatchableJump patchableJump()
{
@@ -377,6 +425,11 @@ public:
{
return PatchableJump(branch32(cond, reg, imm));
}
+
+ PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, address, imm));
+ }
#endif
#endif
@@ -414,6 +467,19 @@ public:
return condition;
}
+ void oops()
+ {
+ abortWithReason(B3Oops);
+ }
+
+ // B3 has additional pseudo-opcodes for returning, when it wants to signal that the return
+ // consumes some register in some way.
+ void retVoid() { ret(); }
+ void ret32(RegisterID) { ret(); }
+ void ret64(RegisterID) { ret(); }
+ void retFloat(FPRegisterID) { ret(); }
+ void retDouble(FPRegisterID) { ret(); }
+
static const unsigned BlindingModulus = 64;
bool shouldConsiderBlinding()
{
@@ -439,6 +505,11 @@ public:
add32(src, dest);
}
+ void addPtr(RegisterID left, RegisterID right, RegisterID dest)
+ {
+ add32(left, right, dest);
+ }
+
void addPtr(TrustedImm32 imm, RegisterID srcDest)
{
add32(imm, srcDest);
@@ -474,6 +545,21 @@ public:
and32(TrustedImm32(imm), srcDest);
}
+ void lshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ lshift32(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void rshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ rshift32(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void urshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ urshift32(trustedImm32ForShift(imm), srcDest);
+ }
+
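
Shift immediates bypass blinding because trustedImm32ForShift() masks the count down to the bits a shift instruction actually honors, leaving too little attacker-controlled entropy to matter. A standalone sketch of why a masked count is harmless:

    #include <cassert>
    #include <cstdint>
    // A 32-bit shift only honors the low 5 bits of its count, so a shift
    // immediate cannot smuggle a useful 32-bit constant into the code.
    static uint32_t maskShiftAmount32(uint32_t amount)
    {
        return amount & 31;
    }
    int main()
    {
        assert(maskShiftAmount32(33) == maskShiftAmount32(1)); // shift identically
    }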
void negPtr(RegisterID dest)
{
neg32(dest);
@@ -595,6 +681,11 @@ public:
store32(TrustedImm32(imm), address);
}
+ void storePtr(TrustedImm32 imm, ImplicitAddress address)
+ {
+ store32(imm, address);
+ }
+
void storePtr(TrustedImmPtr imm, BaseIndex address)
{
store32(TrustedImm32(imm), address);
@@ -691,6 +782,11 @@ public:
{
add64(src, dest);
}
+
+ void addPtr(RegisterID left, RegisterID right, RegisterID dest)
+ {
+ add64(left, right, dest);
+ }
void addPtr(Address src, RegisterID dest)
{
@@ -747,6 +843,16 @@ public:
lshift64(trustedImm32ForShift(imm), srcDest);
}
+ void rshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ rshift64(trustedImm32ForShift(imm), srcDest);
+ }
+
+ void urshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ urshift64(trustedImm32ForShift(imm), srcDest);
+ }
+
void negPtr(RegisterID dest)
{
neg64(dest);
@@ -857,6 +963,11 @@ public:
store64(TrustedImm64(imm), address);
}
+ void storePtr(TrustedImm32 imm, ImplicitAddress address)
+ {
+ store64(imm, address);
+ }
+
void storePtr(TrustedImmPtr imm, BaseIndex address)
{
store64(TrustedImm64(imm), address);
@@ -986,7 +1097,7 @@ public:
if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
return shouldConsiderBlinding();
- value = abs(value);
+ value = fabs(value);
// Only allow a limited set of fractional components
double scaledValue = value * 8;
if (scaledValue / 8 != value)
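
The abs() to fabs() change above is a real bug fix: with only the C library's integer abs() in scope, the double argument converts to int and the fractional part is silently discarded, which would defeat the fractional-component check that follows. A standalone sketch of that bug class:

    #include <cassert>
    #include <cmath>
    // What routing a double through the integer abs() effectively did:
    static double brokenAbs(double v) { return std::abs(static_cast<int>(v)); }
    int main()
    {
        assert(brokenAbs(2.75) == 2.0);  // fraction lost through int
        assert(std::fabs(2.75) == 2.75); // fabs preserves the double
    }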
@@ -1137,7 +1248,7 @@ public:
void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
{
- if (shouldBlind(imm)) {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
convertInt32ToDouble(scratchRegister, dest);
@@ -1173,7 +1284,7 @@ public:
Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
{
- if (shouldBlind(right)) {
+ if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
return branchPtr(cond, left, scratchRegister);
@@ -1183,7 +1294,7 @@ public:
void storePtr(ImmPtr imm, Address dest)
{
- if (shouldBlind(imm)) {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
storePtr(scratchRegister, dest);
@@ -1193,7 +1304,7 @@ public:
void store64(Imm64 imm, Address dest)
{
- if (shouldBlind(imm)) {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
store64(scratchRegister, dest);
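
The rotation-blinded stores above never embed the raw constant: the value is stored pre-rotated by a random amount, and the emitted code rotates it back into place. A standalone sketch of the arithmetic; the helper names are local to the sketch (the header's real helpers are rotationBlindConstant() and loadRotationBlindedConstant()):

    #include <cassert>
    #include <cstdint>
    static uint64_t rotateRight(uint64_t v, unsigned n)
    {
        n &= 63;
        return n ? (v >> n) | (v << (64 - n)) : v;
    }
    static uint64_t rotateLeft(uint64_t v, unsigned n)
    {
        return rotateRight(v, 64 - (n & 63));
    }
    int main()
    {
        uint64_t secret = 0x1122334455667788ull;
        unsigned rotation = 13;                           // random() at JIT time
        uint64_t blinded = rotateLeft(secret, rotation);  // literal embedded in code
        assert(rotateRight(blinded, rotation) == secret); // emitted rotate recovers it
    }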
@@ -1203,6 +1314,37 @@ public:
#endif // !CPU(X86_64)
+#if ENABLE(B3_JIT)
+ // We should implement this the right way eventually, but for now, it's fine because it arises so
+ // infrequently.
+ void compareDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+ {
+ move(TrustedImm32(0), dest);
+ Jump falseCase = branchDouble(invert(cond), left, right);
+ move(TrustedImm32(1), dest);
+ falseCase.link(this);
+ }
+ void compareFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID dest)
+ {
+ move(TrustedImm32(0), dest);
+ Jump falseCase = branchFloat(invert(cond), left, right);
+ move(TrustedImm32(1), dest);
+ falseCase.link(this);
+ }
+#endif
+
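+ // The fallback above materializes a boolean with a branch-around: seed the
+ // destination with 0, then jump past the store of 1 when the inverted
+ // condition fires. The same control flow in a standalone sketch:
+ //
+ //     #include <cassert>
+ //     // Mirrors: move 0; branch on inverted condition; move 1; link.
+ //     static int compareGreaterThan(double left, double right)
+ //     {
+ //         int dest = 0;            // move(TrustedImm32(0), dest)
+ //         if (!(left > right))     // branchDouble(invert(cond), left, right)
+ //             goto falseCase;      // (!(a > b) is <=-or-unordered, as invert() gives)
+ //         dest = 1;                // move(TrustedImm32(1), dest)
+ //     falseCase:                   // falseCase.link(this)
+ //         return dest;
+ //     }
+ //     int main()
+ //     {
+ //         assert(compareGreaterThan(2.0, 1.0) == 1);
+ //         assert(compareGreaterThan(1.0, 2.0) == 0);
+ //     }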
+ void lea32(Address address, RegisterID dest)
+ {
+ add32(TrustedImm32(address.offset), address.base, dest);
+ }
+
+#if CPU(X86_64) || CPU(ARM64)
+ void lea64(Address address, RegisterID dest)
+ {
+ add64(TrustedImm32(address.offset), address.base, dest);
+ }
+#endif // CPU(X86_64) || CPU(ARM64)
+
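
shouldBlind() gates all of the above: once an immediate is judged attacker-controllable, it is split against a random key and recombined at runtime so that neither emitted literal equals the original value. A standalone sketch of the xor variant (the real BlindedImm32 carries TrustedImm32 values, and the JIT draws keys from its own RNG):

    #include <cassert>
    #include <cstdint>
    #include <random>
    struct BlindedImm32 { int32_t value1; int32_t value2; };
    static BlindedImm32 xorBlindConstant(int32_t value)
    {
        static std::mt19937 generator{0x5eed};
        int32_t key = static_cast<int32_t>(generator());
        return { value ^ key, key };
    }
    int main()
    {
        BlindedImm32 blinded = xorBlindConstant(0x41414141);
        // Emitted as: move value1 -> reg; xor32 value2 -> reg.
        assert((blinded.value1 ^ blinded.value2) == 0x41414141);
    }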
bool shouldBlind(Imm32 imm)
{
#if ENABLE(FORCED_JIT_BLINDING)
@@ -1316,6 +1458,16 @@ public:
} else
add32(imm.asTrustedImm32(), dest);
}
+
+ void add32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ BlindedImm32 key = additionBlindedConstant(imm);
+ add32(key.value1, src, dest);
+ add32(key.value2, dest);
+ } else
+ add32(imm.asTrustedImm32(), src, dest);
+ }
void addPtr(Imm32 imm, RegisterID dest)
{
@@ -1327,6 +1479,27 @@ public:
addPtr(imm.asTrustedImm32(), dest);
}
+ void mul32(Imm32 imm, RegisterID src, RegisterID dest)
+ {
+ if (shouldBlind(imm)) {
+ if (src != dest || haveScratchRegisterForBlinding()) {
+ if (src == dest) {
+ move(src, scratchRegisterForBlinding());
+ src = scratchRegisterForBlinding();
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ mul32(src, dest);
+ return;
+ }
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ }
+ mul32(imm.asTrustedImm32(), src, dest);
+ }
+
void and32(Imm32 imm, RegisterID dest)
{
if (shouldBlind(imm)) {
@@ -1486,23 +1659,50 @@ public:
return branch32(cond, left, right.asTrustedImm32());
}
- Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+ void compare32(RelationalCondition cond, RegisterID left, Imm32 right, RegisterID dest)
{
- if (src == dest)
- ASSERT(haveScratchRegisterForBlinding());
+ if (shouldBlind(right)) {
+ if (left != dest || haveScratchRegisterForBlinding()) {
+ RegisterID blindedConstantReg = dest;
+ if (left == dest)
+ blindedConstantReg = scratchRegisterForBlinding();
+ loadXorBlindedConstant(xorBlindConstant(right), blindedConstantReg);
+ compare32(cond, left, blindedConstantReg, dest);
+ return;
+ }
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
+ compare32(cond, left, right.asTrustedImm32(), dest);
+ return;
+ }
+
+ compare32(cond, left, right.asTrustedImm32(), dest);
+ }
+ Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
+ {
if (shouldBlind(imm)) {
- if (src == dest) {
- move(src, scratchRegisterForBlinding());
- src = scratchRegisterForBlinding();
+ if (src != dest || haveScratchRegisterForBlinding()) {
+ if (src == dest) {
+ move(src, scratchRegisterForBlinding());
+ src = scratchRegisterForBlinding();
+ }
+ loadXorBlindedConstant(xorBlindConstant(imm), dest);
+ return branchAdd32(cond, src, dest);
}
- loadXorBlindedConstant(xorBlindConstant(imm), dest);
- return branchAdd32(cond, src, dest);
+ // If we don't have a scratch register available for use, we'll just
+ // place a random number of nops.
+ uint32_t nopCount = random() & 3;
+ while (nopCount--)
+ nop();
}
return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
}
- Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
+ Jump branchMul32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
{
if (src == dest)
ASSERT(haveScratchRegisterForBlinding());
@@ -1515,7 +1715,7 @@ public:
loadXorBlindedConstant(xorBlindConstant(imm), dest);
return branchMul32(cond, src, dest);
}
- return branchMul32(cond, imm.asTrustedImm32(), src, dest);
+ return branchMul32(cond, src, imm.asTrustedImm32(), dest);
}
// branchSub32 takes a scratch register as 32 bit platforms make use of this,
@@ -1560,12 +1760,35 @@ public:
{
urshift32(src, trustedImm32ForShift(amount), dest);
}
+
+#if ENABLE(MASM_PROBE)
+ using MacroAssemblerBase::probe;
+
+ // Lets you print from your JIT generated code.
+ // See comments in MacroAssemblerPrinter.h for examples of how to use this.
+ template<typename... Arguments>
+ void print(Arguments... args);
+
+ void probe(std::function<void (ProbeContext*)>);
+#endif
};
} // namespace JSC
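
The std::function overload of probe() runs an arbitrary C++ callback from inside JIT-generated code with the machine state captured in a ProbeContext, and is the hook print() builds on. A hedged usage sketch; the lambda body is an assumption, and MacroAssemblerPrinter.h documents the supported print() forms:

    // Hypothetical usage: log every time execution passes this point.
    jit.probe([] (MacroAssembler::ProbeContext* context) {
        dataLogF("probe hit, context = %p\n", context); // WTF's dataLogF
    });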
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::MacroAssembler::RelationalCondition);
+void printInternal(PrintStream&, JSC::MacroAssembler::ResultCondition);
+void printInternal(PrintStream&, JSC::MacroAssembler::DoubleCondition);
+
+} // namespace WTF
+
#else // ENABLE(ASSEMBLER)
+namespace JSC {
+
// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
@@ -1579,6 +1802,6 @@ public:
enum FPRegisterID { NoFPRegister };
};
-#endif // ENABLE(ASSEMBLER)
+} // namespace JSC
-#endif // MacroAssembler_h
+#endif // ENABLE(ASSEMBLER)