Diffstat (limited to 'Source/JavaScriptCore/assembler/MacroAssemblerARM.h')
-rw-r--r-- | Source/JavaScriptCore/assembler/MacroAssemblerARM.h | 294
1 file changed, 222 insertions, 72 deletions
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
index 7eae2ee01..7d36034a3 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2013 Apple Inc.
+ * Copyright (C) 2008, 2013-2016 Apple Inc.
  * Copyright (C) 2009, 2010 University of Szeged
  * All rights reserved.
  *
@@ -25,8 +25,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

-#ifndef MacroAssemblerARM_h
-#define MacroAssemblerARM_h
+#pragma once

 #if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

@@ -35,11 +34,14 @@
 namespace JSC {

-class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
+class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler, MacroAssemblerARM> {
     static const int DoubleConditionMask = 0x0f;
     static const int DoubleConditionBitSpecial = 0x10;
     COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
 public:
+    static const unsigned numGPRs = 16;
+    static const unsigned numFPRs = 16;
+
     typedef ARMRegisters::FPRegisterID FPRegisterID;

     enum RelationalCondition {
@@ -228,13 +230,31 @@ public:
         store32(ARMRegisters::S1, ARMRegisters::S0);
     }

+    void or32(TrustedImm32 imm, AbsoluteAddress dest)
+    {
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        load32(Address(ARMRegisters::S0), ARMRegisters::S1);
+        or32(imm, ARMRegisters::S1); // It uses S0 as temporary register, we need to reload the address.
+        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
+        store32(ARMRegisters::S1, ARMRegisters::S0);
+    }
+
+    void or32(TrustedImm32 imm, Address address)
+    {
+        load32(address, ARMRegisters::S0);
+        or32(imm, ARMRegisters::S0, ARMRegisters::S0);
+        store32(ARMRegisters::S0, address);
+    }
+
     void or32(TrustedImm32 imm, RegisterID dest)
     {
+        ASSERT(dest != ARMRegisters::S0);
         m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
     }

     void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
+        ASSERT(src != ARMRegisters::S0);
         m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
     }

@@ -263,7 +283,10 @@

     void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
     }

     void urshift32(RegisterID shiftAmount, RegisterID dest)
@@ -286,7 +309,10 @@

     void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
+        if (!imm.m_value)
+            move(src, dest);
+        else
+            m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
     }

     void sub32(RegisterID src, RegisterID dest)
@@ -294,6 +320,11 @@
         m_assembler.subs(dest, dest, src);
     }

+    void sub32(RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.subs(dest, left, right);
+    }
+
     void sub32(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
     }
@@ -370,7 +401,12 @@
         m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0);
     }

-    void load8Signed(BaseIndex address, RegisterID dest)
+    void load8SignedExtendTo32(Address address, RegisterID dest)
+    {
+        m_assembler.dataTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.offset);
+    }
+
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
@@ -385,7 +421,7 @@
         m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }

-    void load16Signed(BaseIndex address, RegisterID dest)
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }
@@ -414,6 +450,18 @@
         load16(address, dest);
     }

+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), ARMRegisters::S0);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm32(misc), ARMRegisters::S1);
+        abortWithReason(reason);
+    }
+
     ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
     {
         ConvertibleLoadLabel result(this);
@@ -459,16 +507,29 @@
         m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }

+    void store8(RegisterID src, ImplicitAddress address)
+    {
+        m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset);
+    }
+
     void store8(RegisterID src, const void* address)
     {
         move(TrustedImmPtr(address), ARMRegisters::S0);
         m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0);
     }

+    void store8(TrustedImm32 imm, ImplicitAddress address)
+    {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
+        move(imm8, ARMRegisters::S1);
+        store8(ARMRegisters::S1, address);
+    }
+
     void store8(TrustedImm32 imm, const void* address)
     {
+        TrustedImm32 imm8(static_cast<int8_t>(imm.m_value));
         move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
-        move(imm, ARMRegisters::S1);
+        move(imm8, ARMRegisters::S1);
         m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
     }

@@ -517,6 +578,12 @@
         m_assembler.pop(dest);
     }

+    void popPair(RegisterID dest1, RegisterID dest2)
+    {
+        m_assembler.pop(dest1);
+        m_assembler.pop(dest2);
+    }
+
     void push(RegisterID src)
     {
         m_assembler.push(src);
     }
@@ -534,6 +601,12 @@
         push(ARMRegisters::S0);
     }

+    void pushPair(RegisterID src1, RegisterID src2)
+    {
+        m_assembler.push(src2);
+        m_assembler.push(src1);
+    }
+
     void move(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.moveImm(imm.m_value, dest);
     }
@@ -571,21 +644,29 @@

     Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
     {
-        load8(left, ARMRegisters::S1);
-        return branch32(cond, ARMRegisters::S1, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
     }

     Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
     {
-        ASSERT(!(right.m_value & 0xFFFFFF00));
-        load8(left, ARMRegisters::S1);
-        return branch32(cond, ARMRegisters::S1, right);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
     }

     Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
     {
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
         move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1);
-        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1);
+        return branch32(cond, ARMRegisters::S1, right8);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        load32(left, ARMRegisters::S1);
         return branch32(cond, ARMRegisters::S1, right);
     }

@@ -633,33 +714,36 @@

     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, ARMRegisters::S1);
-        return branchTest32(cond, ARMRegisters::S1, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
     }

     Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        load8(address, ARMRegisters::S1);
-        return branchTest32(cond, ARMRegisters::S1, mask);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
     }

     Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
     {
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
         move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
-        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
-        return branchTest32(cond, ARMRegisters::S1, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, Address(ARMRegisters::S1), ARMRegisters::S1);
+        return branchTest32(cond, ARMRegisters::S1, mask8);
     }

     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
         m_assembler.tst(reg, mask);
         return Jump(m_assembler.jmp(ARMCondition(cond)));
     }

     Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
-        ASSERT((cond == Zero) || (cond == NonZero));
+        ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == PositiveOrZero);
         ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
         if (w & ARMAssembler::Op2InvertedImmediate)
             m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
@@ -790,7 +874,7 @@
         return branchMul32(cond, src, dest, dest);
     }

-    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
+    Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
     {
         ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
         if (cond == Overflow) {
@@ -858,6 +942,14 @@
         return PatchableJump(jump);
     }

+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+    {
+        internalCompare32(address, imm);
+        Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), false));
+        m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
+        return PatchableJump(jump);
+    }
+
     void breakpoint()
     {
         m_assembler.bkpt(0);
@@ -869,6 +961,11 @@
         return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
     }

+    Call nearTailCall()
+    {
+        return Call(m_assembler.jmp(), Call::LinkableNearTail);
+    }
+
     Call call(RegisterID target)
     {
         return Call(m_assembler.blx(target), Call::None);
     }
@@ -900,14 +997,15 @@ public:

     void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
     {
-        load8(left, ARMRegisters::S1);
-        compare32(cond, ARMRegisters::S1, right, dest);
+        TrustedImm32 right8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, right);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, left, ARMRegisters::S1);
+        compare32(cond, ARMRegisters::S1, right8, dest);
     }

     void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
     {
         if (mask.m_value == -1)
-            m_assembler.cmp(0, reg);
+            m_assembler.tst(reg, reg);
         else
             m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
         m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
@@ -922,8 +1020,9 @@

     void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
-        load8(address, ARMRegisters::S1);
-        test32(cond, ARMRegisters::S1, mask, dest);
+        TrustedImm32 mask8 = MacroAssemblerHelpers::mask8OnCondition(*this, cond, mask);
+        MacroAssemblerHelpers::load8OnCondition(*this, cond, address, ARMRegisters::S1);
+        test32(cond, ARMRegisters::S1, mask8, dest);
     }

     void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
@@ -1021,6 +1120,13 @@
         return dataLabel;
     }

+    DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+    {
+        DataLabel32 dataLabel(this);
+        m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value));
+        return dataLabel;
+    }
+
     Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
         ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
@@ -1038,6 +1144,15 @@
         return jump;
     }

+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        load32(left, ARMRegisters::S1);
+        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
+        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
+        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
+        return jump;
+    }
+
     DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
     {
         DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
@@ -1066,6 +1181,7 @@
         return s_isVFPPresent;
     }
     static bool supportsFloatingPointAbs() { return false; }
+    static bool supportsFloatingPointRounding() { return false; }

     void loadFloat(BaseIndex address, FPRegisterID dest)
     {
@@ -1082,12 +1198,30 @@
         m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }

-    void loadDouble(const void* address, FPRegisterID dest)
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
     {
-        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
         m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
     }

+    NO_RETURN_DUE_TO_CRASH void ceilDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void floorDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
+    NO_RETURN_DUE_TO_CRASH void roundTowardZeroDouble(FPRegisterID, FPRegisterID)
+    {
+        ASSERT(!supportsFloatingPointRounding());
+        CRASH();
+    }
+
     void storeFloat(FPRegisterID src, BaseIndex address)
     {
         m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
@@ -1103,9 +1237,9 @@
         m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
     }

-    void storeDouble(FPRegisterID src, const void* address)
+    void storeDouble(FPRegisterID src, TrustedImmPtr address)
     {
-        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
+        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
         m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
     }

@@ -1115,6 +1249,12 @@
         m_assembler.vmov_f64(dest, src);
     }

+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        static double zeroConstant = 0.;
+        loadDouble(TrustedImmPtr(&zeroConstant), reg);
+    }
+
     void addDouble(FPRegisterID src, FPRegisterID dest)
     {
         m_assembler.vadd_f64(dest, dest, src);
     }
@@ -1133,7 +1273,7 @@

     void addDouble(AbsoluteAddress address, FPRegisterID dest)
     {
-        loadDouble(address.m_ptr, ARMRegisters::SD0);
+        loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0);
         addDouble(ARMRegisters::SD0, dest);
     }

@@ -1330,6 +1470,11 @@
         m_assembler.dmbSY();
     }

+    void storeFence()
+    {
+        m_assembler.dmbISHST();
+    }
+
     static FunctionPtr readCallTarget(CodeLocationCall call)
     {
         return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
     }
@@ -1342,11 +1487,22 @@
     static ptrdiff_t maxJumpReplacementSize()
     {
-        ARMAssembler::maxJumpReplacementSize();
-        return 0;
+        return ARMAssembler::maxJumpReplacementSize();
+    }
+
+    static ptrdiff_t patchableJumpSize()
+    {
+        return ARMAssembler::patchableJumpSize();
     }

     static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return CodeLocationLabel();
+    }

     static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
     {
@@ -1364,36 +1520,29 @@
         ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
     }

-    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
     {
         UNREACHABLE_FOR_PLATFORM();
     }

-#if USE(MASM_PROBE)
-    struct CPUState {
-        #define DECLARE_REGISTER(_type, _regName) \
-            _type _regName;
-        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
-        #undef DECLARE_REGISTER
-    };
-
-    struct ProbeContext;
-    typedef void (*ProbeFunction)(struct ProbeContext*);
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+    {
+        UNREACHABLE_FOR_PLATFORM();
+    }

-    struct ProbeContext {
-        ProbeFunction probeFunction;
-        void* arg1;
-        void* arg2;
-        CPUState cpu;
+    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }

-        void dump(const char* indentation = 0);
-    private:
-        void dumpCPURegisters(const char* indentation);
-    };
+    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
+    {
+        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+    }

-    // For details about probe(), see comment in MacroAssemblerX86_64.h.
-    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
-#endif // USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
+    void probe(ProbeFunction, void* arg1, void* arg2);
+#endif // ENABLE(MASM_PROBE)

 protected:
     ARMAssembler::Condition ARMCondition(RelationalCondition cond)
@@ -1424,7 +1573,6 @@ protected:

 private:
     friend class LinkBuffer;
-    friend class RepatchBuffer;

     void internalCompare32(RegisterID left, TrustedImm32 right)
     {
@@ -1435,22 +1583,26 @@
         m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
     }

-    static void linkCall(void* code, Call call, FunctionPtr function)
+    void internalCompare32(Address left, TrustedImm32 right)
     {
-        ARMAssembler::linkCall(code, call.m_label, function.value());
+        ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
+        load32(left, ARMRegisters::S1);
+        if (tmp != ARMAssembler::InvalidImmediate)
+            m_assembler.cmn(ARMRegisters::S1, tmp);
+        else
+            m_assembler.cmp(ARMRegisters::S1, m_assembler.getImm(right.m_value, ARMRegisters::S0));
     }

-    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
+    static void linkCall(void* code, Call call, FunctionPtr function)
     {
-        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
+        if (call.isFlagSet(Call::Tail))
+            ARMAssembler::linkJump(code, call.m_label, function.value());
+        else
+            ARMAssembler::linkCall(code, call.m_label, function.value());
     }

-    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
-    {
-        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
-    }
-
-#if USE(MASM_PROBE)
+#if ENABLE(MASM_PROBE)
     inline TrustedImm32 trustedImm32FromPtr(void* ptr)
     {
         return TrustedImm32(TrustedImmPtr(ptr));
     }
@@ -1470,8 +1622,6 @@
     static const bool s_isVFPPresent;
 };

-}
+} // namespace JSC

 #endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
-
-#endif // MacroAssemblerARM_h
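The branch8/branchTest8/compare8/test8 hunks above route byte comparisons through MacroAssemblerHelpers::mask8OnCondition and load8OnCondition, so that the loaded byte and the 32-bit immediate agree on width and signedness before the compare. The standalone sketch below is illustrative only (not JSC code; the variable names are made up) and shows the failure mode that masking avoids:

    // Illustration of why an 8-bit compare needs the immediate masked to the
    // width of the loaded byte. Not JSC code.
    #include <cstdint>
    #include <iostream>

    int main()
    {
        uint8_t byteInMemory = 0xFF;   // a byte holding "-1"
        int32_t rightImmediate = -1;   // the 32-bit immediate a caller passes

        // Zero-extending byte load followed by a plain 32-bit compare:
        // 0x000000FF == 0xFFFFFFFF is false, so an Equal test misses.
        bool unmasked = static_cast<int32_t>(byteInMemory) == rightImmediate;

        // Masking the immediate to its low 8 bits first (the idea behind
        // mask8OnCondition for equality/unsigned conditions) makes both
        // sides 0xFF, so the compare matches.
        int32_t right8 = rightImmediate & 0xFF;
        bool masked = static_cast<int32_t>(byteInMemory) == right8;

        std::cout << "unmasked: " << unmasked << ", masked: " << masked << '\n';
        return 0;
    }

For signed relational conditions the helpers instead rely on a sign-extending load, which is the role of the new load8SignedExtendTo32 overloads added in this patch.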