path: root/Source/JavaScriptCore/offlineasm/arm64.rb
Diffstat (limited to 'Source/JavaScriptCore/offlineasm/arm64.rb')
-rw-r--r--  Source/JavaScriptCore/offlineasm/arm64.rb  194
1 file changed, 140 insertions, 54 deletions
diff --git a/Source/JavaScriptCore/offlineasm/arm64.rb b/Source/JavaScriptCore/offlineasm/arm64.rb
index e0a23ff37..ead489133 100644
--- a/Source/JavaScriptCore/offlineasm/arm64.rb
+++ b/Source/JavaScriptCore/offlineasm/arm64.rb
@@ -1,4 +1,5 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2014-2016 Apple Inc. All rights reserved.
+# Copyright (C) 2014 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -36,32 +37,38 @@ require "risc"
#
# GPR conventions, to match the baseline JIT:
#
-# x0 => return value, cached result, first argument, t0, a0, r0
+# x0 => t0, a0, r0
# x1 => t1, a1, r1
# x2 => t2, a2
-# x3 => a3
-# x9 => (nonArgGPR1 in baseline)
-# x10 => t4 (unused in baseline)
-# x11 => t5 (unused in baseline)
-# x12 => t6 (unused in baseline)
-# x13 => scratch (unused in baseline)
-# x16 => scratch
-# x17 => scratch
-# x23 => t3
-# x27 => csr1 (tagTypeNumber)
-# x28 => csr2 (tagMask)
+# x3 => t3, a3
+# x4 => t4
+# x5 => t5
+# x13 => (scratch)
+# x16 => (scratch)
+# x17 => (scratch)
+# x26 => csr0 (PB)
+# x27 => csr1 (tagTypeNumber)
+# x28 => csr2 (tagMask)
# x29 => cfr
# sp => sp
# lr => lr
#
-# FPR conentions, to match the baseline JIT:
+# FPR conventions, to match the baseline JIT:
#
-# q0 => ft0
-# q1 => ft1
-# q2 => ft2
-# q3 => ft3
-# q4 => ft4 (unused in baseline)
-# q5 => ft5 (unused in baseline)
+# q0 => ft0, fa0, fr
+# q1 => ft1, fa1
+# q2 => ft2, fa2
+# q3 => ft3, fa3
+# q4 => ft4 (unused in baseline)
+# q5 => ft5 (unused in baseline)
+# q8 => csfr0 (Only the lower 64 bits)
+# q9 => csfr1 (Only the lower 64 bits)
+# q10 => csfr2 (Only the lower 64 bits)
+# q11 => csfr3 (Only the lower 64 bits)
+# q12 => csfr4 (Only the lower 64 bits)
+# q13 => csfr5 (Only the lower 64 bits)
+# q14 => csfr6 (Only the lower 64 bits)
+# q15 => csfr7 (Only the lower 64 bits)
# q31 => scratch
def arm64GPRName(name, kind)
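As a rough sketch of how the symbolic names above are resolved (the helper below is invented for illustration; the real resolution happens in arm64GPRName/arm64FPRName and the RegisterID/FPRegisterID cases that follow), a GPR such as t0 lives in x0 and is printed as w0 or x0 depending on the operand width an instruction asks for:

    # Minimal sketch, assuming the :int and :ptr operand kinds used in this file.
    def sketch_gpr_name(name, kind)
      number = name[1..-1]               # "x0" -> "0"
      case kind
      when :int then "w" + number        # 32-bit view of the register
      when :ptr then name                # full 64-bit register
      else raise "Wrong kind: #{kind}"
      end
    end

    # sketch_gpr_name("x0", :int)   # => "w0"
    # sketch_gpr_name("x26", :ptr)  # => "x26"   (csr0, the PB register above)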
@@ -108,26 +115,38 @@ class RegisterID
arm64GPRName('x1', kind)
when 't2', 'a2'
arm64GPRName('x2', kind)
- when 'a3'
+ when 't3', 'a3'
arm64GPRName('x3', kind)
- when 't3'
- arm64GPRName('x23', kind)
when 't4'
- arm64GPRName('x10', kind)
+ arm64GPRName('x4', kind)
when 't5'
- arm64GPRName('x11', kind)
- when 't6'
- arm64GPRName('x12', kind)
+ arm64GPRName('x5', kind)
when 'cfr'
arm64GPRName('x29', kind)
+ when 'csr0'
+ arm64GPRName('x19', kind)
when 'csr1'
- arm64GPRName('x27', kind)
+ arm64GPRName('x20', kind)
when 'csr2'
+ arm64GPRName('x21', kind)
+ when 'csr3'
+ arm64GPRName('x22', kind)
+ when 'csr4'
+ arm64GPRName('x23', kind)
+ when 'csr5'
+ arm64GPRName('x24', kind)
+ when 'csr6'
+ arm64GPRName('x25', kind)
+ when 'csr7'
+ arm64GPRName('x26', kind)
+ when 'csr8'
+ arm64GPRName('x27', kind)
+ when 'csr9'
arm64GPRName('x28', kind)
when 'sp'
'sp'
when 'lr'
- 'lr'
+ 'x30'
else
raise "Bad register name #{@name} at #{codeOriginString}"
end
@@ -137,18 +156,34 @@ end
class FPRegisterID
def arm64Operand(kind)
case @name
- when 'ft0'
+ when 'ft0', 'fr', 'fa0'
arm64FPRName('q0', kind)
- when 'ft1'
+ when 'ft1', 'fa1'
arm64FPRName('q1', kind)
- when 'ft2'
+ when 'ft2', 'fa2'
arm64FPRName('q2', kind)
- when 'ft3'
+ when 'ft3', 'fa3'
arm64FPRName('q3', kind)
when 'ft4'
arm64FPRName('q4', kind)
when 'ft5'
arm64FPRName('q5', kind)
+ when 'csfr0'
+ arm64FPRName('q8', kind)
+ when 'csfr1'
+ arm64FPRName('q9', kind)
+ when 'csfr2'
+ arm64FPRName('q10', kind)
+ when 'csfr3'
+ arm64FPRName('q11', kind)
+ when 'csfr4'
+ arm64FPRName('q12', kind)
+ when 'csfr5'
+ arm64FPRName('q13', kind)
+ when 'csfr6'
+ arm64FPRName('q14', kind)
+ when 'csfr7'
+ arm64FPRName('q15', kind)
else "Bad register name #{@name} at #{codeOriginString}"
end
end
@@ -195,6 +230,64 @@ end
# Actual lowering code follows.
#
+def arm64LowerMalformedLoadStoreAddresses(list)
+ newList = []
+
+ def isAddressMalformed(operand)
+ operand.is_a? Address and not (-255..4095).include? operand.offset.value
+ end
+
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ if node.opcode =~ /^store/ and isAddressMalformed(node.operands[1])
+ address = node.operands[1]
+ tmp = Tmp.new(codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
+ newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0))], node.annotation)
+ elsif node.opcode =~ /^load/ and isAddressMalformed(node.operands[0])
+ address = node.operands[0]
+ tmp = Tmp.new(codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
+ newList << Instruction.new(node.codeOrigin, node.opcode, [BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0)), node.operands[1]], node.annotation)
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
+
+# Workaround for Cortex-A53 erratum (835769)
+def arm64CortexA53Fix835769(list)
+ newList = []
+ lastOpcodeUnsafe = false
+
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ case node.opcode
+ when /^store/, /^load/
+ # List all macro instructions that can be lowered to a load, store or prefetch ARM64 assembly instruction
+ lastOpcodeUnsafe = true
+ when "muli", "mulp", "mulq", "smulli"
+ # List all macro instructions that can be lowered to a 64-bit multiply-accumulate ARM64 assembly instruction
+ # (defined as one of MADD, MSUB, SMADDL, SMSUBL, UMADDL or UMSUBL).
+ if lastOpcodeUnsafe
+ newList << Instruction.new(node.codeOrigin, "nopCortexA53Fix835769", [])
+ end
+ lastOpcodeUnsafe = false
+ else
+ lastOpcodeUnsafe = false
+ end
+ end
+ newList << node
+ }
+ newList
+end
+
class Sequence
def getModifiedListARM64
result = @list
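To make the new arm64LowerMalformedLoadStoreAddresses pass concrete, here is a minimal sketch of the rewrite it performs, assuming an offset outside the -255..4095 range that ARM64 load/store immediates accept. The helper and the offlineasm text below are illustrative only; the real pass rewrites Instruction nodes, not strings:

    # Before:  loadp 100000[cfr], t0      # offset does not fit the immediate field
    # After:   move  100000, <tmp>
    #          loadp 0[cfr, tmp], t0      # base + index form, scale 1, offset 0
    def sketch_lower_big_offset(opcode, offset, base, dst)
      if (-255..4095).include?(offset)
        ["#{opcode} #{offset}[#{base}], #{dst}"]
      else
        ["move #{offset}, tmp",
         "#{opcode} 0[#{base}, tmp], #{dst}"]
      end
    end

    # sketch_lower_big_offset("loadp", 100000, "cfr", "t0")
    #   # => ["move 100000, tmp", "loadp 0[cfr, tmp], t0"]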
@@ -202,6 +295,7 @@ class Sequence
result = riscLowerSimpleBranchOps(result)
result = riscLowerHardBranchOps64(result)
result = riscLowerShiftOps(result)
+ result = arm64LowerMalformedLoadStoreAddresses(result)
result = riscLowerMalformedAddresses(result) {
| node, address |
case node.opcode
@@ -250,6 +344,7 @@ class Sequence
result = riscLowerTest(result)
result = assignRegistersToTemporaries(result, :gpr, ARM64_EXTRA_GPRS)
result = assignRegistersToTemporaries(result, :fpr, ARM64_EXTRA_FPRS)
+ result = arm64CortexA53Fix835769(result)
return result
end
end
@@ -367,7 +462,7 @@ def emitARM64MoveImmediate(value, target)
[48, 32, 16, 0].each {
| shift |
currentValue = (value >> shift) & 0xffff
- next if currentValue == (isNegative ? 0xffff : 0) and shift != 0
+ next if currentValue == (isNegative ? 0xffff : 0) and (shift != 0 or !first)
if first
if isNegative
$asm.puts "movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}"
@@ -385,6 +480,7 @@ class Instruction
def lowerARM64
$asm.comment codeOriginString
$asm.annotation annotation if $enableInstrAnnotations
+ $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations
case opcode
when 'addi'
@@ -584,22 +680,6 @@ class Instruction
| ops |
$asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
}
- when "popLRAndFP"
- $asm.puts "ldp fp, lr, [sp], #16"
- when "pushLRAndFP"
- $asm.puts "stp fp, lr, [sp, #-16]!"
- when "popCalleeSaves"
- $asm.puts "ldp x28, x27, [sp], #16"
- $asm.puts "ldp x26, x25, [sp], #16"
- $asm.puts "ldp x24, x23, [sp], #16"
- $asm.puts "ldp x22, x21, [sp], #16"
- $asm.puts "ldp x20, x19, [sp], #16"
- when "pushCalleeSaves"
- $asm.puts "stp x20, x19, [sp, #-16]!"
- $asm.puts "stp x22, x21, [sp, #-16]!"
- $asm.puts "stp x24, x23, [sp, #-16]!"
- $asm.puts "stp x26, x25, [sp, #-16]!"
- $asm.puts "stp x28, x27, [sp, #-16]!"
when "move"
if operands[0].immediate?
emitARM64MoveImmediate(operands[0].value, operands[1])
@@ -607,13 +687,13 @@ class Instruction
emitARM64("mov", operands, :ptr)
end
when "sxi2p"
- emitARM64("sxtw", operands, :ptr)
+ emitARM64("sxtw", operands, [:int, :ptr])
when "sxi2q"
- emitARM64("sxtw", operands, :ptr)
+ emitARM64("sxtw", operands, [:int, :ptr])
when "zxi2p"
- emitARM64("uxtw", operands, :ptr)
+ emitARM64("uxtw", operands, [:int, :ptr])
when "zxi2q"
- emitARM64("uxtw", operands, :ptr)
+ emitARM64("uxtw", operands, [:int, :ptr])
when "nop"
$asm.puts "nop"
when "bieq", "bbeq"
@@ -818,6 +898,12 @@ class Instruction
$asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
when "memfence"
$asm.puts "dmb sy"
+ when "pcrtoaddr"
+ $asm.puts "adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}"
+ when "nopCortexA53Fix835769"
+ $asm.putStr("#if CPU(ARM64_CORTEXA53)")
+ $asm.puts "nop"
+ $asm.putStr("#endif")
else
lowerDefault
end
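Finally, a minimal sketch of the combined effect of arm64CortexA53Fix835769 and the new nopCortexA53Fix835769 pseudo-instruction: when a macro instruction that lowers to a multiply-accumulate immediately follows one that lowers to a load or store, a nop is inserted between them, and that nop is only assembled when CPU(ARM64_CORTEXA53) is set. The pass below works on plain opcode strings for illustration; the real one walks Instruction nodes:

    def sketch_a53_fix_835769(opcodes)
      out = []
      last_unsafe = false                 # did the previous opcode load or store?
      opcodes.each do |op|
        if op.start_with?("load", "store")
          last_unsafe = true
        elsif %w[muli mulp mulq smulli].include?(op)
          out << "nopCortexA53Fix835769" if last_unsafe
          last_unsafe = false
        else
          last_unsafe = false
        end
        out << op
      end
      out
    end

    # sketch_a53_fix_835769(["loadq", "mulq"])
    #   # => ["loadq", "nopCortexA53Fix835769", "mulq"]
    # The pseudo-op then lowers to a nop wrapped in #if CPU(ARM64_CORTEXA53).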