author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/offlineasm
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/offlineasm')
-rw-r--r--  Source/JavaScriptCore/offlineasm/arm.rb                        |   67
-rw-r--r--  Source/JavaScriptCore/offlineasm/arm64.rb                      |  194
-rw-r--r--  Source/JavaScriptCore/offlineasm/asm.rb                        |  149
-rw-r--r--  Source/JavaScriptCore/offlineasm/ast.rb                        |  179
-rw-r--r--  Source/JavaScriptCore/offlineasm/backends.rb                   |   64
-rw-r--r--  Source/JavaScriptCore/offlineasm/cloop.rb                      |   72
-rw-r--r--  Source/JavaScriptCore/offlineasm/config.rb                     |   25
-rw-r--r--  Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb  |   11
-rw-r--r--  Source/JavaScriptCore/offlineasm/instructions.rb               |   46
-rw-r--r--  Source/JavaScriptCore/offlineasm/mips.rb                       |  243
-rw-r--r--  Source/JavaScriptCore/offlineasm/parser.rb                     |  167
-rw-r--r--  Source/JavaScriptCore/offlineasm/registers.rb                  |   26
-rw-r--r--  Source/JavaScriptCore/offlineasm/risc.rb                       |    2
-rw-r--r--  Source/JavaScriptCore/offlineasm/self_hash.rb                  |   15
-rw-r--r--  Source/JavaScriptCore/offlineasm/settings.rb                   |   59
-rw-r--r--  Source/JavaScriptCore/offlineasm/sh4.rb                        | 1072
-rw-r--r--  Source/JavaScriptCore/offlineasm/transform.rb                  |   12
-rw-r--r--  Source/JavaScriptCore/offlineasm/x86.rb                        |  801
18 files changed, 1523 insertions, 1681 deletions
diff --git a/Source/JavaScriptCore/offlineasm/arm.rb b/Source/JavaScriptCore/offlineasm/arm.rb
index 10b339eb6..f8660a42a 100644
--- a/Source/JavaScriptCore/offlineasm/arm.rb
+++ b/Source/JavaScriptCore/offlineasm/arm.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2015-2016 Apple Inc. All rights reserved.
# Copyright (C) 2013 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,6 +27,34 @@ require "ast"
require "opt"
require "risc"
+# GPR conventions, to match the baseline JIT
+#
+# x0 => t0, a0, r0
+# x1 => t1, a1, r1
+# x2 => t2, a2, r2
+# x3 => t3, a3, r3
+# x6 => (callee-save scratch)
+# x7 => cfr (ARMv7 only)
+# x8 => t4 (callee-save)
+# x9 => t5 (callee-save)
+# x10 => (callee-save scratch)
+# x11 => cfr (ARM and ARMv7 traditional)
+# x12 => (callee-save scratch)
+# lr => lr
+# sp => sp
+# pc => pc
+#
+# FPR conventions, to match the baseline JIT
+#
+# d0 => ft0, fa0, fr
+# d1 => ft1, fa1
+# d2 => ft2
+# d3 => ft3
+# d4 => ft4
+# d5 => ft5
+# d6 => (scratch)
+# d7 => (scratch)
+
def isARMv7
case $activeBackend
when "ARMv7"
@@ -66,10 +94,13 @@ end
ARM_EXTRA_GPRS = [SpecialRegister.new("r6"), SpecialRegister.new("r10"), SpecialRegister.new("r12")]
ARM_EXTRA_FPRS = [SpecialRegister.new("d7")]
ARM_SCRATCH_FPR = SpecialRegister.new("d6")
+OS_DARWIN = ((RUBY_PLATFORM =~ /darwin/i) != nil)
def armMoveImmediate(value, register)
# Currently we only handle the simple cases, and fall back to mov/movt for the complex ones.
- if value >= 0 && value < 256
+ if value.is_a? String
+ $asm.puts "mov #{register.armOperand}, (#{value})"
+ elsif value >= 0 && value < 256
$asm.puts "mov #{register.armOperand}, \##{value}"
elsif (~value) >= 0 && (~value) < 256
$asm.puts "mvn #{register.armOperand}, \##{~value}"
@@ -106,6 +137,8 @@ class RegisterID
"lr"
when "sp"
"sp"
+ when "pc"
+ "pc"
else
raise "Bad register #{name} for ARM at #{codeOriginString}"
end
@@ -115,9 +148,9 @@ end
class FPRegisterID
def armOperand
case name
- when "ft0", "fr"
+ when "ft0", "fr", "fa0"
"d0"
- when "ft1"
+ when "ft1", "fa1"
"d1"
when "ft2"
"d2"
@@ -317,6 +350,7 @@ class Instruction
def lowerARMCommon
$asm.codeOrigin codeOriginString if $enableCodeOriginComments
$asm.annotation annotation if $enableInstrAnnotations
+ $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations
case opcode
when "addi", "addp", "addis", "addps"
@@ -335,7 +369,7 @@ class Instruction
else
$asm.puts "adds #{operands[2].armOperand}, #{operands[1].armOperand}, #{operands[0].armOperand}"
end
- elsif operands.size == 3 and operands[0].immediate?
+ elsif operands.size == 3 and operands[0].register?
raise unless operands[1].register?
raise unless operands[2].register?
$asm.puts "adds #{armFlippedOperands(operands)}"
@@ -462,24 +496,15 @@ class Instruction
| op |
$asm.puts "push { #{op.armOperand} }"
}
- when "popCalleeSaves"
- if isARMv7
- $asm.puts "pop {r4-r6, r8-r11}"
- else
- $asm.puts "pop {r4-r10}"
- end
- when "pushCalleeSaves"
- if isARMv7
- $asm.puts "push {r4-r6, r8-r11}"
- else
- $asm.puts "push {r4-r10}"
- end
when "move"
if operands[0].immediate?
armMoveImmediate(operands[0].value, operands[1])
else
$asm.puts "mov #{armFlippedOperands(operands)}"
end
+ when "mvlbl"
+ $asm.puts "movw #{operands[1].armOperand}, \#:lower16:#{operands[0].value}"
+ $asm.puts "movt #{operands[1].armOperand}, \#:upper16:#{operands[0].value}"
when "nop"
$asm.puts "nop"
when "bieq", "bpeq", "bbeq"
@@ -544,7 +569,11 @@ class Instruction
end
when "call"
if operands[0].label?
- $asm.puts "blx #{operands[0].asmLabel}"
+ if OS_DARWIN
+ $asm.puts "blx #{operands[0].asmLabel}"
+ else
+ $asm.puts "bl #{operands[0].asmLabel}"
+ end
else
$asm.puts "blx #{operands[0].armOperand}"
end
@@ -601,6 +630,8 @@ class Instruction
$asm.puts "smull #{operands[2].armOperand}, #{operands[3].armOperand}, #{operands[0].armOperand}, #{operands[1].armOperand}"
when "memfence"
$asm.puts "dmb sy"
+ when "clrbp"
+ $asm.puts "bic #{operands[2].armOperand}, #{operands[0].armOperand}, #{operands[1].armOperand}"
else
lowerDefault
end
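
[Editor's note] Among other things, the arm.rb hunks above add a "mvlbl" macro instruction that materializes a label address with a movw/movt pair, and choose blx versus bl for direct calls depending on whether the target is Darwin. The snippet below is a standalone sketch of the mvlbl expansion only; the label and register names in the example call are invented, and it does not use the real offlineasm operand classes.

    # Hypothetical standalone expansion of "mvlbl value, register" on 32-bit ARM:
    # load the low 16 bits with movw, then the high 16 bits with movt.
    def emit_mvlbl(label, register, out = $stdout)
      out.puts "movw #{register}, #:lower16:#{label}"
      out.puts "movt #{register}, #:upper16:#{label}"
    end

    emit_mvlbl("_llint_op_catch", "r1")
    # => movw r1, #:lower16:_llint_op_catch
    #    movt r1, #:upper16:_llint_op_catch
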
diff --git a/Source/JavaScriptCore/offlineasm/arm64.rb b/Source/JavaScriptCore/offlineasm/arm64.rb
index e0a23ff37..ead489133 100644
--- a/Source/JavaScriptCore/offlineasm/arm64.rb
+++ b/Source/JavaScriptCore/offlineasm/arm64.rb
@@ -1,4 +1,5 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2014-2016 Apple Inc. All rights reserved.
+# Copyright (C) 2014 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -36,32 +37,38 @@ require "risc"
#
# GPR conventions, to match the baseline JIT:
#
-# x0 => return value, cached result, first argument, t0, a0, r0
+# x0 => t0, a0, r0
# x1 => t1, a1, r1
# x2 => t2, a2
-# x3 => a3
-# x9 => (nonArgGPR1 in baseline)
-# x10 => t4 (unused in baseline)
-# x11 => t5 (unused in baseline)
-# x12 => t6 (unused in baseline)
-# x13 => scratch (unused in baseline)
-# x16 => scratch
-# x17 => scratch
-# x23 => t3
-# x27 => csr1 (tagTypeNumber)
-# x28 => csr2 (tagMask)
+# x3 => t3, a3
+# x4 => t4
+# x5 => t5
+# x13 => (scratch)
+# x16 => (scratch)
+# x17 => (scratch)
+# x26 => csr0 (PB)
+# x27 => csr1 (tagTypeNumber)
+# x28 => csr2 (tagMask)
# x29 => cfr
# sp => sp
# lr => lr
#
-# FPR conentions, to match the baseline JIT:
+# FPR conventions, to match the baseline JIT:
#
-# q0 => ft0
-# q1 => ft1
-# q2 => ft2
-# q3 => ft3
-# q4 => ft4 (unused in baseline)
-# q5 => ft5 (unused in baseline)
+# q0 => ft0, fa0, fr
+# q1 => ft1, fa1
+# q2 => ft2, fa2
+# q3 => ft3, fa3
+# q4 => ft4 (unused in baseline)
+# q5 => ft5 (unused in baseline)
+# q8 => csfr0 (Only the lower 64 bits)
+# q9 => csfr1 (Only the lower 64 bits)
+# q10 => csfr2 (Only the lower 64 bits)
+# q11 => csfr3 (Only the lower 64 bits)
+# q12 => csfr4 (Only the lower 64 bits)
+# q13 => csfr5 (Only the lower 64 bits)
+# q14 => csfr6 (Only the lower 64 bits)
+# q15 => csfr7 (Only the lower 64 bits)
# q31 => scratch
def arm64GPRName(name, kind)
@@ -108,26 +115,38 @@ class RegisterID
arm64GPRName('x1', kind)
when 't2', 'a2'
arm64GPRName('x2', kind)
- when 'a3'
+ when 't3', 'a3'
arm64GPRName('x3', kind)
- when 't3'
- arm64GPRName('x23', kind)
when 't4'
- arm64GPRName('x10', kind)
+ arm64GPRName('x4', kind)
when 't5'
- arm64GPRName('x11', kind)
- when 't6'
- arm64GPRName('x12', kind)
+ arm64GPRName('x5', kind)
when 'cfr'
arm64GPRName('x29', kind)
+ when 'csr0'
+ arm64GPRName('x19', kind)
when 'csr1'
- arm64GPRName('x27', kind)
+ arm64GPRName('x20', kind)
when 'csr2'
+ arm64GPRName('x21', kind)
+ when 'csr3'
+ arm64GPRName('x22', kind)
+ when 'csr4'
+ arm64GPRName('x23', kind)
+ when 'csr5'
+ arm64GPRName('x24', kind)
+ when 'csr6'
+ arm64GPRName('x25', kind)
+ when 'csr7'
+ arm64GPRName('x26', kind)
+ when 'csr8'
+ arm64GPRName('x27', kind)
+ when 'csr9'
arm64GPRName('x28', kind)
when 'sp'
'sp'
when 'lr'
- 'lr'
+ 'x30'
else
raise "Bad register name #{@name} at #{codeOriginString}"
end
@@ -137,18 +156,34 @@ end
class FPRegisterID
def arm64Operand(kind)
case @name
- when 'ft0'
+ when 'ft0', 'fr', 'fa0'
arm64FPRName('q0', kind)
- when 'ft1'
+ when 'ft1', 'fa1'
arm64FPRName('q1', kind)
- when 'ft2'
+ when 'ft2', 'fa2'
arm64FPRName('q2', kind)
- when 'ft3'
+ when 'ft3', 'fa3'
arm64FPRName('q3', kind)
when 'ft4'
arm64FPRName('q4', kind)
when 'ft5'
arm64FPRName('q5', kind)
+ when 'csfr0'
+ arm64FPRName('q8', kind)
+ when 'csfr1'
+ arm64FPRName('q9', kind)
+ when 'csfr2'
+ arm64FPRName('q10', kind)
+ when 'csfr3'
+ arm64FPRName('q11', kind)
+ when 'csfr4'
+ arm64FPRName('q12', kind)
+ when 'csfr5'
+ arm64FPRName('q13', kind)
+ when 'csfr6'
+ arm64FPRName('q14', kind)
+ when 'csfr7'
+ arm64FPRName('q15', kind)
else "Bad register name #{@name} at #{codeOriginString}"
end
end
@@ -195,6 +230,64 @@ end
# Actual lowering code follows.
#
+def arm64LowerMalformedLoadStoreAddresses(list)
+ newList = []
+
+ def isAddressMalformed(operand)
+ operand.is_a? Address and not (-255..4095).include? operand.offset.value
+ end
+
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ if node.opcode =~ /^store/ and isAddressMalformed(node.operands[1])
+ address = node.operands[1]
+ tmp = Tmp.new(codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
+ newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0))], node.annotation)
+ elsif node.opcode =~ /^load/ and isAddressMalformed(node.operands[0])
+ address = node.operands[0]
+ tmp = Tmp.new(codeOrigin, :gpr)
+ newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
+ newList << Instruction.new(node.codeOrigin, node.opcode, [BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0)), node.operands[1]], node.annotation)
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
+
+# Workaround for Cortex-A53 erratum (835769)
+def arm64CortexA53Fix835769(list)
+ newList = []
+ lastOpcodeUnsafe = false
+
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ case node.opcode
+ when /^store/, /^load/
+ # List all macro instructions that can be lowered to a load, store or prefetch ARM64 assembly instruction
+ lastOpcodeUnsafe = true
+ when "muli", "mulp", "mulq", "smulli"
+ # List all macro instructions that can be lowered to a 64-bit multiply-accumulate ARM64 assembly instruction
+ # (defined as one of MADD, MSUB, SMADDL, SMSUBL, UMADDL or UMSUBL).
+ if lastOpcodeUnsafe
+ newList << Instruction.new(node.codeOrigin, "nopCortexA53Fix835769", [])
+ end
+ lastOpcodeUnsafe = false
+ else
+ lastOpcodeUnsafe = false
+ end
+ end
+ newList << node
+ }
+ newList
+end
+
class Sequence
def getModifiedListARM64
result = @list
@@ -202,6 +295,7 @@ class Sequence
result = riscLowerSimpleBranchOps(result)
result = riscLowerHardBranchOps64(result)
result = riscLowerShiftOps(result)
+ result = arm64LowerMalformedLoadStoreAddresses(result)
result = riscLowerMalformedAddresses(result) {
| node, address |
case node.opcode
@@ -250,6 +344,7 @@ class Sequence
result = riscLowerTest(result)
result = assignRegistersToTemporaries(result, :gpr, ARM64_EXTRA_GPRS)
result = assignRegistersToTemporaries(result, :fpr, ARM64_EXTRA_FPRS)
+ result = arm64CortexA53Fix835769(result)
return result
end
end
@@ -367,7 +462,7 @@ def emitARM64MoveImmediate(value, target)
[48, 32, 16, 0].each {
| shift |
currentValue = (value >> shift) & 0xffff
- next if currentValue == (isNegative ? 0xffff : 0) and shift != 0
+ next if currentValue == (isNegative ? 0xffff : 0) and (shift != 0 or !first)
if first
if isNegative
$asm.puts "movn #{target.arm64Operand(:ptr)}, \##{(~currentValue) & 0xffff}, lsl \##{shift}"
@@ -385,6 +480,7 @@ class Instruction
def lowerARM64
$asm.comment codeOriginString
$asm.annotation annotation if $enableInstrAnnotations
+ $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations
case opcode
when 'addi'
@@ -584,22 +680,6 @@ class Instruction
| ops |
$asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
}
- when "popLRAndFP"
- $asm.puts "ldp fp, lr, [sp], #16"
- when "pushLRAndFP"
- $asm.puts "stp fp, lr, [sp, #-16]!"
- when "popCalleeSaves"
- $asm.puts "ldp x28, x27, [sp], #16"
- $asm.puts "ldp x26, x25, [sp], #16"
- $asm.puts "ldp x24, x23, [sp], #16"
- $asm.puts "ldp x22, x21, [sp], #16"
- $asm.puts "ldp x20, x19, [sp], #16"
- when "pushCalleeSaves"
- $asm.puts "stp x20, x19, [sp, #-16]!"
- $asm.puts "stp x22, x21, [sp, #-16]!"
- $asm.puts "stp x24, x23, [sp, #-16]!"
- $asm.puts "stp x26, x25, [sp, #-16]!"
- $asm.puts "stp x28, x27, [sp, #-16]!"
when "move"
if operands[0].immediate?
emitARM64MoveImmediate(operands[0].value, operands[1])
@@ -607,13 +687,13 @@ class Instruction
emitARM64("mov", operands, :ptr)
end
when "sxi2p"
- emitARM64("sxtw", operands, :ptr)
+ emitARM64("sxtw", operands, [:int, :ptr])
when "sxi2q"
- emitARM64("sxtw", operands, :ptr)
+ emitARM64("sxtw", operands, [:int, :ptr])
when "zxi2p"
- emitARM64("uxtw", operands, :ptr)
+ emitARM64("uxtw", operands, [:int, :ptr])
when "zxi2q"
- emitARM64("uxtw", operands, :ptr)
+ emitARM64("uxtw", operands, [:int, :ptr])
when "nop"
$asm.puts "nop"
when "bieq", "bbeq"
@@ -818,6 +898,12 @@ class Instruction
$asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
when "memfence"
$asm.puts "dmb sy"
+ when "pcrtoaddr"
+ $asm.puts "adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}"
+ when "nopCortexA53Fix835769"
+ $asm.putStr("#if CPU(ARM64_CORTEXA53)")
+ $asm.puts "nop"
+ $asm.putStr("#endif")
else
lowerDefault
end
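
[Editor's note] Two of the arm64.rb additions above are whole-list rewriting passes: one splits loads and stores whose immediate offsets fall outside the encodable range, and one implements the Cortex-A53 erratum 835769 workaround by inserting a conditional nop between a memory access and an immediately following 64-bit multiply-accumulate. The sketch below models only the erratum pass over a flat list of [opcode, operands...] arrays; it is a simplified illustration, not the Instruction node API, and the sample program is invented.

    def insert_a53_nops(instructions)
      out = []
      last_was_mem_op = false
      instructions.each do |op, *operands|
        is_mem_op = op.start_with?("load", "store")
        is_mul    = %w[muli mulp mulq smulli].include?(op)
        # A multiply-accumulate right after a load/store needs a separating nop.
        out << ["nopCortexA53Fix835769"] if is_mul && last_was_mem_op
        last_was_mem_op = is_mem_op
        out << [op, *operands]
      end
      out
    end

    program = [["loadp", "[cfr, 8]", "t0"], ["mulq", "t1", "t0"]]
    p insert_a53_nops(program)
    # => [["loadp", "[cfr, 8]", "t0"], ["nopCortexA53Fix835769"], ["mulq", "t1", "t0"]]
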
diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb
index bf2426399..fa0040256 100644
--- a/Source/JavaScriptCore/offlineasm/asm.rb
+++ b/Source/JavaScriptCore/offlineasm/asm.rb
@@ -1,6 +1,6 @@
#!/usr/bin/env ruby
-# Copyright (C) 2011 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -29,6 +29,7 @@ require "config"
require "backends"
require "digest/sha1"
require "offsets"
+require 'optparse'
require "parser"
require "self_hash"
require "settings"
@@ -47,16 +48,32 @@ class Assembler
@numGlobalLabels = 0
@newlineSpacerState = :none
+ @lastlabel = ""
end
-
+
def enterAsm
- @outp.puts "OFFLINE_ASM_BEGIN"
+ @outp.puts "OFFLINE_ASM_BEGIN" if !$emitWinAsm
+
+ if !$emitWinAsm
+ @outp.puts "OFFLINE_ASM_GLOBAL_LABEL(llintPCRangeStart)"
+ else
+ putsProc("llintPCRangeStart", "")
+ putsProcEndIfNeeded
+ end
@state = :asm
+ SourceFile.outputDotFileList(@outp) if $enableDebugAnnotations
end
def leaveAsm
+ putsProcEndIfNeeded if $emitWinAsm
+ if !$emitWinAsm
+ @outp.puts "OFFLINE_ASM_GLOBAL_LABEL(llintPCRangeEnd)"
+ else
+ putsProc("llintPCRangeEnd", "")
+ putsProcEndIfNeeded
+ end
putsLastComment
- @outp.puts "OFFLINE_ASM_END"
+ @outp.puts "OFFLINE_ASM_END" if !$emitWinAsm
@state = :cpp
end
@@ -84,7 +101,7 @@ class Assembler
result += "#{@codeOrigin}"
end
if result != ""
- result = "// " + result
+ result = $commentPrefix + " " + result
end
# Reset all the components that we've just sent to be dumped.
@@ -137,7 +154,11 @@ class Assembler
def puts(*line)
raise unless @state == :asm
- @outp.puts(formatDump(" \"\\t" + line.join('') + "\\n\"", lastComment))
+ if !$emitWinAsm
+ @outp.puts(formatDump(" \"\\t" + line.join('') + "\\n\"", lastComment))
+ else
+ @outp.puts(formatDump(" " + line.join(''), lastComment))
+ end
end
def print(line)
@@ -152,15 +173,45 @@ class Assembler
end
end
- def putsLabel(labelName)
+ def putsProc(label, comment)
+ raise unless $emitWinAsm
+ @outp.puts(formatDump("#{label} PROC PUBLIC", comment))
+ @lastlabel = label
+ end
+
+ def putsProcEndIfNeeded
+ raise unless $emitWinAsm
+ if @lastlabel != ""
+ @outp.puts("#{@lastlabel} ENDP")
+ end
+ @lastlabel = ""
+ end
+
+ def putsLabel(labelName, isGlobal)
raise unless @state == :asm
@numGlobalLabels += 1
+ putsProcEndIfNeeded if $emitWinAsm and isGlobal
putsNewlineSpacerIfAppropriate(:global)
@internalComment = $enableLabelCountComments ? "Global Label #{@numGlobalLabels}" : nil
- if /\Allint_op_/.match(labelName)
- @outp.puts(formatDump("OFFLINE_ASM_OPCODE_LABEL(op_#{$~.post_match})", lastComment))
+ if isGlobal
+ if !$emitWinAsm
+ @outp.puts(formatDump("OFFLINE_ASM_GLOBAL_LABEL(#{labelName})", lastComment))
+ else
+ putsProc(labelName, lastComment)
+ end
+ elsif /\Allint_op_/.match(labelName)
+ if !$emitWinAsm
+ @outp.puts(formatDump("OFFLINE_ASM_OPCODE_LABEL(op_#{$~.post_match})", lastComment))
+ else
+ label = "llint_" + "op_#{$~.post_match}"
+ @outp.puts(formatDump(" _#{label}:", lastComment))
+ end
else
- @outp.puts(formatDump("OFFLINE_ASM_GLUE_LABEL(#{labelName})", lastComment))
+ if !$emitWinAsm
+ @outp.puts(formatDump("OFFLINE_ASM_GLUE_LABEL(#{labelName})", lastComment))
+ else
+ @outp.puts(formatDump(" _#{labelName}:", lastComment))
+ end
end
@newlineSpacerState = :none # After a global label, we can use another spacer.
end
@@ -170,15 +221,35 @@ class Assembler
@numLocalLabels += 1
@outp.puts("\n")
@internalComment = $enableLabelCountComments ? "Local Label #{@numLocalLabels}" : nil
- @outp.puts(formatDump(" OFFLINE_ASM_LOCAL_LABEL(#{labelName})", lastComment))
+ if !$emitWinAsm
+ @outp.puts(formatDump(" OFFLINE_ASM_LOCAL_LABEL(#{labelName})", lastComment))
+ else
+ @outp.puts(formatDump(" #{labelName}:", lastComment))
+ end
end
-
+
+ def self.externLabelReference(labelName)
+ if !$emitWinAsm
+ "\" LOCAL_REFERENCE(#{labelName}) \""
+ else
+ "#{labelName}"
+ end
+ end
+
def self.labelReference(labelName)
- "\" LOCAL_REFERENCE(#{labelName}) \""
+ if !$emitWinAsm
+ "\" LOCAL_LABEL_STRING(#{labelName}) \""
+ else
+ "_#{labelName}"
+ end
end
def self.localLabelReference(labelName)
- "\" LOCAL_LABEL_STRING(#{labelName}) \""
+ if !$emitWinAsm
+ "\" LOCAL_LABEL_STRING(#{labelName}) \""
+ else
+ "#{labelName}"
+ end
end
def self.cLabelReference(labelName)
@@ -200,13 +271,13 @@ class Assembler
@commentState = :one
when :one
if $enableCodeOriginComments
- @outp.puts " // #{@codeOrigin}"
- @outp.puts " // #{text}"
+ @outp.puts " " + $commentPrefix + " #{@codeOrigin}"
+ @outp.puts " " + $commentPrefix + " #{text}"
end
@codeOrigin = nil
@commentState = :many
when :many
- @outp.puts "// #{text}" if $enableCodeOriginComments
+ @outp.puts $commentPrefix + " #{text}" if $enableCodeOriginComments
else
raise
end
@@ -215,16 +286,30 @@ class Assembler
def comment(text)
@comment = text
end
+
def annotation(text)
@annotation = text
end
+
+ def debugAnnotation(text)
+ @outp.puts text
+ end
end
+IncludeFile.processIncludeOptions()
+
asmFile = ARGV.shift
offsetsFile = ARGV.shift
outputFlnm = ARGV.shift
-$stderr.puts "offlineasm: Parsing #{asmFile} and #{offsetsFile} and creating assembly file #{outputFlnm}."
+$options = {}
+OptionParser.new do |opts|
+ opts.banner = "Usage: asm.rb asmFile offsetsFile outputFileName [--assembler=<ASM>]"
+ # This option is currently only used to specify the masm assembler
+ opts.on("--assembler=[ASM]", "Specify an assembler to use.") do |assembler|
+ $options[:assembler] = assembler
+ end
+end.parse!
begin
configurationList = offsetsAndConfigurationIndex(offsetsFile)
@@ -233,10 +318,19 @@ rescue MissingMagicValuesException
exit 0
end
+# The MS compiler doesn't accept DWARF2 debug annotations.
+if isMSVC
+ $enableDebugAnnotations = false
+end
+
+$emitWinAsm = isMSVC ? outputFlnm.index(".asm") != nil : false
+$commentPrefix = $emitWinAsm ? ";" : "//"
+
inputHash =
- "// offlineasm input hash: " + parseHash(asmFile) +
+ $commentPrefix + " offlineasm input hash: " + parseHash(asmFile) +
" " + Digest::SHA1.hexdigest(configurationList.map{|v| (v[0] + [v[1]]).join(' ')}.join(' ')) +
- " " + selfHash
+ " " + selfHash +
+ " " + Digest::SHA1.hexdigest($options.has_key?(:assembler) ? $options[:assembler] : "")
if FileTest.exist? outputFlnm
File.open(outputFlnm, "r") {
@@ -253,17 +347,25 @@ File.open(outputFlnm, "w") {
| outp |
$output = outp
$output.puts inputHash
-
+
$asm = Assembler.new($output)
ast = parse(asmFile)
-
+
configurationList.each {
| configuration |
offsetsList = configuration[0]
configIndex = configuration[1]
forSettings(computeSettingsCombinations(ast)[configIndex], ast) {
| concreteSettings, lowLevelAST, backend |
+
+ # There could be multiple backends we are generating for, but the C_LOOP is
+ # always by itself so this check to turn off $enableDebugAnnotations won't
+ # affect the generation for any other backend.
+ if backend == "C_LOOP"
+ $enableDebugAnnotations = false
+ end
+
lowLevelAST = lowLevelAST.resolve(*buildOffsetsMap(lowLevelAST, offsetsList))
lowLevelAST.validate
emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) {
@@ -274,6 +376,3 @@ File.open(outputFlnm, "w") {
}
}
}
-
-$stderr.puts "offlineasm: Assembly file #{outputFlnm} successfully generated."
-
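
[Editor's note] The asm.rb changes above make the Assembler emit either GNU-style inline-assembly strings destined for LLIntAssembly.h (quoted C string literals, "//" comments) or plain MASM source with ";" comments when the output file is a Windows .asm file. A minimal sketch of that dual formatting, with an invented input line, not the actual Assembler class:

    def emit_line(line, comment, win_asm: false)
      prefix = win_asm ? ";" : "//"
      # GNU output is a quoted, tab-indented string for inclusion in a C header;
      # MASM output is written verbatim.
      body = win_asm ? "    #{line}" : "    \"\\t#{line}\\n\""
      comment ? "#{body} #{prefix} #{comment}" : body
    end

    puts emit_line("ret", "return to caller")
    # =>     "\tret\n" // return to caller
    puts emit_line("ret", "return to caller", win_asm: true)
    # =>     ret ; return to caller
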
diff --git a/Source/JavaScriptCore/offlineasm/ast.rb b/Source/JavaScriptCore/offlineasm/ast.rb
index 74bccff56..1241b7fe5 100644
--- a/Source/JavaScriptCore/offlineasm/ast.rb
+++ b/Source/JavaScriptCore/offlineasm/ast.rb
@@ -229,6 +229,10 @@ class Immediate < NoChildren
true
end
+ def immediateOperand?
+ true
+ end
+
def register?
false
end
@@ -255,6 +259,10 @@ class AddImmediates < Node
"(#{left.dump} + #{right.dump})"
end
+ def value
+ "#{left.value} + #{right.value}"
+ end
+
def address?
false
end
@@ -267,6 +275,10 @@ class AddImmediates < Node
true
end
+ def immediateOperand?
+ true
+ end
+
def register?
false
end
@@ -293,6 +305,10 @@ class SubImmediates < Node
"(#{left.dump} - #{right.dump})"
end
+ def value
+ "#{left.value} - #{right.value}"
+ end
+
def address?
false
end
@@ -305,6 +321,10 @@ class SubImmediates < Node
true
end
+ def immediateOperand?
+ true
+ end
+
def register?
false
end
@@ -343,6 +363,10 @@ class MulImmediates < Node
true
end
+ def immediateOperand?
+ false
+ end
+
def register?
false
end
@@ -380,6 +404,10 @@ class NegImmediate < Node
true
end
+ def immediateOperand?
+ false
+ end
+
def register?
false
end
@@ -418,6 +446,10 @@ class OrImmediates < Node
true
end
+ def immediateOperand?
+ false
+ end
+
def register?
false
end
@@ -456,6 +488,10 @@ class AndImmediates < Node
true
end
+ def immediateOperand?
+ false
+ end
+
def register?
false
end
@@ -494,6 +530,10 @@ class XorImmediates < Node
true
end
+ def immediateOperand?
+ false
+ end
+
def register?
false
end
@@ -531,6 +571,48 @@ class BitnotImmediate < Node
true
end
+ def immediateOperand?
+ false
+ end
+
+ def register?
+ false
+ end
+end
+
+class StringLiteral < NoChildren
+ attr_reader :value
+
+ def initialize(codeOrigin, value)
+ super(codeOrigin)
+ @value = value[1..-2]
+ raise "Bad string literal #{value.inspect} at #{codeOriginString}" unless value.is_a? String
+ end
+
+ def dump
+ "#{value}"
+ end
+
+ def ==(other)
+ other.is_a? StringLiteral and other.value == @value
+ end
+
+ def address?
+ false
+ end
+
+ def label?
+ false
+ end
+
+ def immediate?
+ false
+ end
+
+ def immediateOperand?
+ false
+ end
+
def register?
false
end
@@ -607,6 +689,10 @@ class FPRegisterID < NoChildren
false
end
+ def immediateOperand?
+ false
+ end
+
def register?
true
end
@@ -629,6 +715,10 @@ class SpecialRegister < NoChildren
false
end
+ def immediateOperand?
+ false
+ end
+
def register?
true
end
@@ -699,6 +789,10 @@ class Address < Node
false
end
+ def immediateOperand?
+ true
+ end
+
def register?
false
end
@@ -759,6 +853,10 @@ class BaseIndex < Node
false
end
+ def immediateOperand?
+ false
+ end
+
def register?
false
end
@@ -792,6 +890,10 @@ class AbsoluteAddress < NoChildren
false
end
+ def immediateOperand?
+ true
+ end
+
def register?
false
end
@@ -825,6 +927,8 @@ class Instruction < Node
$asm.putLocalAnnotation
when "globalAnnotation"
$asm.putGlobalAnnotation
+ when "emit"
+ $asm.puts "#{operands[0].dump}"
else
raise "Unhandled opcode #{opcode} at #{codeOriginString}"
end
@@ -864,6 +968,7 @@ class ConstDecl < Node
end
$labelMapping = {}
+$referencedExternLabels = Array.new
class Label < NoChildren
attr_reader :name
@@ -871,17 +976,61 @@ class Label < NoChildren
def initialize(codeOrigin, name)
super(codeOrigin)
@name = name
+ @extern = true
+ @global = false
end
- def self.forName(codeOrigin, name)
+ def self.forName(codeOrigin, name, definedInFile = false)
if $labelMapping[name]
raise "Label name collision: #{name}" unless $labelMapping[name].is_a? Label
else
$labelMapping[name] = Label.new(codeOrigin, name)
end
+ if definedInFile
+ $labelMapping[name].clearExtern()
+ end
$labelMapping[name]
end
-
+
+ def self.setAsGlobal(codeOrigin, name)
+ if $labelMapping[name]
+ label = $labelMapping[name]
+ raise "Label: #{name} declared global multiple times" unless not label.global?
+ label.setGlobal()
+ else
+ newLabel = Label.new(codeOrigin, name)
+ newLabel.setGlobal()
+ $labelMapping[name] = newLabel
+ end
+ end
+
+ def self.resetReferenced
+ $referencedExternLabels = Array.new
+ end
+
+ def self.forReferencedExtern()
+ $referencedExternLabels.each {
+ | label |
+ yield "#{label.name}"
+ }
+ end
+
+ def clearExtern
+ @extern = false
+ end
+
+ def extern?
+ @extern
+ end
+
+ def setGlobal
+ @global = true
+ end
+
+ def global?
+ @global
+ end
+
def dump
"#{name}:"
end
@@ -949,10 +1098,24 @@ class LabelReference < Node
label.name
end
+ def extern?
+ $labelMapping[name].is_a? Label and $labelMapping[name].extern?
+ end
+
+ def used
+ if !$referencedExternLabels.include?(@label) and extern?
+ $referencedExternLabels.push(@label)
+ end
+ end
+
def dump
label.name
end
+ def value
+ asmLabel()
+ end
+
def address?
false
end
@@ -964,6 +1127,10 @@ class LabelReference < Node
def immediate?
false
end
+
+ def immediateOperand?
+ true
+ end
end
class LocalLabelReference < NoChildren
@@ -989,6 +1156,10 @@ class LocalLabelReference < NoChildren
def dump
label.name
end
+
+ def value
+ asmLabel()
+ end
def address?
false
@@ -1001,6 +1172,10 @@ class LocalLabelReference < NoChildren
def immediate?
false
end
+
+ def immediateOperand?
+ true
+ end
end
class Sequence < Node
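
[Editor's note] ast.rb gains a StringLiteral node (feeding the new "emit" opcode, which prints its operand verbatim into the output) plus an immediateOperand? predicate across the operand classes. The toy class below illustrates only the quote stripping done in StringLiteral#initialize; it is not the real node class, and the sample token is invented.

    class ToyStringLiteral
      attr_reader :value

      def initialize(token)
        raise "not a string token: #{token.inspect}" unless token =~ /\A".*"\z/
        @value = token[1..-2]   # drop the surrounding double quotes, as ast.rb does
      end

      def dump
        value
      end
    end

    literal = ToyStringLiteral.new('"dmb ishst"')
    puts literal.dump           # => dmb ishst  (what "emit" would print verbatim)
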
diff --git a/Source/JavaScriptCore/offlineasm/backends.rb b/Source/JavaScriptCore/offlineasm/backends.rb
index bf01b59b5..ba1633591 100644
--- a/Source/JavaScriptCore/offlineasm/backends.rb
+++ b/Source/JavaScriptCore/offlineasm/backends.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2011 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -27,19 +27,19 @@ require "arm64"
require "ast"
require "x86"
require "mips"
-require "sh4"
require "cloop"
BACKENDS =
[
"X86",
+ "X86_WIN",
"X86_64",
+ "X86_64_WIN",
"ARM",
"ARMv7",
"ARMv7_TRADITIONAL",
"ARM64",
"MIPS",
- "SH4",
"C_LOOP"
]
@@ -51,26 +51,67 @@ BACKENDS =
WORKING_BACKENDS =
[
"X86",
+ "X86_WIN",
"X86_64",
+ "X86_64_WIN",
"ARM",
"ARMv7",
"ARMv7_TRADITIONAL",
"ARM64",
"MIPS",
- "SH4",
"C_LOOP"
]
BACKEND_PATTERN = Regexp.new('\\A(' + BACKENDS.join(')|(') + ')\\Z')
+$allBackends = {}
+$validBackends = {}
+BACKENDS.each {
+ | backend |
+ $validBackends[backend] = true
+ $allBackends[backend] = true
+}
+
+def includeOnlyBackends(list)
+ newValidBackends = {}
+ list.each {
+ | backend |
+ if $validBackends[backend]
+ newValidBackends[backend] = true
+ end
+ }
+ $validBackends = newValidBackends
+end
+
+def isBackend?(backend)
+ $allBackends[backend]
+end
+
+def isValidBackend?(backend)
+ $validBackends[backend]
+end
+
+def validBackends
+ $validBackends.keys
+end
+
+class LoweringError < StandardError
+ attr_reader :originString
+
+ def initialize(e, originString)
+ super "#{e} (due to #{originString})"
+ @originString = originString
+ set_backtrace e.backtrace
+ end
+end
+
class Node
def lower(name)
begin
$activeBackend = name
send("lower" + name)
rescue => e
- e.message << "At #{codeOriginString}"
- raise e
+ raise LoweringError.new(e, codeOriginString)
end
end
end
@@ -79,7 +120,8 @@ end
class Label
def lower(name)
- $asm.putsLabel(self.name[1..-1])
+ $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations
+ $asm.putsLabel(self.name[1..-1], @global)
end
end
@@ -91,8 +133,13 @@ end
class LabelReference
def asmLabel
- Assembler.labelReference(name[1..-1])
+ if extern?
+ Assembler.externLabelReference(name[1..-1])
+ else
+ Assembler.labelReference(name[1..-1])
+ end
end
+
def cLabel
Assembler.cLabelReference(name[1..-1])
end
@@ -102,6 +149,7 @@ class LocalLabelReference
def asmLabel
Assembler.localLabelReference("_offlineasm_"+name[1..-1])
end
+
def cLabel
Assembler.cLocalLabelReference("_offlineasm_"+name[1..-1])
end
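
[Editor's note] backends.rb now distinguishes the set of all known backends from the set of valid (requested) backends, and wraps lowering failures in a LoweringError that records the code origin. The fragment below models only the filtering step; the list mirrors the BACKENDS constant above, and the requested list is an example (note that SH4, which this patch removes, is silently dropped).

    ALL_BACKENDS = %w[X86 X86_WIN X86_64 X86_64_WIN ARM ARMv7 ARMv7_TRADITIONAL ARM64 MIPS C_LOOP]

    def include_only_backends(requested, all = ALL_BACKENDS)
      # Keep only requested backends that the generator actually knows about.
      requested.select { |backend| all.include?(backend) }
    end

    p include_only_backends(%w[X86_64 ARM64 SH4])   # => ["X86_64", "ARM64"]
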
diff --git a/Source/JavaScriptCore/offlineasm/cloop.rb b/Source/JavaScriptCore/offlineasm/cloop.rb
index 852e864e9..7e939480e 100644
--- a/Source/JavaScriptCore/offlineasm/cloop.rb
+++ b/Source/JavaScriptCore/offlineasm/cloop.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -68,34 +68,22 @@ C_LOOP_SCRATCH_FPR = SpecialRegister.new("d6")
class RegisterID
def clDump
case name
- when "a0"
- "a0"
- when "a1"
- "a1"
- when "a2"
- "a2"
- when "a3"
- "a3"
- when "a4"
- "a4"
- when "a5"
- "a5"
- when "a6"
- "a6"
- when "a6"
- "a6"
- when "t0"
+ # The cloop is modelled on the ARM implementation. Hence, the a0-a3
+ # registers are aliases for r0-r3 i.e. t0-t3 in our case.
+ when "t0", "a0", "r0"
"t0"
- when "t1"
+ when "t1", "a1", "r1"
"t1"
- when "t2"
+ when "t2", "a2"
"t2"
- when "t3"
+ when "t3", "a3"
"t3"
when "t4"
- "rPC"
- when "t6"
- "rBasePC"
+ "pc"
+ when "t5"
+ "t5"
+ when "csr0"
+ "pcBase"
when "csr1"
"tagTypeNumber"
when "csr2"
@@ -103,7 +91,7 @@ class RegisterID
when "cfr"
"cfr"
when "lr"
- "rRetVPC"
+ "lr"
when "sp"
"sp"
else
@@ -555,14 +543,18 @@ end
# operands: callTarget, currentFrame, currentPC
def cloopEmitCallSlowPath(operands)
$asm.putc "{"
- $asm.putc " ExecState* exec = CAST<ExecState*>(#{operands[1].clValue(:voidPtr)});"
- $asm.putc " Instruction* pc = CAST<Instruction*>(#{operands[2].clValue(:voidPtr)});"
- $asm.putc " SlowPathReturnType result = #{operands[0].cLabel}(exec, pc);"
- $asm.putc " decodeResult(result, t0.instruction, t1.vp);"
+ $asm.putc " SlowPathReturnType result = #{operands[0].cLabel}(#{operands[1].clDump}, #{operands[2].clDump});"
+ $asm.putc " decodeResult(result, t0.vp, t1.vp);"
$asm.putc "}"
end
+def cloopEmitCallSlowPathVoid(operands)
+ $asm.putc "#{operands[0].cLabel}(#{operands[1].clDump}, #{operands[2].clDump});"
+end
+
class Instruction
+ @@didReturnFromJSLabelCounter = 0
+
def lowerC_LOOP
$asm.codeOrigin codeOriginString if $enableCodeOriginComments
$asm.annotation annotation if $enableInstrAnnotations && (opcode != "cloopDo")
@@ -876,7 +868,8 @@ class Instruction
when "break"
$asm.putc "CRASH(); // break instruction not implemented."
when "ret"
- $asm.putc "goto doReturnHelper;"
+ $asm.putc "opcode = lr.opcode;"
+ $asm.putc "DISPATCH_OPCODE();"
when "cbeq"
cloopEmitCompareAndSet(operands, :uint8, "==")
@@ -1097,8 +1090,17 @@ class Instruction
cloopEmitOpAndBranch(operands, "|", :int32, "!= 0")
when "memfence"
- when "pushCalleeSaves"
- when "popCalleeSaves"
+
+ when "push"
+ operands.each {
+ | op |
+ $asm.putc "PUSH(#{op.clDump});"
+ }
+ when "pop"
+ operands.each {
+ | op |
+ $asm.putc "POP(#{op.clDump});"
+ }
# A convenience and compact call to crash because we don't want to use
@@ -1113,8 +1115,11 @@ class Instruction
# use of the call instruction. Instead, we just implement JS calls
# as an opcode dispatch.
when "cloopCallJSFunction"
+ @@didReturnFromJSLabelCounter += 1
+ $asm.putc "lr.opcode = getOpcode(llint_cloop_did_return_from_js_#{@@didReturnFromJSLabelCounter});"
$asm.putc "opcode = #{operands[0].clValue(:opcode)};"
$asm.putc "DISPATCH_OPCODE();"
+ $asm.putsLabel("llint_cloop_did_return_from_js_#{@@didReturnFromJSLabelCounter}", false)
# We can't do generic function calls with an arbitrary set of args, but
# fortunately we don't have to here. All native function calls always
@@ -1135,6 +1140,9 @@ class Instruction
when "cloopCallSlowPath"
cloopEmitCallSlowPath(operands)
+ when "cloopCallSlowPathVoid"
+ cloopEmitCallSlowPathVoid(operands)
+
# For debugging only. This is used to insert instrumentation into the
# generated LLIntAssembly.h during llint development only. Do not use
# for production code.
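
[Editor's note] The cloop.rb changes above replace the old return helper with an explicit opcode dispatch: cloopCallJSFunction stores the opcode of a synthesized llint_cloop_did_return_from_js_N label into lr before dispatching to the callee, and "ret" dispatches through lr rather than returning natively. The toy interpreter loop below models that protocol with invented label names; it is not the generated C code.

    dispatch = {
      callee: ->(st) {                 # body of the called JS function
        puts "in callee"
        st[:next] = st[:lr]            # "ret": jump to the opcode saved in lr
      },
      did_return_from_js: ->(st) {     # synthesized return label in the caller
        puts "back in caller"
        st[:next] = nil
      },
    }

    st = { lr: :did_return_from_js, next: :callee }   # "call": set lr, then dispatch
    while (op = st[:next])
      dispatch[op].call(st)
    end
    # => in callee
    #    back in caller
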
diff --git a/Source/JavaScriptCore/offlineasm/config.rb b/Source/JavaScriptCore/offlineasm/config.rb
index 4c86eeceb..88a0cf5e8 100644
--- a/Source/JavaScriptCore/offlineasm/config.rb
+++ b/Source/JavaScriptCore/offlineasm/config.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -55,3 +55,26 @@ $enableCodeOriginComments = true
# ...
#
$enableInstrAnnotations = false
+
+# Turns on generation of DWARF2 debug annotions for file and line numbers.
+# Allows for source level debuging of the original .asm files in a debugger.
+#
+def shouldEnableDebugAnnotations()
+ if ENV['GCC_VERSION'] =~ /\.clang\./ and ENV['DT_TOOLCHAIN_DIR'] =~ /Xcode.app/
+ clangVersionOut = %x`xcrun clang --version`
+ if ($? == 0)
+ # clang version 800.0.12 or higher is required for debug annotations
+ versionMatch = /clang-(\d{3,}).(\d{1,3}).(\d{1,3})/.match(clangVersionOut)
+ if versionMatch.length >= 4
+ totalVersion = versionMatch[1].to_i * 1000000 + versionMatch[2].to_i * 1000 + versionMatch[3].to_i
+ if totalVersion >= 800000012
+ return true
+ end
+ end
+ end
+ end
+
+ false
+end
+
+$enableDebugAnnotations = shouldEnableDebugAnnotations()
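
[Editor's note] config.rb now enables DWARF debug annotations only when a sufficiently new Apple clang is detected. The helper below is a standalone version of just the version comparison in shouldEnableDebugAnnotations (build number at least 800.0.12); unlike the original it does not consult the environment or run xcrun, and the sample version strings are illustrative.

    def clang_supports_debug_annotations?(version_output)
      m = /clang-(\d{3,})\.(\d{1,3})\.(\d{1,3})/.match(version_output)
      return false unless m
      total = m[1].to_i * 1_000_000 + m[2].to_i * 1_000 + m[3].to_i
      total >= 800_000_012          # clang-800.0.12 is the minimum
    end

    puts clang_supports_debug_annotations?("Apple LLVM version 8.0.0 (clang-800.0.42.1)")  # => true
    puts clang_supports_debug_annotations?("Apple LLVM version 7.3.0 (clang-703.0.31)")    # => false
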
diff --git a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
index 81c28632c..aafa93416 100644
--- a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
+++ b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
@@ -34,10 +34,16 @@ require "self_hash"
require "settings"
require "transform"
+IncludeFile.processIncludeOptions()
+
inputFlnm = ARGV.shift
outputFlnm = ARGV.shift
-$stderr.puts "offlineasm: Parsing #{inputFlnm} and creating offset extractor #{outputFlnm}."
+validBackends = ARGV.shift
+if validBackends
+ $stderr.puts "Only dealing with backends: #{validBackends}"
+ includeOnlyBackends(validBackends.split(","))
+end
def emitMagicNumber
OFFSET_MAGIC_NUMBERS.each {
@@ -141,6 +147,3 @@ File.open(outputFlnm, "w") {
}
outp.puts "};"
}
-
-$stderr.puts "offlineasm: offset extractor #{outputFlnm} successfully generated."
-
diff --git a/Source/JavaScriptCore/offlineasm/instructions.rb b/Source/JavaScriptCore/offlineasm/instructions.rb
index 5a3463fc9..bbfce7193 100644
--- a/Source/JavaScriptCore/offlineasm/instructions.rb
+++ b/Source/JavaScriptCore/offlineasm/instructions.rb
@@ -22,6 +22,7 @@
# THE POSSIBILITY OF SUCH DAMAGE.
require "config"
+require "set"
# Interesting invariant, which we take advantage of: branching instructions
# always begin with "b", and no non-branching instructions begin with "b".
@@ -29,6 +30,7 @@ require "config"
MACRO_INSTRUCTIONS =
[
+ "emit",
"addi",
"andi",
"lshifti",
@@ -247,8 +249,6 @@ MACRO_INSTRUCTIONS =
"bnz",
"leai",
"leap",
- "pushCalleeSaves",
- "popCalleeSaves",
"memfence"
]
@@ -258,10 +258,16 @@ X86_INSTRUCTIONS =
"idivi"
]
+ARM_INSTRUCTIONS =
+ [
+ "clrbp",
+ "mvlbl"
+ ]
+
ARM64_INSTRUCTIONS =
[
- "popLRAndFP", # ARM64 requires registers to be pushed and popped in pairs,
- "pushLRAndFP" # therefore we do LR (link register) and FP (frame pointer) together.
+ "pcrtoaddr", # Address from PC relative offset - adr instruction
+ "nopFixCortexA53Err835769" # nop on Cortex-A53 (nothing otherwise)
]
RISC_INSTRUCTIONS =
@@ -275,34 +281,22 @@ RISC_INSTRUCTIONS =
MIPS_INSTRUCTIONS =
[
+ "la",
"movz",
"movn",
+ "setcallreg",
"slt",
"sltu",
- "pichdr",
- "pichdrra"
- ]
-
-SH4_INSTRUCTIONS =
- [
- "shllx",
- "shlrx",
- "shld",
- "shad",
- "bdnan",
- "loaddReversedAndIncrementAddress",
- "storedReversedAndDecrementAddress",
- "ldspr",
- "stspr",
- "setargs"
+ "pichdr"
]
CXX_INSTRUCTIONS =
[
- "cloopCrash", # no operands
- "cloopCallJSFunction", # operands: callee
- "cloopCallNative", # operands: callee
- "cloopCallSlowPath", # operands: callTarget, currentFrame, currentPC
+ "cloopCrash", # no operands
+ "cloopCallJSFunction", # operands: callee
+ "cloopCallNative", # operands: callee
+ "cloopCallSlowPath", # operands: callTarget, currentFrame, currentPC
+ "cloopCallSlowPathVoid", # operands: callTarget, currentFrame, currentPC
# For debugging only:
# Takes no operands but simply emits whatever follows in // comments as
@@ -313,9 +307,9 @@ CXX_INSTRUCTIONS =
"cloopDo", # no operands
]
-INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARM64_INSTRUCTIONS + RISC_INSTRUCTIONS + MIPS_INSTRUCTIONS + SH4_INSTRUCTIONS + CXX_INSTRUCTIONS
+INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARM_INSTRUCTIONS + ARM64_INSTRUCTIONS + RISC_INSTRUCTIONS + MIPS_INSTRUCTIONS + CXX_INSTRUCTIONS
-INSTRUCTION_PATTERN = Regexp.new('\\A((' + INSTRUCTIONS.join(')|(') + '))\\Z')
+INSTRUCTION_SET = INSTRUCTIONS.to_set
def isBranch(instruction)
instruction =~ /^b/
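
[Editor's note] instructions.rb stops building one giant alternation regex (INSTRUCTION_PATTERN) and instead keeps the opcode list in a Set, so the parser's isInstruction check becomes a hash lookup. A minimal before/after illustration with a small invented subset of the opcode list:

    require "set"

    opcodes = %w[addi move jmp call ret mvlbl pcrtoaddr]

    # Old approach: one big regex built from the whole list.
    pattern = Regexp.new('\\A((' + opcodes.join(')|(') + '))\\Z')
    # New approach: constant-time set membership.
    opcode_set = opcodes.to_set

    puts(pattern.match("mvlbl") ? "regex hit" : "regex miss")   # => regex hit
    puts opcode_set.member?("mvlbl")                            # => true
    puts opcode_set.member?("popCalleeSaves")                   # => false (opcode removed by this patch)
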
diff --git a/Source/JavaScriptCore/offlineasm/mips.rb b/Source/JavaScriptCore/offlineasm/mips.rb
index 686f58f16..53bb9c143 100644
--- a/Source/JavaScriptCore/offlineasm/mips.rb
+++ b/Source/JavaScriptCore/offlineasm/mips.rb
@@ -24,6 +24,43 @@
require 'risc'
+# GPR conventions, to match the baseline JIT
+#
+# $a0 => a0
+# $a1 => a1
+# $a2 => a2
+# $a3 => a3
+# $v0 => t0, r0
+# $v1 => t1, r1
+# $t0 => (scratch)
+# $t1 => (scratch)
+# $t2 => t2
+# $t3 => t3
+# $t4 => t4
+# $t5 => t5
+# $t6 => t6
+# $t7 => (scratch)
+# $t8 => (scratch)
+# $t9 => (stores the callee of a call opcode)
+# $gp => (globals)
+# $s4 => (callee-save used to preserve $gp across calls)
+# $ra => lr
+# $sp => sp
+# $fp => cfr
+#
+# FPR conventions, to match the baseline JIT
+# We don't have fa2 or fa3!
+# $f0 => ft0, fr
+# $f2 => ft1
+# $f4 => ft2
+# $f6 => ft3
+# $f8 => ft4
+# $f10 => ft5
+# $f12 => fa0
+# $f14 => fa1
+# $f16 => (scratch)
+# $f18 => (scratch)
+
class Assembler
def putStr(str)
@outp.puts str
@@ -57,12 +94,10 @@ class SpecialRegister < NoChildren
end
end
-MIPS_TEMP_GPRS = [SpecialRegister.new("$t5"), SpecialRegister.new("$t6"), SpecialRegister.new("$t7"),
- SpecialRegister.new("$t8")]
+MIPS_TEMP_GPRS = [SpecialRegister.new("$t0"), SpecialRegister.new("$t1"), SpecialRegister.new("$t7"), SpecialRegister.new("$t8")]
MIPS_ZERO_REG = SpecialRegister.new("$zero")
MIPS_GP_REG = SpecialRegister.new("$gp")
MIPS_GPSAVE_REG = SpecialRegister.new("$s4")
-MIPS_JUMP_REG = SpecialRegister.new("$ra")
MIPS_CALL_REG = SpecialRegister.new("$t9")
MIPS_TEMP_FPRS = [SpecialRegister.new("$f16")]
MIPS_SCRATCH_FPR = SpecialRegister.new("$f18")
@@ -86,24 +121,18 @@ class RegisterID
"$a2"
when "a3"
"$a3"
- when "r0", "t0"
+ when "t0", "r0"
"$v0"
- when "r1", "t1"
+ when "t1", "r1"
"$v1"
when "t2"
"$t2"
when "t3"
- "$s3"
- when "t4" # PC reg in llint
- "$s2"
+ "$t3"
+ when "t4"
+ "$t4"
when "t5"
"$t5"
- when "t6"
- "$t6"
- when "t7"
- "$t7"
- when "t8"
- "$t8"
when "cfr"
"$fp"
when "lr"
@@ -162,6 +191,70 @@ class AbsoluteAddress
end
#
+# Negate condition of branches to labels.
+#
+
+class Instruction
+ def mipsNegateCondition(list)
+ /^(b(add|sub|or|mul|t)?)([ipb])/.match(opcode)
+ case $~.post_match
+ when "eq"
+ op = "neq"
+ when "neq"
+ op = "eq"
+ when "z"
+ op = "nz"
+ when "nz"
+ op = "z"
+ when "gt"
+ op = "lteq"
+ when "gteq"
+ op = "lt"
+ when "lt"
+ op = "gteq"
+ when "lteq"
+ op = "gt"
+ when "a"
+ op = "beq"
+ when "b"
+ op = "aeq"
+ when "aeq"
+ op = "b"
+ when "beq"
+ op = "a"
+ else
+ raise "Can't negate #{opcode} branch."
+ end
+ noBranch = LocalLabel.unique("nobranch")
+ noBranchRef = LocalLabelReference.new(codeOrigin, noBranch)
+ toRef = operands[-1]
+ list << Instruction.new(codeOrigin, "#{$1}#{$3}#{op}", operands[0..-2].push(noBranchRef), annotation)
+ list << Instruction.new(codeOrigin, "la", [toRef, MIPS_CALL_REG])
+ list << Instruction.new(codeOrigin, "jmp", [MIPS_CALL_REG])
+ list << noBranch
+ end
+end
+
+def mipsLowerFarBranchOps(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ annotation = node.annotation
+ case node.opcode
+ when /^b(add|sub|or|mul|t)?([ipb])/
+ if node.operands[-1].is_a? LabelReference
+ node.mipsNegateCondition(newList)
+ next
+ end
+ end
+ end
+ newList << node
+ }
+ newList
+end
+
+#
# Lower 'and' masked branches
#
@@ -172,9 +265,10 @@ def lowerMIPSCondBranch(list, condOp, node)
[node.operands[0], MIPS_ZERO_REG, node.operands[-1]],
node.annotation)
elsif node.operands.size == 3
+ tl = condOp[-1, 1]
tmp = Tmp.new(node.codeOrigin, :gpr)
list << Instruction.new(node.codeOrigin,
- "andi",
+ "and" + tl,
[node.operands[0], node.operands[1], tmp],
node.annotation)
list << Instruction.new(node.codeOrigin,
@@ -431,6 +525,8 @@ def mipsLowerMisplacedImmediates(list)
end
when /^(addi|subi)/
newList << node.riscLowerMalformedImmediatesRecurse(newList, -0x7fff..0x7fff)
+ when "andi", "andp", "ori", "orp", "xori", "xorp"
+ newList << node.riscLowerMalformedImmediatesRecurse(newList, 0..0xffff)
else
newList << node
end
@@ -445,6 +541,30 @@ end
# Specialization of lowering of misplaced addresses.
#
+class LocalLabelReference
+ def register?
+ false
+ end
+end
+
+def mipsAsRegister(preList, postList, operand, needRestore)
+ tmp = MIPS_CALL_REG
+ if operand.address?
+ preList << Instruction.new(operand.codeOrigin, "loadp", [operand, MIPS_CALL_REG])
+ elsif operand.is_a? LabelReference
+ preList << Instruction.new(operand.codeOrigin, "la", [operand, MIPS_CALL_REG])
+ elsif operand.register? and operand != MIPS_CALL_REG
+ preList << Instruction.new(operand.codeOrigin, "move", [operand, MIPS_CALL_REG])
+ else
+ needRestore = false
+ tmp = operand
+ end
+ if needRestore
+ postList << Instruction.new(operand.codeOrigin, "move", [MIPS_GPSAVE_REG, MIPS_GP_REG])
+ end
+ tmp
+end
+
def mipsLowerMisplacedAddresses(list)
newList = []
list.each {
@@ -454,33 +574,13 @@ def mipsLowerMisplacedAddresses(list)
annotation = node.annotation
case node.opcode
when "jmp"
- if node.operands[0].address?
- newList << Instruction.new(node.operands[0].codeOrigin, "loadi", [node.operands[0], MIPS_JUMP_REG])
- newList << Instruction.new(node.codeOrigin, node.opcode, [MIPS_JUMP_REG])
- else
- newList << Instruction.new(node.codeOrigin,
- node.opcode,
- [riscAsRegister(newList, postInstructions, node.operands[0], "p", false)])
- end
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ [mipsAsRegister(newList, [], node.operands[0], false)])
when "call"
- restoreGP = false;
- tmp = MIPS_CALL_REG
- if node.operands[0].address?
- newList << Instruction.new(node.operands[0].codeOrigin, "loadp", [node.operands[0], MIPS_CALL_REG])
- restoreGP = true;
- elsif node.operands[0].is_a? LabelReference
- tmp = node.operands[0]
- restoreGP = true;
- elsif node.operands[0].register?
- newList << Instruction.new(node.operands[0].codeOrigin, "move", [node.operands[0], MIPS_CALL_REG])
- restoreGP = true;
- else
- tmp = node.operands[0]
- end
- newList << Instruction.new(node.codeOrigin, node.opcode, [tmp])
- if restoreGP
- newList << Instruction.new(node.codeOrigin, "move", [MIPS_GPSAVE_REG, MIPS_GP_REG])
- end
+ newList << Instruction.new(node.codeOrigin,
+ node.opcode,
+ [mipsAsRegister(newList, postInstructions, node.operands[0], true)])
when "slt", "sltu"
newList << Instruction.new(node.codeOrigin,
node.opcode,
@@ -489,6 +589,10 @@ def mipsLowerMisplacedAddresses(list)
newList << Instruction.new(node.codeOrigin,
node.opcode,
riscAsRegisters(newList, [], node.operands, "b"))
+ when "andb"
+ newList << Instruction.new(node.codeOrigin,
+ "andi",
+ riscAsRegisters(newList, [], node.operands, "b"))
when /^(bz|bnz|bs|bo)/
tl = $~.post_match == "" ? "i" : $~.post_match
newList << Instruction.new(node.codeOrigin,
@@ -565,7 +669,7 @@ class Address
end
#
-# Add PIC compatible header code to prologue/entry rutins.
+# Add PIC compatible header code to all the LLInt rutins.
#
def mipsAddPICCode(list)
@@ -574,13 +678,7 @@ def mipsAddPICCode(list)
| node |
myList << node
if node.is_a? Label
- if /_prologue$/.match(node.name) || /^_llint_function_/.match(node.name)
- # Functions called from trampoline/JIT codes.
- myList << Instruction.new(node.codeOrigin, "pichdr", [])
- elsif /_llint_op_catch/.match(node.name)
- # Exception cactcher entry point function.
- myList << Instruction.new(node.codeOrigin, "pichdrra", [])
- end
+ myList << Instruction.new(node.codeOrigin, "pichdr", [])
end
}
myList
@@ -606,6 +704,7 @@ class Sequence
}
result = mipsAddPICCode(result)
+ result = mipsLowerFarBranchOps(result)
result = mipsLowerSimpleBranchOps(result)
result = riscLowerSimpleBranchOps(result)
result = riscLowerHardBranchOps(result)
@@ -714,6 +813,16 @@ def emitMIPSDoubleBranch(branchOpcode, neg, operands)
end
end
+def emitMIPSJumpOrCall(opcode, operand)
+ if operand.label?
+ raise "Direct call/jump to a not local label." unless operand.is_a? LocalLabelReference
+ $asm.puts "#{opcode} #{operand.asmLabel}"
+ else
+ raise "Invalid call/jump register." unless operand == MIPS_CALL_REG
+ $asm.puts "#{opcode}r #{MIPS_CALL_REG.mipsOperand}"
+ end
+end
+
class Instruction
def lowerMIPS
$asm.comment codeOriginString
@@ -784,6 +893,8 @@ class Instruction
$asm.puts "ldc1 #{mipsFlippedOperands(operands)}"
when "stored"
$asm.puts "sdc1 #{mipsOperands(operands)}"
+ when "la"
+ $asm.puts "la #{operands[1].mipsOperand}, #{operands[0].asmLabel}"
when "addd"
emitMIPS("add.d", operands)
when "divd"
@@ -850,20 +961,6 @@ class Instruction
$asm.puts "addiu $sp, $sp, -4"
$asm.puts "sw #{op.mipsOperand}, 0($sp)"
}
- when "popCalleeSaves"
- $asm.puts "lw $16, 0($sp)"
- $asm.puts "lw $17, 4($sp)"
- $asm.puts "lw $18, 8($sp)"
- $asm.puts "lw $19, 12($sp)"
- $asm.puts "lw $20, 16($sp)"
- $asm.puts "addiu $sp, $sp, 20"
- when "pushCalleeSaves"
- $asm.puts "addiu $sp, $sp, -20"
- $asm.puts "sw $20, 16($sp)"
- $asm.puts "sw $19, 12($sp)"
- $asm.puts "sw $18, 8($sp)"
- $asm.puts "sw $17, 4($sp)"
- $asm.puts "sw $16, 0($sp)"
when "move", "sxi2p", "zxi2p"
if operands[0].is_a? Immediate
mipsMoveImmediate(operands[0].value, operands[1])
@@ -885,17 +982,9 @@ class Instruction
when "bilteq", "bplteq", "bblteq"
$asm.puts "ble #{mipsOperands(operands[0..1])}, #{operands[2].asmLabel}"
when "jmp"
- if operands[0].label?
- $asm.puts "j #{operands[0].asmLabel}"
- else
- $asm.puts "jr #{operands[0].mipsOperand}"
- end
+ emitMIPSJumpOrCall("j", operands[0])
when "call"
- if operands[0].label?
- $asm.puts "jal #{operands[0].asmLabel}"
- else
- $asm.puts "jalr #{operands[0].mipsOperand}"
- end
+ emitMIPSJumpOrCall("jal", operands[0])
when "break"
$asm.puts "break"
when "ret"
@@ -949,16 +1038,14 @@ class Instruction
$asm.puts "movz #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
when "movn"
$asm.puts "movn #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
+ when "setcallreg"
+ $asm.puts "move #{MIPS_CALL_REG.mipsOperand}, #{operands[0].mipsOperand}"
when "slt", "sltb"
$asm.puts "slt #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
when "sltu", "sltub"
$asm.puts "sltu #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
when "pichdr"
- $asm.putStr("OFFLINE_ASM_CPLOAD($25)")
- $asm.puts "move $s4, $gp"
- when "pichdrra"
- $asm.putStr("OFFLINE_ASM_CPLOAD($31)")
- $asm.puts "move $s4, $gp"
+ $asm.putStr("OFFLINE_ASM_CPLOAD(#{MIPS_CALL_REG.mipsOperand})")
when "memfence"
$asm.puts "sync"
else
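
[Editor's note] mips.rb adds a pass (mipsLowerFarBranchOps with mipsNegateCondition) that rewrites conditional branches targeting non-local labels: the condition is negated so the branch skips over an absolute la/jr pair, since only that pair can reach an arbitrary label. The sketch below emits the lowered text for one such branch; it works on plain strings with invented label names rather than on offlineasm nodes.

    NEGATED = { "eq" => "neq", "neq" => "eq", "gt" => "lteq", "lteq" => "gt",
                "lt" => "gteq", "gteq" => "lt", "z" => "nz", "nz" => "z" }

    def lower_far_branch(size_suffix, condition, operands, far_target)
      skip = "_offlineasm_nobranch"          # a unique local label in the real pass
      [
        "b#{size_suffix}#{NEGATED.fetch(condition)} #{(operands + [skip]).join(', ')}",
        "la $t9, #{far_target}",             # $t9 is MIPS_CALL_REG
        "jr $t9",
        "#{skip}:",
      ]
    end

    puts lower_far_branch("i", "eq", ["$v0", "$v1"], "_llint_throw_from_slow_path_trampoline")
    # => bineq $v0, $v1, _offlineasm_nobranch
    #    la $t9, _llint_throw_from_slow_path_trampoline
    #    jr $t9
    #    _offlineasm_nobranch:
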
diff --git a/Source/JavaScriptCore/offlineasm/parser.rb b/Source/JavaScriptCore/offlineasm/parser.rb
index 3b9c67bed..b44511245 100644
--- a/Source/JavaScriptCore/offlineasm/parser.rb
+++ b/Source/JavaScriptCore/offlineasm/parser.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2011 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -28,19 +28,83 @@ require "pathname"
require "registers"
require "self_hash"
+class SourceFile
+ @@fileNames = []
+
+ attr_reader :name, :fileNumber
+
+ def SourceFile.outputDotFileList(outp)
+ @@fileNames.each_index {
+ | index |
+ outp.puts "\".file #{index+1} \\\"#{@@fileNames[index]}\\\"\\n\""
+ }
+ end
+
+ def initialize(fileName)
+ @name = Pathname.new(fileName)
+ pathName = "#{@name.realpath}"
+ fileNumber = @@fileNames.index(pathName)
+ if not fileNumber
+ @@fileNames << pathName
+ fileNumber = @@fileNames.length
+ else
+ fileNumber += 1 # File numbers are 1 based
+ end
+ @fileNumber = fileNumber
+ end
+end
+
class CodeOrigin
- attr_reader :fileName, :lineNumber
+ attr_reader :lineNumber
- def initialize(fileName, lineNumber)
- @fileName = fileName
+ def initialize(sourceFile, lineNumber)
+ @sourceFile = sourceFile
@lineNumber = lineNumber
end
-
+
+ def fileName
+ @sourceFile.name
+ end
+
+ def debugDirective
+ $emitWinAsm ? nil : "\".loc #{@sourceFile.fileNumber} #{lineNumber}\\n\""
+ end
+
def to_s
"#{fileName}:#{lineNumber}"
end
end
+class IncludeFile
+ @@includeDirs = []
+
+ attr_reader :fileName
+
+ def initialize(moduleName, defaultDir)
+ directory = nil
+ @@includeDirs.each {
+ | includePath |
+ fileName = includePath + (moduleName + ".asm")
+ directory = includePath unless not File.file?(fileName)
+ }
+ if not directory
+ directory = defaultDir
+ end
+
+ @fileName = directory + (moduleName + ".asm")
+ end
+
+ def self.processIncludeOptions()
+ while ARGV[0][/^-I/]
+ path = ARGV.shift[2..-1]
+ if not path
+ path = ARGV.shift
+ end
+ @@includeDirs << (path + "/")
+ end
+ end
+end
+
class Token
attr_reader :codeOrigin, :string
@@ -87,8 +151,7 @@ end
# The lexer. Takes a string and returns an array of tokens.
#
-def lex(str, fileName)
- fileName = Pathname.new(fileName)
+def lex(str, file)
result = []
lineNumber = 1
annotation = nil
@@ -108,35 +171,37 @@ def lex(str, fileName)
# use of this for its cloopDo debugging utility even if
# enableInstrAnnotations is not enabled.
if annotation
- result << Annotation.new(CodeOrigin.new(fileName, lineNumber),
+ result << Annotation.new(CodeOrigin.new(file, lineNumber),
annotationType, annotation)
annotation = nil
end
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
lineNumber += 1
when /\A[a-zA-Z]([a-zA-Z0-9_.]*)/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
when /\A\.([a-zA-Z0-9_]*)/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
when /\A_([a-zA-Z0-9_]*)/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
when /\A([ \t]+)/
# whitespace, ignore
whitespaceFound = true
str = $~.post_match
next
when /\A0x([0-9a-fA-F]+)/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&.hex.to_s)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&.hex.to_s)
when /\A0([0-7]+)/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&.oct.to_s)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&.oct.to_s)
when /\A([0-9]+)/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
when /\A::/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
when /\A[:,\(\)\[\]=\+\-~\|&^*]/
- result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
+ when /\A".*"/
+ result << Token.new(CodeOrigin.new(file, lineNumber), $&)
else
- raise "Lexer error at #{CodeOrigin.new(fileName, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}"
+ raise "Lexer error at #{CodeOrigin.new(file, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}"
end
whitespaceFound = false
str = $~.post_match
@@ -153,13 +218,13 @@ def isRegister(token)
end
def isInstruction(token)
- token =~ INSTRUCTION_PATTERN
+ INSTRUCTION_SET.member? token.string
end
def isKeyword(token)
- token =~ /\A((true)|(false)|(if)|(then)|(else)|(elsif)|(end)|(and)|(or)|(not)|(macro)|(const)|(sizeof)|(error)|(include))\Z/ or
+ token =~ /\A((true)|(false)|(if)|(then)|(else)|(elsif)|(end)|(and)|(or)|(not)|(global)|(macro)|(const)|(sizeof)|(error)|(include))\Z/ or
token =~ REGISTER_PATTERN or
- token =~ INSTRUCTION_PATTERN
+ isInstruction(token)
end
def isIdentifier(token)
@@ -182,6 +247,10 @@ def isInteger(token)
token =~ /\A[0-9]/
end
+def isString(token)
+ token =~ /\A".*"/
+end
+
#
# The parser. Takes an array of tokens and returns an AST. Methods
# other than parse(tokens) are not for public consumption.
@@ -367,6 +436,10 @@ class Parser
result = Immediate.new(@tokens[@idx].codeOrigin, @tokens[@idx].string.to_i)
@idx += 1
result
+ elsif isString @tokens[@idx]
+ result = StringLiteral.new(@tokens[@idx].codeOrigin, @tokens[@idx].string)
+ @idx += 1
+ result
elsif isIdentifier @tokens[@idx]
codeOrigin, names = parseColonColon
if names.size > 1
@@ -380,6 +453,14 @@ class Parser
@idx += 1
codeOrigin, names = parseColonColon
Sizeof.forName(codeOrigin, names.join('::'))
+ elsif isLabel @tokens[@idx]
+ result = LabelReference.new(@tokens[@idx].codeOrigin, Label.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
+ @idx += 1
+ result
+ elsif isLocalLabel @tokens[@idx]
+ result = LocalLabelReference.new(@tokens[@idx].codeOrigin, LocalLabel.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
+ @idx += 1
+ result
else
parseError
end
@@ -400,7 +481,7 @@ class Parser
end
def couldBeExpression
- @tokens[@idx] == "-" or @tokens[@idx] == "~" or @tokens[@idx] == "sizeof" or isInteger(@tokens[@idx]) or isVariable(@tokens[@idx]) or @tokens[@idx] == "("
+ @tokens[@idx] == "-" or @tokens[@idx] == "~" or @tokens[@idx] == "sizeof" or isInteger(@tokens[@idx]) or isString(@tokens[@idx]) or isVariable(@tokens[@idx]) or @tokens[@idx] == "("
end
def parseExpressionAdd
@@ -573,6 +654,14 @@ class Parser
body = parseSequence(/\Aend\Z/, "while inside of macro #{name}")
@idx += 1
list << Macro.new(codeOrigin, name, variables, body)
+ elsif @tokens[@idx] == "global"
+ codeOrigin = @tokens[@idx].codeOrigin
+ @idx += 1
+ skipNewLine
+ parseError unless isLabel(@tokens[@idx])
+ name = @tokens[@idx].string
+ @idx += 1
+ Label.setAsGlobal(codeOrigin, name)
elsif isInstruction @tokens[@idx]
codeOrigin = @tokens[@idx].codeOrigin
name = @tokens[@idx].string
@@ -677,7 +766,7 @@ class Parser
parseError unless @tokens[@idx] == ":"
# It's a label.
if isLabel name
- list << Label.forName(codeOrigin, name)
+ list << Label.forName(codeOrigin, name, true)
else
list << LocalLabel.forName(codeOrigin, name)
end
@@ -686,9 +775,8 @@ class Parser
@idx += 1
parseError unless isIdentifier(@tokens[@idx])
moduleName = @tokens[@idx].string
- fileName = @tokens[@idx].codeOrigin.fileName.dirname + (moduleName + ".asm")
+ fileName = IncludeFile.new(moduleName, @tokens[@idx].codeOrigin.fileName.dirname).fileName
@idx += 1
- $stderr.puts "offlineasm: Including file #{fileName}"
list << parse(fileName)
else
parseError "Expecting terminal #{final} #{comment}"
@@ -696,10 +784,33 @@ class Parser
}
Sequence.new(firstCodeOrigin, list)
end
+
+ def parseIncludes(final, comment)
+ firstCodeOrigin = @tokens[@idx].codeOrigin
+ fileList = []
+ fileList << @tokens[@idx].codeOrigin.fileName
+ loop {
+ if (@idx == @tokens.length and not final) or (final and @tokens[@idx] =~ final)
+ break
+ elsif @tokens[@idx] == "include"
+ @idx += 1
+ parseError unless isIdentifier(@tokens[@idx])
+ moduleName = @tokens[@idx].string
+ fileName = IncludeFile.new(moduleName, @tokens[@idx].codeOrigin.fileName.dirname).fileName
+ @idx += 1
+
+ fileList << fileName
+ else
+ @idx += 1
+ end
+ }
+
+ return fileList
+ end
end
def parseData(data, fileName)
- parser = Parser.new(data, fileName)
+ parser = Parser.new(data, SourceFile.new(fileName))
parser.parseSequence(nil, "")
end
@@ -708,6 +819,8 @@ def parse(fileName)
end
def parseHash(fileName)
- dirHash(Pathname.new(fileName).dirname, /\.asm$/)
+ parser = Parser.new(IO::read(fileName), SourceFile.new(fileName))
+ fileList = parser.parseIncludes(nil, "")
+ fileListHash(fileList)
end
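
With this change the dependency hash covers exactly the source files the build actually reads: the top-level .asm file plus each file it pulls in through include directives, instead of every .asm in the directory. A minimal usage sketch, assuming a hypothetical top-level file name and using only the helpers introduced above:

    # Hash LowLevelInterpreter.asm together with every file it includes directly.
    hash = parseHash("LowLevelInterpreter.asm")
    # parseHash builds a Parser over the file, collects the include targets via
    # parseIncludes, and feeds the resulting file list to fileListHash
    # (defined in self_hash.rb below).
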
diff --git a/Source/JavaScriptCore/offlineasm/registers.rb b/Source/JavaScriptCore/offlineasm/registers.rb
index 94e0767a6..b6ed36d00 100644
--- a/Source/JavaScriptCore/offlineasm/registers.rb
+++ b/Source/JavaScriptCore/offlineasm/registers.rb
@@ -31,10 +31,6 @@ GPRS =
"t3",
"t4",
"t5",
- "t6",
- "t7",
- "t8",
- "t9",
"cfr",
"a0",
"a1",
@@ -44,10 +40,18 @@ GPRS =
"r1",
"sp",
"lr",
-
+ "pc",
# 64-bit only registers:
- "csr1", # tag type number register
- "csr2" # tag mask register
+ "csr0",
+ "csr1",
+ "csr2",
+ "csr3",
+ "csr4",
+ "csr5",
+ "csr6",
+ "csr7",
+ "csr8",
+ "csr9"
]
FPRS =
@@ -62,6 +66,14 @@ FPRS =
"fa1",
"fa2",
"fa3",
+ "csfr0",
+ "csfr1",
+ "csfr2",
+ "csfr3",
+ "csfr4",
+ "csfr5",
+ "csfr6",
+ "csfr7",
"fr"
]
diff --git a/Source/JavaScriptCore/offlineasm/risc.rb b/Source/JavaScriptCore/offlineasm/risc.rb
index 3fbc07d0b..28e01654b 100644
--- a/Source/JavaScriptCore/offlineasm/risc.rb
+++ b/Source/JavaScriptCore/offlineasm/risc.rb
@@ -374,7 +374,7 @@ def riscLowerMalformedImmediates(list, validImmediates)
when "addi", "addp", "addq", "addis", "subi", "subp", "subq", "subis"
if node.operands[0].is_a? Immediate and
(not validImmediates.include? node.operands[0].value) and
- validImmediates.include? -node.operands[0].value
+ validImmediates.include? -node.operands[0].value and
node.operands.size == 2
if node.opcode =~ /add/
newOpcode = "sub" + $~.post_match
diff --git a/Source/JavaScriptCore/offlineasm/self_hash.rb b/Source/JavaScriptCore/offlineasm/self_hash.rb
index b91057391..6c736ff5b 100644
--- a/Source/JavaScriptCore/offlineasm/self_hash.rb
+++ b/Source/JavaScriptCore/offlineasm/self_hash.rb
@@ -45,6 +45,21 @@ def dirHash(directory, regexp)
end
#
+# fileListHash(fileList) -> SHA1 hexdigest
+#
+# Returns a hash of all files in the list.
+#
+
+def fileListHash(fileList)
+ contents = ""
+ fileList.each {
+ | fileName |
+ contents += IO::read(fileName)
+ }
+ return Digest::SHA1.hexdigest(contents)
+end
+
+#
# selfHash -> SHA1 hexdigest
#
# Returns a hash of the offlineasm source code. This allows dependency
diff --git a/Source/JavaScriptCore/offlineasm/settings.rb b/Source/JavaScriptCore/offlineasm/settings.rb
index 601934f99..eec092584 100644
--- a/Source/JavaScriptCore/offlineasm/settings.rb
+++ b/Source/JavaScriptCore/offlineasm/settings.rb
@@ -54,7 +54,28 @@ def computeSettingsCombinations(ast)
settingsCombinator(settingsCombinations, newMap, remaining[1..-1])
end
- settingsCombinator(settingsCombinations, {}, (ast.filter(Setting).uniq.collect{|v| v.name} + BACKENDS).uniq)
+ nonBackendSettings = ast.filter(Setting).uniq.collect{ |v| v.name }
+ nonBackendSettings.delete_if {
+ | setting |
+ isBackend? setting
+ }
+
+ allBackendsFalse = {}
+ BACKENDS.each {
+ | backend |
+ allBackendsFalse[backend] = false
+ }
+
+ # This will create entries for invalid backends. That's fine. It's necessary
+    # because it ensures that generate_offset_extractor (which knows about valid
+ # backends) has settings indices that are compatible with what asm will see
+ # (asm doesn't know about valid backends).
+ BACKENDS.each {
+ | backend |
+ map = allBackendsFalse.clone
+ map[backend] = true
+ settingsCombinator(settingsCombinations, map, nonBackendSettings)
+ }
settingsCombinations
end
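
To make the resulting index layout concrete, here is an illustrative sketch (the backend and setting names are made up, not the real lists): each combination fixes exactly one backend to true, sets every other backend to false, and is then crossed with all assignments of the non-backend settings.

    # Illustrative only: with BACKENDS = ["A", "B"] and a single non-backend
    # setting "S", the loop above yields maps shaped like
    #   { "A" => true,  "B" => false, "S" => true  }
    #   { "A" => true,  "B" => false, "S" => false }
    #   { "A" => false, "B" => true,  "S" => true  }
    #   { "A" => false, "B" => true,  "S" => false }
    # so every backend, valid or not, gets the same stable set of settings indices.
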
@@ -73,15 +94,13 @@ def forSettings(concreteSettings, ast)
selectedBackend = nil
BACKENDS.each {
| backend |
- isSupported = concreteSettings[backend]
- raise unless isSupported != nil
- numClaimedBackends += if isSupported then 1 else 0 end
- if isSupported
+ if concreteSettings[backend]
+ raise if selectedBackend
selectedBackend = backend
end
}
- return if numClaimedBackends > 1
+ return unless isValidBackend? selectedBackend
# Resolve the AST down to a low-level form (no macros or conditionals).
lowLevelAST = ast.resolveSettings(concreteSettings)
@@ -172,7 +191,17 @@ end
#
def emitCodeInConfiguration(concreteSettings, ast, backend)
- $output.puts cppSettingsTest(concreteSettings)
+ Label.resetReferenced
+
+ if !$emitWinAsm
+ $output.puts cppSettingsTest(concreteSettings)
+ else
+ if backend == "X86_WIN"
+ $output.puts ".MODEL FLAT, C"
+ end
+ $output.puts "INCLUDE #{File.basename($output.path)}.sym"
+ $output.puts "_TEXT SEGMENT"
+ end
if isASTErroneous(ast)
$output.puts "#error \"Invalid configuration.\""
@@ -182,7 +211,21 @@ def emitCodeInConfiguration(concreteSettings, ast, backend)
yield concreteSettings, ast, backend
end
- $output.puts "#endif"
+ if !$emitWinAsm
+ $output.puts "#endif"
+ else
+ $output.puts "_TEXT ENDS"
+ $output.puts "END"
+
+ # Write symbols needed by MASM
+ File.open("#{File.basename($output.path)}.sym", "w") {
+ | outp |
+ Label.forReferencedExtern {
+ | name |
+ outp.puts "EXTERN #{name[1..-1]} : near"
+ }
+ }
+ end
end
#
diff --git a/Source/JavaScriptCore/offlineasm/sh4.rb b/Source/JavaScriptCore/offlineasm/sh4.rb
deleted file mode 100644
index a804b29cc..000000000
--- a/Source/JavaScriptCore/offlineasm/sh4.rb
+++ /dev/null
@@ -1,1072 +0,0 @@
-# Copyright (C) 2013 Apple Inc. All rights reserved.
-# Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS, INC. ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS, INC. OR ITS
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-require 'risc'
-
-class Node
- def sh4SingleHi
- doubleOperand = sh4Operand
- raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^dr/
- "fr" + ($~.post_match.to_i).to_s
- end
- def sh4SingleLo
- doubleOperand = sh4Operand
- raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^dr/
- "fr" + ($~.post_match.to_i + 1).to_s
- end
-end
-
-class SpecialRegister < NoChildren
- def sh4Operand
- @name
- end
-
- def dump
- @name
- end
-
- def register?
- true
- end
-end
-
-SH4_TMP_GPRS = [ SpecialRegister.new("r3"), SpecialRegister.new("r11"), SpecialRegister.new("r13") ]
-SH4_TMP_FPRS = [ SpecialRegister.new("dr10") ]
-
-class RegisterID
- def sh4Operand
- case name
- when "t0"
- "r0"
- when "t1"
- "r1"
- when "t2"
- "r2"
- when "t3"
- "r10"
- when "t4", "a0"
- "r4"
- when "t5", "a1"
- "r5"
- when "t6", "a2"
- "r6"
- when "t7", "a3"
- "r7"
- when "t8"
- "r8"
- when "t9"
- "r9"
- when "cfr"
- "r14"
- when "sp"
- "r15"
- when "lr"
- "pr"
- else
- raise "Bad register #{name} for SH4 at #{codeOriginString}"
- end
- end
-end
-
-class FPRegisterID
- def sh4Operand
- case name
- when "ft0", "fr"
- "dr0"
- when "ft1"
- "dr2"
- when "ft2"
- "dr4"
- when "ft3"
- "dr6"
- when "ft4"
- "dr8"
- when "fa0"
- "dr12"
- else
- raise "Bad register #{name} for SH4 at #{codeOriginString}"
- end
- end
-end
-
-class Immediate
- def sh4Operand
- raise "Invalid immediate #{value} at #{codeOriginString}" if value < -128 or value > 127
- "##{value}"
- end
-end
-
-class Address
- def sh4Operand
- raise "Bad offset #{offset.value} at #{codeOriginString}" if offset.value < 0 or offset.value > 60
- if offset.value == 0
- "@#{base.sh4Operand}"
- else
- "@(#{offset.value}, #{base.sh4Operand})"
- end
- end
-
- def sh4OperandPostInc
- raise "Bad offset #{offset.value} for post inc at #{codeOriginString}" unless offset.value == 0
- "@#{base.sh4Operand}+"
- end
-
- def sh4OperandPreDec
- raise "Bad offset #{offset.value} for pre dec at #{codeOriginString}" unless offset.value == 0
- "@-#{base.sh4Operand}"
- end
-end
-
-class BaseIndex
- def sh4Operand
- raise "Unconverted base index at #{codeOriginString}"
- end
-end
-
-class AbsoluteAddress
- def sh4Operand
- raise "Unconverted absolute address at #{codeOriginString}"
- end
-end
-
-class ConstPool < Node
- attr_reader :size
- attr_reader :entries
-
- def initialize(codeOrigin, entries, size)
- super(codeOrigin)
- raise "Invalid size #{size} for ConstPool" unless size == 16 or size == 32
- @size = size
- @entries = entries
- end
-
- def dump
- "#{size}: #{entries}"
- end
-
- def address?
- false
- end
-
- def label?
- false
- end
-
- def immediate?
- false
- end
-
- def register?
- false
- end
-
- def lowerSH4
- if size == 16
- $asm.puts ".balign 2"
- else
- $asm.puts ".balign 4"
- end
- entries.map {
- |e|
- e.label.lower("SH4")
- if e.size == 16
- $asm.puts ".word #{e.value}"
- else
- $asm.puts ".long #{e.value}"
- end
- }
- end
-end
-
-class ConstPoolEntry < Node
- attr_reader :size
- attr_reader :value
- attr_reader :label
- attr_reader :labelref
-
- def initialize(codeOrigin, value, size)
- super(codeOrigin)
- raise "Invalid size #{size} for ConstPoolEntry" unless size == 16 or size == 32
- @size = size
- @value = value
- @label = LocalLabel.unique("constpool#{size}")
- @labelref = LocalLabelReference.new(codeOrigin, label)
- end
-
- def dump
- "#{value} (#{size} @ #{label})"
- end
-
- def ==(other)
- other.is_a? ConstPoolEntry and other.value == @value
- end
-
- def address?
- false
- end
-
- def label?
- false
- end
-
- def immediate?
- false
- end
-
- def register?
- false
- end
-end
-
-
-#
-# Lowering of shift ops for SH4. For example:
-#
-# rshifti foo, bar
-#
-# becomes:
-#
-# negi foo, tmp
-# shad tmp, bar
-#
-
-def sh4LowerShiftOps(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "ulshifti", "ulshiftp", "urshifti", "urshiftp", "lshifti", "lshiftp", "rshifti", "rshiftp"
- if node.opcode[0, 1] == "u"
- type = "l"
- direction = node.opcode[1, 1]
- else
- type = "a"
- direction = node.opcode[0, 1]
- end
- if node.operands[0].is_a? Immediate
- maskedImm = Immediate.new(node.operands[0].codeOrigin, node.operands[0].value & 31)
- if maskedImm.value == 0
- # There is nothing to do here.
- elsif maskedImm.value == 1 or (type == "l" and [2, 8, 16].include? maskedImm.value)
- newList << Instruction.new(node.codeOrigin, "sh#{type}#{direction}x", [maskedImm, node.operands[1]])
- else
- tmp = Tmp.new(node.codeOrigin, :gpr)
- if direction == "l"
- newList << Instruction.new(node.codeOrigin, "move", [maskedImm, tmp])
- else
- newList << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, -1 * maskedImm.value), tmp])
- end
- newList << Instruction.new(node.codeOrigin, "sh#{type}d", [tmp, node.operands[1]])
- end
- else
- tmp = Tmp.new(node.codeOrigin, :gpr)
- newList << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, 31), tmp])
- newList << Instruction.new(node.codeOrigin, "andi", [node.operands[0], tmp])
- if direction == "r"
- newList << Instruction.new(node.codeOrigin, "negi", [tmp, tmp])
- end
- newList << Instruction.new(node.codeOrigin, "sh#{type}d", [tmp, node.operands[1]])
- end
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-
-#
-# Lowering of simple branch ops for SH4. For example:
-#
-# baddis foo, bar, baz
-#
-# will become:
-#
-# addi foo, bar
-# bs bar, baz
-#
-
-def sh4LowerSimpleBranchOps(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- annotation = node.annotation
- case node.opcode
- when /^b(addi|subi|ori|addp)/
- op = $1
- bc = $~.post_match
-
- case op
- when "addi", "addp"
- op = "addi"
- when "subi", "subp"
- op = "subi"
- when "ori", "orp"
- op = "ori"
- end
-
- if bc == "s"
- raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
- if node.operands[1].is_a? RegisterID or node.operands[1].is_a? SpecialRegister
- newList << Instruction.new(node.codeOrigin, op, node.operands[0..1])
- newList << Instruction.new(node.codeOrigin, "bs", node.operands[1..2])
- else
- tmpVal = Tmp.new(node.codeOrigin, :gpr)
- tmpPtr = Tmp.new(node.codeOrigin, :gpr)
- addr = Address.new(node.codeOrigin, tmpPtr, Immediate.new(node.codeOrigin, 0))
- newList << Instruction.new(node.codeOrigin, "leap", [node.operands[1], tmpPtr])
- newList << Instruction.new(node.codeOrigin, "loadi", [addr, tmpVal])
- newList << Instruction.new(node.codeOrigin, op, [node.operands[0], tmpVal])
- newList << Instruction.new(node.codeOrigin, "storei", [tmpVal, addr])
- newList << Instruction.new(node.codeOrigin, "bs", [tmpVal, node.operands[2]])
- end
- elsif bc == "nz"
- raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
- newList << Instruction.new(node.codeOrigin, op, node.operands[0..1])
- newList << Instruction.new(node.codeOrigin, "btinz", node.operands[1..2])
- else
- newList << node
- end
- when "bmulio", "bmulpo"
- raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
- tmp1 = Tmp.new(node.codeOrigin, :gpr)
- tmp2 = Tmp.new(node.codeOrigin, :gpr)
- newList << Instruction.new(node.codeOrigin, node.opcode, [tmp1, tmp2].concat(node.operands))
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-
-#
-# Lowering of double accesses for SH4. For example:
-#
-# loadd [foo, bar, 8], baz
-#
-# becomes:
-#
-# leap [foo, bar, 8], tmp
-# loaddReversedAndIncrementAddress [tmp], baz
-#
-
-def sh4LowerDoubleAccesses(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "loadd"
- tmp = Tmp.new(codeOrigin, :gpr)
- addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
- newList << Instruction.new(codeOrigin, "leap", [node.operands[0], tmp])
- newList << Instruction.new(node.codeOrigin, "loaddReversedAndIncrementAddress", [addr, node.operands[1]], node.annotation)
- when "stored"
- tmp = Tmp.new(codeOrigin, :gpr)
- addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
- newList << Instruction.new(codeOrigin, "leap", [node.operands[1].withOffset(8), tmp])
- newList << Instruction.new(node.codeOrigin, "storedReversedAndDecrementAddress", [node.operands[0], addr], node.annotation)
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-
-#
-# Lowering of double specials for SH4.
-#
-
-def sh4LowerDoubleSpecials(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "bdltun", "bdgtun"
- # Handle specific floating point unordered opcodes.
- newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], node.operands[2]])
- newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], node.operands[2]])
- newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
- when "bdnequn", "bdgtequn", "bdltequn"
- newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
- when "bdneq", "bdgteq", "bdlteq"
- # Handle specific floating point ordered opcodes.
- outlabel = LocalLabel.unique("out_#{node.opcode}")
- outref = LocalLabelReference.new(codeOrigin, outlabel)
- newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], outref])
- newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], outref])
- newList << Instruction.new(codeOrigin, node.opcode, node.operands)
- newList << outlabel
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-
-#
-# Lowering of misplaced labels for SH4.
-#
-
-def sh4LowerMisplacedLabels(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- operands = node.operands
- newOperands = []
- operands.each {
- | operand |
- if operand.is_a? LabelReference
- tmp = Tmp.new(operand.codeOrigin, :gpr)
- newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp])
- newOperands << tmp
- else
- newOperands << operand
- end
- }
- newList << Instruction.new(node.codeOrigin, node.opcode, newOperands, node.annotation)
- else
- newList << node
- end
- }
- newList
-end
-
-
-#
-# Lowering of misplaced special registers for SH4. For example:
-#
-# storep pr, foo
-#
-# becomes:
-#
-# stspr tmp
-# storep tmp, foo
-#
-
-def sh4LowerMisplacedSpecialRegisters(list)
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "move"
- if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr"
- newList << Instruction.new(codeOrigin, "stspr", [node.operands[1]])
- elsif node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "pr"
- newList << Instruction.new(codeOrigin, "ldspr", [node.operands[0]])
- else
- newList << node
- end
- when "loadi", "loadis", "loadp"
- if node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "pr"
- tmp = Tmp.new(codeOrigin, :gpr)
- newList << Instruction.new(codeOrigin, node.opcode, [node.operands[0], tmp])
- newList << Instruction.new(codeOrigin, "ldspr", [tmp])
- else
- newList << node
- end
- when "storei", "storep"
- if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr"
- tmp = Tmp.new(codeOrigin, :gpr)
- newList << Instruction.new(codeOrigin, "stspr", [tmp])
- newList << Instruction.new(codeOrigin, node.opcode, [tmp, node.operands[1]])
- else
- newList << node
- end
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-
-#
-# Group immediate values outside -128..127 range into constant pools for SH4.
-# These constant pools will be placed behind non-return opcodes jmp and ret, for example:
-#
-# move 1024, foo
-# ...
-# ret
-#
-# becomes:
-#
-# move [label], foo
-# ...
-# ret
-# label: 1024
-#
-
-def sh4LowerConstPool(list)
- newList = []
- currentPool16 = []
- currentPool32 = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "jmp", "ret"
- newList << node
- if not currentPool16.empty?
- newList << ConstPool.new(codeOrigin, currentPool16, 16)
- currentPool16 = []
- end
- if not currentPool32.empty?
- newList << ConstPool.new(codeOrigin, currentPool32, 32)
- currentPool32 = []
- end
- when "move"
- if node.operands[0].is_a? Immediate and not (-128..127).include? node.operands[0].value
- poolEntry = nil
- if (-32768..32767).include? node.operands[0].value
- currentPool16.each { |e|
- if e.value == node.operands[0].value
- poolEntry = e
- end
- }
- if !poolEntry
- poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].value, 16)
- currentPool16 << poolEntry
- end
- else
- currentPool32.each { |e|
- if e.value == node.operands[0].value
- poolEntry = e
- end
- }
- if !poolEntry
- poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].value, 32)
- currentPool32 << poolEntry
- end
- end
- newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
- elsif node.operands[0].is_a? LabelReference
- poolEntry = nil
- currentPool32.each { |e|
- if e.value == node.operands[0].asmLabel
- poolEntry = e
- end
- }
- if !poolEntry
- poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].asmLabel, 32)
- currentPool32 << poolEntry
- end
- newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
- else
- newList << node
- end
- else
- newList << node
- end
- else
- newList << node
- end
- }
- if not currentPool16.empty?
- newList << ConstPool.new(codeOrigin, currentPool16, 16)
- end
- if not currentPool32.empty?
- newList << ConstPool.new(codeOrigin, currentPool32, 32)
- end
- newList
-end
-
-
-#
-# Lowering of argument setup for SH4.
-# This phase avoids argument register trampling. For example, if a0 == t4:
-#
-# setargs t1, t4
-#
-# becomes:
-#
-# move t4, a1
-# move t1, a0
-#
-
-def sh4LowerArgumentSetup(list)
- a0 = RegisterID.forName(codeOrigin, "a0")
- a1 = RegisterID.forName(codeOrigin, "a1")
- a2 = RegisterID.forName(codeOrigin, "a2")
- a3 = RegisterID.forName(codeOrigin, "a3")
- newList = []
- list.each {
- | node |
- if node.is_a? Instruction
- case node.opcode
- when "setargs"
- if node.operands.size == 2
- if node.operands[1].sh4Operand != a0.sh4Operand
- newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
- newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
- elsif node.operands[0].sh4Operand != a1.sh4Operand
- newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
- newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
- else
- # As (operands[0] == a1) and (operands[1] == a0), we just need to swap a0 and a1.
- newList << Instruction.new(codeOrigin, "xori", [a0, a1])
- newList << Instruction.new(codeOrigin, "xori", [a1, a0])
- newList << Instruction.new(codeOrigin, "xori", [a0, a1])
- end
- elsif node.operands.size == 4
- # FIXME: We just raise an error if something is likely to go wrong for now.
- # It would be better to implement a recovering algorithm.
- if (node.operands[0].sh4Operand == a1.sh4Operand) or
- (node.operands[0].sh4Operand == a2.sh4Operand) or
- (node.operands[0].sh4Operand == a3.sh4Operand) or
- (node.operands[1].sh4Operand == a0.sh4Operand) or
- (node.operands[1].sh4Operand == a2.sh4Operand) or
- (node.operands[1].sh4Operand == a3.sh4Operand) or
- (node.operands[2].sh4Operand == a0.sh4Operand) or
- (node.operands[2].sh4Operand == a1.sh4Operand) or
- (node.operands[2].sh4Operand == a3.sh4Operand) or
- (node.operands[3].sh4Operand == a0.sh4Operand) or
- (node.operands[3].sh4Operand == a1.sh4Operand) or
- (node.operands[3].sh4Operand == a2.sh4Operand)
- raise "Potential argument register trampling detected."
- end
-
- newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
- newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
- newList << Instruction.new(codeOrigin, "move", [node.operands[2], a2])
- newList << Instruction.new(codeOrigin, "move", [node.operands[3], a3])
- else
- raise "Invalid operands number (#{node.operands.size}) for setargs"
- end
- else
- newList << node
- end
- else
- newList << node
- end
- }
- newList
-end
-
-
-class Sequence
- def getModifiedListSH4
- result = @list
-
- # Verify that we will only see instructions and labels.
- result.each {
- | node |
- unless node.is_a? Instruction or
- node.is_a? Label or
- node.is_a? LocalLabel or
- node.is_a? Skip
- raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
- end
- }
-
- result = sh4LowerShiftOps(result)
- result = sh4LowerSimpleBranchOps(result)
- result = riscLowerMalformedAddresses(result) {
- | node, address |
- if address.is_a? Address
- case node.opcode
- when "btbz", "btbnz", "cbeq", "bbeq", "bbneq", "bbb", "loadb", "storeb"
- (0..15).include? address.offset.value and
- ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
- (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
- when "loadh"
- (0..30).include? address.offset.value and
- ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
- (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
- else
- (0..60).include? address.offset.value
- end
- else
- false
- end
- }
- result = sh4LowerDoubleAccesses(result)
- result = sh4LowerDoubleSpecials(result)
- result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "muli", "mulp", "andi", "ori", "xori",
- "cbeq", "cieq", "cpeq", "cineq", "cpneq", "cib", "baddio", "bsubio", "bmulio", "baddis",
- "bbeq", "bbneq", "bbb", "bieq", "bpeq", "bineq", "bpneq", "bia", "bpa", "biaeq", "bpaeq", "bib", "bpb",
- "bigteq", "bpgteq", "bilt", "bplt", "bigt", "bpgt", "bilteq", "bplteq", "btiz", "btpz", "btinz", "btpnz", "btbz", "btbnz"])
- result = riscLowerMalformedImmediates(result, -128..127)
- result = riscLowerMisplacedAddresses(result)
- result = sh4LowerMisplacedLabels(result)
- result = sh4LowerMisplacedSpecialRegisters(result)
-
- result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_GPRS)
- result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_FPRS)
-
- result = sh4LowerConstPool(result)
- result = sh4LowerArgumentSetup(result)
-
- return result
- end
-end
-
-def sh4Operands(operands)
- operands.map{|v| v.sh4Operand}.join(", ")
-end
-
-def emitSH4Branch(sh4opcode, operand)
- raise "Invalid operand #{operand}" unless operand.is_a? RegisterID or operand.is_a? SpecialRegister
- $asm.puts "#{sh4opcode} @#{operand.sh4Operand}"
- $asm.puts "nop"
-end
-
-def emitSH4ShiftImm(val, operand, direction)
- tmp = val
- while tmp > 0
- if tmp >= 16
- $asm.puts "shl#{direction}16 #{operand.sh4Operand}"
- tmp -= 16
- elsif tmp >= 8
- $asm.puts "shl#{direction}8 #{operand.sh4Operand}"
- tmp -= 8
- elsif tmp >= 2
- $asm.puts "shl#{direction}2 #{operand.sh4Operand}"
- tmp -= 2
- else
- $asm.puts "shl#{direction} #{operand.sh4Operand}"
- tmp -= 1
- end
- end
-end
-
-def emitSH4BranchIfT(dest, neg)
- outlabel = LocalLabel.unique("branchIfT")
- sh4opcode = neg ? "bt" : "bf"
- $asm.puts "#{sh4opcode} #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
- if dest.is_a? LocalLabelReference
- $asm.puts "bra #{dest.asmLabel}"
- $asm.puts "nop"
- else
- emitSH4Branch("jmp", dest)
- end
- outlabel.lower("SH4")
-end
-
-def emitSH4IntCompare(cmpOpcode, operands)
- $asm.puts "cmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
-end
-
-def emitSH4CondBranch(cmpOpcode, neg, operands)
- emitSH4IntCompare(cmpOpcode, operands)
- emitSH4BranchIfT(operands[2], neg)
-end
-
-def emitSH4CompareSet(cmpOpcode, neg, operands)
- emitSH4IntCompare(cmpOpcode, operands)
- if !neg
- $asm.puts "movt #{operands[2].sh4Operand}"
- else
- outlabel = LocalLabel.unique("compareSet")
- $asm.puts "mov #0, #{operands[2].sh4Operand}"
- $asm.puts "bt #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
- $asm.puts "mov #1, #{operands[2].sh4Operand}"
- outlabel.lower("SH4")
- end
-end
-
-def emitSH4BranchIfNaN(operands)
- raise "Invalid operands number (#{operands.size})" unless operands.size == 2
- $asm.puts "fcmp/eq #{sh4Operands([operands[0], operands[0]])}"
- $asm.puts "bf #{operands[1].asmLabel}"
-end
-
-def emitSH4DoubleCondBranch(cmpOpcode, neg, operands)
- if cmpOpcode == "lt"
- $asm.puts "fcmp/gt #{sh4Operands([operands[0], operands[1]])}"
- else
- $asm.puts "fcmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
- end
- emitSH4BranchIfT(operands[2], neg)
-end
-
-class Instruction
- def lowerSH4
- $asm.comment codeOriginString
- case opcode
- when "addi", "addp"
- if operands.size == 3
- if operands[0].sh4Operand == operands[2].sh4Operand
- $asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
- elsif operands[1].sh4Operand == operands[2].sh4Operand
- $asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
- else
- $asm.puts "mov #{sh4Operands([operands[0], operands[2]])}"
- $asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
- end
- else
- $asm.puts "add #{sh4Operands(operands)}"
- end
- when "subi", "subp"
- if operands.size == 3
- if operands[1].sh4Operand == operands[2].sh4Operand
- $asm.puts "neg #{sh4Operands([operands[2], operands[2]])}"
- $asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
- else
- $asm.puts "mov #{sh4Operands([operands[0], operands[2]])}"
- $asm.puts "sub #{sh4Operands([operands[1], operands[2]])}"
- end
- else
- if operands[0].is_a? Immediate
- $asm.puts "add #{sh4Operands([Immediate.new(codeOrigin, -1 * operands[0].value), operands[1]])}"
- else
- $asm.puts "sub #{sh4Operands(operands)}"
- end
- end
- when "muli", "mulp"
- $asm.puts "mul.l #{sh4Operands(operands[0..1])}"
- $asm.puts "sts macl, #{operands[-1].sh4Operand}"
- when "negi", "negp"
- if operands.size == 2
- $asm.puts "neg #{sh4Operands(operands)}"
- else
- $asm.puts "neg #{sh4Operands([operands[0], operands[0]])}"
- end
- when "andi", "andp", "ori", "orp", "xori", "xorp"
- raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands.size == 2
- sh4opcode = opcode[0..-2]
- $asm.puts "#{sh4opcode} #{sh4Operands(operands)}"
- when "shllx", "shlrx"
- raise "Unhandled parameters for opcode #{opcode}" unless operands[0].is_a? Immediate
- if operands[0].value == 1
- $asm.puts "shl#{opcode[3, 1]} #{operands[1].sh4Operand}"
- else
- $asm.puts "shl#{opcode[3, 1]}#{operands[0].value} #{operands[1].sh4Operand}"
- end
- when "shld", "shad"
- $asm.puts "#{opcode} #{sh4Operands(operands)}"
- when "loaddReversedAndIncrementAddress"
- # As we are little endian, we don't use "fmov @Rm, DRn" here.
- $asm.puts "fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleLo}"
- $asm.puts "fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleHi}"
- when "storedReversedAndDecrementAddress"
- # As we are little endian, we don't use "fmov DRm, @Rn" here.
- $asm.puts "fmov.s #{operands[0].sh4SingleHi}, #{operands[1].sh4OperandPreDec}"
- $asm.puts "fmov.s #{operands[0].sh4SingleLo}, #{operands[1].sh4OperandPreDec}"
- when "ci2d"
- $asm.puts "lds #{operands[0].sh4Operand}, fpul"
- $asm.puts "float fpul, #{operands[1].sh4Operand}"
- when "fii2d"
- $asm.puts "lds #{operands[0].sh4Operand}, fpul"
- $asm.puts "fsts fpul, #{operands[2].sh4SingleLo}"
- $asm.puts "lds #{operands[1].sh4Operand}, fpul"
- $asm.puts "fsts fpul, #{operands[2].sh4SingleHi}"
- when "fd2ii"
- $asm.puts "flds #{operands[0].sh4SingleLo}, fpul"
- $asm.puts "sts fpul, #{operands[1].sh4Operand}"
- $asm.puts "flds #{operands[0].sh4SingleHi}, fpul"
- $asm.puts "sts fpul, #{operands[2].sh4Operand}"
- when "addd", "subd", "muld", "divd"
- sh4opcode = opcode[0..-2]
- $asm.puts "f#{sh4opcode} #{sh4Operands(operands)}"
- when "bcd2i"
- $asm.puts "ftrc #{operands[0].sh4Operand}, fpul"
- $asm.puts "sts fpul, #{operands[1].sh4Operand}"
- $asm.puts "float fpul, #{SH4_TMP_FPRS[0].sh4Operand}"
- $asm.puts "fcmp/eq #{sh4Operands([operands[0], SH4_TMP_FPRS[0]])}"
- $asm.puts "bf #{operands[2].asmLabel}"
- $asm.puts "tst #{sh4Operands([operands[1], operands[1]])}"
- $asm.puts "bt #{operands[2].asmLabel}"
- when "bdnan"
- emitSH4BranchIfNaN(operands)
- when "bdneq"
- emitSH4DoubleCondBranch("eq", true, operands)
- when "bdgteq"
- emitSH4DoubleCondBranch("lt", true, operands)
- when "bdlt"
- emitSH4DoubleCondBranch("lt", false, operands)
- when "bdlteq"
- emitSH4DoubleCondBranch("gt", true, operands)
- when "bdgt"
- emitSH4DoubleCondBranch("gt", false, operands)
- when "baddio", "baddpo", "bsubio", "bsubpo"
- raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands.size == 3
- $asm.puts "#{opcode[1, 3]}v #{sh4Operands([operands[0], operands[1]])}"
- $asm.puts "bt #{operands[2].asmLabel}"
- when "bmulio", "bmulpo"
- raise "Invalid operands number (#{operands.size})" unless operands.size == 5
- $asm.puts "dmuls.l #{sh4Operands([operands[2], operands[3]])}"
- $asm.puts "sts macl, #{operands[3].sh4Operand}"
- $asm.puts "cmp/pz #{operands[3].sh4Operand}"
- $asm.puts "movt #{operands[1].sh4Operand}"
- $asm.puts "add #-1, #{operands[1].sh4Operand}"
- $asm.puts "sts mach, #{operands[0].sh4Operand}"
- $asm.puts "cmp/eq #{sh4Operands([operands[0], operands[1]])}"
- $asm.puts "bf #{operands[4].asmLabel}"
- when "btiz", "btpz", "btbz", "btinz", "btpnz", "btbnz"
- if operands.size == 3
- $asm.puts "tst #{sh4Operands([operands[0], operands[1]])}"
- else
- if operands[0].sh4Operand == "r0"
- $asm.puts "cmp/eq #0, r0"
- else
- $asm.puts "tst #{sh4Operands([operands[0], operands[0]])}"
- end
- end
- emitSH4BranchIfT(operands[-1], (opcode[-2, 2] == "nz"))
- when "cieq", "cpeq", "cbeq"
- emitSH4CompareSet("eq", false, operands)
- when "cineq", "cpneq", "cbneq"
- emitSH4CompareSet("eq", true, operands)
- when "cib", "cpb", "cbb"
- emitSH4CompareSet("hs", true, operands)
- when "bieq", "bpeq", "bbeq"
- emitSH4CondBranch("eq", false, operands)
- when "bineq", "bpneq", "bbneq"
- emitSH4CondBranch("eq", true, operands)
- when "bib", "bpb", "bbb"
- emitSH4CondBranch("hs", true, operands)
- when "bia", "bpa", "bba"
- emitSH4CondBranch("hi", false, operands)
- when "bibeq", "bpbeq"
- emitSH4CondBranch("hi", true, operands)
- when "biaeq", "bpaeq"
- emitSH4CondBranch("hs", false, operands)
- when "bigteq", "bpgteq", "bbgteq"
- emitSH4CondBranch("ge", false, operands)
- when "bilt", "bplt", "bblt"
- emitSH4CondBranch("ge", true, operands)
- when "bigt", "bpgt", "bbgt"
- emitSH4CondBranch("gt", false, operands)
- when "bilteq", "bplteq", "bblteq"
- emitSH4CondBranch("gt", true, operands)
- when "bs"
- $asm.puts "cmp/pz #{operands[0].sh4Operand}"
- $asm.puts "bf #{operands[1].asmLabel}"
- when "call"
- if operands[0].is_a? LocalLabelReference
- $asm.puts "bsr #{operands[0].asmLabel}"
- $asm.puts "nop"
- elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
- emitSH4Branch("jsr", operands[0])
- else
- raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
- end
- when "jmp"
- if operands[0].is_a? LocalLabelReference
- $asm.puts "bra #{operands[0].asmLabel}"
- $asm.puts "nop"
- elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
- emitSH4Branch("jmp", operands[0])
- else
- raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
- end
- when "ret"
- $asm.puts "rts"
- $asm.puts "nop"
- when "loadb"
- $asm.puts "mov.b #{sh4Operands(operands)}"
- $asm.puts "extu.b #{sh4Operands([operands[1], operands[1]])}"
- when "storeb"
- $asm.puts "mov.b #{sh4Operands(operands)}"
- when "loadh"
- $asm.puts "mov.w #{sh4Operands(operands)}"
- $asm.puts "extu.w #{sh4Operands([operands[1], operands[1]])}"
- when "loadi", "loadis", "loadp", "storei", "storep"
- $asm.puts "mov.l #{sh4Operands(operands)}"
- when "move"
- if operands[0].is_a? ConstPoolEntry
- if operands[0].size == 16
- $asm.puts "mov.w #{operands[0].labelref.asmLabel}, #{operands[1].sh4Operand}"
- else
- $asm.puts "mov.l #{operands[0].labelref.asmLabel}, #{operands[1].sh4Operand}"
- end
- elsif operands[0].sh4Operand != operands[1].sh4Operand
- $asm.puts "mov #{sh4Operands(operands)}"
- end
- when "leap"
- if operands[0].is_a? BaseIndex
- biop = operands[0]
- $asm.puts "mov #{sh4Operands([biop.index, operands[1]])}"
- if biop.scaleShift > 0
- emitSH4ShiftImm(biop.scaleShift, operands[1], "l")
- end
- $asm.puts "add #{sh4Operands([biop.base, operands[1]])}"
- if biop.offset.value != 0
- $asm.puts "add #{sh4Operands([biop.offset, operands[1]])}"
- end
- elsif operands[0].is_a? Address
- if operands[0].base != operands[1]
- $asm.puts "mov #{sh4Operands([operands[0].base, operands[1]])}"
- end
- if operands[0].offset.value != 0
- $asm.puts "add #{sh4Operands([operands[0].offset, operands[1]])}"
- end
- else
- raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
- end
- when "ldspr"
- $asm.puts "lds #{sh4Operands(operands)}, pr"
- when "stspr"
- $asm.puts "sts pr, #{sh4Operands(operands)}"
- when "memfence"
- $asm.puts "synco"
- when "popCalleeSaves"
- $asm.puts "mov.l @r15+, r8"
- $asm.puts "mov.l @r15+, r9"
- $asm.puts "mov.l @r15+, r10"
- $asm.puts "mov.l @r15+, r11"
- $asm.puts "mov.l @r15+, r13"
- $asm.puts "lds.l @r15+, pr"
- $asm.puts "mov.l @r15+, r14"
- when "pushCalleeSaves"
- $asm.puts "mov.l r14, @-r15"
- $asm.puts "sts.l pr, @-r15"
- $asm.puts "mov.l r13, @-r15"
- $asm.puts "mov.l r11, @-r15"
- $asm.puts "mov.l r10, @-r15"
- $asm.puts "mov.l r9, @-r15"
- $asm.puts "mov.l r8, @-r15"
- when "break"
- # This special opcode always generates an illegal instruction exception.
- $asm.puts ".word 0xfffd"
- else
- lowerDefault
- end
- end
-end
-
diff --git a/Source/JavaScriptCore/offlineasm/transform.rb b/Source/JavaScriptCore/offlineasm/transform.rb
index 302971eb7..84dd0413b 100644
--- a/Source/JavaScriptCore/offlineasm/transform.rb
+++ b/Source/JavaScriptCore/offlineasm/transform.rb
@@ -423,6 +423,11 @@ class Immediate
end
end
+class StringLiteral
+ def validate
+ end
+end
+
class RegisterID
def validate
end
@@ -457,6 +462,13 @@ class Instruction
end
end
+class SubImmediates
+ def validate
+ raise "Invalid operand #{left.dump} to immediate subtraction" unless left.immediateOperand?
+ raise "Invalid operand #{right.dump} to immediate subtraction" unless right.immediateOperand?
+ end
+end
+
class Error
def validate
end
diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb
index e47f29561..c73d0aa18 100644
--- a/Source/JavaScriptCore/offlineasm/x86.rb
+++ b/Source/JavaScriptCore/offlineasm/x86.rb
@@ -1,4 +1,4 @@
-# Copyright (C) 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2012, 2014-2016 Apple Inc. All rights reserved.
# Copyright (C) 2013 Digia Plc. and/or its subsidiary(-ies)
#
# Redistribution and use in source and binary forms, with or without
@@ -24,12 +24,87 @@
require "config"
+# GPR conventions, to match the baseline JIT:
+#
+#
+# On 32-bit x86 (Windows and non-Windows)
+# a0, a1, a2, a3 exist only for offlineasm's convenience; the ABI does not
+# treat them as argument registers, so arguments still have to be pushed and
+# popped on the stack. a0 and a1 are ecx and edx to follow the fastcall convention.
+#
+# eax => t0, a2, r0
+# edx => t1, a1, r1
+# ecx => t2, a0
+# ebx => t3, a3 (callee-save)
+# esi => t4 (callee-save)
+# edi => t5 (callee-save)
+# ebp => cfr
+# esp => sp
+#
+# On x86-64 non-windows
+#
+# rax => t0, r0
+# rdi => a0
+# rsi => t1, a1
+# rdx => t2, a2, r1
+# rcx => t3, a3
+# r8 => t4
+# r10 => t5
+# rbx => csr0 (callee-save, PB, unused in baseline)
+# r12 => csr1 (callee-save)
+# r13 => csr2 (callee-save)
+# r14 => csr3 (callee-save, tagTypeNumber)
+# r15 => csr4 (callee-save, tagMask)
+# rsp => sp
+# rbp => cfr
+# r11 => (scratch)
+#
+# On x86-64 Windows
+# Arguments need to be pushed/popped on the stack in addition to being passed in
+# registers. Also, return values wider than 8 bytes are returned indirectly, through a caller-provided buffer.
+#
+# rax => t0, r0
+# rcx => a0
+# rdx => t1, a1, r1
+# r8 => t2, a2
+# r9 => t3, a3
+# r10 => t4
+# rbx => csr0 (callee-save, PB, unused in baseline)
+# rsi => csr1 (callee-save)
+# rdi => csr2 (callee-save)
+# r12 => csr3 (callee-save)
+# r13 => csr4 (callee-save)
+# r14 => csr5 (callee-save, tagTypeNumber)
+# r15 => csr6 (callee-save, tagMask)
+# rsp => sp
+# rbp => cfr
+# r11 => (scratch)
+
def isX64
case $activeBackend
when "X86"
false
+ when "X86_WIN"
+ false
when "X86_64"
true
+ when "X86_64_WIN"
+ true
+ else
+ raise "bad value for $activeBackend: #{$activeBackend}"
+ end
+end
+
+def isWin
+ case $activeBackend
+ when "X86"
+ false
+ when "X86_WIN"
+ true
+ when "X86_64"
+ false
+ when "X86_64_WIN"
+ true
else
raise "bad value for $activeBackend: #{$activeBackend}"
end
@@ -39,225 +114,222 @@ def useX87
case $activeBackend
when "X86"
true
+ when "X86_WIN"
+ true
when "X86_64"
false
+ when "X86_64_WIN"
+ false
else
raise "bad value for $activeBackend: #{$activeBackend}"
end
end
+def isMSVC
+ $options.has_key?(:assembler) && $options[:assembler] == "MASM"
+end
+
+def isIntelSyntax
+ $options.has_key?(:assembler) && $options[:assembler] == "MASM"
+end
+
+def register(name)
+ isIntelSyntax ? name : "%" + name
+end
+
+def offsetRegister(off, register)
+ isIntelSyntax ? "[#{off} + #{register}]" : "#{off}(#{register})"
+end
+
+def callPrefix
+ isIntelSyntax ? "" : "*"
+end
+
+def orderOperands(opA, opB)
+ isIntelSyntax ? "#{opB}, #{opA}" : "#{opA}, #{opB}"
+end
+
+def const(c)
+ isIntelSyntax ? "#{c}" : "$#{c}"
+end
+
+def getSizeString(kind)
+ if !isIntelSyntax
+ return ""
+ end
+
+ size = ""
+ case kind
+ when :byte
+ size = "byte"
+ when :half
+ size = "word"
+ when :int
+ size = "dword"
+ when :ptr
+ size = isX64 ? "qword" : "dword"
+ when :double
+ size = "qword"
+ when :quad
+ size = "qword"
+ else
+ raise "Invalid kind #{kind}"
+ end
+
+ return size + " " + "ptr" + " ";
+end
+
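
These small helpers are what let the same lowering code emit either AT&T (GNU assembler) or Intel (MASM) operand syntax. A hedged illustration of their outputs (values chosen for the example, not taken from generated output):

    # AT&T syntax (default):
    #   register("eax")             => "%eax"
    #   const(8)                    => "$8"
    #   offsetRegister(8, "%ebp")   => "8(%ebp)"
    #   orderOperands("$8", "%eax") => "$8, %eax"      (source, destination)
    # Intel syntax (when the assembler option selects MASM):
    #   register("eax")             => "eax"
    #   const(8)                    => "8"
    #   offsetRegister(8, "ebp")    => "[8 + ebp]"
    #   orderOperands("8", "eax")   => "eax, 8"        (destination, source)
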
class SpecialRegister < NoChildren
def x86Operand(kind)
raise unless @name =~ /^r/
raise unless isX64
case kind
when :half
- "%" + @name + "w"
+ register(@name + "w")
when :int
- "%" + @name + "d"
+ register(@name + "d")
when :ptr
- "%" + @name
+ register(@name)
when :quad
- "%" + @name
+ register(@name)
else
raise
end
end
def x86CallOperand(kind)
# Call operands are not allowed to be partial registers.
- "*#{x86Operand(:quad)}"
+ "#{callPrefix}#{x86Operand(:quad)}"
end
end
X64_SCRATCH_REGISTER = SpecialRegister.new("r11")
+def x86GPRName(name, kind)
+ case name
+ when "eax", "ebx", "ecx", "edx"
+ name8 = name[1] + 'l'
+ name16 = name[1..2]
+ when "esi", "edi", "ebp", "esp"
+ name16 = name[1..2]
+ name8 = name16 + 'l'
+ when "rax", "rbx", "rcx", "rdx"
+ raise "bad GPR name #{name} in 32-bit X86" unless isX64
+ name8 = name[1] + 'l'
+ name16 = name[1..2]
+ when "r8", "r9", "r10", "r12", "r13", "r14", "r15"
+ raise "bad GPR name #{name} in 32-bit X86" unless isX64
+ case kind
+ when :half
+ return register(name + "w")
+ when :int
+ return register(name + "d")
+ when :ptr
+ return register(name)
+ when :quad
+ return register(name)
+ end
+ else
+ raise "bad GPR name #{name}"
+ end
+ case kind
+ when :byte
+ register(name8)
+ when :half
+ register(name16)
+ when :int
+ register("e" + name16)
+ when :ptr
+ register((isX64 ? "r" : "e") + name16)
+ when :quad
+ isX64 ? register("r" + name16) : raise
+ else
+ raise "invalid kind #{kind} for GPR #{name} in X86"
+ end
+end
+
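
For reference, a few sample expansions of x86GPRName (shown for AT&T syntax; with MASM the % prefix is dropped by register()):

    #   x86GPRName("eax", :byte) => "%al"
    #   x86GPRName("eax", :half) => "%ax"
    #   x86GPRName("eax", :int)  => "%eax"
    #   x86GPRName("eax", :quad) => "%rax"   (64-bit backends only)
    #   x86GPRName("r8",  :int)  => "%r8d"
    #   x86GPRName("r8",  :quad) => "%r8"
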
class RegisterID
def supports8BitOnX86
- case name
- when "t0", "a0", "r0", "t1", "a1", "r1", "t2", "t3", "t4", "t5"
+ case x86GPR
+ when "eax", "ebx", "ecx", "edx", "edi", "esi", "ebp", "esp"
true
- when "cfr", "ttnr", "tmr"
+ when "r8", "r9", "r10", "r12", "r13", "r14", "r15"
false
- when "t6"
- isX64
else
raise
end
end
-
- def x86Operand(kind)
- case name
- when "t0", "a0", "r0"
- case kind
- when :byte
- "%al"
- when :half
- "%ax"
- when :int
- "%eax"
- when :ptr
- isX64 ? "%rax" : "%eax"
- when :quad
- isX64 ? "%rax" : raise
- else
- raise "Invalid kind #{kind} for name #{name}"
- end
- when "t1", "a1", "r1"
- case kind
- when :byte
- "%dl"
- when :half
- "%dx"
- when :int
- "%edx"
- when :ptr
- isX64 ? "%rdx" : "%edx"
- when :quad
- isX64 ? "%rdx" : raise
- else
- raise
- end
- when "t2"
- case kind
- when :byte
- "%cl"
- when :half
- "%cx"
- when :int
- "%ecx"
- when :ptr
- isX64 ? "%rcx" : "%ecx"
- when :quad
- isX64 ? "%rcx" : raise
- else
- raise
- end
- when "t3"
- case kind
- when :byte
- "%bl"
- when :half
- "%bx"
- when :int
- "%ebx"
- when :ptr
- isX64 ? "%rbx" : "%ebx"
- when :quad
- isX64 ? "%rbx" : raise
- else
- raise
- end
- when "t4"
- case kind
- when :byte
- "%sil"
- when :half
- "%si"
- when :int
- "%esi"
- when :ptr
- isX64 ? "%rsi" : "%esi"
- when :quad
- isX64 ? "%rsi" : raise
- else
- raise
- end
- when "cfr"
- if isX64
- case kind
- when :half
- "%bp"
- when :int
- "%ebp"
- when :ptr
- "%rbp"
- when :quad
- "%rbp"
- else
- raise
- end
- else
- case kind
- when :half
- "%bp"
- when :int
- "%ebp"
- when :ptr
- "%ebp"
- else
- raise
- end
- end
- when "sp"
- case kind
- when :byte
- "%spl"
- when :half
- "%sp"
- when :int
- "%esp"
- when :ptr
- isX64 ? "%rsp" : "%esp"
- when :quad
- isX64 ? "%rsp" : raise
+
+ def x86GPR
+ if isX64
+ case name
+ when "t0", "r0"
+ "eax"
+ when "r1"
+ "edx" # t1 = a1 when isWin, t2 = a2 otherwise
+ when "a0"
+ isWin ? "ecx" : "edi"
+ when "t1", "a1"
+ isWin ? "edx" : "esi"
+ when "t2", "a2"
+ isWin ? "r8" : "edx"
+ when "t3", "a3"
+ isWin ? "r9" : "ecx"
+ when "t4"
+ isWin ? "r10" : "r8"
+ when "t5"
+ raise "cannot use register #{name} on X86-64 Windows" unless not isWin
+ "r10"
+ when "csr0"
+ "ebx"
+ when "csr1"
+ isWin ? "esi" : "r12"
+ when "csr2"
+ isWin ? "edi" : "r13"
+ when "csr3"
+ isWin ? "r12" : "r14"
+ when "csr4"
+ isWin ? "r13" : "r15"
+ when "csr5"
+ raise "cannot use register #{name} on X86-64" unless isWin
+ "r14"
+ when "csr6"
+ raise "cannot use register #{name} on X86-64" unless isWin
+ "r15"
+ when "cfr"
+ "ebp"
+ when "sp"
+ "esp"
else
- raise
- end
- when "t5"
- case kind
- when :byte
- "%dil"
- when :half
- "%di"
- when :int
- "%edi"
- when :ptr
- isX64 ? "%rdi" : "%edi"
- when :quad
- isX64 ? "%rdi" : raise
- end
- when "t6"
- raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64
- case kind
- when :half
- "%r10w"
- when :int
- "%r10d"
- when :ptr
- "%r10"
- when :quad
- "%r10"
- end
- when "csr1"
- raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64
- case kind
- when :half
- "%r14w"
- when :int
- "%r14d"
- when :ptr
- "%r14"
- when :quad
- "%r14"
- end
- when "csr2"
- raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}" unless isX64
- case kind
- when :half
- "%r15w"
- when :int
- "%r15d"
- when :ptr
- "%r15"
- when :quad
- "%r15"
+ raise "cannot use register #{name} on X86"
end
else
- raise "Bad register #{name} for X86 at #{codeOriginString}"
+ case name
+ when "t0", "r0", "a2"
+ "eax"
+ when "t1", "r1", "a1"
+ "edx"
+ when "t2", "a0"
+ "ecx"
+ when "t3", "a3"
+ "ebx"
+ when "t4"
+ "esi"
+ when "t5"
+ "edi"
+ when "cfr"
+ "ebp"
+ when "sp"
+ "esp"
+ end
end
end
+
+ def x86Operand(kind)
+ x86GPRName(x86GPR, kind)
+ end
+
def x86CallOperand(kind)
- isX64 ? "*#{x86Operand(:quad)}" : "*#{x86Operand(:ptr)}"
+ "#{callPrefix}#{x86Operand(:ptr)}"
end
end
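
Putting the new x86GPR table together with x86GPRName, the same offlineasm register lowers to a different concrete register depending on the active backend; a couple of hedged examples (AT&T spellings shown):

    #   X86_64      : RegisterID.forName(nil, "t4").x86Operand(:quad) => "%r8"
    #   X86_64_WIN  : RegisterID.forName(nil, "t4").x86Operand(:quad) => "%r10"
    #   X86 (32-bit): RegisterID.forName(nil, "t4").x86Operand(:ptr)  => "%esi"
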
@@ -267,17 +339,17 @@ class FPRegisterID
raise if useX87
case name
when "ft0", "fa0", "fr"
- "%xmm0"
+ register("xmm0")
when "ft1", "fa1"
- "%xmm1"
+ register("xmm1")
when "ft2", "fa2"
- "%xmm2"
+ register("xmm2")
when "ft3", "fa3"
- "%xmm3"
+ register("xmm3")
when "ft4"
- "%xmm4"
+ register("xmm4")
when "ft5"
- "%xmm5"
+ register("xmm5")
else
raise "Bad register #{name} for X86 at #{codeOriginString}"
end
@@ -297,10 +369,10 @@ class FPRegisterID
def x87Operand(offset)
raise unless useX87
raise unless offset == 0 or offset == 1
- "%st(#{x87DefaultStackPosition + offset})"
+ "#{register("st")}(#{x87DefaultStackPosition + offset})"
end
def x86CallOperand(kind)
- "*#{x86Operand(kind)}"
+ "#{callPrefix}#{x86Operand(kind)}"
end
end
@@ -313,7 +385,7 @@ class Immediate
end
end
def x86Operand(kind)
- "$#{value}"
+ "#{const(value)}"
end
def x86CallOperand(kind)
"#{value}"
@@ -326,13 +398,13 @@ class Address
end
def x86AddressOperand(addressKind)
- "#{offset.value}(#{base.x86Operand(addressKind)})"
+ "#{offsetRegister(offset.value, base.x86Operand(addressKind))}"
end
def x86Operand(kind)
- x86AddressOperand(:ptr)
+ "#{getSizeString(kind)}#{x86AddressOperand(:ptr)}"
end
def x86CallOperand(kind)
- "*#{x86Operand(kind)}"
+ "#{callPrefix}#{x86Operand(kind)}"
end
end
@@ -342,15 +414,23 @@ class BaseIndex
end
def x86AddressOperand(addressKind)
- "#{offset.value}(#{base.x86Operand(addressKind)}, #{index.x86Operand(addressKind)}, #{scale})"
+ if !isIntelSyntax
+ "#{offset.value}(#{base.x86Operand(addressKind)}, #{index.x86Operand(addressKind)}, #{scale})"
+ else
+ "#{getSizeString(addressKind)}[#{offset.value} + #{base.x86Operand(addressKind)} + #{index.x86Operand(addressKind)} * #{scale}]"
+ end
end
def x86Operand(kind)
- x86AddressOperand(:ptr)
+ if !isIntelSyntax
+ x86AddressOperand(:ptr)
+ else
+ "#{getSizeString(kind)}[#{offset.value} + #{base.x86Operand(:ptr)} + #{index.x86Operand(:ptr)} * #{scale}]"
+ end
end
def x86CallOperand(kind)
- "*#{x86Operand(kind)}"
+ "#{callPrefix}#{x86Operand(kind)}"
end
end
@@ -368,7 +448,7 @@ class AbsoluteAddress
end
def x86CallOperand(kind)
- "*#{address.value}"
+ "#{callPrefix}#{address.value}"
end
end
@@ -379,6 +459,9 @@ class LabelReference
end
class LocalLabelReference
+ def x86Operand(kind)
+ asmLabel
+ end
def x86CallOperand(kind)
asmLabel
end
@@ -423,20 +506,29 @@ class Sequence
return newList
end
+ def getModifiedListX86_64_WIN
+ getModifiedListX86_64
+ end
end
class Instruction
+
def x86Operands(*kinds)
raise unless kinds.size == operands.size
result = []
kinds.size.times {
| idx |
- result << operands[idx].x86Operand(kinds[idx])
+ i = isIntelSyntax ? (kinds.size - idx - 1) : idx
+ result << operands[i].x86Operand(kinds[i])
}
result.join(", ")
end
def x86Suffix(kind)
+ if isIntelSyntax
+ return ""
+ end
+
case kind
when :byte
"b"
@@ -473,19 +565,23 @@ class Instruction
raise
end
end
+
+ def getImplicitOperandString
+ isIntelSyntax ? "st(0), " : ""
+ end
def handleX86OpWithNumOperands(opcode, kind, numOperands)
if numOperands == 3
if operands[0] == operands[2]
- $asm.puts "#{opcode} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "#{opcode} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
elsif operands[1] == operands[2]
- $asm.puts "#{opcode} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
else
- $asm.puts "mov#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
- $asm.puts "#{opcode} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "mov#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
+ $asm.puts "#{opcode} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
end
else
- $asm.puts "#{opcode} #{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}"
+ $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(kind), operands[1].x86Operand(kind))}"
end
end
@@ -494,13 +590,12 @@ class Instruction
end
def handleX86Shift(opcode, kind)
- if operands[0].is_a? Immediate or operands[0] == RegisterID.forName(nil, "t2")
- $asm.puts "#{opcode} #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(kind)}"
+ if operands[0].is_a? Immediate or operands[0].x86GPR == "ecx"
+ $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(kind))}"
else
- cx = RegisterID.forName(nil, "t2")
- $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{cx.x86Operand(:ptr)}"
- $asm.puts "#{opcode} %cl, #{operands[1].x86Operand(kind)}"
- $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{cx.x86Operand(:ptr)}"
+ $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{x86GPRName("ecx", :ptr)}"
+ $asm.puts "#{opcode} #{orderOperands(register("cl"), operands[1].x86Operand(kind))}"
+ $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{x86GPRName("ecx", :ptr)}"
end
end
@@ -510,9 +605,9 @@ class Instruction
else
case mode
when :normal
- $asm.puts "ucomisd #{operands[1].x86Operand(:double)}, #{operands[0].x86Operand(:double)}"
+ $asm.puts "ucomisd #{orderOperands(operands[1].x86Operand(:double), operands[0].x86Operand(:double))}"
when :reverse
- $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
else
raise mode.inspect
end
@@ -522,11 +617,11 @@ class Instruction
def handleX86IntCompare(opcodeSuffix, kind)
if operands[0].is_a? Immediate and operands[0].value == 0 and operands[1].is_a? RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne")
- $asm.puts "test#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[1].x86Operand(kind)}"
+ $asm.puts "test#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[1].x86Operand(kind))}"
elsif operands[1].is_a? Immediate and operands[1].value == 0 and operands[0].is_a? RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne")
- $asm.puts "test#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[0].x86Operand(kind)}"
+ $asm.puts "test#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[0].x86Operand(kind))}"
else
- $asm.puts "cmp#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[0].x86Operand(kind)}"
+ $asm.puts "cmp#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[0].x86Operand(kind))}"
end
end
@@ -538,12 +633,20 @@ class Instruction
def handleX86Set(setOpcode, operand)
if operand.supports8BitOnX86
$asm.puts "#{setOpcode} #{operand.x86Operand(:byte)}"
- $asm.puts "movzbl #{operand.x86Operand(:byte)}, #{operand.x86Operand(:int)}"
+ if !isIntelSyntax
+ $asm.puts "movzbl #{orderOperands(operand.x86Operand(:byte), operand.x86Operand(:int))}"
+ else
+ $asm.puts "movzx #{orderOperands(operand.x86Operand(:byte), operand.x86Operand(:int))}"
+ end
else
- ax = RegisterID.new(nil, "t0")
+ ax = RegisterID.new(nil, "r0")
$asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}"
- $asm.puts "#{setOpcode} %al"
- $asm.puts "movzbl %al, %eax"
+ $asm.puts "#{setOpcode} #{ax.x86Operand(:byte)}"
+ if !isIntelSyntax
+ $asm.puts "movzbl #{ax.x86Operand(:byte)}, #{ax.x86Operand(:int)}"
+ else
+ $asm.puts "movzx #{ax.x86Operand(:int)}, #{ax.x86Operand(:byte)}"
+ end
$asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}"
end
end
@@ -568,10 +671,10 @@ class Instruction
if value.is_a? RegisterID
$asm.puts "test#{x86Suffix(kind)} #{value.x86Operand(kind)}, #{value.x86Operand(kind)}"
else
- $asm.puts "cmp#{x86Suffix(kind)} $0, #{value.x86Operand(kind)}"
+ $asm.puts "cmp#{x86Suffix(kind)} #{orderOperands(const(0), value.x86Operand(kind))}"
end
else
- $asm.puts "test#{x86Suffix(kind)} #{mask.x86Operand(kind)}, #{value.x86Operand(kind)}"
+ $asm.puts "test#{x86Suffix(kind)} #{orderOperands(mask.x86Operand(kind), value.x86Operand(kind))}"
end
end
@@ -601,7 +704,7 @@ class Instruction
def handleX86SubBranch(branchOpcode, kind)
if operands.size == 4 and operands[1] == operands[2]
$asm.puts "neg#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}"
- $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
else
handleX86OpWithNumOperands("sub#{x86Suffix(kind)}", kind, operands.size - 1)
end
@@ -619,25 +722,29 @@ class Instruction
def handleX86Add(kind)
if operands.size == 3 and operands[1] == operands[2]
unless Immediate.new(nil, 0) == operands[0]
- $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
end
elsif operands.size == 3 and operands[0].is_a? Immediate
raise unless operands[1].is_a? RegisterID
raise unless operands[2].is_a? RegisterID
if operands[0].value == 0
unless operands[1] == operands[2]
- $asm.puts "mov#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "mov#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
end
else
- $asm.puts "lea#{x86Suffix(kind)} #{operands[0].value}(#{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}"
+ $asm.puts "lea#{x86Suffix(kind)} #{orderOperands(offsetRegister(operands[0].value, operands[1].x86Operand(kind)), operands[2].x86Operand(kind))}"
end
elsif operands.size == 3 and operands[0].is_a? RegisterID
raise unless operands[1].is_a? RegisterID
raise unless operands[2].is_a? RegisterID
if operands[0] == operands[2]
- $asm.puts "add#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
else
- $asm.puts "lea#{x86Suffix(kind)} (#{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}"
+ if !isIntelSyntax
+ $asm.puts "lea#{x86Suffix(kind)} (#{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}"
+ else
+ $asm.puts "lea#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}, [#{operands[0].x86Operand(kind)} + #{operands[1].x86Operand(kind)}]"
+ end
end
else
unless Immediate.new(nil, 0) == operands[0]
@@ -649,7 +756,7 @@ class Instruction
def handleX86Sub(kind)
if operands.size == 3 and operands[1] == operands[2]
$asm.puts "neg#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}"
- $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
else
handleX86Op("sub#{x86Suffix(kind)}", kind)
end
@@ -665,6 +772,20 @@ class Instruction
end
end
+ def handleX86Peek()
+ sp = RegisterID.new(nil, "sp")
+ opA = offsetRegister(operands[0].value * x86Bytes(:ptr), sp.x86Operand(:ptr))
+ opB = operands[1].x86Operand(:ptr)
+ $asm.puts "mov#{x86Suffix(:ptr)} #{orderOperands(opA, opB)}"
+ end
+
+ def handleX86Poke()
+ sp = RegisterID.new(nil, "sp")
+ opA = operands[0].x86Operand(:ptr)
+ opB = offsetRegister(operands[1].value * x86Bytes(:ptr), sp.x86Operand(:ptr))
+ $asm.puts "mov#{x86Suffix(:ptr)} #{orderOperands(opA, opB)}"
+ end
+
def handleMove
if Immediate.new(nil, 0) == operands[0] and operands[1].is_a? RegisterID
if isX64
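handleX86Peek and handleX86Poke above simply scale a slot index by the pointer size and address it off sp. A worked example of the arithmetic, with the pointer size hard-coded to 8 for the 64-bit case purely for illustration:

    # Illustration: "peek 2, t0" reads the slot 2 * x86Bytes(:ptr) bytes above sp.
    pointer_bytes = 8                                 # x86Bytes(:ptr) on a 64-bit backend
    slot = 2
    puts "movq #{slot * pointer_bytes}(%rsp), %rax"   # => movq 16(%rsp), %rax
    puts "mov rax, [rsp + #{slot * pointer_bytes}]"   # the same load under MASM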
@@ -680,22 +801,23 @@ class Instruction
end
end
end
-
+
def handleX87Compare(mode)
+ floatingPointCompareImplicitOperand = getImplicitOperandString
case mode
when :normal
if (operands[0].x87DefaultStackPosition == 0)
- $asm.puts "fucomi #{operands[1].x87Operand(0)}"
+ $asm.puts "fucomi #{floatingPointCompareImplicitOperand}#{operands[1].x87Operand(0)}"
else
$asm.puts "fld #{operands[0].x87Operand(0)}"
- $asm.puts "fucomip #{operands[1].x87Operand(1)}"
+ $asm.puts "fucomip #{floatingPointCompareImplicitOperand}#{operands[1].x87Operand(1)}"
end
when :reverse
if (operands[1].x87DefaultStackPosition == 0)
- $asm.puts "fucomi #{operands[0].x87Operand(0)}"
+ $asm.puts "fucomi #{floatingPointCompareImplicitOperand}#{operands[0].x87Operand(0)}"
else
$asm.puts "fld #{operands[1].x87Operand(0)}"
- $asm.puts "fucomip #{operands[0].x87Operand(1)}"
+ $asm.puts "fucomip #{floatingPointCompareImplicitOperand}#{operands[0].x87Operand(1)}"
end
else
raise mode.inspect
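getImplicitOperandString is introduced elsewhere in this patch and not shown here; as far as this hunk goes, the intent is that gas lets the top-of-stack operand of fucomi/fucomip stay implicit while MASM wants it written out. A sketch under that assumption:

    # Sketch: MASM spells the implicit st(0) operand explicitly,
    # e.g. "fucomip st(0), st(1)"; gas accepts plain "fucomip %st(1)".
    def getImplicitOperandString(intelSyntax = false)
        intelSyntax ? "st(0), " : ""
    end

    puts "fucomip #{getImplicitOperandString(true)}st(1)"   # => fucomip st(0), st(1)
    puts "fucomip #{getImplicitOperandString}%st(1)"        # => fucomip %st(1)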
@@ -704,12 +826,16 @@ class Instruction
def handleX87BinOp(opcode, opcodereverse)
if (operands[1].x87DefaultStackPosition == 0)
- $asm.puts "#{opcode} #{operands[0].x87Operand(0)}, %st"
+ $asm.puts "#{opcode} #{orderOperands(operands[0].x87Operand(0), register("st"))}"
elsif (operands[0].x87DefaultStackPosition == 0)
- $asm.puts "#{opcodereverse} %st, #{operands[1].x87Operand(0)}"
+ if !isIntelSyntax
+ $asm.puts "#{opcodereverse} #{register("st")}, #{operands[1].x87Operand(0)}"
+ else
+ $asm.puts "#{opcode} #{operands[1].x87Operand(0)}, #{register("st")}"
+ end
else
$asm.puts "fld #{operands[0].x87Operand(0)}"
- $asm.puts "#{opcodereverse}p %st, #{operands[1].x87Operand(1)}"
+ $asm.puts "#{opcodereverse}p #{orderOperands(register("st"), operands[1].x87Operand(1))}"
end
end
@@ -717,15 +843,26 @@ class Instruction
raise unless $activeBackend == "X86"
lowerX86Common
end
+
+ def lowerX86_WIN
+ raise unless $activeBackend == "X86_WIN"
+ lowerX86Common
+ end
def lowerX86_64
raise unless $activeBackend == "X86_64"
lowerX86Common
end
-
+
+ def lowerX86_64_WIN
+ raise unless $activeBackend == "X86_64_WIN"
+ lowerX86Common
+ end
+
def lowerX86Common
$asm.codeOrigin codeOriginString if $enableCodeOriginComments
$asm.annotation annotation if $enableInstrAnnotations
+ $asm.debugAnnotation codeOrigin.debugDirective if $enableDebugAnnotations
case opcode
when "addi"
@@ -735,13 +872,13 @@ class Instruction
when "addq"
handleX86Add(:quad)
when "andi"
- handleX86Op("andl", :int)
+ handleX86Op("and#{x86Suffix(:int)}", :int)
when "andp"
handleX86Op("and#{x86Suffix(:ptr)}", :ptr)
when "andq"
handleX86Op("and#{x86Suffix(:quad)}", :quad)
when "lshifti"
- handleX86Shift("sall", :int)
+ handleX86Shift("sal#{x86Suffix(:int)}", :int)
when "lshiftp"
handleX86Shift("sal#{x86Suffix(:ptr)}", :ptr)
when "lshiftq"
@@ -753,27 +890,27 @@ class Instruction
when "mulq"
handleX86Mul(:quad)
when "negi"
- $asm.puts "negl #{x86Operands(:int)}"
+ $asm.puts "neg#{x86Suffix(:int)} #{x86Operands(:int)}"
when "negp"
$asm.puts "neg#{x86Suffix(:ptr)} #{x86Operands(:ptr)}"
when "negq"
$asm.puts "neg#{x86Suffix(:quad)} #{x86Operands(:quad)}"
when "noti"
- $asm.puts "notl #{x86Operands(:int)}"
+ $asm.puts "not#{x86Suffix(:int)} #{x86Operands(:int)}"
when "ori"
- handleX86Op("orl", :int)
+ handleX86Op("or#{x86Suffix(:int)}", :int)
when "orp"
handleX86Op("or#{x86Suffix(:ptr)}", :ptr)
when "orq"
handleX86Op("or#{x86Suffix(:quad)}", :quad)
when "rshifti"
- handleX86Shift("sarl", :int)
+ handleX86Shift("sar#{x86Suffix(:int)}", :int)
when "rshiftp"
handleX86Shift("sar#{x86Suffix(:ptr)}", :ptr)
when "rshiftq"
handleX86Shift("sar#{x86Suffix(:quad)}", :quad)
when "urshifti"
- handleX86Shift("shrl", :int)
+ handleX86Shift("shr#{x86Suffix(:int)}", :int)
when "urshiftp"
handleX86Shift("shr#{x86Suffix(:ptr)}", :ptr)
when "urshiftq"
@@ -785,36 +922,60 @@ class Instruction
when "subq"
handleX86Sub(:quad)
when "xori"
- handleX86Op("xorl", :int)
+ handleX86Op("xor#{x86Suffix(:int)}", :int)
when "xorp"
handleX86Op("xor#{x86Suffix(:ptr)}", :ptr)
when "xorq"
handleX86Op("xor#{x86Suffix(:quad)}", :quad)
when "loadi", "storei"
- $asm.puts "movl #{x86Operands(:int, :int)}"
+ $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}"
when "loadis"
if isX64
- $asm.puts "movslq #{x86Operands(:int, :quad)}"
+ if !isIntelSyntax
+ $asm.puts "movslq #{x86Operands(:int, :quad)}"
+ else
+ $asm.puts "movsxd #{x86Operands(:int, :quad)}"
+ end
else
- $asm.puts "movl #{x86Operands(:int, :int)}"
+ $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}"
end
when "loadp", "storep"
$asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}"
when "loadq", "storeq"
$asm.puts "mov#{x86Suffix(:quad)} #{x86Operands(:quad, :quad)}"
when "loadb"
- $asm.puts "movzbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}"
+ if !isIntelSyntax
+ $asm.puts "movzbl #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+ else
+ $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+ end
when "loadbs"
- $asm.puts "movsbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}"
+ if !isIntelSyntax
+ $asm.puts "movsbl #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+ else
+ $asm.puts "movsx #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+ end
when "loadh"
- $asm.puts "movzwl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}"
+ if !isIntelSyntax
+ $asm.puts "movzwl #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+ else
+ $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+ end
when "loadhs"
- $asm.puts "movswl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}"
+ if !isIntelSyntax
+ $asm.puts "movswl #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+ else
+ $asm.puts "movsx #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+ end
when "storeb"
- $asm.puts "movb #{x86Operands(:byte, :byte)}"
+ $asm.puts "mov#{x86Suffix(:byte)} #{x86Operands(:byte, :byte)}"
when "loadd"
if useX87
- $asm.puts "fldl #{operands[0].x86Operand(:double)}"
+ if !isIntelSyntax
+ $asm.puts "fldl #{operands[0].x86Operand(:double)}"
+ else
+ $asm.puts "fld #{operands[0].x86Operand(:double)}"
+ end
$asm.puts "fstp #{operands[1].x87Operand(1)}"
else
$asm.puts "movsd #{x86Operands(:double, :double)}"
@@ -833,10 +994,14 @@ class Instruction
when "stored"
if useX87
if (operands[0].x87DefaultStackPosition == 0)
- $asm.puts "fstl #{operands[1].x86Operand(:double)}"
+ $asm.puts "fst#{x86Suffix(:int)} #{operands[1].x86Operand(:double)}"
else
$asm.puts "fld #{operands[0].x87Operand(0)}"
- $asm.puts "fstpl #{operands[1].x86Operand(:double)}"
+ if !isIntelSyntax
+ $asm.puts "fstpl #{operands[1].x86Operand(:double)}"
+ else
+ $asm.puts "fstp #{operands[1].x86Operand(:double)}"
+ end
end
else
$asm.puts "movsd #{x86Operands(:double, :double)}"
@@ -876,17 +1041,17 @@ class Instruction
when "ci2d"
if useX87
sp = RegisterID.new(nil, "sp")
- $asm.puts "movl #{operands[0].x86Operand(:int)}, -4(#{sp.x86Operand(:ptr)})"
- $asm.puts "fildl -4(#{sp.x86Operand(:ptr)})"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), offsetRegister(-4, sp.x86Operand(:ptr)))}"
+ $asm.puts "fild#{x86Suffix(:ptr)} #{getSizeString(:ptr)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
$asm.puts "fstp #{operands[1].x87Operand(1)}"
else
- $asm.puts "cvtsi2sd #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:double)}"
+ $asm.puts "cvtsi2sd #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:double))}"
end
when "bdeq"
if useX87
handleX87Compare(:normal)
else
- $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
end
if operands[0] == operands[1]
# This is just a jump ordered, which is a jnp.
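bdeq of an operand against itself only has to answer "is this NaN?", since x == x fails exactly for NaN and ucomisd sets the parity flag on an unordered compare; the whole branch therefore collapses to ucomisd plus jnp. For example, with ft0 shown as %xmm0 purely for illustration:

    # Illustration of the lowering for "bdeq ft0, ft0, .label" on the SSE2 path.
    puts "ucomisd %xmm0, %xmm0"   # unordered (PF=1) iff %xmm0 holds NaN
    puts "jnp .label"             # taken for every non-NaN value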
@@ -913,7 +1078,7 @@ class Instruction
if useX87
handleX87Compare(:normal)
else
- $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
end
if operands[0] == operands[1]
# This is just a jump unordered, which is a jp.
@@ -947,23 +1112,24 @@ class Instruction
$asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
when "bcd2i"
if useX87
+ floatingPointCompareImplicitOperand = getImplicitOperandString
sp = RegisterID.new(nil, "sp")
if (operands[0].x87DefaultStackPosition == 0)
$asm.puts "fistl -4(#{sp.x86Operand(:ptr)})"
else
$asm.puts "fld #{operands[0].x87Operand(0)}"
- $asm.puts "fistpl -4(#{sp.x86Operand(:ptr)})"
+ $asm.puts "fistp#{x86Suffix(:ptr)} #{getSizeString(:ptr)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
end
- $asm.puts "movl -4(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:int)}"
- $asm.puts "testl #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-4, sp.x86Operand(:ptr)), operands[1].x86Operand(:int))}"
+ $asm.puts "test#{x86Suffix(:int)} #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
$asm.puts "je #{operands[2].asmLabel}"
- $asm.puts "fildl -4(#{sp.x86Operand(:ptr)})"
- $asm.puts "fucomip #{operands[0].x87Operand(1)}"
+ $asm.puts "fild#{x86Suffix(:int)} #{getSizeString(:int)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
+ $asm.puts "fucomip #{floatingPointCompareImplicitOperand}#{operands[0].x87Operand(1)}"
$asm.puts "jp #{operands[2].asmLabel}"
$asm.puts "jne #{operands[2].asmLabel}"
else
$asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
- $asm.puts "testl #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "test#{x86Suffix(:int)} #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
$asm.puts "je #{operands[2].asmLabel}"
$asm.puts "cvtsi2sd #{operands[1].x86Operand(:int)}, %xmm7"
$asm.puts "ucomisd #{operands[0].x86Operand(:double)}, %xmm7"
@@ -987,36 +1153,16 @@ class Instruction
| op |
$asm.puts "push #{op.x86Operand(:ptr)}"
}
- when "popCalleeSaves"
- if isX64
- $asm.puts "pop %rbx"
- $asm.puts "pop %r15"
- $asm.puts "pop %r14"
- $asm.puts "pop %r13"
- $asm.puts "pop %r12"
- else
- $asm.puts "pop %ebx"
- $asm.puts "pop %edi"
- $asm.puts "pop %esi"
- end
- when "pushCalleeSaves"
- if isX64
- $asm.puts "push %r12"
- $asm.puts "push %r13"
- $asm.puts "push %r14"
- $asm.puts "push %r15"
- $asm.puts "push %rbx"
- else
- $asm.puts "push %esi"
- $asm.puts "push %edi"
- $asm.puts "push %ebx"
- end
when "move"
handleMove
when "sxi2q"
- $asm.puts "movslq #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:quad)}"
+ if !isIntelSyntax
+ $asm.puts "movslq #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:quad)}"
+ else
+ $asm.puts "movsxd #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:quad))}"
+ end
when "zxi2q"
- $asm.puts "movl #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:int))}"
when "nop"
$asm.puts "nop"
when "bieq"
@@ -1126,25 +1272,25 @@ class Instruction
when "jmp"
$asm.puts "jmp #{operands[0].x86CallOperand(:ptr)}"
when "baddio"
- handleX86OpBranch("addl", "jo", :int)
+ handleX86OpBranch("add#{x86Suffix(:int)}", "jo", :int)
when "baddpo"
handleX86OpBranch("add#{x86Suffix(:ptr)}", "jo", :ptr)
when "baddqo"
handleX86OpBranch("add#{x86Suffix(:quad)}", "jo", :quad)
when "baddis"
- handleX86OpBranch("addl", "js", :int)
+ handleX86OpBranch("add#{x86Suffix(:int)}", "js", :int)
when "baddps"
handleX86OpBranch("add#{x86Suffix(:ptr)}", "js", :ptr)
when "baddqs"
handleX86OpBranch("add#{x86Suffix(:quad)}", "js", :quad)
when "baddiz"
- handleX86OpBranch("addl", "jz", :int)
+ handleX86OpBranch("add#{x86Suffix(:int)}", "jz", :int)
when "baddpz"
handleX86OpBranch("add#{x86Suffix(:ptr)}", "jz", :ptr)
when "baddqz"
handleX86OpBranch("add#{x86Suffix(:quad)}", "jz", :quad)
when "baddinz"
- handleX86OpBranch("addl", "jnz", :int)
+ handleX86OpBranch("add#{x86Suffix(:int)}", "jnz", :int)
when "baddpnz"
handleX86OpBranch("add#{x86Suffix(:ptr)}", "jnz", :ptr)
when "baddqnz"
@@ -1158,13 +1304,13 @@ class Instruction
when "bsubinz"
handleX86SubBranch("jnz", :int)
when "bmulio"
- handleX86OpBranch("imull", "jo", :int)
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "jo", :int)
when "bmulis"
- handleX86OpBranch("imull", "js", :int)
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "js", :int)
when "bmuliz"
- handleX86OpBranch("imull", "jz", :int)
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "jz", :int)
when "bmulinz"
- handleX86OpBranch("imull", "jnz", :int)
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "jnz", :int)
when "borio"
handleX86OpBranch("orl", "jo", :int)
when "boris"
@@ -1174,15 +1320,19 @@ class Instruction
when "borinz"
handleX86OpBranch("orl", "jnz", :int)
when "break"
- $asm.puts "int $3"
+ $asm.puts "int #{const(3)}"
when "call"
if useX87
2.times {
| offset |
- $asm.puts "ffree %st(#{offset})"
+ $asm.puts "ffree #{register("st")}(#{offset})"
}
end
- $asm.puts "call #{operands[0].x86CallOperand(:ptr)}"
+ op = operands[0].x86CallOperand(:ptr)
+ if operands[0].is_a? LabelReference
+ operands[0].used
+ end
+ $asm.puts "call #{op}"
when "ret"
$asm.puts "ret"
when "cieq"
@@ -1290,21 +1440,19 @@ class Instruction
when "tbnz"
handleX86SetTest("setnz", :byte)
when "peek"
- sp = RegisterID.new(nil, "sp")
- $asm.puts "mov#{x86Suffix(:ptr)} #{operands[0].value * x86Bytes(:ptr)}(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:ptr)}"
+ handleX86Peek()
when "poke"
- sp = RegisterID.new(nil, "sp")
- $asm.puts "mov#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{operands[1].value * x86Bytes(:ptr)}(#{sp.x86Operand(:ptr)})"
+ handleX86Poke()
when "cdqi"
$asm.puts "cdq"
when "idivi"
- $asm.puts "idivl #{operands[0].x86Operand(:int)}"
+ $asm.puts "idiv#{x86Suffix(:int)} #{operands[0].x86Operand(:int)}"
when "fii2d"
if useX87
sp = RegisterID.new(nil, "sp")
- $asm.puts "movl #{operands[0].x86Operand(:int)}, -8(#{sp.x86Operand(:ptr)})"
- $asm.puts "movl #{operands[1].x86Operand(:int)}, -4(#{sp.x86Operand(:ptr)})"
- $asm.puts "fldl -8(#{sp.x86Operand(:ptr)})"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), offsetRegister(-8, sp.x86Operand(:ptr)))}"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[1].x86Operand(:int), offsetRegister(-4, sp.x86Operand(:ptr)))}"
+ $asm.puts "fld#{x86Suffix(:ptr)} #{getSizeString(:double)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
$asm.puts "fstp #{operands[2].x87Operand(1)}"
else
$asm.puts "movd #{operands[0].x86Operand(:int)}, #{operands[2].x86Operand(:double)}"
@@ -1316,13 +1464,13 @@ class Instruction
if useX87
sp = RegisterID.new(nil, "sp")
if (operands[0].x87DefaultStackPosition == 0)
- $asm.puts "fstl -8(#{sp.x86Operand(:ptr)})"
+ $asm.puts "fst#{x86Suffix(:ptr)} #{getSizeString(:double)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
else
$asm.puts "fld #{operands[0].x87Operand(0)}"
$asm.puts "fstpl -8(#{sp.x86Operand(:ptr)})"
end
- $asm.puts "movl -8(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:int)}"
- $asm.puts "movl -4(#{sp.x86Operand(:ptr)}), #{operands[2].x86Operand(:int)}"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-8, sp.x86Operand(:ptr)), operands[1].x86Operand(:int))}"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-4, sp.x86Operand(:ptr)), operands[2].x86Operand(:int))}"
else
$asm.puts "movd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
$asm.puts "movsd #{operands[0].x86Operand(:double)}, %xmm7"
@@ -1336,20 +1484,32 @@ class Instruction
$asm.puts "fldl -8(#{sp.x86Operand(:ptr)})"
$asm.puts "fstp #{operands[1].x87Operand(1)}"
else
- $asm.puts "movq #{operands[0].x86Operand(:quad)}, #{operands[1].x86Operand(:double)}"
+ if !isIntelSyntax
+ $asm.puts "movq #{operands[0].x86Operand(:quad)}, #{operands[1].x86Operand(:double)}"
+ else
+ # MASM does not accept register operands with movq.
+ # Debugging shows that movd actually moves a qword when using MASM.
+ $asm.puts "movd #{operands[1].x86Operand(:double)}, #{operands[0].x86Operand(:quad)}"
+ end
end
when "fd2q"
if useX87
sp = RegisterID.new(nil, "sp")
if (operands[0].x87DefaultStackPosition == 0)
- $asm.puts "fstl -8(#{sp.x86Operand(:ptr)})"
+ $asm.puts "fst#{x86Suffix(:int)} #{getSizeString(:int)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
else
$asm.puts "fld #{operands[0].x87Operand(0)}"
$asm.puts "fstpl -8(#{sp.x86Operand(:ptr)})"
end
$asm.puts "movq -8(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:quad)}"
else
- $asm.puts "movq #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:quad)}"
+ if !isIntelSyntax
+ $asm.puts "movq #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:quad)}"
+ else
+ # MASM does not accept register operands with movq.
+ # Debugging shows that movd actually moves a qword when using MASM.
+ $asm.puts "movd #{operands[1].x86Operand(:quad)}, #{operands[0].x86Operand(:double)}"
+ end
end
when "bo"
$asm.puts "jo #{operands[0].asmLabel}"
@@ -1360,11 +1520,16 @@ class Instruction
when "bnz"
$asm.puts "jnz #{operands[0].asmLabel}"
when "leai"
- $asm.puts "leal #{operands[0].x86AddressOperand(:int)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "lea#{x86Suffix(:int)} #{orderOperands(operands[0].x86AddressOperand(:int), operands[1].x86Operand(:int))}"
when "leap"
- $asm.puts "lea#{x86Suffix(:ptr)} #{operands[0].x86AddressOperand(:ptr)}, #{operands[1].x86Operand(:ptr)}"
+ $asm.puts "lea#{x86Suffix(:ptr)} #{orderOperands(operands[0].x86AddressOperand(:ptr), operands[1].x86Operand(:ptr))}"
when "memfence"
- $asm.puts "mfence"
+ sp = RegisterID.new(nil, "sp")
+ if isIntelSyntax
+ $asm.puts "mfence"
+ else
+ $asm.puts "lock; orl $0, (#{sp.x86Operand(:ptr)})"
+ end
else
lowerDefault
end
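Finally, the two memfence forms above are both full barriers: a locked read-modify-write of a stack slot works on every IA-32 part, while mfence assumes SSE2. A standalone sketch of the choice, with the sp operand passed in explicitly (unlike the real lowering):

    # Sketch only: emit a full memory fence in either syntax.
    def memfence(spOperand, intelSyntax = false)
        intelSyntax ? "mfence" : "lock; orl $0, (#{spOperand})"
    end

    puts memfence("%esp")       # => lock; orl $0, (%esp)
    puts memfence("esp", true)  # => mfence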