summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/offlineasm
diff options
context:
space:
mode:
authorLorry Tar Creator <lorry-tar-importer@lorry>2015-10-15 09:45:50 +0000
committerLorry Tar Creator <lorry-tar-importer@lorry>2015-10-15 09:45:50 +0000
commite15dd966d523731101f70ccf768bba12435a0208 (patch)
treeae9cb828a24ded2585a41af3f21411523b47897d /Source/JavaScriptCore/offlineasm
downloadWebKitGtk-tarball-e15dd966d523731101f70ccf768bba12435a0208.tar.gz
webkitgtk-2.10.2webkitgtk-2.10.2
Diffstat (limited to 'Source/JavaScriptCore/offlineasm')
-rw-r--r--Source/JavaScriptCore/offlineasm/arm.rb606
-rw-r--r--Source/JavaScriptCore/offlineasm/arm64.rb879
-rw-r--r--Source/JavaScriptCore/offlineasm/asm.rb340
-rw-r--r--Source/JavaScriptCore/offlineasm/ast.rb1426
-rw-r--r--Source/JavaScriptCore/offlineasm/backends.rb175
-rw-r--r--Source/JavaScriptCore/offlineasm/cloop.rb1158
-rw-r--r--Source/JavaScriptCore/offlineasm/config.rb57
-rw-r--r--Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb154
-rw-r--r--Source/JavaScriptCore/offlineasm/instructions.rb337
-rw-r--r--Source/JavaScriptCore/offlineasm/mips.rb1017
-rw-r--r--Source/JavaScriptCore/offlineasm/offsets.rb194
-rw-r--r--Source/JavaScriptCore/offlineasm/opt.rb135
-rw-r--r--Source/JavaScriptCore/offlineasm/parser.rb794
-rw-r--r--Source/JavaScriptCore/offlineasm/registers.rb73
-rw-r--r--Source/JavaScriptCore/offlineasm/risc.rb730
-rw-r--r--Source/JavaScriptCore/offlineasm/self_hash.rb73
-rw-r--r--Source/JavaScriptCore/offlineasm/settings.rb249
-rw-r--r--Source/JavaScriptCore/offlineasm/sh4.rb1102
-rw-r--r--Source/JavaScriptCore/offlineasm/transform.rb501
-rw-r--r--Source/JavaScriptCore/offlineasm/x86.rb1526
20 files changed, 11526 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/offlineasm/arm.rb b/Source/JavaScriptCore/offlineasm/arm.rb
new file mode 100644
index 000000000..44cfbe5c9
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/arm.rb
@@ -0,0 +1,606 @@
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2013 University of Szeged. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+require "opt"
+require "risc"
+
# True when the active backend is the Thumb-2 "ARMv7" backend; false for the
# traditional-encoding ARMv7 and classic ARM backends.
def isARMv7
    backend = $activeBackend
    return true if backend == "ARMv7"
    return false if ["ARMv7_TRADITIONAL", "ARM"].include? backend
    raise "bad value for $activeBackend: #{backend}"
end
+
# True when the active backend is "ARMv7_TRADITIONAL" (ARM encoding of the
# v7 instruction set); false for the Thumb-2 ARMv7 and classic ARM backends.
def isARMv7Traditional
    backend = $activeBackend
    return true if backend == "ARMv7_TRADITIONAL"
    return false if ["ARMv7", "ARM"].include? backend
    raise "bad value for $activeBackend: #{backend}"
end
+
class Node
    # Translate this node's double-precision VFP operand name ("dN") into the
    # overlapping even-numbered single-precision alias ("s2N").
    def armSingle
        wide = armOperand
        raise "Bogus register name #{wide}" unless wide.start_with?("d")
        "s#{wide[1..-1].to_i * 2}"
    end
end
+
class SpecialRegister
    # A raw native register name (e.g. "r6", "d7") reserved for lowering
    # temporaries; unlike RegisterID there is no llint-name translation to do,
    # so the stored name is emitted as-is.
    def armOperand
        @name
    end
end
+
+ARM_EXTRA_GPRS = [SpecialRegister.new("r6"), SpecialRegister.new("r10"), SpecialRegister.new("r12")]
+ARM_EXTRA_FPRS = [SpecialRegister.new("d7")]
+ARM_SCRATCH_FPR = SpecialRegister.new("d6")
+
# Emit the cheapest instruction sequence that loads +value+ into +register+:
# a plain mov/mvn for byte-sized (possibly inverted) immediates, movw/movt on
# v7-class cores, and a literal-pool ldr otherwise. String values (assembler
# expressions) are passed through in parentheses.
def armMoveImmediate(value, register)
    dest = register.armOperand
    if value.is_a? String
        $asm.puts "mov #{dest}, (#{value})"
    elsif (0..255).include? value
        $asm.puts "mov #{dest}, \##{value}"
    elsif (0..255).include?(~value)
        $asm.puts "mvn #{dest}, \##{~value}"
    elsif isARMv7 or isARMv7Traditional
        $asm.puts "movw #{dest}, \##{value & 0xffff}"
        unless (value & 0xffff0000).zero?
            $asm.puts "movt #{dest}, \##{(value >> 16) & 0xffff}"
        end
    else
        $asm.puts "ldr #{dest}, =#{value}"
    end
end
+
class RegisterID
    # llint register name -> native ARM register, for the backend-independent
    # cases. "cfr" is handled separately because it differs between Thumb-2
    # (r7) and the other ARM backends (r11).
    ARM_GPR_NAMES = {
        "t0" => "r0", "a0" => "r0", "r0" => "r0",
        "t1" => "r1", "a1" => "r1", "r1" => "r1",
        "t2" => "r2", "a2" => "r2",
        "a3" => "r3",
        "t3" => "r4",
        "t4" => "r8",
        "t5" => "r9",
        "lr" => "lr",
        "sp" => "sp",
        "pc" => "pc",
    }.freeze

    def armOperand
        return(isARMv7 ? "r7" : "r11") if name == "cfr"
        ARM_GPR_NAMES.fetch(name) {
            raise "Bad register #{name} for ARM at #{codeOriginString}"
        }
    end
end
+
class FPRegisterID
    # llint floating-point register name -> VFP double register.
    ARM_FPR_NAMES = {
        "ft0" => "d0", "fr" => "d0",
        "ft1" => "d1",
        "ft2" => "d2",
        "ft3" => "d3",
        "ft4" => "d4",
        "ft5" => "d5",
    }.freeze

    def armOperand
        ARM_FPR_NAMES.fetch(name) {
            raise "Bad register #{name} for ARM at #{codeOriginString}"
        }
    end
end
+
class Immediate
    # Only byte-sized immediates are accepted here; anything larger must have
    # been legalized earlier (riscLowerMalformedImmediates uses 0..0xff).
    def armOperand
        unless (0..255).include? value
            raise "Invalid immediate #{value} at #{codeOriginString}"
        end
        "\##{value}"
    end
end
+
class Address
    # Base-plus-offset addressing; the offset must be in the range the
    # lowering pass guarantees (-0xff..0xfff).
    def armOperand
        off = offset.value
        raise "Bad offset at #{codeOriginString}" unless (-0xff..0xfff).include? off
        "[#{base.armOperand}, \##{off}]"
    end
end
+
class BaseIndex
    # Base + shifted-index addressing; ARM has no extra displacement field,
    # so a non-zero offset is a lowering error.
    def armOperand
        raise "Bad offset at #{codeOriginString}" unless offset.value.zero?
        "[#{base.armOperand}, #{index.armOperand}, lsl \##{scaleShift}]"
    end
end
+
class AbsoluteAddress
    # ARM has no absolute addressing mode; an earlier lowering pass must have
    # rewritten any AbsoluteAddress into a register-based form, so reaching
    # this is a lowering bug.
    def armOperand
        raise "Unconverted absolute address at #{codeOriginString}"
    end
end
+
+#
+# Lea support.
+#
+
class Address
    # lea lowering: use the two-operand form when the destination doubles as
    # the base, otherwise the three-operand add.
    def armEmitLea(destination)
        dst = destination.armOperand
        if destination == base
            $asm.puts "adds #{dst}, \##{offset.value}"
        else
            $asm.puts "adds #{dst}, #{base.armOperand}, \##{offset.value}"
        end
    end
end
+
class BaseIndex
    # lea of base + (index << scaleShift); the displacement must be zero.
    def armEmitLea(destination)
        unless offset.value == 0
            raise "Malformed BaseIndex, offset should be zero at #{codeOriginString}"
        end
        $asm.puts "add #{destination.armOperand}, #{base.armOperand}, #{index.armOperand}, lsl \##{scaleShift}"
    end
end
+
+# FIXME: we could support AbsoluteAddress for lea, but we don't.
+
+#
+# Actual lowering code follows.
+#
+
class Sequence
    # Backend entry points: each asserts that it is running for the backend
    # that is actually active, then delegates to the shared ARM pipeline.
    def getModifiedListARM
        raise unless $activeBackend == "ARM"
        getModifiedListARMCommon
    end

    def getModifiedListARMv7
        raise unless $activeBackend == "ARMv7"
        getModifiedListARMCommon
    end

    def getModifiedListARMv7_TRADITIONAL
        raise unless $activeBackend == "ARMv7_TRADITIONAL"
        getModifiedListARMCommon
    end

    # Run the generic RISC lowering passes (from risc.rb) with ARM-specific
    # legality predicates, then bind the remaining temporaries to the scratch
    # registers reserved above. Pass order matters: address/immediate
    # legalization must happen before temporaries are assigned.
    def getModifiedListARMCommon
        result = @list
        result = riscLowerSimpleBranchOps(result)
        result = riscLowerHardBranchOps(result)
        result = riscLowerShiftOps(result)
        # An Address offset must fit in -0xff..0xfff; BaseIndex takes no
        # displacement at all (matches the armOperand checks above).
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            if address.is_a? BaseIndex
                address.offset.value == 0
            elsif address.is_a? Address
                (-0xff..0xfff).include? address.offset.value
            else
                false
            end
        }
        result = riscLowerMalformedAddressesDouble(result)
        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"])
        # Immediates outside 0..0xff are moved into registers (see
        # Immediate#armOperand).
        result = riscLowerMalformedImmediates(result, 0..0xff)
        result = riscLowerMisplacedAddresses(result)
        result = riscLowerRegisterReuse(result)
        result = assignRegistersToTemporaries(result, :gpr, ARM_EXTRA_GPRS)
        result = assignRegistersToTemporaries(result, :fpr, ARM_EXTRA_FPRS)
        return result
    end
end
+
# Render an operand list, in source order, as a comma-separated string.
def armOperands(operands)
    operands.collect(&:armOperand).join(", ")
end
+
# llint puts the destination last; ARM assembly wants it first. Rotate the
# final operand to the front without mutating the caller's array.
def armFlippedOperands(operands)
    reordered = operands.dup
    reordered.unshift(reordered.pop)
    armOperands(reordered)
end
+
# Emit an ALU op, choosing between its 2-operand (opcode2) and 3-operand
# (opcode3) encodings. A two-operand form with an immediate source is
# expanded to the 3-operand encoding with destination repeated.
def emitArmCompact(opcode2, opcode3, operands)
    case operands.size
    when 3
        $asm.puts "#{opcode3} #{armFlippedOperands(operands)}"
    when 2
        raise unless operands[1].register?
        if operands[0].immediate?
            dst = operands[1].armOperand
            $asm.puts "#{opcode3} #{dst}, #{dst}, #{operands[0].armOperand}"
        else
            $asm.puts "#{opcode2} #{armFlippedOperands(operands)}"
        end
    else
        raise
    end
end
+
# Emit a three-operand-only ALU op; a two-operand call repeats the
# destination as the first source.
def emitArm(opcode, operands)
    if operands.size == 2
        dst = operands[1].armOperand
        $asm.puts "#{opcode} #{dst}, #{dst}, #{operands[0].armOperand}"
    else
        raise unless operands.size == 3
        $asm.puts "#{opcode} #{armFlippedOperands(operands)}"
    end
end
+
# Compare two doubles, copy the VFP status flags into APSR, then branch.
def emitArmDoubleBranch(branchOpcode, operands)
    lhs, rhs, target = operands
    $asm.puts "vcmpe.f64 #{armOperands([lhs, rhs])}"
    $asm.puts "vmrs apsr_nzcv, fpscr"
    $asm.puts "#{branchOpcode} #{target.asmLabel}"
end
+
# Emit a tst of operands[0] against a mask: explicit when three operands are
# given, implicitly all-ones when only (value, label) are given. An all-ones
# immediate mask degenerates to "tst value, value".
def emitArmTest(operands)
    value = operands.first
    mask =
        case operands.size
        when 2
            # NOTE(review): bare `codeOrigin` here is not obviously in scope
            # for a top-level def — confirm this 2-operand path is exercised.
            Immediate.new(codeOrigin, -1)
        when 3
            operands[1]
        else
            raise "Expected 2 or 3 operands but got #{operands.size} at #{codeOriginString}"
        end
    rhs = (mask.immediate? and mask.value == -1) ? value : mask
    $asm.puts "tst #{value.armOperand}, #{rhs.armOperand}"
end
+
# Materialize a comparison result as 0/1 in operands[2]: zero the
# destination, compare, then conditionally set it to 1 (the it/mov pair is
# Thumb-2 friendly).
def emitArmCompare(operands, code)
    dst = operands[2].armOperand
    $asm.puts "movs #{dst}, \#0"
    $asm.puts "cmp #{operands[0].armOperand}, #{operands[1].armOperand}"
    $asm.puts "it #{code}"
    $asm.puts "mov#{code} #{dst}, \#1"
end
+
# Materialize a test result as 0/1 in the last operand: zero it, tst, then
# conditionally set it to 1.
def emitArmTestSet(operands, code)
    destination = operands[-1].armOperand
    $asm.puts "movs #{destination}, \#0"
    emitArmTest(operands)
    $asm.puts "it #{code}"
    $asm.puts "mov#{code} #{destination}, \#1"
end
+
class Instruction
    # Backend entry points: each asserts it was invoked for the backend that
    # is actually active before delegating to the shared lowering below.
    def lowerARM
        raise unless $activeBackend == "ARM"
        lowerARMCommon
    end

    def lowerARMv7
        raise unless $activeBackend == "ARMv7"
        lowerARMCommon
    end

    def lowerARMv7_TRADITIONAL
        raise unless $activeBackend == "ARMv7_TRADITIONAL"
        lowerARMCommon
    end

    # Translate one (already RISC-legalized) llint instruction into ARM
    # assembly, writing lines via $asm. Unknown opcodes fall through to
    # lowerDefault.
    def lowerARMCommon
        $asm.codeOrigin codeOriginString if $enableCodeOriginComments
        $asm.annotation annotation if $enableInstrAnnotations

        case opcode
        when "addi", "addp", "addis", "addps"
            # "s"-suffixed variants must set the condition flags, so the
            # add-of-zero elisions below only apply when suffix is empty.
            if opcode == "addis" or opcode == "addps"
                suffix = "s"
            else
                suffix = ""
            end
            if operands.size == 3 and operands[0].immediate?
                raise unless operands[1].register?
                raise unless operands[2].register?
                if operands[0].value == 0 and suffix.empty?
                    # add dst, src, #0 is just a move (or nothing at all).
                    unless operands[1] == operands[2]
                        $asm.puts "mov #{operands[2].armOperand}, #{operands[1].armOperand}"
                    end
                else
                    $asm.puts "adds #{operands[2].armOperand}, #{operands[1].armOperand}, #{operands[0].armOperand}"
                end
            elsif operands.size == 3 and operands[0].register?
                raise unless operands[1].register?
                raise unless operands[2].register?
                $asm.puts "adds #{armFlippedOperands(operands)}"
            else
                if operands[0].immediate?
                    unless Immediate.new(nil, 0) == operands[0]
                        $asm.puts "adds #{armFlippedOperands(operands)}"
                    end
                else
                    $asm.puts "add#{suffix} #{armFlippedOperands(operands)}"
                end
            end
        when "andi", "andp"
            emitArmCompact("ands", "and", operands)
        when "ori", "orp"
            emitArmCompact("orrs", "orr", operands)
        when "oris"
            emitArmCompact("orrs", "orrs", operands)
        when "xori", "xorp"
            emitArmCompact("eors", "eor", operands)
        when "lshifti", "lshiftp"
            emitArmCompact("lsls", "lsls", operands)
        when "rshifti", "rshiftp"
            emitArmCompact("asrs", "asrs", operands)
        when "urshifti", "urshiftp"
            emitArmCompact("lsrs", "lsrs", operands)
        when "muli", "mulp"
            emitArm("mul", operands)
        when "subi", "subp", "subis"
            emitArmCompact("subs", "subs", operands)
        when "negi", "negp"
            # Negate via reverse-subtract from zero.
            $asm.puts "rsbs #{operands[0].armOperand}, #{operands[0].armOperand}, \#0"
        when "noti"
            $asm.puts "mvns #{operands[0].armOperand}, #{operands[0].armOperand}"
        when "loadi", "loadis", "loadp"
            $asm.puts "ldr #{armFlippedOperands(operands)}"
        when "storei", "storep"
            $asm.puts "str #{armOperands(operands)}"
        when "loadb"
            $asm.puts "ldrb #{armFlippedOperands(operands)}"
        when "loadbs"
            $asm.puts "ldrsb.w #{armFlippedOperands(operands)}"
        when "storeb"
            $asm.puts "strb #{armOperands(operands)}"
        when "loadh"
            $asm.puts "ldrh #{armFlippedOperands(operands)}"
        when "loadhs"
            $asm.puts "ldrsh.w #{armFlippedOperands(operands)}"
        when "storeh"
            $asm.puts "strh #{armOperands(operands)}"
        when "loadd"
            $asm.puts "vldr.64 #{armFlippedOperands(operands)}"
        when "stored"
            $asm.puts "vstr.64 #{armOperands(operands)}"
        when "addd"
            emitArm("vadd.f64", operands)
        when "divd"
            emitArm("vdiv.f64", operands)
        when "subd"
            emitArm("vsub.f64", operands)
        when "muld"
            emitArm("vmul.f64", operands)
        when "sqrtd"
            $asm.puts "vsqrt.f64 #{armFlippedOperands(operands)}"
        when "ci2d"
            # int -> double: move the GPR into the single-precision alias of
            # the destination, then convert in place.
            $asm.puts "vmov #{operands[1].armSingle}, #{operands[0].armOperand}"
            $asm.puts "vcvt.f64.s32 #{operands[1].armOperand}, #{operands[1].armSingle}"
        when "bdeq"
            emitArmDoubleBranch("beq", operands)
        when "bdneq"
            # "not equal" must NOT be taken for unordered (NaN) inputs, so
            # divert the overflow/unordered case past the bne.
            $asm.puts "vcmpe.f64 #{armOperands(operands[0..1])}"
            $asm.puts "vmrs apsr_nzcv, fpscr"
            isUnordered = LocalLabel.unique("bdneq")
            $asm.puts "bvs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
            $asm.puts "bne #{operands[2].asmLabel}"
            isUnordered.lower("ARM")
        when "bdgt"
            emitArmDoubleBranch("bgt", operands)
        when "bdgteq"
            emitArmDoubleBranch("bge", operands)
        when "bdlt"
            emitArmDoubleBranch("bmi", operands)
        when "bdlteq"
            emitArmDoubleBranch("bls", operands)
        when "bdequn"
            # "equal or unordered": take the branch on V (unordered) or Z.
            $asm.puts "vcmpe.f64 #{armOperands(operands[0..1])}"
            $asm.puts "vmrs apsr_nzcv, fpscr"
            $asm.puts "bvs #{operands[2].asmLabel}"
            $asm.puts "beq #{operands[2].asmLabel}"
        when "bdnequn"
            emitArmDoubleBranch("bne", operands)
        when "bdgtun"
            emitArmDoubleBranch("bhi", operands)
        when "bdgtequn"
            emitArmDoubleBranch("bpl", operands)
        when "bdltun"
            emitArmDoubleBranch("blt", operands)
        when "bdltequn"
            emitArmDoubleBranch("ble", operands)
        when "btd2i"
            # FIXME: may be a good idea to just get rid of this instruction, since the interpreter
            # currently does not use it.
            raise "ARM does not support this opcode yet, #{codeOrigin}"
        when "td2i"
            # Truncate double -> int via the reserved scratch FPR.
            $asm.puts "vcvt.s32.f64 #{ARM_SCRATCH_FPR.armSingle}, #{operands[0].armOperand}"
            $asm.puts "vmov #{operands[1].armOperand}, #{ARM_SCRATCH_FPR.armSingle}"
        when "bcd2i"
            # Convert double -> int, branch to operands[2] if the conversion
            # was lossy (round-trip mismatch) or produced zero.
            $asm.puts "vcvt.s32.f64 #{ARM_SCRATCH_FPR.armSingle}, #{operands[0].armOperand}"
            $asm.puts "vmov #{operands[1].armOperand}, #{ARM_SCRATCH_FPR.armSingle}"
            $asm.puts "vcvt.f64.s32 #{ARM_SCRATCH_FPR.armOperand}, #{ARM_SCRATCH_FPR.armSingle}"
            emitArmDoubleBranch("bne", [ARM_SCRATCH_FPR, operands[0], operands[2]])
            $asm.puts "tst #{operands[1].armOperand}, #{operands[1].armOperand}"
            $asm.puts "beq #{operands[2].asmLabel}"
        when "movdz"
            # FIXME: either support this or remove it.
            raise "ARM does not support this opcode yet, #{codeOrigin}"
        when "pop"
            # One register per instruction: multi-register pop/push does not
            # guarantee operand order.
            operands.each {
                | op |
                $asm.puts "pop { #{op.armOperand} }"
            }
        when "push"
            operands.each {
                | op |
                $asm.puts "push { #{op.armOperand} }"
            }
        when "move"
            if operands[0].immediate?
                armMoveImmediate(operands[0].value, operands[1])
            else
                $asm.puts "mov #{armFlippedOperands(operands)}"
            end
        when "mvlbl"
            # Load a label address with a movw/movt pair.
            $asm.puts "movw #{operands[1].armOperand}, \#:lower16:#{operands[0].value}"
            $asm.puts "movt #{operands[1].armOperand}, \#:upper16:#{operands[0].value}"
        when "nop"
            $asm.puts "nop"
        when "bieq", "bpeq", "bbeq"
            # Compare-and-branch; comparing against 0 uses tst to save an
            # immediate.
            if Immediate.new(nil, 0) == operands[0]
                $asm.puts "tst #{operands[1].armOperand}, #{operands[1].armOperand}"
            elsif Immediate.new(nil, 0) == operands[1]
                $asm.puts "tst #{operands[0].armOperand}, #{operands[0].armOperand}"
            else
                $asm.puts "cmp #{armOperands(operands[0..1])}"
            end
            $asm.puts "beq #{operands[2].asmLabel}"
        when "bineq", "bpneq", "bbneq"
            if Immediate.new(nil, 0) == operands[0]
                $asm.puts "tst #{operands[1].armOperand}, #{operands[1].armOperand}"
            elsif Immediate.new(nil, 0) == operands[1]
                $asm.puts "tst #{operands[0].armOperand}, #{operands[0].armOperand}"
            else
                $asm.puts "cmp #{armOperands(operands[0..1])}"
            end
            $asm.puts "bne #{operands[2].asmLabel}"
        when "bia", "bpa", "bba"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "bhi #{operands[2].asmLabel}"
        when "biaeq", "bpaeq", "bbaeq"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "bhs #{operands[2].asmLabel}"
        when "bib", "bpb", "bbb"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "blo #{operands[2].asmLabel}"
        when "bibeq", "bpbeq", "bbbeq"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "bls #{operands[2].asmLabel}"
        when "bigt", "bpgt", "bbgt"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "bgt #{operands[2].asmLabel}"
        when "bigteq", "bpgteq", "bbgteq"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "bge #{operands[2].asmLabel}"
        when "bilt", "bplt", "bblt"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "blt #{operands[2].asmLabel}"
        when "bilteq", "bplteq", "bblteq"
            $asm.puts "cmp #{armOperands(operands[0..1])}"
            $asm.puts "ble #{operands[2].asmLabel}"
        when "btiz", "btpz", "btbz"
            emitArmTest(operands)
            $asm.puts "beq #{operands[-1].asmLabel}"
        when "btinz", "btpnz", "btbnz"
            emitArmTest(operands)
            $asm.puts "bne #{operands[-1].asmLabel}"
        when "btis", "btps", "btbs"
            emitArmTest(operands)
            $asm.puts "bmi #{operands[-1].asmLabel}"
        when "jmp"
            if operands[0].label?
                $asm.puts "b #{operands[0].asmLabel}"
            else
                $asm.puts "mov pc, #{operands[0].armOperand}"
            end
            # On classic ARM, flush the literal pool after an unconditional
            # jump so "ldr =imm" constants stay in range.
            if not isARMv7 and not isARMv7Traditional
                $asm.puts ".ltorg"
            end
        when "call"
            if operands[0].label?
                $asm.puts "blx #{operands[0].asmLabel}"
            else
                $asm.puts "blx #{operands[0].armOperand}"
            end
        when "break"
            $asm.puts "bkpt #0"
        when "ret"
            $asm.puts "bx lr"
        when "cieq", "cpeq", "cbeq"
            emitArmCompare(operands, "eq")
        when "cineq", "cpneq", "cbneq"
            emitArmCompare(operands, "ne")
        when "cia", "cpa", "cba"
            emitArmCompare(operands, "hi")
        when "ciaeq", "cpaeq", "cbaeq"
            emitArmCompare(operands, "hs")
        when "cib", "cpb", "cbb"
            emitArmCompare(operands, "lo")
        when "cibeq", "cpbeq", "cbbeq"
            emitArmCompare(operands, "ls")
        when "cigt", "cpgt", "cbgt"
            emitArmCompare(operands, "gt")
        when "cigteq", "cpgteq", "cbgteq"
            emitArmCompare(operands, "ge")
        when "cilt", "cplt", "cblt"
            emitArmCompare(operands, "lt")
        when "cilteq", "cplteq", "cblteq"
            emitArmCompare(operands, "le")
        when "tis", "tbs", "tps"
            emitArmTestSet(operands, "mi")
        when "tiz", "tbz", "tpz"
            emitArmTestSet(operands, "eq")
        when "tinz", "tbnz", "tpnz"
            emitArmTestSet(operands, "ne")
        when "peek"
            $asm.puts "ldr #{operands[1].armOperand}, [sp, \##{operands[0].value * 4}]"
        when "poke"
            $asm.puts "str #{operands[1].armOperand}, [sp, \##{operands[0].value * 4}]"
        when "fii2d"
            # Two GPRs -> one double register.
            $asm.puts "vmov #{operands[2].armOperand}, #{operands[0].armOperand}, #{operands[1].armOperand}"
        when "fd2ii"
            # One double register -> two GPRs.
            $asm.puts "vmov #{operands[1].armOperand}, #{operands[2].armOperand}, #{operands[0].armOperand}"
        when "bo"
            $asm.puts "bvs #{operands[0].asmLabel}"
        when "bs"
            $asm.puts "bmi #{operands[0].asmLabel}"
        when "bz"
            $asm.puts "beq #{operands[0].asmLabel}"
        when "bnz"
            $asm.puts "bne #{operands[0].asmLabel}"
        when "leai", "leap"
            operands[0].armEmitLea(operands[1])
        when "smulli"
            raise "Wrong number of arguments to smull in #{self.inspect} at #{codeOriginString}" unless operands.length == 4
            $asm.puts "smull #{operands[2].armOperand}, #{operands[3].armOperand}, #{operands[0].armOperand}, #{operands[1].armOperand}"
        when "memfence"
            $asm.puts "dmb sy"
        when "clrbp"
            $asm.puts "bic #{operands[2].armOperand}, #{operands[0].armOperand}, #{operands[1].armOperand}"
        else
            lowerDefault
        end
    end
end
+
diff --git a/Source/JavaScriptCore/offlineasm/arm64.rb b/Source/JavaScriptCore/offlineasm/arm64.rb
new file mode 100644
index 000000000..3a0d786c8
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/arm64.rb
@@ -0,0 +1,879 @@
+# Copyright (C) 2011, 2012, 2014 Apple Inc. All rights reserved.
+# Copyright (C) 2014 University of Szeged. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "ast"
+require "opt"
+require "risc"
+
+# Naming conventions:
+#
+# x<number> => GPR. This is both the generic name of the register, and the name used
+# to indicate that the register is used in 64-bit mode.
+# w<number> => GPR in 32-bit mode. This is the low 32-bits of the GPR. If it is
+# mutated then the high 32-bit part of the register is zero filled.
+# q<number> => FPR. This is the generic name of the register.
+# d<number> => FPR used as an IEEE 64-bit binary floating point number (i.e. double).
+#
+# GPR conventions, to match the baseline JIT:
+#
+# x0 => return value, cached result, first argument, t0, a0, r0
+# x1 => t1, a1, r1
+# x2 => t2, a2
+# x3 => a3
+# x5 => t4
+# x6 => t6
+# x9 => (nonArgGPR1 in baseline)
+# x13 => scratch (unused in baseline)
+# x16 => scratch
+# x17 => scratch
+# x23 => t3
+# x24 => t5
+# x27 => csr1 (tagTypeNumber)
+# x28 => csr2 (tagMask)
+# x29 => cfr
+# sp => sp
+# lr => lr
+#
+# FPR conventions, to match the baseline JIT:
+#
+# q0 => ft0
+# q1 => ft1
+# q2 => ft2
+# q3 => ft3
+# q4 => ft4 (unused in baseline)
+# q5 => ft5 (unused in baseline)
+# q31 => scratch
+
# Map a generic GPR name ("xN") to the width-specific assembly name:
# "wN" for 32-bit (:int) use, "xN" for 64-bit (:ptr) use.
def arm64GPRName(name, kind)
    raise "bad GPR name #{name}" unless name.start_with?("x")
    suffix = name[1..-1]
    prefix =
        case kind
        when :int
            "w"
        when :ptr
            "x"
        else
            raise "Wrong kind: #{kind}"
        end
    prefix + suffix
end
+
# Map a generic FPR name ("qN") to its double-precision view ("dN");
# :double is the only supported kind.
def arm64FPRName(name, kind)
    raise "bad FPR kind #{kind}" unless kind == :double
    raise "bad FPR name #{name}" unless name.start_with?("q")
    name.sub(/\Aq/, "d")
end
+
class SpecialRegister
    # Scratch registers used for lowering temporaries; @name is the raw
    # native register ("x16", "q31", ...), dispatched on its prefix.
    def arm64Operand(kind)
        if @name.start_with?("x")
            arm64GPRName(@name, kind)
        elsif @name.start_with?("q")
            arm64FPRName(@name, kind)
        else
            raise "Bad name: #{@name}"
        end
    end
end
+
+ARM64_EXTRA_GPRS = [SpecialRegister.new("x16"), SpecialRegister.new("x17"), SpecialRegister.new("x13")]
+ARM64_EXTRA_FPRS = [SpecialRegister.new("q31")]
+
class RegisterID
    # llint register name -> generic ARM64 GPR ("xN"); the width-specific
    # name is produced by arm64GPRName. sp and lr bypass the mapping.
    ARM64_GPR_NAMES = {
        't0' => 'x0', 'a0' => 'x0', 'r0' => 'x0',
        't1' => 'x1', 'a1' => 'x1', 'r1' => 'x1',
        't2' => 'x2', 'a2' => 'x2',
        'a3' => 'x3',
        't3' => 'x23',
        't4' => 'x5',
        't5' => 'x24',
        't6' => 'x6',
        't7' => 'x7',
        'cfr' => 'x29',
        'csr1' => 'x27',
        'csr2' => 'x28',
    }.freeze

    def arm64Operand(kind)
        case @name
        when 'sp'
            'sp'
        when 'lr'
            'x30'
        else
            gpr = ARM64_GPR_NAMES[@name]
            raise "Bad register name #{@name} at #{codeOriginString}" unless gpr
            arm64GPRName(gpr, kind)
        end
    end
end
+
class FPRegisterID
    # llint floating-point register name -> native ARM64 register (via
    # arm64FPRName, which yields the "dN" double view).
    def arm64Operand(kind)
        case @name
        when 'ft0'
            arm64FPRName('q0', kind)
        when 'ft1'
            arm64FPRName('q1', kind)
        when 'ft2'
            arm64FPRName('q2', kind)
        when 'ft3'
            arm64FPRName('q3', kind)
        when 'ft4'
            arm64FPRName('q4', kind)
        when 'ft5'
            arm64FPRName('q5', kind)
        else
            # Bug fix: the else arm previously *returned* the error-message
            # string instead of raising (missing `raise`), so a bad register
            # name was silently emitted into the generated assembly.
            raise "Bad register name #{@name} at #{codeOriginString}"
        end
    end
end
+
class Immediate
    # Immediates must fit the 12-bit unsigned field (0..4095); larger values
    # are legalized into registers by riscLowerMalformedImmediates.
    def arm64Operand(kind)
        unless (0..4095).include? value
            raise "Invalid immediate #{value} at #{codeOriginString}"
        end
        "\##{value}"
    end
end
+
class Address
    # Base-plus-offset addressing; offsets must be in -255..4095 (the range
    # the lowering passes guarantee). The base is always a 64-bit register.
    def arm64Operand(kind)
        off = offset.value
        unless (-255..4095).include? off
            raise "Invalid offset #{off} at #{codeOriginString}"
        end
        "[#{base.arm64Operand(:ptr)}, \##{off}]"
    end

    # lea lowering: destination = base + offset.
    def arm64EmitLea(destination, kind)
        $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, \##{offset.value}"
    end
end
+
class BaseIndex
    # Base + shifted-index addressing; no extra displacement is available,
    # so a non-zero offset is a lowering error.
    def arm64Operand(kind)
        unless offset.value.zero?
            raise "Invalid offset #{offset.value} at #{codeOriginString}"
        end
        "[#{base.arm64Operand(:ptr)}, #{index.arm64Operand(:ptr)}, lsl \##{scaleShift}]"
    end

    # lea lowering: destination = base + (index << scaleShift).
    def arm64EmitLea(destination, kind)
        $asm.puts "add #{destination.arm64Operand(kind)}, #{base.arm64Operand(kind)}, #{index.arm64Operand(kind)}, lsl \##{scaleShift}"
    end
end
+
class AbsoluteAddress
    # ARM64 has no absolute addressing mode; an earlier lowering pass must
    # have rewritten any AbsoluteAddress, so reaching this is a lowering bug.
    def arm64Operand(kind)
        raise "Unconverted absolute address #{address.value} at #{codeOriginString}"
    end
end
+
+# FIXME: We could support AbsoluteAddress for lea, but we don't.
+
+#
+# Actual lowering code follows.
+#
+
# Rewrite loads/stores whose Address offset is outside the encodable range
# (-255..4095): the offset is first materialized into a temporary with a
# "move", and the access is rewritten as a zero-offset BaseIndex using that
# temporary as the index.
def arm64LowerMalformedLoadStoreAddresses(list)
    newList = []

    # An Address operand whose displacement cannot be encoded directly.
    def isAddressMalformed(operand)
        operand.is_a? Address and not (-255..4095).include? operand.offset.value
    end

    list.each {
        | node |
        if node.is_a? Instruction
            if node.opcode =~ /^store/ and isAddressMalformed(node.operands[1])
                address = node.operands[1]
                # NOTE(review): bare `codeOrigin` here (and in the load arm
                # below) is not node.codeOrigin — confirm it resolves when
                # this path is hit.
                tmp = Tmp.new(codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
                newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0))], node.annotation)
            elsif node.opcode =~ /^load/ and isAddressMalformed(node.operands[0])
                address = node.operands[0]
                tmp = Tmp.new(codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "move", [address.offset, tmp])
                newList << Instruction.new(node.codeOrigin, node.opcode, [BaseIndex.new(node.codeOrigin, address.base, tmp, 1, Immediate.new(codeOrigin, 0)), node.operands[1]], node.annotation)
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
+
+# Workaround for Cortex-A53 erratum (835769)
# Workaround for Cortex-A53 erratum 835769: insert a nop placeholder between
# any macro instruction that lowers to a load/store and a following macro
# instruction that lowers to a 64-bit multiply-accumulate (MADD, MSUB,
# SMADDL, SMSUBL, UMADDL or UMSUBL).
def arm64CortexA53Fix835769(list)
    patched = []
    previousWasMemoryOp = false

    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when /^store/, /^load/
                # Anything that can become a load, store or prefetch.
                previousWasMemoryOp = true
            when "muli", "mulp", "mulq", "smulli"
                # Anything that can become a multiply-accumulate.
                if previousWasMemoryOp
                    patched << Instruction.new(node.codeOrigin, "nopCortexA53Fix835769", [])
                end
                previousWasMemoryOp = false
            else
                previousWasMemoryOp = false
            end
        end
        patched << node
    }
    patched
end
+
class Sequence
    # Run the generic RISC lowering passes (from risc.rb) with ARM64-specific
    # legality predicates, then bind leftover temporaries to the scratch
    # registers and apply the Cortex-A53 erratum workaround last (it must see
    # the final instruction order).
    def getModifiedListARM64
        result = @list
        result = riscLowerNot(result)
        result = riscLowerSimpleBranchOps(result)
        result = riscLowerHardBranchOps64(result)
        result = riscLowerShiftOps(result)
        result = arm64LowerMalformedLoadStoreAddresses(result)
        # First addressing pass: classify each opcode by access size so
        # scaled BaseIndex forms are only kept when the scale matches.
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            case node.opcode
            when "loadb", "loadbs", "storeb", /^bb/, /^btb/, /^cb/, /^tb/
                size = 1
            when "loadh", "loadhs"
                size = 2
            when "loadi", "loadis", "storei", "addi", "andi", "lshifti", "muli", "negi",
                "noti", "ori", "rshifti", "urshifti", "subi", "xori", /^bi/, /^bti/,
                /^ci/, /^ti/, "addis", "subis", "mulis", "smulli", "leai"
                size = 4
            when "loadp", "storep", "loadq", "storeq", "loadd", "stored", "lshiftp", "lshiftq", "negp", "negq", "rshiftp", "rshiftq",
                "urshiftp", "urshiftq", "addp", "addq", "mulp", "mulq", "andp", "andq", "orp", "orq", "subp", "subq", "xorp", "xorq", "addd",
                "divd", "subd", "muld", "sqrtd", /^bp/, /^bq/, /^btp/, /^btq/, /^cp/, /^cq/, /^tp/, /^tq/, /^bd/,
                "jmp", "call", "leap", "leaq"
                size = 8
            else
                raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
            end

            if address.is_a? BaseIndex
                address.offset.value == 0 and
                    (node.opcode =~ /^lea/ or address.scale == 1 or address.scale == size)
            elsif address.is_a? Address
                (-255..4095).include? address.offset.value
            else
                false
            end
        }
        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "storeq"])
        result = riscLowerMalformedImmediates(result, 0..4095)
        result = riscLowerMisplacedAddresses(result)
        # Second addressing pass: stores cannot take a negative Address
        # offset (see emitARM64Access's ldur/stur split).
        result = riscLowerMalformedAddresses(result) {
            | node, address |
            case node.opcode
            when /^load/
                true
            when /^store/
                not (address.is_a? Address and address.offset.value < 0)
            when /^lea/
                true
            else
                raise "Bad instruction #{node.opcode} for heap access at #{node.codeOriginString}"
            end
        }
        result = riscLowerTest(result)
        result = assignRegistersToTemporaries(result, :gpr, ARM64_EXTRA_GPRS)
        result = assignRegistersToTemporaries(result, :fpr, ARM64_EXTRA_FPRS)
        result = arm64CortexA53Fix835769(result)
        return result
    end
end
+
# Render operands as a comma-separated list. +kinds+ is either one kind
# applied to every operand or a per-operand array of the same length.
def arm64Operands(operands, kinds)
    kinds = Array.new(operands.size, kinds) unless kinds.is_a? Array
    if operands.size != kinds.size
        raise "Mismatched operand lists: #{operands.inspect} and #{kinds.inspect}"
    end
    operands.zip(kinds).map { |operand, kind| operand.arm64Operand(kind) }.join(', ')
end
+
# Rotate the llint destination-last operand (and its kind, when per-operand
# kinds were given) to the front, as ARM64 assembly expects, without
# mutating the caller's arrays.
def arm64FlippedOperands(operands, kinds)
    if kinds.is_a? Array
        kinds = kinds.dup
        kinds.unshift(kinds.pop)
    end
    rotated = operands.dup
    rotated.unshift(rotated.pop)
    arm64Operands(rotated, kinds)
end
+
+# TAC = three address code.
# TAC = three address code. A two-operand llint form becomes "dst, dst, src"
# in assembly; a three-operand form is just the flipped list.
def arm64TACOperands(operands, kind)
    return arm64FlippedOperands(operands, kind) if operands.size == 3
    raise unless operands.size == 2
    "#{operands[1].arm64Operand(kind)}, #{arm64FlippedOperands(operands, kind)}"
end
+
# Emit an add/adds, eliding the arithmetic entirely when the immediate is 0
# and the opcode does not set flags (no trailing "s").
def emitARM64Add(opcode, operands, kind)
    if operands.size == 3
        raise unless operands[1].register?
        raise unless operands[2].register?

        if operands[0].immediate?
            # Bug fix: this condition previously read "flag !~ /s$/", but no
            # `flag` variable exists anywhere, so a three-operand add with
            # immediate 0 raised a NameError. It now checks the opcode
            # suffix, mirroring the two-operand case below.
            if operands[0].value == 0 and opcode !~ /s$/
                # add dst, src, #0 is just a register move (or nothing).
                unless operands[1] == operands[2]
                    $asm.puts "mov #{arm64FlippedOperands(operands[1..2], kind)}"
                end
            else
                $asm.puts "#{opcode} #{arm64Operands(operands.reverse, kind)}"
            end
            return
        end

        raise unless operands[0].register?
        $asm.puts "#{opcode} #{arm64FlippedOperands(operands, kind)}"
        return
    end

    raise unless operands.size == 2

    if operands[0].immediate? and operands[0].value == 0 and opcode !~ /s$/
        # Adding 0 in place with no flag side effect: nothing to emit.
        return
    end

    $asm.puts "#{opcode} #{arm64TACOperands(operands, kind)}"
end
+
# Emit with operands already in assembly order (no destination rotation).
def emitARM64Unflipped(opcode, operands, kind)
    $asm.puts([opcode, arm64Operands(operands, kind)].join(" "))
end
+
# Emit a three-address-code op (destination first; see arm64TACOperands).
def emitARM64TAC(opcode, operands, kind)
    $asm.puts([opcode, arm64TACOperands(operands, kind)].join(" "))
end
+
# Emit with the llint destination-last operand rotated to the front.
def emitARM64(opcode, operands, kind)
    $asm.puts([opcode, arm64FlippedOperands(operands, kind)].join(" "))
end
+
# Emit a memory access, switching to the negative-offset opcode variant
# (e.g. ldur/stur) when the Address displacement is below zero.
def emitARM64Access(opcode, opcodeNegativeOffset, register, memory, kind)
    needsNegativeForm = memory.is_a?(Address) && memory.offset.value < 0
    actualOpcode = needsNegativeForm ? opcodeNegativeOffset : opcode
    $asm.puts "#{actualOpcode} #{register.arm64Operand(kind)}, #{memory.arm64Operand(kind)}"
end
+
# Emits a shift. Register shift amounts use the variable-shift opcode
# (opcodeRegs); immediate amounts are lowered to a bitfield-move opcode
# (opcodeImmediate, i.e. ubfm/sbfm), with the block translating the shift
# amount into that instruction's (immr, imms) pair.
def emitARM64Shift(opcodeRegs, opcodeImmediate, operands, kind)
    if operands.size == 3 && operands[1].immediate?
        immr, imms = yield operands[1].value
        $asm.puts "#{opcodeImmediate} #{operands[2].arm64Operand(kind)}, #{operands[0].arm64Operand(kind)}, \##{immr}, \##{imms}"
    elsif operands.size == 2 && operands[0].immediate?
        immr, imms = yield operands[0].value
        $asm.puts "#{opcodeImmediate} #{operands[1].arm64Operand(kind)}, #{operands[1].arm64Operand(kind)}, \##{immr}, \##{imms}"
    else
        emitARM64TAC(opcodeRegs, operands, kind)
    end
end
+
# Emits a compare instruction over all but the last operand, then a
# conditional branch to the label in the last operand.
def emitARM64Branch(opcode, operands, kind, branchOpcode)
    emitARM64Unflipped(opcode, operands[0...-1], kind)
    $asm.puts "#{branchOpcode} #{operands.last.asmLabel}"
end
+
# Emits a compare-and-set: subs into the zero register performs the compare,
# then csinc materializes 0/1 into the result operand. Note compareCode is the
# INVERSE of the condition being computed (csinc picks wzr when it holds,
# wzr + 1 otherwise).
def emitARM64Compare(operands, kind, compareCode)
    emitARM64Unflipped("subs #{arm64GPRName('xzr', kind)}, ", operands[0...-1], kind)
    $asm.puts "csinc #{operands.last.arm64Operand(:int)}, wzr, wzr, #{compareCode}"
end
+
# Materializes a 64-bit immediate into target using at most one movz/movn
# followed by movk instructions, skipping 16-bit chunks that the first
# instruction already implies (0x0000 for positive values, 0xffff for
# negative ones). Negative values start from movn so the implied chunks are
# all-ones.
def emitARM64MoveImmediate(value, target)
    needFirstInstruction = true
    negative = value < 0
    impliedChunk = negative ? 0xffff : 0
    [48, 32, 16, 0].each do |shift|
        chunk = (value >> shift) & 0xffff
        # Skip chunks the leading movz/movn already supplies; the shift-0
        # chunk is still emitted when nothing has been emitted yet, so that
        # "move 0" produces an instruction.
        next if chunk == impliedChunk && (shift != 0 || !needFirstInstruction)
        if needFirstInstruction
            needFirstInstruction = false
            if negative
                $asm.puts "movn #{target.arm64Operand(:ptr)}, \##{(~chunk) & 0xffff}, lsl \##{shift}"
            else
                $asm.puts "movz #{target.arm64Operand(:ptr)}, \##{chunk}, lsl \##{shift}"
            end
        else
            $asm.puts "movk #{target.arm64Operand(:ptr)}, \##{chunk}, lsl \##{shift}"
        end
    end
end
+
# ARM64 backend: reopens the AST Instruction class to lower each offlineasm
# opcode to concrete ARM64 assembly text, emitted through the global $asm.
# Kind symbols select register width: :int -> 32-bit (w), :ptr -> 64-bit (x),
# :double -> FP (d) registers.
class Instruction
    def lowerARM64
        $asm.comment codeOriginString
        $asm.annotation annotation if $enableInstrAnnotations

        case opcode
        # Integer/pointer arithmetic and bitwise ops.
        when 'addi'
            emitARM64Add("add", operands, :int)
        when 'addis'
            emitARM64Add("adds", operands, :int)
        when 'addp'
            emitARM64Add("add", operands, :ptr)
        when 'addps'
            emitARM64Add("adds", operands, :ptr)
        when 'addq'
            emitARM64Add("add", operands, :ptr)
        when "andi"
            emitARM64TAC("and", operands, :int)
        when "andp"
            emitARM64TAC("and", operands, :ptr)
        when "andq"
            emitARM64TAC("and", operands, :ptr)
        when "ori"
            emitARM64TAC("orr", operands, :int)
        when "orp"
            emitARM64TAC("orr", operands, :ptr)
        when "orq"
            emitARM64TAC("orr", operands, :ptr)
        when "xori"
            emitARM64TAC("eor", operands, :int)
        when "xorp"
            emitARM64TAC("eor", operands, :ptr)
        when "xorq"
            emitARM64TAC("eor", operands, :ptr)
        # Shifts: immediate shift amounts are lowered to ubfm/sbfm bitfield
        # moves; the block computes that instruction's (immr, imms) pair.
        when "lshifti"
            emitARM64Shift("lslv", "ubfm", operands, :int) {
                | value |
                [32 - value, 31 - value]
            }
        when "lshiftp"
            emitARM64Shift("lslv", "ubfm", operands, :ptr) {
                | value |
                [64 - value, 63 - value]
            }
        when "lshiftq"
            emitARM64Shift("lslv", "ubfm", operands, :ptr) {
                | value |
                [64 - value, 63 - value]
            }
        when "rshifti"
            emitARM64Shift("asrv", "sbfm", operands, :int) {
                | value |
                [value, 31]
            }
        when "rshiftp"
            emitARM64Shift("asrv", "sbfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        when "rshiftq"
            emitARM64Shift("asrv", "sbfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        when "urshifti"
            emitARM64Shift("lsrv", "ubfm", operands, :int) {
                | value |
                [value, 31]
            }
        when "urshiftp"
            emitARM64Shift("lsrv", "ubfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        when "urshiftq"
            emitARM64Shift("lsrv", "ubfm", operands, :ptr) {
                | value |
                [value, 63]
            }
        # Multiply: madd with the zero register as addend gives a plain mul.
        when "muli"
            $asm.puts "madd #{arm64TACOperands(operands, :int)}, wzr"
        when "mulp"
            $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr"
        when "mulq"
            $asm.puts "madd #{arm64TACOperands(operands, :ptr)}, xzr"
        when "subi"
            emitARM64TAC("sub", operands, :int)
        when "subp"
            emitARM64TAC("sub", operands, :ptr)
        when "subq"
            emitARM64TAC("sub", operands, :ptr)
        when "subis"
            emitARM64TAC("subs", operands, :int)
        # Negate: subtract from the zero register.
        when "negi"
            $asm.puts "sub #{operands[0].arm64Operand(:int)}, wzr, #{operands[0].arm64Operand(:int)}"
        when "negp"
            $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
        when "negq"
            $asm.puts "sub #{operands[0].arm64Operand(:ptr)}, xzr, #{operands[0].arm64Operand(:ptr)}"
        # Loads/stores: emitARM64Access picks the unscaled (ldur/stur-family)
        # form for negative address offsets.
        when "loadi"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :int)
        when "loadis"
            emitARM64Access("ldrsw", "ldursw", operands[1], operands[0], :ptr)
        when "loadp"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr)
        when "loadq"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :ptr)
        when "storei"
            emitARM64Unflipped("str", operands, :int)
        when "storep"
            emitARM64Unflipped("str", operands, :ptr)
        when "storeq"
            emitARM64Unflipped("str", operands, :ptr)
        when "loadb"
            emitARM64Access("ldrb", "ldurb", operands[1], operands[0], :int)
        when "loadbs"
            emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :int)
        when "storeb"
            emitARM64Unflipped("strb", operands, :int)
        when "loadh"
            emitARM64Access("ldrh", "ldurh", operands[1], operands[0], :int)
        when "loadhs"
            emitARM64Access("ldrsh", "ldursh", operands[1], operands[0], :int)
        when "storeh"
            emitARM64Unflipped("strh", operands, :int)
        when "loadd"
            emitARM64Access("ldr", "ldur", operands[1], operands[0], :double)
        when "stored"
            emitARM64Unflipped("str", operands, :double)
        # Double-precision arithmetic.
        when "addd"
            emitARM64TAC("fadd", operands, :double)
        when "divd"
            emitARM64TAC("fdiv", operands, :double)
        when "subd"
            emitARM64TAC("fsub", operands, :double)
        when "muld"
            emitARM64TAC("fmul", operands, :double)
        when "sqrtd"
            emitARM64("fsqrt", operands, :double)
        when "ci2d"
            emitARM64("scvtf", operands, [:int, :double])
        # Double-precision compare-and-branch. Condition codes are chosen so
        # that unordered (NaN) comparisons take or skip the branch per the
        # opcode's "un" (unordered) suffix semantics.
        when "bdeq"
            emitARM64Branch("fcmp", operands, :double, "b.eq")
        when "bdneq"
            # "Not equal and ordered": guard out the unordered case (V set
            # after fcmp) before testing ne, since ne alone is true for NaN.
            emitARM64Unflipped("fcmp", operands[0..1], :double)
            isUnordered = LocalLabel.unique("bdneq")
            $asm.puts "b.vs #{LocalLabelReference.new(codeOrigin, isUnordered).asmLabel}"
            $asm.puts "b.ne #{operands[2].asmLabel}"
            isUnordered.lower("ARM64")
        when "bdgt"
            emitARM64Branch("fcmp", operands, :double, "b.gt")
        when "bdgteq"
            emitARM64Branch("fcmp", operands, :double, "b.ge")
        when "bdlt"
            emitARM64Branch("fcmp", operands, :double, "b.mi")
        when "bdlteq"
            emitARM64Branch("fcmp", operands, :double, "b.ls")
        when "bdequn"
            # "Equal or unordered": branch on unordered first, then on equal.
            emitARM64Unflipped("fcmp", operands[0..1], :double)
            $asm.puts "b.vs #{operands[2].asmLabel}"
            $asm.puts "b.eq #{operands[2].asmLabel}"
        when "bdnequn"
            emitARM64Branch("fcmp", operands, :double, "b.ne")
        when "bdgtun"
            emitARM64Branch("fcmp", operands, :double, "b.hi")
        when "bdgtequn"
            emitARM64Branch("fcmp", operands, :double, "b.pl")
        when "bdltun"
            emitARM64Branch("fcmp", operands, :double, "b.lt")
        when "bdltequn"
            emitARM64Branch("fcmp", operands, :double, "b.le")
        when "btd2i"
            # FIXME: May be a good idea to just get rid of this instruction, since the interpreter
            # currently does not use it.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "td2i"
            emitARM64("fcvtzs", operands, [:double, :int])
        when "bcd2i"
            # FIXME: Remove this instruction, or use it and implement it. Currently it's not
            # used.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "movdz"
            # FIXME: Remove it or support it.
            raise "ARM64 does not support this opcode yet, #{codeOriginString}"
        when "pop"
            operands.each_slice(2) {
                | ops |
                # Note that the operands are in the reverse order of the case for push.
                # This is due to the fact that order matters for pushing and popping, and
                # on platforms that only push/pop one slot at a time they pop their
                # arguments in the reverse order that they were pushed. In order to remain
                # compatible with those platforms we assume here that that's what has been done.

                # So for example, if we did push(A, B, C, D), we would then pop(D, C, B, A).
                # But since the ordering of arguments doesn't change on arm64 between the stp and ldp
                # instructions we need to flip flop the argument positions that were passed to us.
                $asm.puts "ldp #{ops[1].arm64Operand(:ptr)}, #{ops[0].arm64Operand(:ptr)}, [sp], #16"
            }
        when "push"
            operands.each_slice(2) {
                | ops |
                $asm.puts "stp #{ops[0].arm64Operand(:ptr)}, #{ops[1].arm64Operand(:ptr)}, [sp, #-16]!"
            }
        when "move"
            if operands[0].immediate?
                emitARM64MoveImmediate(operands[0].value, operands[1])
            else
                emitARM64("mov", operands, :ptr)
            end
        # Sign/zero extension between 32-bit and 64-bit registers.
        when "sxi2p"
            emitARM64("sxtw", operands, [:int, :ptr])
        when "sxi2q"
            emitARM64("sxtw", operands, [:int, :ptr])
        when "zxi2p"
            emitARM64("uxtw", operands, [:int, :ptr])
        when "zxi2q"
            emitARM64("uxtw", operands, [:int, :ptr])
        when "nop"
            $asm.puts "nop"
        # Integer compare-and-branch. Comparisons against immediate 0 use the
        # dedicated cbz/cbnz forms; everything else compares via subs into the
        # zero register followed by a conditional branch.
        when "bieq", "bbeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs wzr, ", operands, :int, "b.eq")
            end
        when "bpeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
            end
        when "bqeq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.eq")
            end
        when "bineq", "bbneq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbnz #{operands[1].arm64Operand(:int)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbnz #{operands[0].arm64Operand(:int)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs wzr, ", operands, :int, "b.ne")
            end
        when "bpneq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne")
            end
        when "bqneq"
            if operands[0].immediate? and operands[0].value == 0
                $asm.puts "cbnz #{operands[1].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            elsif operands[1].immediate? and operands[1].value == 0
                $asm.puts "cbnz #{operands[0].arm64Operand(:ptr)}, #{operands[2].asmLabel}"
            else
                emitARM64Branch("subs xzr, ", operands, :ptr, "b.ne")
            end
        when "bia", "bba"
            emitARM64Branch("subs wzr, ", operands, :int, "b.hi")
        when "bpa"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi")
        when "bqa"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hi")
        when "biaeq", "bbaeq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.hs")
        when "bpaeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs")
        when "bqaeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.hs")
        when "bib", "bbb"
            emitARM64Branch("subs wzr, ", operands, :int, "b.lo")
        when "bpb"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo")
        when "bqb"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lo")
        when "bibeq", "bbbeq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.ls")
        when "bpbeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls")
        when "bqbeq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ls")
        when "bigt", "bbgt"
            emitARM64Branch("subs wzr, ", operands, :int, "b.gt")
        when "bpgt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt")
        when "bqgt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.gt")
        when "bigteq", "bbgteq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.ge")
        when "bpgteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge")
        when "bqgteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.ge")
        when "bilt", "bblt"
            emitARM64Branch("subs wzr, ", operands, :int, "b.lt")
        when "bplt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt")
        when "bqlt"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.lt")
        when "bilteq", "bblteq"
            emitARM64Branch("subs wzr, ", operands, :int, "b.le")
        when "bplteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.le")
        when "bqlteq"
            emitARM64Branch("subs xzr, ", operands, :ptr, "b.le")
        when "jmp"
            if operands[0].label?
                $asm.puts "b #{operands[0].asmLabel}"
            else
                emitARM64Unflipped("br", operands, :ptr)
            end
        when "call"
            if operands[0].label?
                $asm.puts "bl #{operands[0].asmLabel}"
            else
                emitARM64Unflipped("blr", operands, :ptr)
            end
        when "break"
            $asm.puts "brk \#0"
        when "ret"
            $asm.puts "ret"
        # Compare-and-set. Note the condition code passed in is the INVERSE of
        # the comparison, because emitARM64Compare materializes the result
        # with csinc (picks 0 when the inverted condition holds, else 1).
        when "cieq", "cbeq"
            emitARM64Compare(operands, :int, "ne")
        when "cpeq"
            emitARM64Compare(operands, :ptr, "ne")
        when "cqeq"
            emitARM64Compare(operands, :ptr, "ne")
        when "cineq", "cbneq"
            emitARM64Compare(operands, :int, "eq")
        when "cpneq"
            emitARM64Compare(operands, :ptr, "eq")
        when "cqneq"
            emitARM64Compare(operands, :ptr, "eq")
        when "cia", "cba"
            emitARM64Compare(operands, :int, "ls")
        when "cpa"
            emitARM64Compare(operands, :ptr, "ls")
        when "cqa"
            emitARM64Compare(operands, :ptr, "ls")
        when "ciaeq", "cbaeq"
            emitARM64Compare(operands, :int, "lo")
        when "cpaeq"
            emitARM64Compare(operands, :ptr, "lo")
        when "cqaeq"
            emitARM64Compare(operands, :ptr, "lo")
        when "cib", "cbb"
            emitARM64Compare(operands, :int, "hs")
        when "cpb"
            emitARM64Compare(operands, :ptr, "hs")
        when "cqb"
            emitARM64Compare(operands, :ptr, "hs")
        when "cibeq", "cbbeq"
            emitARM64Compare(operands, :int, "hi")
        when "cpbeq"
            emitARM64Compare(operands, :ptr, "hi")
        when "cqbeq"
            emitARM64Compare(operands, :ptr, "hi")
        when "cilt", "cblt"
            emitARM64Compare(operands, :int, "ge")
        when "cplt"
            emitARM64Compare(operands, :ptr, "ge")
        when "cqlt"
            emitARM64Compare(operands, :ptr, "ge")
        when "cilteq", "cblteq"
            emitARM64Compare(operands, :int, "gt")
        when "cplteq"
            emitARM64Compare(operands, :ptr, "gt")
        when "cqlteq"
            emitARM64Compare(operands, :ptr, "gt")
        when "cigt", "cbgt"
            emitARM64Compare(operands, :int, "le")
        when "cpgt"
            emitARM64Compare(operands, :ptr, "le")
        when "cqgt"
            emitARM64Compare(operands, :ptr, "le")
        when "cigteq", "cbgteq"
            emitARM64Compare(operands, :int, "lt")
        when "cpgteq"
            emitARM64Compare(operands, :ptr, "lt")
        when "cqgteq"
            emitARM64Compare(operands, :ptr, "lt")
        # Stack slot access relative to sp (8 bytes per slot).
        when "peek"
            $asm.puts "ldr #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
        when "poke"
            $asm.puts "str #{operands[1].arm64Operand(:ptr)}, [sp, \##{operands[0].value * 8}]"
        # Bitwise moves between GPRs and FP registers.
        when "fp2d"
            emitARM64("fmov", operands, [:ptr, :double])
        when "fq2d"
            emitARM64("fmov", operands, [:ptr, :double])
        when "fd2p"
            emitARM64("fmov", operands, [:double, :ptr])
        when "fd2q"
            emitARM64("fmov", operands, [:double, :ptr])
        # Branches on the flags set by a preceding instruction.
        when "bo"
            $asm.puts "b.vs #{operands[0].asmLabel}"
        when "bs"
            $asm.puts "b.mi #{operands[0].asmLabel}"
        when "bz"
            $asm.puts "b.eq #{operands[0].asmLabel}"
        when "bnz"
            $asm.puts "b.ne #{operands[0].asmLabel}"
        when "leai"
            operands[0].arm64EmitLea(operands[1], :int)
        when "leap"
            operands[0].arm64EmitLea(operands[1], :ptr)
        when "leaq"
            operands[0].arm64EmitLea(operands[1], :ptr)
        when "smulli"
            $asm.puts "smaddl #{operands[2].arm64Operand(:ptr)}, #{operands[0].arm64Operand(:int)}, #{operands[1].arm64Operand(:int)}, xzr"
        when "memfence"
            $asm.puts "dmb sy"
        when "pcrtoaddr"
            $asm.puts "adr #{operands[1].arm64Operand(:ptr)}, #{operands[0].value}"
        when "nopCortexA53Fix835769"
            # Erratum workaround nop, compiled in only for Cortex-A53 builds.
            $asm.putStr("#if CPU(ARM64_CORTEXA53)")
            $asm.puts "nop"
            $asm.putStr("#endif")
        else
            lowerDefault
        end
    end
end
+
diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb
new file mode 100644
index 000000000..88c7d7abb
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/asm.rb
@@ -0,0 +1,340 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+$: << File.dirname(__FILE__)
+
+require "config"
+require "backends"
+require "digest/sha1"
+require "offsets"
+require "parser"
+require "self_hash"
+require "settings"
+require "transform"
+
# Assembler wraps the output stream for the generated file and knows how to
# print labels, instruction lines, and aggregated comments in either the
# inline-asm (C string) format or, when $emitWinAsm is set, MASM format.
# @state tracks whether we are inside an OFFLINE_ASM block (:asm) or plain
# C++ (:cpp); most emitters assert @state == :asm. Comment components
# (@comment, @annotation, @internalComment, @codeOrigin) accumulate until the
# next emitted line flushes them via lastComment.
class Assembler
    def initialize(outp)
        @outp = outp
        @state = :cpp
        @commentState = :none
        @comment = nil
        @internalComment = nil
        @annotation = nil
        @codeOrigin = nil
        @numLocalLabels = 0
        @numGlobalLabels = 0

        @newlineSpacerState = :none
        @lastlabel = ""
    end

    def enterAsm
        @outp.puts "OFFLINE_ASM_BEGIN" if !$emitWinAsm
        @state = :asm
    end

    def leaveAsm
        putsProcEndIfNeeded if $emitWinAsm
        putsLastComment
        @outp.puts "OFFLINE_ASM_END" if !$emitWinAsm
        @state = :cpp
    end

    def inAsm
        enterAsm
        yield
        leaveAsm
    end

    # Concatenates all the various components of the comment to dump.
    def lastComment
        separator = " "
        result = ""
        result = "#{@comment}" if @comment
        if @annotation and $enableInstrAnnotations
            result += separator if result != ""
            result += "#{@annotation}"
        end
        if @internalComment
            result += separator if result != ""
            result += "#{@internalComment}"
        end
        if @codeOrigin and $enableCodeOriginComments
            result += separator if result != ""
            result += "#{@codeOrigin}"
        end
        if result != ""
            result = $commentPrefix + " " + result
        end

        # Reset all the components that we've just sent to be dumped.
        @commentState = :none
        @comment = nil
        @annotation = nil
        @codeOrigin = nil
        @internalComment = nil
        result
    end

    # Puts a C Statement in the output stream.
    def putc(*line)
        raise unless @state == :asm
        @outp.puts(formatDump(" " + line.join(''), lastComment))
    end

    # Pads dumpStr so trailing comments line up at commentColumns.
    def formatDump(dumpStr, comment, commentColumns=$preferredCommentStartColumn)
        if comment.length > 0
            "%-#{commentColumns}s %s" % [dumpStr, comment]
        else
            dumpStr
        end
    end

    # private method for internal use only.
    def putAnnotation(text)
        raise unless @state == :asm
        if $enableInstrAnnotations
            @outp.puts text
            @annotation = nil
        end
    end

    def putLocalAnnotation()
        putAnnotation "    // #{@annotation}" if @annotation
    end

    def putGlobalAnnotation()
        putsNewlineSpacerIfAppropriate(:annotation)
        putAnnotation "// #{@annotation}" if @annotation
    end

    def putsLastComment
        comment = lastComment
        unless comment.empty?
            @outp.puts comment
        end
    end

    # Emits one assembly instruction line, quoted for inline asm unless
    # emitting Windows MASM.
    def puts(*line)
        raise unless @state == :asm
        if !$emitWinAsm
            @outp.puts(formatDump(" \"\\t" + line.join('') + "\\n\"", lastComment))
        else
            @outp.puts(formatDump(" " + line.join(''), lastComment))
        end
    end

    def print(line)
        raise unless @state == :asm
        @outp.print("\"" + line + "\"")
    end

    # Emits a blank separator line only when switching to a new spacer state,
    # so consecutive items of the same kind aren't double-spaced.
    def putsNewlineSpacerIfAppropriate(state)
        if @newlineSpacerState != state
            @outp.puts("\n")
            @newlineSpacerState = state
        end
    end

    # MASM only: open a PROC for a global label; remembered so it can be
    # closed with ENDP later.
    def putsProc(label, comment)
        raise unless $emitWinAsm
        @outp.puts(formatDump("#{label} PROC PUBLIC", comment))
        @lastlabel = label
    end

    def putsProcEndIfNeeded
        raise unless $emitWinAsm
        if @lastlabel != ""
            @outp.puts("#{@lastlabel} ENDP")
        end
        @lastlabel = ""
    end

    # Emits a global/opcode/glue label through the OFFLINE_ASM_* macros (or
    # the MASM equivalents). "llint_op_*" labels get the opcode treatment.
    def putsLabel(labelName, isGlobal)
        raise unless @state == :asm
        @numGlobalLabels += 1
        putsProcEndIfNeeded if $emitWinAsm and isGlobal
        putsNewlineSpacerIfAppropriate(:global)
        @internalComment = $enableLabelCountComments ? "Global Label #{@numGlobalLabels}" : nil
        if isGlobal
            if !$emitWinAsm
                @outp.puts(formatDump("OFFLINE_ASM_GLOBAL_LABEL(#{labelName})", lastComment))
            else
                putsProc(labelName, lastComment)
            end
        elsif /\Allint_op_/.match(labelName)
            if !$emitWinAsm
                @outp.puts(formatDump("OFFLINE_ASM_OPCODE_LABEL(op_#{$~.post_match})", lastComment))
            else
                label = "llint_" + "op_#{$~.post_match}"
                @outp.puts(formatDump(" _#{label}:", lastComment))
            end
        else
            if !$emitWinAsm
                @outp.puts(formatDump("OFFLINE_ASM_GLUE_LABEL(#{labelName})", lastComment))
            else
                @outp.puts(formatDump(" _#{labelName}:", lastComment))
            end
        end
        @newlineSpacerState = :none # After a global label, we can use another spacer.
    end

    def putsLocalLabel(labelName)
        raise unless @state == :asm
        @numLocalLabels += 1
        @outp.puts("\n")
        @internalComment = $enableLabelCountComments ? "Local Label #{@numLocalLabels}" : nil
        if !$emitWinAsm
            @outp.puts(formatDump(" OFFLINE_ASM_LOCAL_LABEL(#{labelName})", lastComment))
        else
            @outp.puts(formatDump(" #{labelName}:", lastComment))
        end
    end

    def self.externLabelReference(labelName)
        if !$emitWinAsm
            "\" LOCAL_REFERENCE(#{labelName}) \""
        else
            "#{labelName}"
        end
    end

    def self.labelReference(labelName)
        if !$emitWinAsm
            "\" LOCAL_LABEL_STRING(#{labelName}) \""
        else
            "_#{labelName}"
        end
    end

    def self.localLabelReference(labelName)
        if !$emitWinAsm
            "\" LOCAL_LABEL_STRING(#{labelName}) \""
        else
            "#{labelName}"
        end
    end

    def self.cLabelReference(labelName)
        if /\Allint_op_/.match(labelName)
            "op_#{$~.post_match}" # strip opcodes of their llint_ prefix.
        else
            "#{labelName}"
        end
    end

    def self.cLocalLabelReference(labelName)
        "#{labelName}"
    end

    # Records a code-origin comment. The first origin is held back (it may be
    # attached to the next emitted line); once a second arrives, both are
    # printed immediately and subsequent origins stream straight out.
    def codeOrigin(text)
        case @commentState
        when :none
            @codeOrigin = text
            @commentState = :one
        when :one
            if $enableCodeOriginComments
                @outp.puts " " + $commentPrefix + " #{@codeOrigin}"
                @outp.puts " " + $commentPrefix + " #{text}"
            end
            @codeOrigin = nil
            @commentState = :many
        when :many
            @outp.puts $commentPrefix + " #{text}" if $enableCodeOriginComments
        else
            raise
        end
    end

    def comment(text)
        @comment = text
    end
    def annotation(text)
        @annotation = text
    end
end
+
IncludeFile.processIncludeOptions()

# CLI: asm.rb <input .asm> <extracted-offsets file> <output file>
asmFile = ARGV.shift
offsetsFile = ARGV.shift
outputFlnm = ARGV.shift

$stderr.puts "offlineasm: Parsing #{asmFile} and #{offsetsFile} and creating assembly file #{outputFlnm}."

begin
    configurationList = offsetsAndConfigurationIndex(offsetsFile)
rescue MissingMagicValuesException
    # The offsets binary didn't contain the magic markers; nothing to do.
    $stderr.puts "offlineasm: No magic values found. Skipping assembly file generation."
    exit 0
end

# MSVC builds emit a raw .asm file; everywhere else we emit inline-asm C++.
$emitWinAsm = isMSVC ? outputFlnm.index(".asm") != nil : false
$commentPrefix = $emitWinAsm ? ";" : "//"

# Hash of all inputs (source, offsets, and this tool itself), written as the
# output's first line so unchanged builds can be skipped below.
inputHash =
    $commentPrefix + " offlineasm input hash: " + parseHash(asmFile) +
    " " + Digest::SHA1.hexdigest(configurationList.map{|v| (v[0] + [v[1]]).join(' ')}.join(' ')) +
    " " + selfHash

if FileTest.exist? outputFlnm
    File.open(outputFlnm, "r") {
        | inp |
        firstLine = inp.gets
        if firstLine and firstLine.chomp == inputHash
            $stderr.puts "offlineasm: Nothing changed."
            exit 0
        end
    }
end

File.open(outputFlnm, "w") {
    | outp |
    $output = outp
    $output.puts inputHash

    $asm = Assembler.new($output)

    ast = parse(asmFile)

    # Lower the AST once per offsets/settings configuration.
    configurationList.each {
        | configuration |
        offsetsList = configuration[0]
        configIndex = configuration[1]
        forSettings(computeSettingsCombinations(ast)[configIndex], ast) {
            | concreteSettings, lowLevelAST, backend |
            # Resolve symbolic struct offsets/sizes against the extracted
            # values, sanity-check, then emit the backend-specific code.
            lowLevelAST = lowLevelAST.resolve(*buildOffsetsMap(lowLevelAST, offsetsList))
            lowLevelAST.validate
            emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) {
                $asm.inAsm {
                    lowLevelAST.lower(backend)
                }
            }
        }
    }
}

$stderr.puts "offlineasm: Assembly file #{outputFlnm} successfully generated."
+
diff --git a/Source/JavaScriptCore/offlineasm/ast.rb b/Source/JavaScriptCore/offlineasm/ast.rb
new file mode 100644
index 000000000..1241b7fe5
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/ast.rb
@@ -0,0 +1,1426 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+
+#
+# Base utility types for the AST.
+#
+
+# Valid methods for Node:
+#
+# node.children -> Returns an array of immediate children.
+#
+# node.descendants -> Returns an array of all strict descendants (children
+# and children of children, transitively).
+#
+# node.flatten -> Returns an array containing the strict descendants and
+# the node itself.
+#
+# node.filter(type) -> Returns an array containing those elements in
+# node.flatten that are of the given type (is_a? type returns true).
+#
+# node.mapChildren{|v| ...} -> Returns a new node with all children
+# replaced according to the given block.
+#
+# Examples:
+#
+# node.filter(Setting).uniq -> Returns all of the settings that the AST's
+# IfThenElse blocks depend on.
+#
+# node.filter(StructOffset).uniq -> Returns all of the structure offsets
+# that the AST depends on.
+
# Base class for every AST node. Subclasses supply #children; this class
# derives the traversal helpers (descendants, flatten, filter) from it.
class Node
    attr_reader :codeOrigin

    def initialize(codeOrigin)
        @codeOrigin = codeOrigin
    end

    def codeOriginString
        @codeOrigin.to_s
    end

    # All strict descendants (children, their children, and so on).
    def descendants
        children.flat_map { |child| child.flatten }
    end

    # This node followed by all of its descendants.
    def flatten
        descendants.unshift(self)
    end

    # Every node in this subtree (including self) that is_a? type.
    def filter(type)
        flatten.select { |node| node.is_a? type }
    end
end
+
# Base class for leaf nodes: nothing to traverse, nothing to rewrite.
class NoChildren < Node
    def initialize(codeOrigin)
        super
    end

    def children
        []
    end

    def mapChildren
        self
    end
end
+
# Hash key identifying a (struct, field) pair, used to canonicalize
# StructOffset instances. Implements hash/eql? so it works as a Hash key.
class StructOffsetKey
    attr_reader :struct, :field

    def initialize(struct, field)
        @struct = struct
        @field = field
    end

    def hash
        @struct.hash + @field.hash * 3
    end

    def eql?(other)
        @struct == other.struct && @field == other.field
    end
end
+
+#
+# AST nodes.
+#
+
# Symbolic offset of a field within a struct, resolved later against the
# extracted offsets. Instances are canonicalized per (struct, field) so that
# identical offsets can be deduplicated with uniq.
class StructOffset < NoChildren
    attr_reader :struct, :field

    @@mapping = {}

    def self.forField(codeOrigin, struct, field)
        key = StructOffsetKey.new(struct, field)
        @@mapping[key] ||= StructOffset.new(codeOrigin, struct, field)
    end

    def initialize(codeOrigin, struct, field)
        super(codeOrigin)
        @struct = struct
        @field = field
    end

    def dump
        "#{@struct}::#{@field}"
    end

    # Order by struct name first, then by field name.
    def <=>(other)
        return @struct <=> other.struct unless @struct == other.struct
        @field <=> other.field
    end

    def immediate?
        true
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# Symbolic sizeof(struct), resolved later against extracted values.
# Instances are canonicalized per struct name.
class Sizeof < NoChildren
    attr_reader :struct

    @@mapping = {}

    def self.forName(codeOrigin, struct)
        @@mapping[struct] ||= Sizeof.new(codeOrigin, struct)
    end

    def initialize(codeOrigin, struct)
        super(codeOrigin)
        @struct = struct
    end

    def dump
        "sizeof #{@struct}"
    end

    def <=>(other)
        @struct <=> other.struct
    end

    def immediate?
        true
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# A literal integer operand.
class Immediate < NoChildren
    attr_reader :value

    def initialize(codeOrigin, value)
        super(codeOrigin)
        raise "Bad immediate value #{value.inspect} at #{codeOriginString}" unless value.is_a? Integer
        @value = value
    end

    def dump
        @value.to_s
    end

    def ==(other)
        other.is_a?(Immediate) && other.value == @value
    end

    def immediate?
        true
    end

    def immediateOperand?
        true
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# Symbolic sum of two immediate expressions; still usable directly as an
# instruction operand (immediateOperand? is true).
class AddImmediates < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children
        [@left, @right]
    end

    def mapChildren(&block)
        AddImmediates.new(codeOrigin, block.call(@left), block.call(@right))
    end

    def dump
        "(#{@left.dump} + #{@right.dump})"
    end

    def value
        "#{@left.value} + #{@right.value}"
    end

    def immediate?
        true
    end

    def immediateOperand?
        true
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# Symbolic difference of two immediate expressions; still usable directly as
# an instruction operand (immediateOperand? is true).
class SubImmediates < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children
        [@left, @right]
    end

    def mapChildren(&block)
        SubImmediates.new(codeOrigin, block.call(@left), block.call(@right))
    end

    def dump
        "(#{@left.dump} - #{@right.dump})"
    end

    def value
        "#{@left.value} - #{@right.value}"
    end

    def immediate?
        true
    end

    def immediateOperand?
        true
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# Symbolic product of two immediate expressions. Unlike Add/Sub, this must be
# folded before use as an instruction operand (immediateOperand? is false).
class MulImmediates < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children
        [@left, @right]
    end

    def mapChildren(&block)
        MulImmediates.new(codeOrigin, block.call(@left), block.call(@right))
    end

    def dump
        "(#{@left.dump} * #{@right.dump})"
    end

    def immediate?
        true
    end

    def immediateOperand?
        false
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# Symbolic negation of an immediate expression; must be folded before use as
# an instruction operand (immediateOperand? is false).
class NegImmediate < Node
    attr_reader :child

    def initialize(codeOrigin, child)
        super(codeOrigin)
        @child = child
    end

    def children
        [@child]
    end

    def mapChildren(&block)
        NegImmediate.new(codeOrigin, block.call(@child))
    end

    def dump
        "(-#{@child.dump})"
    end

    def immediate?
        true
    end

    def immediateOperand?
        false
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# Symbolic bitwise-or of two immediate expressions; must be folded before use
# as an instruction operand (immediateOperand? is false).
class OrImmediates < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children
        [@left, @right]
    end

    def mapChildren(&block)
        OrImmediates.new(codeOrigin, block.call(@left), block.call(@right))
    end

    def dump
        "(#{@left.dump} | #{@right.dump})"
    end

    def immediate?
        true
    end

    def immediateOperand?
        false
    end

    def address?
        false
    end

    def label?
        false
    end

    def register?
        false
    end
end
+
# Symbolic bitwise AND of two immediate expressions.
class AndImmediates < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children; [@left, @right]; end

    def mapChildren
        AndImmediates.new(codeOrigin, (yield @left), (yield @right))
    end

    def dump; "(#{@left.dump} & #{@right.dump})"; end

    def address?; false; end
    def label?; false; end
    def immediate?; true; end
    def immediateOperand?; false; end
    def register?; false; end
end
+
# Symbolic bitwise XOR of two immediate expressions.
class XorImmediates < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children; [@left, @right]; end

    def mapChildren
        XorImmediates.new(codeOrigin, (yield @left), (yield @right))
    end

    def dump; "(#{@left.dump} ^ #{@right.dump})"; end

    def address?; false; end
    def label?; false; end
    def immediate?; true; end
    def immediateOperand?; false; end
    def register?; false; end
end
+
# Symbolic bitwise complement of an immediate expression.
class BitnotImmediate < Node
    attr_reader :child

    def initialize(codeOrigin, child)
        super(codeOrigin)
        @child = child
    end

    def children; [@child]; end

    def mapChildren
        BitnotImmediate.new(codeOrigin, (yield @child))
    end

    def dump; "(~#{@child.dump})"; end

    def address?; false; end
    def label?; false; end
    def immediate?; true; end
    def immediateOperand?; false; end
    def register?; false; end
end
+
# A quoted string literal; +value+ holds the text with the surrounding
# quote characters removed.
class StringLiteral < NoChildren
    attr_reader :value

    def initialize(codeOrigin, value)
        super(codeOrigin)
        # Validate before slicing: calling [1..-2] on a non-String would
        # raise an unhelpful NoMethodError before this diagnostic fired.
        raise "Bad string literal #{value.inspect} at #{codeOriginString}" unless value.is_a? String
        @value = value[1..-2]
    end

    def dump
        "#{value}"
    end

    def ==(other)
        other.is_a? StringLiteral and other.value == @value
    end

    def address?
        false
    end

    def label?
        false
    end

    def immediate?
        false
    end

    def immediateOperand?
        false
    end

    def register?
        false
    end
end
+
# A general-purpose register operand (e.g. t0, cfr). Instances are
# interned: one RegisterID per name, shared across the AST.
class RegisterID < NoChildren
    attr_reader :name

    def initialize(codeOrigin, name)
        super(codeOrigin)
        @name = name
    end

    @@mapping = {}

    # Returns the unique RegisterID for +name+, creating it on first use.
    def self.forName(codeOrigin, name)
        unless @@mapping[name]
            @@mapping[name] = RegisterID.new(codeOrigin, name)
        end
        @@mapping[name]
    end

    def dump
        name
    end

    def address?
        false
    end

    def label?
        false
    end

    def immediate?
        false
    end

    # Added for consistency with FPRegisterID and SpecialRegister, which
    # both answer immediateOperand?; without it, generic operand checks
    # raise NoMethodError when handed a general-purpose register.
    def immediateOperand?
        false
    end

    def register?
        true
    end
end
+
# A floating-point register operand (e.g. ft0). Interned: exactly one
# node per register name.
class FPRegisterID < NoChildren
    attr_reader :name

    @@mapping = {}

    def self.forName(codeOrigin, name)
        @@mapping[name] ||= FPRegisterID.new(codeOrigin, name)
    end

    def initialize(codeOrigin, name)
        super(codeOrigin)
        @name = name
    end

    def dump; name; end

    def address?; false; end
    def label?; false; end
    def immediate?; false; end
    def immediateOperand?; false; end
    def register?; true; end
end
+
# A backend-defined scratch register. Note: unlike the other node types it
# is constructed directly with just a name and deliberately does not call
# super, so it carries no code origin.
class SpecialRegister < NoChildren
    def initialize(name)
        @name = name
    end

    def address?; false; end
    def label?; false; end
    def immediate?; false; end
    def immediateOperand?; false; end
    def register?; true; end
end
+
# A macro variable or other named value. Interned: one Variable per name.
class Variable < NoChildren
    attr_reader :name

    @@mapping = {}

    def self.forName(codeOrigin, name)
        @@mapping[name] ||= Variable.new(codeOrigin, name)
    end

    def initialize(codeOrigin, name)
        super(codeOrigin)
        @name = name
    end

    def dump; name; end

    def inspect
        "<variable #{name} at #{codeOriginString}>"
    end
end
+
# A base-plus-offset memory operand, dumped as "offset[base]".
class Address < Node
    attr_reader :base, :offset

    def initialize(codeOrigin, base, offset)
        super(codeOrigin)
        @base = base
        @offset = offset
        raise "Bad base for address #{base.inspect} at #{codeOriginString}" unless base.is_a? Variable or base.register?
        raise "Bad offset for address #{offset.inspect} at #{codeOriginString}" unless offset.is_a? Variable or offset.immediate?
    end

    # A new Address displaced by extraOffset bytes.
    def withOffset(extraOffset)
        Address.new(codeOrigin, @base, Immediate.new(codeOrigin, @offset.value + extraOffset))
    end

    def children; [@base, @offset]; end

    def mapChildren
        Address.new(codeOrigin, (yield @base), (yield @offset))
    end

    def dump; "#{@offset.dump}[#{@base.dump}]"; end

    def address?; true; end
    def label?; false; end
    def immediate?; false; end
    def immediateOperand?; true; end
    def register?; false; end
end
+
# A base + index * scale + offset memory operand. Scale is restricted to
# the power-of-two strides hardware addressing modes support.
class BaseIndex < Node
    attr_reader :base, :index, :scale, :offset

    def initialize(codeOrigin, base, index, scale, offset)
        super(codeOrigin)
        @base = base
        @index = index
        @scale = scale
        raise unless [1, 2, 4, 8].include? @scale
        @offset = offset
    end

    # log2 of the scale, for backends that encode scaling as a left shift.
    def scaleShift
        case scale
        when 1 then 0
        when 2 then 1
        when 4 then 2
        when 8 then 3
        else
            raise "Bad scale at #{codeOriginString}"
        end
    end

    # A new BaseIndex displaced by extraOffset bytes.
    def withOffset(extraOffset)
        BaseIndex.new(codeOrigin, @base, @index, @scale, Immediate.new(codeOrigin, @offset.value + extraOffset))
    end

    # The scale is plain data, not a child node, so it is not traversed.
    def children; [@base, @index, @offset]; end

    def mapChildren
        BaseIndex.new(codeOrigin, (yield @base), (yield @index), @scale, (yield @offset))
    end

    def dump; "#{@offset.dump}[#{@base.dump}, #{@index.dump}, #{@scale}]"; end

    def address?; true; end
    def label?; false; end
    def immediate?; false; end
    def immediateOperand?; false; end
    def register?; false; end
end
+
# A memory operand at a fixed (immediate) address, dumped as "addr[]".
class AbsoluteAddress < NoChildren
    attr_reader :address

    def initialize(codeOrigin, address)
        super(codeOrigin)
        @address = address
    end

    # A new AbsoluteAddress displaced by extraOffset bytes.
    def withOffset(extraOffset)
        AbsoluteAddress.new(codeOrigin, Immediate.new(codeOrigin, @address.value + extraOffset))
    end

    def dump; "#{@address.dump}[]"; end

    def address?; true; end
    def label?; false; end
    def immediate?; false; end
    def immediateOperand?; true; end
    def register?; false; end
end
+
# A single assembly instruction: an opcode plus operand nodes and an
# optional annotation string.
class Instruction < Node
    attr_reader :opcode, :operands, :annotation

    def initialize(codeOrigin, opcode, operands, annotation=nil)
        super(codeOrigin)
        @opcode = opcode
        @operands = operands
        @annotation = annotation
    end

    def children; @operands; end

    def mapChildren(&block)
        Instruction.new(codeOrigin, @opcode, @operands.map(&block), @annotation)
    end

    def dump
        "\t" + @opcode.to_s + " " + @operands.map{|op| op.dump}.join(", ")
    end

    # Fallback lowering for the few opcodes every backend treats the same.
    def lowerDefault
        case opcode
        when "localAnnotation"
            $asm.putLocalAnnotation
        when "globalAnnotation"
            $asm.putGlobalAnnotation
        when "emit"
            $asm.puts "#{operands[0].dump}"
        else
            raise "Unhandled opcode #{opcode} at #{codeOriginString}"
        end
    end
end
+
# Placeholder node emitted where an error directive appears in the source.
class Error < NoChildren
    def initialize(codeOrigin)
        super(codeOrigin)
    end

    def dump; "\terror"; end
end
+
# A "const name = value" declaration binding a variable to an expression.
class ConstDecl < Node
    attr_reader :variable, :value

    def initialize(codeOrigin, variable, value)
        super(codeOrigin)
        @variable = variable
        @value = value
    end

    def children; [@variable, @value]; end

    def mapChildren
        ConstDecl.new(codeOrigin, (yield @variable), (yield @value))
    end

    def dump; "const #{@variable.dump} = #{@value.dump}"; end
end
+
+$labelMapping = {}
+$referencedExternLabels = Array.new
+
# A global (function-level) label. Labels are interned in $labelMapping and
# start out assumed-extern until a definition is seen in the current file.
class Label < NoChildren
    attr_reader :name

    def initialize(codeOrigin, name)
        super(codeOrigin)
        @name = name
        @extern = true
        @global = false
    end

    # Returns the unique Label for +name+, creating it on first use.
    # +definedInFile+ marks the label as defined here (clears the extern flag).
    # Raises if the name is already taken by a non-Label (e.g. a LocalLabel).
    def self.forName(codeOrigin, name, definedInFile = false)
        if $labelMapping[name]
            raise "Label name collision: #{name}" unless $labelMapping[name].is_a? Label
        else
            $labelMapping[name] = Label.new(codeOrigin, name)
        end
        if definedInFile
            $labelMapping[name].clearExtern()
        end
        $labelMapping[name]
    end

    # Marks +name+ as a global label, creating the label if needed.
    # Raises on a duplicate global declaration.
    def self.setAsGlobal(codeOrigin, name)
        if $labelMapping[name]
            label = $labelMapping[name]
            raise "Label: #{name} declared global multiple times" unless not label.global?
            label.setGlobal()
        else
            newLabel = Label.new(codeOrigin, name)
            newLabel.setGlobal()
            $labelMapping[name] = newLabel
        end
    end

    # Forgets which extern labels have been referenced (see LabelReference#used).
    def self.resetReferenced
        $referencedExternLabels = Array.new
    end

    # Yields the name of each extern label that was actually referenced.
    def self.forReferencedExtern()
        $referencedExternLabels.each {
            | label |
            yield "#{label.name}"
        }
    end

    def clearExtern
        @extern = false
    end

    def extern?
        @extern
    end

    def setGlobal
        @global = true
    end

    def global?
        @global
    end

    def dump
        "#{name}:"
    end
end
+
# A local label (name beginning with "."). Shares the $labelMapping
# registry with Label, so the two namespaces cannot collide.
class LocalLabel < NoChildren
    attr_reader :name

    def initialize(codeOrigin, name)
        super(codeOrigin)
        @name = name
    end

    # Counter used to disambiguate generated label names in unique().
    @@uniqueNameCounter = 0

    def self.forName(codeOrigin, name)
        if $labelMapping[name]
            raise "Label name collision: #{name}" unless $labelMapping[name].is_a? LocalLabel
        else
            $labelMapping[name] = LocalLabel.new(codeOrigin, name)
        end
        $labelMapping[name]
    end

    # Creates a fresh label named after +comment+, appending a counter only
    # if the plain "_comment" name is already taken.
    def self.unique(comment)
        newName = "_#{comment}"
        if $labelMapping[newName]
            while $labelMapping[newName = "_#{@@uniqueNameCounter}_#{comment}"]
                @@uniqueNameCounter += 1
            end
        end
        forName(nil, newName)
    end

    # Name with the leading "." replaced by "_", safe for assembler output.
    def cleanName
        if name =~ /^\./
            "_" + name[1..-1]
        else
            name
        end
    end

    def dump
        "#{name}:"
    end
end
+
# A use (as opposed to a definition) of a global Label.
class LabelReference < Node
    attr_reader :label

    def initialize(codeOrigin, label)
        super(codeOrigin)
        @label = label
    end

    def children
        [@label]
    end

    def mapChildren
        LabelReference.new(codeOrigin, (yield @label))
    end

    def name
        label.name
    end

    # True when the referenced name resolves to an extern (undefined-here) Label.
    def extern?
        $labelMapping[name].is_a? Label and $labelMapping[name].extern?
    end

    # Records that an extern label was actually referenced, so backends can
    # emit declarations only for labels that are used (see Label.forReferencedExtern).
    def used
        if !$referencedExternLabels.include?(@label) and extern?
            $referencedExternLabels.push(@label)
        end
    end

    def dump
        label.name
    end

    # Assembler-level spelling of the label (asmLabel is added in backends.rb).
    def value
        asmLabel()
    end

    def address?
        false
    end

    def label?
        true
    end

    def immediate?
        false
    end

    def immediateOperand?
        true
    end
end
+
# A use of a LocalLabel. Although it subclasses NoChildren, the wrapped
# label is exposed through children/mapChildren so transforms can rewrite it.
class LocalLabelReference < NoChildren
    attr_reader :label

    def initialize(codeOrigin, label)
        super(codeOrigin)
        @label = label
    end

    def children; [@label]; end

    def mapChildren
        LocalLabelReference.new(codeOrigin, (yield @label))
    end

    def name; @label.name; end

    def dump; @label.name; end

    # Assembler-level spelling of the label (asmLabel is added in backends.rb).
    def value; asmLabel(); end

    def address?; false; end
    def label?; true; end
    def immediate?; false; end
    def immediateOperand?; true; end
end
+
# An ordered list of statements (instructions, labels, declarations).
class Sequence < Node
    attr_reader :list

    def initialize(codeOrigin, list)
        super(codeOrigin)
        @list = list
    end

    def children; @list; end

    def mapChildren(&block)
        Sequence.new(codeOrigin, @list.map(&block))
    end

    def dump
        @list.map{|node| node.dump}.join("\n")
    end
end
+
# Singleton node for the boolean constant "true" in settings expressions.
class True < NoChildren
    def initialize
        super(nil)
    end

    # One shared instance; True has no per-node state.
    @@instance = True.new

    def self.instance; @@instance; end

    def value; true; end

    def dump; "true"; end
end
+
# Singleton node for the boolean constant "false" in settings expressions.
class False < NoChildren
    def initialize
        super(nil)
    end

    # One shared instance; False has no per-node state.
    @@instance = False.new

    def self.instance; @@instance; end

    def value; false; end

    def dump; "false"; end
end
+
# Monkey-patch Ruby's boolean classes so plain true/false can be used
# anywhere an AST node is expected (asNode maps them onto the singletons).
class TrueClass
    def asNode
        True.instance
    end
end

class FalseClass
    def asNode
        False.instance
    end
end
+
# A named build setting referenced from "if" expressions. Interned: one
# Setting node per name.
class Setting < NoChildren
    attr_reader :name

    @@mapping = {}

    def self.forName(codeOrigin, name)
        @@mapping[name] ||= Setting.new(codeOrigin, name)
    end

    def initialize(codeOrigin, name)
        super(codeOrigin)
        @name = name
    end

    def dump; name; end
end
+
# Boolean conjunction of two settings expressions.
class And < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children; [@left, @right]; end

    def mapChildren
        And.new(codeOrigin, (yield @left), (yield @right))
    end

    def dump; "(#{@left.dump} and #{@right.dump})"; end
end
+
# Boolean disjunction of two settings expressions.
class Or < Node
    attr_reader :left, :right

    def initialize(codeOrigin, left, right)
        super(codeOrigin)
        @left = left
        @right = right
    end

    def children; [@left, @right]; end

    def mapChildren
        Or.new(codeOrigin, (yield @left), (yield @right))
    end

    def dump; "(#{@left.dump} or #{@right.dump})"; end
end
+
# Boolean negation of a settings expression.
class Not < Node
    attr_reader :child

    def initialize(codeOrigin, child)
        super(codeOrigin)
        @child = child
    end

    def children; [@child]; end

    def mapChildren
        Not.new(codeOrigin, (yield @child))
    end

    def dump; "(not #{@child.dump})"; end
end
+
# A no-op statement; used as a placeholder (e.g. the absent else arm of an
# IfThenElse) and lowered to nothing by every backend.
class Skip < NoChildren
    def initialize(codeOrigin)
        super(codeOrigin)
    end

    def dump; "\tskip"; end
end
+
# Conditional over settings: "if predicate ... else ... end". The else arm
# defaults to a Skip and is filled in later via the elseCase accessor.
class IfThenElse < Node
    attr_reader :predicate, :thenCase
    attr_accessor :elseCase

    def initialize(codeOrigin, predicate, thenCase)
        super(codeOrigin)
        @predicate = predicate
        @thenCase = thenCase
        @elseCase = Skip.new(codeOrigin)
    end

    def children
        if @elseCase
            [@predicate, @thenCase, @elseCase]
        else
            [@predicate, @thenCase]
        end
    end

    def mapChildren
        # The constructor only takes (codeOrigin, predicate, thenCase);
        # the old code passed the mapped elseCase as a fourth argument,
        # raising ArgumentError whenever an IfThenElse was mapped. Build
        # the node first, then assign the else arm through its accessor.
        ifThenElse = IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase))
        ifThenElse.elseCase = yield @elseCase
        ifThenElse
    end

    def dump
        "if #{predicate.dump}\n" + thenCase.dump + "\nelse\n" + elseCase.dump + "\nend"
    end
end
+
# A macro definition: a name, formal parameters, and a body sequence.
class Macro < Node
    attr_reader :name, :variables, :body

    def initialize(codeOrigin, name, variables, body)
        super(codeOrigin)
        @name = name
        @variables = variables
        @body = body
    end

    # Formals first, body last, matching mapChildren's construction order.
    def children; @variables + [@body]; end

    def mapChildren(&block)
        Macro.new(codeOrigin, @name, @variables.map(&block), (yield @body))
    end

    def dump
        "macro #{name}(" + variables.map{|v| v.dump}.join(", ") + ")\n" + body.dump + "\nend"
    end
end
+
# An invocation of a macro with actual-argument nodes. Arguments must all
# be present and non-nil.
class MacroCall < Node
    attr_reader :name, :operands, :annotation

    def initialize(codeOrigin, name, operands, annotation)
        super(codeOrigin)
        @name = name
        @operands = operands
        raise unless @operands
        @operands.each{|operand| raise unless operand}
        @annotation = annotation
    end

    def children; @operands; end

    def mapChildren(&block)
        MacroCall.new(codeOrigin, @name, @operands.map(&block), @annotation)
    end

    def dump
        "\t#{name}(" + operands.map{|operand| operand.dump}.join(", ") + ")"
    end
end
+
diff --git a/Source/JavaScriptCore/offlineasm/backends.rb b/Source/JavaScriptCore/offlineasm/backends.rb
new file mode 100644
index 000000000..e7805dfe4
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/backends.rb
@@ -0,0 +1,175 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "arm"
+require "arm64"
+require "ast"
+require "x86"
+require "mips"
+require "sh4"
+require "cloop"
+
# Every backend name the settings machinery recognizes; these behave like
# reserved words during settings resolution (see the note below).
BACKENDS =
    [
     "X86",
     "X86_WIN",
     "X86_64",
     "X86_64_WIN",
     "ARM",
     "ARMv7",
     "ARMv7_TRADITIONAL",
     "ARM64",
     "MIPS",
     "SH4",
     "C_LOOP"
    ]
+
# Keep the set of working backends separate from the set of backends that might be
# supported. This is great because the BACKENDS list is almost like a reserved
# words list, in that it causes settings resolution to treat those words specially.
# Hence this lets us set aside the name of a backend we might want to support in
# the future while not actually supporting the backend yet.
#
# (Currently the two lists coincide: every known backend is also working.)
WORKING_BACKENDS =
    [
     "X86",
     "X86_WIN",
     "X86_64",
     "X86_64_WIN",
     "ARM",
     "ARMv7",
     "ARMv7_TRADITIONAL",
     "ARM64",
     "MIPS",
     "SH4",
     "C_LOOP"
    ]
+
# Anchored alternation matching exactly one backend name.
BACKEND_PATTERN = Regexp.new('\\A(' + BACKENDS.join(')|(') + ')\\Z')

# $allBackends holds every known backend; $validBackends holds the subset
# currently enabled (it can be narrowed by includeOnlyBackends).
$allBackends = {}
$validBackends = {}
BACKENDS.each {
    | backend |
    $validBackends[backend] = true
    $allBackends[backend] = true
}
+
# Narrows the enabled backends to those in +list+ that are already valid
# (set intersection; unknown names are silently dropped).
def includeOnlyBackends(list)
    $validBackends = list.each_with_object({}) do |backend, filtered|
        filtered[backend] = true if $validBackends[backend]
    end
end
+
# True if +backend+ names any known backend, enabled or not.
def isBackend?(backend)
    $allBackends[backend]
end

# True if +backend+ is in the currently enabled set.
def isValidBackend?(backend)
    $validBackends[backend]
end

# Names of the currently enabled backends.
def validBackends
    $validBackends.keys
end
+
class Node
    # Dispatches to the backend-specific lowering method (e.g. "lowerX86_64"),
    # recording the active backend and tagging any failure with this node's
    # source position.
    def lower(name)
        begin
            $activeBackend = name
            send("lower" + name)
        rescue => e
            # NOTE(review): appends to the exception's message string in
            # place; would raise if the message is frozen — confirm on newer
            # Ruby versions.
            e.message << "At #{codeOriginString}"
            raise e
        end
    end
end
+
+# Overrides for lower() for those nodes that are backend-agnostic
+
# Labels lower the same way on every backend, so they override lower()
# directly instead of providing per-backend methods.
class Label
    # Emits the label, stripping the leading sigil from its name.
    def lower(name)
        $asm.putsLabel(self.name[1..-1], @global)
    end
end

class LocalLabel
    # Local labels are prefixed to keep them out of the global namespace.
    def lower(name)
        $asm.putsLocalLabel "_offlineasm_#{self.name[1..-1]}"
    end
end
+
class LabelReference
    # Assembler-level spelling of the referenced label; extern labels get
    # the extern flavor so the assembler can declare them appropriately.
    def asmLabel
        if extern?
            Assembler.externLabelReference(name[1..-1])
        else
            Assembler.labelReference(name[1..-1])
        end
    end

    # C-level spelling, used by the C loop backend.
    def cLabel
        Assembler.cLabelReference(name[1..-1])
    end
end

class LocalLabelReference
    # Local labels carry the same "_offlineasm_" prefix used when they are
    # emitted by LocalLabel#lower.
    def asmLabel
        Assembler.localLabelReference("_offlineasm_"+name[1..-1])
    end

    def cLabel
        Assembler.cLocalLabelReference("_offlineasm_"+name[1..-1])
    end
end
+
class Skip
    # A skip lowers to nothing on every backend.
    def lower(name)
    end
end

class Sequence
    # Lowers each node in order. A backend may first rewrite the list via
    # getModifiedList<name>, or take over sequence lowering entirely via
    # lower<name>; otherwise each node is lowered individually.
    def lower(name)
        $activeBackend = name
        if respond_to? "getModifiedList#{name}"
            newList = send("getModifiedList#{name}")
            newList.each {
                | node |
                node.lower(name)
            }
        elsif respond_to? "lower#{name}"
            send("lower#{name}")
        else
            @list.each {
                | node |
                node.lower(name)
            }
        end
    end
end
+
diff --git a/Source/JavaScriptCore/offlineasm/cloop.rb b/Source/JavaScriptCore/offlineasm/cloop.rb
new file mode 100644
index 000000000..04a699814
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/cloop.rb
@@ -0,0 +1,1158 @@
+# Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+require "opt"
+
+# The CLoop llint backend is initially based on the ARMv7 backend, and
+# then further enhanced with a few instructions from the x86 backend to
+# support building for X64 targets. Hence, the shape of the generated
+# code and the usage convention of registers will look a lot like the
+# ARMv7 backend's.
+
# Maps an abstract operand type symbol to the member-access suffix used on
# the C loop's register union (e.g. :int32 -> ".i32").
def cloopMapType(type)
    suffix = {
        :int => ".i",                     :uint => ".u",
        :int32 => ".i32",                 :uint32 => ".u32",
        :int64 => ".i64",                 :uint64 => ".u64",
        :int8 => ".i8",                   :uint8 => ".u8",
        :int8Ptr => ".i8p",               :voidPtr => ".vp",
        :nativeFunc => ".nativeFunc",     :double => ".d",
        :castToDouble => ".castToDouble", :castToInt64 => ".castToInt64",
        :opcode => ".opcode",
    }[type]
    raise "Unsupported type" unless suffix
    suffix
end
+
+
# C loop rendering for scratch registers: the bare name, plus the typed
# union-member suffix for value access.
class SpecialRegister < NoChildren
    def clDump
        @name
    end
    def clValue(type=:int)
        @name + cloopMapType(type)
    end
end

# FP scratch register reserved for the C loop backend's internal use.
C_LOOP_SCRATCH_FPR = SpecialRegister.new("d6")
+
# Maps offlineasm register names to the C loop interpreter's variables.
class RegisterID
    def clDump
        case name
        # The cloop is modelled on the ARM implementation. Hence, the a0-a3
        # registers are aliases for r0-r3 i.e. t0-t3 in our case.
        when "t0", "a0"
            "t0"
        when "t1", "a1"
            "t1"
        when "t2", "a2"
            "t2"
        when "t3", "a3"
            "t3"
        when "t4"
            # t4 doubles as the interpreter's program counter.
            "pc"
        when "t5"
            "t5"
        when "t6"
            # t6 holds the bytecode base the pc is relative to.
            "pcBase"
        when "t7"
            "t7"
        when "csr1"
            "tagTypeNumber"
        when "csr2"
            "tagMask"
        when "cfr"
            "cfr"
        when "lr"
            "lr"
        when "sp"
            "sp"
        else
            raise "Bad register #{name} for C_LOOP at #{codeOriginString}"
        end
    end
    def clValue(type=:int)
        clDump + cloopMapType(type)
    end
end

# Maps offlineasm FP register names (ft0-ft5) onto the C loop's d0-d5.
class FPRegisterID
    def clDump
        case name
        when "ft0", "fr"
            "d0"
        when "ft1"
            "d1"
        when "ft2"
            "d2"
        when "ft3"
            "d3"
        when "ft4"
            "d4"
        when "ft5"
            "d5"
        else
            raise "Bad register #{name} for C_LOOP at #{codeOriginString}"
        end
    end
    def clValue(type=:int)
        clDump + cloopMapType(type)
    end
end
+
# C loop rendering for immediates: the value is wrapped in an explicit
# C cast matching the requested type.
class Immediate
    def clDump
        "#{value}"
    end
    def clValue(type=:int)
        # There is a case of a very large unsigned number (0x8000000000000000)
        # which we wish to encode. Unfortunately, the C/C++ compiler
        # complains if we express that number as a positive decimal integer.
        # Hence, for positive values, we just convert the number into hex form
        # to keep the compiler happy.
        #
        # However, for negative values, the to_s(16) hex conversion method does
        # not strip the "-" sign resulting in a meaningless "0x-..." valueStr.
        # To workaround this, we simply don't encode negative numbers as hex.

        valueStr = (value < 0) ? "#{value}" : "0x#{value.to_s(16)}"

        case type
        when :int8;   "int8_t(#{valueStr})"
        when :int32;  "int32_t(#{valueStr})"
        when :int64;  "int64_t(#{valueStr})"
        when :int;    "intptr_t(#{valueStr})"
        when :uint8;  "uint8_t(#{valueStr})"
        when :uint32; "uint32_t(#{valueStr})"
        when :uint64; "uint64_t(#{valueStr})"
        when :uint;   "uintptr_t(#{valueStr})"
        else
            raise "Not implemented immediate of type: #{type}"
        end
    end
end
+
# C loop rendering of base+offset addressing: a typed dereference of a
# byte-pointer expression.
class Address
    def clDump
        "[#{base.clDump}, #{offset.value}]"
    end
    # Renders this address as a memory reference of the requested type.
    def clValue(type=:int)
        case type
        when :int8;       int8MemRef
        when :int32;      int32MemRef
        when :int64;      int64MemRef
        when :int;        intMemRef
        when :uint8;      uint8MemRef
        when :uint32;     uint32MemRef
        when :uint64;     uint64MemRef
        when :uint;       uintMemRef
        when :opcode;     opcodeMemRef
        when :nativeFunc; nativeFuncMemRef
        else
            raise "Unexpected Address type: #{type}"
        end
    end
    # Byte-pointer arithmetic; negative offsets are emitted with "-" so the
    # generated C never contains "+ -N".
    def pointerExpr
        if offset.value == 0
            "#{base.clValue(:int8Ptr)}"
        elsif offset.value > 0
            "#{base.clValue(:int8Ptr)} + #{offset.value}"
        else
            "#{base.clValue(:int8Ptr)} - #{-offset.value}"
        end
    end
    def int8MemRef
        "*CAST<int8_t*>(#{pointerExpr})"
    end
    def int16MemRef
        "*CAST<int16_t*>(#{pointerExpr})"
    end
    def int32MemRef
        "*CAST<int32_t*>(#{pointerExpr})"
    end
    def int64MemRef
        "*CAST<int64_t*>(#{pointerExpr})"
    end
    def intMemRef
        "*CAST<intptr_t*>(#{pointerExpr})"
    end
    def uint8MemRef
        "*CAST<uint8_t*>(#{pointerExpr})"
    end
    def uint16MemRef
        "*CAST<uint16_t*>(#{pointerExpr})"
    end
    def uint32MemRef
        "*CAST<uint32_t*>(#{pointerExpr})"
    end
    def uint64MemRef
        "*CAST<uint64_t*>(#{pointerExpr})"
    end
    def uintMemRef
        "*CAST<uintptr_t*>(#{pointerExpr})"
    end
    def nativeFuncMemRef
        "*CAST<NativeFunction*>(#{pointerExpr})"
    end
    def opcodeMemRef
        "*CAST<Opcode*>(#{pointerExpr})"
    end
    def dblMemRef
        "*CAST<double*>(#{pointerExpr})"
    end
end
+
# C loop rendering of base + (index << scaleShift) + offset addressing.
class BaseIndex
    def clDump
        "[#{base.clDump}, #{offset.clDump}, #{index.clDump} << #{scaleShift}]"
    end
    # Renders this address as a memory reference of the requested type.
    def clValue(type=:int)
        case type
        when :int8;   int8MemRef
        when :int32;  int32MemRef
        when :int64;  int64MemRef
        when :int;    intMemRef
        when :uint8;  uint8MemRef
        when :uint32; uint32MemRef
        when :uint64; uint64MemRef
        when :uint;   uintMemRef
        when :opcode; opcodeMemRef
        else
            raise "Unexpected BaseIndex type: #{type}"
        end
    end
    # Byte-pointer arithmetic; the scale becomes a left shift of the index.
    def pointerExpr
        if offset.value == 0
            "#{base.clValue(:int8Ptr)} + (#{index.clValue} << #{scaleShift})"
        else
            "#{base.clValue(:int8Ptr)} + (#{index.clValue} << #{scaleShift}) + #{offset.clValue}"
        end
    end
    def int8MemRef
        "*CAST<int8_t*>(#{pointerExpr})"
    end
    def int16MemRef
        "*CAST<int16_t*>(#{pointerExpr})"
    end
    def int32MemRef
        "*CAST<int32_t*>(#{pointerExpr})"
    end
    def int64MemRef
        "*CAST<int64_t*>(#{pointerExpr})"
    end
    def intMemRef
        "*CAST<intptr_t*>(#{pointerExpr})"
    end
    def uint8MemRef
        "*CAST<uint8_t*>(#{pointerExpr})"
    end
    def uint16MemRef
        "*CAST<uint16_t*>(#{pointerExpr})"
    end
    def uint32MemRef
        "*CAST<uint32_t*>(#{pointerExpr})"
    end
    def uint64MemRef
        "*CAST<uint64_t*>(#{pointerExpr})"
    end
    def uintMemRef
        "*CAST<uintptr_t*>(#{pointerExpr})"
    end
    def opcodeMemRef
        "*CAST<Opcode*>(#{pointerExpr})"
    end
    def dblMemRef
        "*CAST<double*>(#{pointerExpr})"
    end
end
+
# C loop rendering for absolute addresses.
class AbsoluteAddress
    def clDump
        "#{codeOriginString}"
    end
    # Accepts an optional type like every other operand's clValue so generic
    # emitters that pass a type don't raise ArgumentError; the type doesn't
    # affect how an absolute address is rendered.
    def clValue(type=:int)
        clDump
    end
end
+
+
+#
+# Lea support.
+#
+
class Address
    # Lowers "lea": destination = base + offset (pure pointer arithmetic).
    # Uses += when the destination aliases the base.
    def cloopEmitLea(destination, type)
        if destination == base
            $asm.putc "#{destination.clValue(:int8Ptr)} += #{offset.clValue(type)};"
        else
            $asm.putc "#{destination.clValue(:int8Ptr)} = #{base.clValue(:int8Ptr)} + #{offset.clValue(type)};"
        end
    end
end

class BaseIndex
    # Lowers "lea" for base + (index << scaleShift); a nonzero offset is
    # not supported here and is rejected.
    def cloopEmitLea(destination, type)
        raise "Malformed BaseIndex, offset should be zero at #{codeOriginString}" unless offset.value == 0
        $asm.putc "#{destination.clValue(:int8Ptr)} = #{base.clValue(:int8Ptr)} + (#{index.clValue} << #{scaleShift});"
    end
end
+
+#
+# Actual lowering code follows.
+#
+
class Sequence
    # The C loop backend expects a fully lowered, flat sequence; this hook
    # just validates the list before emission and returns it unchanged.
    def getModifiedListC_LOOP
        myList = @list

        # Verify that we will only see instructions and labels.
        myList.each {
            | node |
            unless node.is_a? Instruction or
                    node.is_a? Label or
                    node.is_a? LocalLabel or
                    node.is_a? Skip
                raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
            end
        }

        return myList
    end
end
+
# Renders a comma-separated dump of instruction operands for debug output.
def clOperands(operands)
    operands.map { |operand| operand.clDump }.join(", ")
end
+
+
# Emits a binary op: 3 operands means "dest = a op b"; 2 operands means
# read-modify-write "dest = dest op a". 32-bit results in a register get
# their high word cleared to mimic 32-bit register semantics.
def cloopEmitOperation(operands, type, operator)
    raise unless type == :int || type == :uint || type == :int32 || type == :uint32 || \
        type == :int64 || type == :uint64 || type == :double
    if operands.size == 3
        $asm.putc "#{operands[2].clValue(type)} = #{operands[0].clValue(type)} #{operator} #{operands[1].clValue(type)};"
        if operands[2].is_a? RegisterID and (type == :int32 or type == :uint32)
            $asm.putc "#{operands[2].clDump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port.
        end
    else
        raise unless operands.size == 2
        raise unless not operands[1].is_a? Immediate
        $asm.putc "#{operands[1].clValue(type)} = #{operands[1].clValue(type)} #{operator} #{operands[0].clValue(type)};"
        if operands[1].is_a? RegisterID and (type == :int32 or type == :uint32)
            $asm.putc "#{operands[1].clDump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port.
        end
    end
end

# Like cloopEmitOperation but for shifts; the shift amount is masked with
# 0x1f for all types. NOTE(review): that caps :int64 shifts at 31 as well —
# looks intentional for the ports served, but confirm.
def cloopEmitShiftOperation(operands, type, operator)
    raise unless type == :int || type == :uint || type == :int32 || type == :uint32 || type == :int64 || type == :uint64
    if operands.size == 3
        $asm.putc "#{operands[2].clValue(type)} = #{operands[1].clValue(type)} #{operator} (#{operands[0].clValue(:int)} & 0x1f);"
        if operands[2].is_a? RegisterID and (type == :int32 or type == :uint32)
            $asm.putc "#{operands[2].clDump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port.
        end
    else
        raise unless operands.size == 2
        raise unless not operands[1].is_a? Immediate
        $asm.putc "#{operands[1].clValue(type)} = #{operands[1].clValue(type)} #{operator} (#{operands[0].clValue(:int)} & 0x1f);"
        if operands[1].is_a? RegisterID and (type == :int32 or type == :uint32)
            $asm.putc "#{operands[1].clDump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port.
        end
    end
end

# Emits an in-place unary op ("x = <operator>x") on a single non-immediate
# operand, with the same 32-bit high-word clearing as above.
def cloopEmitUnaryOperation(operands, type, operator)
    raise unless type == :int || type == :uint || type == :int32 || type == :uint32 || type == :int64 || type == :uint64
    raise unless operands.size == 1
    raise unless not operands[0].is_a? Immediate
    $asm.putc "#{operands[0].clValue(type)} = #{operator}#{operands[0].clValue(type)};"
    if operands[0].is_a? RegisterID and (type == :int32 or type == :uint32)
        $asm.putc "#{operands[0].clDump}.clearHighWord();" # Just clear it. It does nothing on the 32-bit port.
    end
end
+
# Emits a double comparison branch that also takes the branch when either
# operand is NaN (the "unordered or <condition>" flavor).
def cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, condition)
    $asm.putc "if (std::isnan(#{operands[0].clValue(:double)}) || std::isnan(#{operands[1].clValue(:double)})"
    $asm.putc "    || (#{operands[0].clValue(:double)} #{condition} #{operands[1].clValue(:double)}))"
    $asm.putc "        goto #{operands[2].cLabel};"
end
+
+
# Emits a C statement storing the boolean result of comparing the first two
# operands into the third.
def cloopEmitCompareAndSet(operands, type, comparator)
    # The result is a boolean. Hence, it doesn't need to be based on the type
    # of the arguments being compared.
    # (The old "op2 = ..." assignment embedded in the interpolation was dead:
    # the local was never read. Emit the comparison directly.)
    $asm.putc "#{operands[2].clValue} = (#{operands[0].clValue(type)} #{comparator} #{operands[1].clValue(type)});"
end
+
+
# Emits "if (a <comparator> b) goto label;" for the first two operands,
# branching to the label in operands[2].
def cloopEmitCompareAndBranch(operands, type, comparator)
    $asm.putc "if (#{operands[0].clValue(type)} #{comparator} #{operands[1].clValue(type)})"
    $asm.putc "    goto #{operands[2].cLabel};"
end
+
+
# conditionTest should contain a string that provides a comparator and a RHS
# value e.g. "< 0".
def cloopGenerateConditionExpression(operands, type, conditionTest)
    op1 = operands[0].clValue(type)

    # Two operands: test op1 directly against conditionTest.
    # Three operands: mask op1 with op2 first, then test.
    if operands.size == 2
        lhs = op1
    elsif operands.size == 3
        lhs = "(#{op1} & #{operands[1].clValue(type)})"
    else
        raise "Expected 2 or 3 operands but got #{operands.size} at #{codeOriginString}"
    end

    "#{lhs} #{conditionTest}"
end
+
# conditionTest should contain a string that provides a comparator and a RHS
# value e.g. "< 0".
def cloopEmitTestAndBranchIf(operands, type, conditionTest, branchTarget)
    conditionExpr = cloopGenerateConditionExpression(operands, type, conditionTest)
    $asm.putc "if (#{conditionExpr})"
    $asm.putc "    goto #{branchTarget};"
end

# Emits "<last operand> = (<condition>);" for the same condition shapes as
# cloopEmitTestAndBranchIf.
def cloopEmitTestSet(operands, type, conditionTest)
    # The result is a boolean condition. Hence, the result type is always an
    # int. The passed in type is only used for the values being tested in
    # the condition test.
    conditionExpr = cloopGenerateConditionExpression(operands, type, conditionTest)
    $asm.putc "#{operands[-1].clValue} = (#{conditionExpr});"
end
+
# Emits "op2 = op2 <operator> op1" and branches to operands[2] if the
# result satisfies conditionTest. The result is computed into a typed temp
# so the condition sees the post-operation value exactly once.
def cloopEmitOpAndBranch(operands, operator, type, conditionTest)
    case type
    when :int; tempType = "intptr_t"
    when :int32; tempType = "int32_t"
    when :int64; tempType = "int64_t"
    else
        raise "Unimplemented type"
    end

    op1 = operands[0].clValue(type)
    op2 = operands[1].clValue(type)

    $asm.putc "{"
    $asm.putc "    #{tempType} temp = #{op2} #{operator} #{op1};"
    $asm.putc "    #{op2} = temp;"
    $asm.putc "    if (temp #{conditionTest})"
    $asm.putc "        goto #{operands[2].cLabel};"
    $asm.putc "}"
end
+
# Emits declarations of `a` and `b` (indented: the caller has already opened
# a C block) and RETURNS the C expression string that is true when b + a
# overflows. Used by cloopEmitOpAndBranchIfOverflow.
def cloopAddOverflowTest(operands, type)
    case type
    when :int32
        tempType = "int32_t"
        signBit = "SIGN_BIT32"
    else
        raise "Unimplemented type"
    end

    $asm.putc "    #{tempType} a = #{operands[0].clValue(type)};"
    $asm.putc "    #{tempType} b = #{operands[1].clValue(type)};"
    $asm.putc "    // sign(b) sign(a) | Overflows if:"
    $asm.putc "    // 0 0 | sign(b+a) = 1 (pos + pos != neg)"
    $asm.putc "    // 0 1 | never"
    $asm.putc "    // 1 0 | never"
    $asm.putc "    // 1 1 | sign(b+a) = 0 (neg + neg != pos)"
    "((#{signBit}(b) == #{signBit}(a)) && (#{signBit}(b+a) != #{signBit}(a)))"
end
+
# Emits declarations of `a` and `b` (into the C block already opened by the
# caller) and RETURNS the C expression string that is true when b - a
# overflows. Used by cloopEmitOpAndBranchIfOverflow.
def cloopSubOverflowTest(operands, type)
    case type
    when :int32
        tempType = "int32_t"
        signBit = "SIGN_BIT32"
    else
        raise "Unimplemented type"
    end

    $asm.putc "    #{tempType} a = #{operands[0].clValue(type)};"
    $asm.putc "    #{tempType} b = #{operands[1].clValue(type)};"
    $asm.putc "    // sign(b) sign(a) | Overflows if:"
    $asm.putc "    // 0 0 | never"
    $asm.putc "    // 0 1 | sign(b-a) = 1 (pos - neg != pos)"
    $asm.putc "    // 1 0 | sign(b-a) = 0 (neg - pos != pos)"
    $asm.putc "    // 1 1 | never"
    "((#{signBit}(b) != #{signBit}(a)) && (#{signBit}(b-a) == #{signBit}(a)))"
end
+
# Emits declarations of `a` and `b` (into the caller's open C block) and
# RETURNS the C expression string used as the multiply-overflow test.
# NOTE(review): "(b | a) >> 15" is nonzero whenever either unsigned 32-bit
# operand has any bit set at position 15 or above, i.e. this is a
# conservative test (may report overflow for products that fit), not an
# exact 64-bit product check — presumably intentional; confirm against the
# JIT backends' baddio/bmulio semantics.
def cloopMulOverflowTest(operands, type)
    case type
    when :int32
        tempType = "uint32_t"
    else
        raise "Unimplemented type"
    end
    $asm.putc "    #{tempType} a = #{operands[0].clValue(type)};"
    $asm.putc "    #{tempType} b = #{operands[1].clValue(type)};"
    "((b | a) >> 15)"
end
+
# Emits a C block that evaluates the overflow test for the given operator,
# applies "operands[1] = operands[1] <operator> operands[0]", and branches
# to operands[2] if the (pre-computed) overflow test fired. The test must be
# evaluated before the operation so it sees the original operand values.
# Fixed: raise message typo "opeartor" -> "operator".
def cloopEmitOpAndBranchIfOverflow(operands, operator, type)
    $asm.putc "{"

    # Emit the overflow test based on the operands and the type:
    case operator
    when "+"; overflowTest = cloopAddOverflowTest(operands, type)
    when "-"; overflowTest = cloopSubOverflowTest(operands, type)
    when "*"; overflowTest = cloopMulOverflowTest(operands, type)
    else
        raise "Unimplemented operator"
    end

    $asm.putc "    bool didOverflow = #{overflowTest};"
    $asm.putc "    #{operands[1].clValue(type)} = #{operands[1].clValue(type)} #{operator} #{operands[0].clValue(type)};"
    $asm.putc "    if (didOverflow)"
    $asm.putc "        goto #{operands[2].cLabel};"
    $asm.putc "}"
end
+
# operands: callTarget, currentFrame, currentPC
# Emits a call to a slow-path function with the fixed (frame, pc) prototype,
# then decodes the SlowPathReturnType result into t0.vp / t1.vp.
def cloopEmitCallSlowPath(operands)
    $asm.putc "{"
    $asm.putc "    SlowPathReturnType result = #{operands[0].cLabel}(#{operands[1].clDump}, #{operands[2].clDump});"
    $asm.putc "    decodeResult(result, t0.vp, t1.vp);"
    $asm.putc "}"
end
+
# Same fixed (frame, pc) prototype as cloopEmitCallSlowPath, but the slow
# path returns void, so there is no result to decode.
def cloopEmitCallSlowPathVoid(operands)
    $asm.putc "#{operands[0].cLabel}(#{operands[1].clDump}, #{operands[2].clDump});"
end
+
class Instruction
    # Counter for generating unique "did return from JS" labels; bumped once
    # per lowered cloopCallJSFunction.
    @@didReturnFromJSLabelCounter = 0

    # Lowers this offlineasm instruction to C source for the C-loop (no-JIT)
    # interpreter backend, emitting lines through $asm. Opcodes not handled
    # here fall through to lowerDefault.
    def lowerC_LOOP
        $asm.codeOrigin codeOriginString if $enableCodeOriginComments
        $asm.annotation annotation if $enableInstrAnnotations && (opcode != "cloopDo")

        case opcode
        when "addi"
            cloopEmitOperation(operands, :int32, "+")
        when "addq"
            cloopEmitOperation(operands, :int64, "+")
        when "addp"
            cloopEmitOperation(operands, :int, "+")

        when "andi"
            cloopEmitOperation(operands, :int32, "&")
        when "andq"
            cloopEmitOperation(operands, :int64, "&")
        when "andp"
            cloopEmitOperation(operands, :int, "&")

        when "ori"
            cloopEmitOperation(operands, :int32, "|")
        when "orq"
            cloopEmitOperation(operands, :int64, "|")
        when "orp"
            cloopEmitOperation(operands, :int, "|")

        when "xori"
            cloopEmitOperation(operands, :int32, "^")
        when "xorq"
            cloopEmitOperation(operands, :int64, "^")
        when "xorp"
            cloopEmitOperation(operands, :int, "^")

        when "lshifti"
            cloopEmitShiftOperation(operands, :int32, "<<")
        when "lshiftq"
            cloopEmitShiftOperation(operands, :int64, "<<")
        when "lshiftp"
            cloopEmitShiftOperation(operands, :int, "<<")

        when "rshifti"
            cloopEmitShiftOperation(operands, :int32, ">>")
        when "rshiftq"
            cloopEmitShiftOperation(operands, :int64, ">>")
        when "rshiftp"
            cloopEmitShiftOperation(operands, :int, ">>")

        # Unsigned right shifts use the unsigned view of the operands so C's
        # ">>" is a logical (zero-filling) shift.
        when "urshifti"
            cloopEmitShiftOperation(operands, :uint32, ">>")
        when "urshiftq"
            cloopEmitShiftOperation(operands, :uint64, ">>")
        when "urshiftp"
            cloopEmitShiftOperation(operands, :uint, ">>")

        when "muli"
            cloopEmitOperation(operands, :int32, "*")
        when "mulq"
            cloopEmitOperation(operands, :int64, "*")
        when "mulp"
            cloopEmitOperation(operands, :int, "*")

        when "subi"
            cloopEmitOperation(operands, :int32, "-")
        when "subq"
            cloopEmitOperation(operands, :int64, "-")
        when "subp"
            cloopEmitOperation(operands, :int, "-")

        when "negi"
            cloopEmitUnaryOperation(operands, :int32, "-")
        when "negq"
            cloopEmitUnaryOperation(operands, :int64, "-")
        when "negp"
            cloopEmitUnaryOperation(operands, :int, "-")

        # NOTE(review): "!" is C logical-not; RISC backends lower noti as a
        # bitwise-not (mvn) — confirm the intended semantics here.
        when "noti"
            cloopEmitUnaryOperation(operands, :int32, "!")

        when "loadi"
            $asm.putc "#{operands[1].clValue(:uint)} = #{operands[0].uint32MemRef};"
            # There's no need to call clearHighWord() here because the above will
            # automatically take care of 0 extension.
        when "loadis"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int32MemRef};"
        when "loadq"
            $asm.putc "#{operands[1].clValue(:int64)} = #{operands[0].int64MemRef};"
        when "loadp"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].intMemRef};"
        when "storei"
            $asm.putc "#{operands[1].int32MemRef} = #{operands[0].clValue(:int32)};"
        when "storeq"
            $asm.putc "#{operands[1].int64MemRef} = #{operands[0].clValue(:int64)};"
        when "storep"
            $asm.putc "#{operands[1].intMemRef} = #{operands[0].clValue(:int)};"
        when "loadb"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].uint8MemRef};"
        when "loadbs"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int8MemRef};"
        when "storeb"
            $asm.putc "#{operands[1].uint8MemRef} = #{operands[0].clValue(:int8)};"
        when "loadh"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].uint16MemRef};"
        when "loadhs"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int16MemRef};"
        # NOTE(review): the leading "*" is inconsistent with storeb/storei,
        # which assign through the MemRef alone — confirm what uint16MemRef
        # expands to before changing anything.
        when "storeh"
            $asm.putc "*#{operands[1].uint16MemRef} = #{operands[0].clValue(:int16)};"
        when "loadd"
            $asm.putc "#{operands[1].clValue(:double)} = #{operands[0].dblMemRef};"
        when "stored"
            $asm.putc "#{operands[1].dblMemRef} = #{operands[0].clValue(:double)};"

        when "addd"
            cloopEmitOperation(operands, :double, "+")
        when "divd"
            cloopEmitOperation(operands, :double, "/")
        when "subd"
            cloopEmitOperation(operands, :double, "-")
        when "muld"
            cloopEmitOperation(operands, :double, "*")

        # Convert an int value to its double equivalent, and store it in a double register.
        when "ci2d"
            $asm.putc "#{operands[1].clValue(:double)} = #{operands[0].clValue(:int32)};"

        when "bdeq"
            cloopEmitCompareAndBranch(operands, :double, "==")
        when "bdneq"
            cloopEmitCompareAndBranch(operands, :double, "!=")
        when "bdgt"
            cloopEmitCompareAndBranch(operands, :double, ">");
        when "bdgteq"
            cloopEmitCompareAndBranch(operands, :double, ">=");
        when "bdlt"
            cloopEmitCompareAndBranch(operands, :double, "<");
        when "bdlteq"
            cloopEmitCompareAndBranch(operands, :double, "<=");

        # "un" variants also take the branch when either operand is NaN.
        when "bdequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "==")
        when "bdnequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "!=")
        when "bdgtun"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, ">")
        when "bdgtequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, ">=")
        when "bdltun"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "<")
        when "bdltequn"
            cloopEmitCompareDoubleWithNaNCheckAndBranch(operands, "<=")

        # Truncate a double to an int (C truncation semantics), zeroing the
        # destination's high word.
        when "td2i"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:double)};"
            $asm.putc "#{operands[1].clDump}.clearHighWord();"

        when "bcd2i"  # operands: srcDbl dstInt slowPath
            $asm.putc "{"
            $asm.putc "    double d = #{operands[0].clValue(:double)};"
            $asm.putc "    const int32_t asInt32 = int32_t(d);"
            $asm.putc "    if (asInt32 != d || (!asInt32 && std::signbit(d))) // true for -0.0"
            $asm.putc "        goto #{operands[2].cLabel};"
            $asm.putc "    #{operands[1].clValue} = asInt32;"
            $asm.putc "    #{operands[1].clDump}.clearHighWord();"
            $asm.putc "}"

        when "move"
            $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].clValue(:int)};"
        when "sxi2q"
            $asm.putc "#{operands[1].clValue(:int64)} = #{operands[0].clValue(:int32)};"
        when "zxi2q"
            $asm.putc "#{operands[1].clValue(:uint64)} = #{operands[0].clValue(:uint32)};"
        when "nop"
            $asm.putc "// nop"
        when "bbeq"
            cloopEmitCompareAndBranch(operands, :int8, "==")
        when "bieq"
            cloopEmitCompareAndBranch(operands, :int32, "==")
        when "bqeq"
            cloopEmitCompareAndBranch(operands, :int64, "==")
        when "bpeq"
            cloopEmitCompareAndBranch(operands, :int, "==")

        when "bbneq"
            cloopEmitCompareAndBranch(operands, :int8, "!=")
        when "bineq"
            cloopEmitCompareAndBranch(operands, :int32, "!=")
        when "bqneq"
            cloopEmitCompareAndBranch(operands, :int64, "!=")
        when "bpneq"
            cloopEmitCompareAndBranch(operands, :int, "!=")

        # "a"/"b" suffixes are unsigned above/below comparisons.
        when "bba"
            cloopEmitCompareAndBranch(operands, :uint8, ">")
        when "bia"
            cloopEmitCompareAndBranch(operands, :uint32, ">")
        when "bqa"
            cloopEmitCompareAndBranch(operands, :uint64, ">")
        when "bpa"
            cloopEmitCompareAndBranch(operands, :uint, ">")

        when "bbaeq"
            cloopEmitCompareAndBranch(operands, :uint8, ">=")
        when "biaeq"
            cloopEmitCompareAndBranch(operands, :uint32, ">=")
        when "bqaeq"
            cloopEmitCompareAndBranch(operands, :uint64, ">=")
        when "bpaeq"
            cloopEmitCompareAndBranch(operands, :uint, ">=")

        when "bbb"
            cloopEmitCompareAndBranch(operands, :uint8, "<")
        when "bib"
            cloopEmitCompareAndBranch(operands, :uint32, "<")
        when "bqb"
            cloopEmitCompareAndBranch(operands, :uint64, "<")
        when "bpb"
            cloopEmitCompareAndBranch(operands, :uint, "<")

        when "bbbeq"
            cloopEmitCompareAndBranch(operands, :uint8, "<=")
        when "bibeq"
            cloopEmitCompareAndBranch(operands, :uint32, "<=")
        when "bqbeq"
            cloopEmitCompareAndBranch(operands, :uint64, "<=")
        when "bpbeq"
            cloopEmitCompareAndBranch(operands, :uint, "<=")

        when "bbgt"
            cloopEmitCompareAndBranch(operands, :int8, ">")
        when "bigt"
            cloopEmitCompareAndBranch(operands, :int32, ">")
        when "bqgt"
            cloopEmitCompareAndBranch(operands, :int64, ">")
        when "bpgt"
            cloopEmitCompareAndBranch(operands, :int, ">")

        when "bbgteq"
            cloopEmitCompareAndBranch(operands, :int8, ">=")
        when "bigteq"
            cloopEmitCompareAndBranch(operands, :int32, ">=")
        when "bqgteq"
            cloopEmitCompareAndBranch(operands, :int64, ">=")
        when "bpgteq"
            cloopEmitCompareAndBranch(operands, :int, ">=")

        when "bblt"
            cloopEmitCompareAndBranch(operands, :int8, "<")
        when "bilt"
            cloopEmitCompareAndBranch(operands, :int32, "<")
        when "bqlt"
            cloopEmitCompareAndBranch(operands, :int64, "<")
        when "bplt"
            cloopEmitCompareAndBranch(operands, :int, "<")

        when "bblteq"
            cloopEmitCompareAndBranch(operands, :int8, "<=")
        when "bilteq"
            cloopEmitCompareAndBranch(operands, :int32, "<=")
        when "bqlteq"
            cloopEmitCompareAndBranch(operands, :int64, "<=")
        when "bplteq"
            cloopEmitCompareAndBranch(operands, :int, "<=")

        when "btbz"
            cloopEmitTestAndBranchIf(operands, :int8, "== 0", operands[-1].cLabel)
        when "btiz"
            cloopEmitTestAndBranchIf(operands, :int32, "== 0", operands[-1].cLabel)
        when "btqz"
            cloopEmitTestAndBranchIf(operands, :int64, "== 0", operands[-1].cLabel)
        when "btpz"
            cloopEmitTestAndBranchIf(operands, :int, "== 0", operands[-1].cLabel)

        when "btbnz"
            cloopEmitTestAndBranchIf(operands, :int8, "!= 0", operands[-1].cLabel)
        when "btinz"
            cloopEmitTestAndBranchIf(operands, :int32, "!= 0", operands[-1].cLabel)
        when "btqnz"
            cloopEmitTestAndBranchIf(operands, :int64, "!= 0", operands[-1].cLabel)
        when "btpnz"
            cloopEmitTestAndBranchIf(operands, :int, "!= 0", operands[-1].cLabel)

        when "btbs"
            cloopEmitTestAndBranchIf(operands, :int8, "< 0", operands[-1].cLabel)
        when "btis"
            cloopEmitTestAndBranchIf(operands, :int32, "< 0", operands[-1].cLabel)
        when "btqs"
            cloopEmitTestAndBranchIf(operands, :int64, "< 0", operands[-1].cLabel)
        when "btps"
            cloopEmitTestAndBranchIf(operands, :int, "< 0", operands[-1].cLabel)

        # For jmp, we do not want to assume that we have COMPUTED_GOTO support.
        # Fortunately, the only times we should ever encounter indirect jmps is
        # when the jmp target is a CLoop opcode (by design).
        #
        # Hence, we check if the jmp target is a known label reference. If so,
        # we can emit a goto directly. If it is not a known target, then we set
        # the target in the opcode, and dispatch to it via whatever dispatch
        # mechanism is in use.
        when "jmp"
            if operands[0].is_a? LocalLabelReference or operands[0].is_a? LabelReference
                # Handles jumps to local or global labels.
                $asm.putc "goto #{operands[0].cLabel};"
            else
                # Handles jumps to some computed target.
                # NOTE: must be an opcode handler or a llint glue helper.
                $asm.putc "opcode = #{operands[0].clValue(:opcode)};"
                $asm.putc "DISPATCH_OPCODE();"
            end

        when "call"
            $asm.putc "CRASH(); // generic call instruction not supported by design!"
        when "break"
            $asm.putc "CRASH(); // break instruction not implemented."
        when "ret"
            # Return = dispatch to the opcode saved in the link register.
            $asm.putc "opcode = lr.opcode;"
            $asm.putc "DISPATCH_OPCODE();"

        when "cbeq"
            cloopEmitCompareAndSet(operands, :uint8, "==")
        when "cieq"
            cloopEmitCompareAndSet(operands, :uint32, "==")
        when "cqeq"
            cloopEmitCompareAndSet(operands, :uint64, "==")
        when "cpeq"
            cloopEmitCompareAndSet(operands, :uint, "==")

        when "cbneq"
            cloopEmitCompareAndSet(operands, :uint8, "!=")
        when "cineq"
            cloopEmitCompareAndSet(operands, :uint32, "!=")
        when "cqneq"
            cloopEmitCompareAndSet(operands, :uint64, "!=")
        when "cpneq"
            cloopEmitCompareAndSet(operands, :uint, "!=")

        when "cba"
            cloopEmitCompareAndSet(operands, :uint8, ">")
        when "cia"
            cloopEmitCompareAndSet(operands, :uint32, ">")
        when "cqa"
            cloopEmitCompareAndSet(operands, :uint64, ">")
        when "cpa"
            cloopEmitCompareAndSet(operands, :uint, ">")

        when "cbaeq"
            cloopEmitCompareAndSet(operands, :uint8, ">=")
        when "ciaeq"
            cloopEmitCompareAndSet(operands, :uint32, ">=")
        when "cqaeq"
            cloopEmitCompareAndSet(operands, :uint64, ">=")
        when "cpaeq"
            cloopEmitCompareAndSet(operands, :uint, ">=")

        when "cbb"
            cloopEmitCompareAndSet(operands, :uint8, "<")
        when "cib"
            cloopEmitCompareAndSet(operands, :uint32, "<")
        when "cqb"
            cloopEmitCompareAndSet(operands, :uint64, "<")
        when "cpb"
            cloopEmitCompareAndSet(operands, :uint, "<")

        when "cbbeq"
            cloopEmitCompareAndSet(operands, :uint8, "<=")
        when "cibeq"
            cloopEmitCompareAndSet(operands, :uint32, "<=")
        when "cqbeq"
            cloopEmitCompareAndSet(operands, :uint64, "<=")
        when "cpbeq"
            cloopEmitCompareAndSet(operands, :uint, "<=")

        when "cbgt"
            cloopEmitCompareAndSet(operands, :int8, ">")
        when "cigt"
            cloopEmitCompareAndSet(operands, :int32, ">")
        when "cqgt"
            cloopEmitCompareAndSet(operands, :int64, ">")
        when "cpgt"
            cloopEmitCompareAndSet(operands, :int, ">")

        when "cbgteq"
            cloopEmitCompareAndSet(operands, :int8, ">=")
        when "cigteq"
            cloopEmitCompareAndSet(operands, :int32, ">=")
        when "cqgteq"
            cloopEmitCompareAndSet(operands, :int64, ">=")
        when "cpgteq"
            cloopEmitCompareAndSet(operands, :int, ">=")

        when "cblt"
            cloopEmitCompareAndSet(operands, :int8, "<")
        when "cilt"
            cloopEmitCompareAndSet(operands, :int32, "<")
        when "cqlt"
            cloopEmitCompareAndSet(operands, :int64, "<")
        when "cplt"
            cloopEmitCompareAndSet(operands, :int, "<")

        when "cblteq"
            cloopEmitCompareAndSet(operands, :int8, "<=")
        when "cilteq"
            cloopEmitCompareAndSet(operands, :int32, "<=")
        when "cqlteq"
            cloopEmitCompareAndSet(operands, :int64, "<=")
        when "cplteq"
            cloopEmitCompareAndSet(operands, :int, "<=")

        when "tbs"
            cloopEmitTestSet(operands, :int8, "< 0")
        when "tis"
            cloopEmitTestSet(operands, :int32, "< 0")
        when "tqs"
            cloopEmitTestSet(operands, :int64, "< 0")
        when "tps"
            cloopEmitTestSet(operands, :int, "< 0")

        when "tbz"
            cloopEmitTestSet(operands, :int8, "== 0")
        when "tiz"
            cloopEmitTestSet(operands, :int32, "== 0")
        when "tqz"
            cloopEmitTestSet(operands, :int64, "== 0")
        when "tpz"
            cloopEmitTestSet(operands, :int, "== 0")

        when "tbnz"
            cloopEmitTestSet(operands, :int8, "!= 0")
        when "tinz"
            cloopEmitTestSet(operands, :int32, "!= 0")
        when "tqnz"
            cloopEmitTestSet(operands, :int64, "!= 0")
        when "tpnz"
            cloopEmitTestSet(operands, :int, "!= 0")

        # 64-bit instruction: cdqi (based on X64)
        # Sign extends the lower 32 bits of t0, but puts the sign extension into
        # the lower 32 bits of t1. Leave the upper 32 bits of t0 and t1 unchanged.
        when "cdqi"
            $asm.putc "{"
            $asm.putc "    int64_t temp = t0.i32; // sign extend the low 32bit"
            $asm.putc "    t0.i32 = temp; // low word"
            $asm.putc "    t0.clearHighWord();"
            $asm.putc "    t1.i32 = uint64_t(temp) >> 32; // high word"
            $asm.putc "    t1.clearHighWord();"
            $asm.putc "}"

        # 64-bit instruction: idivi op1 (based on X64)
        # Divide a 64-bit integer numerator by the specified denominator.
        # The numerator is specified in t0 and t1 as follows:
        #     1. low 32 bits of the numerator is in the low 32 bits of t0.
        #     2. high 32 bits of the numerator is in the low 32 bits of t1.
        #
        # The resultant quotient is a signed 32-bit int, and is to be stored
        # in the lower 32 bits of t0.
        # The resultant remainder is a signed 32-bit int, and is to be stored
        # in the lower 32 bits of t1.
        when "idivi"
            # Divide t1,t0 (EDX,EAX) by the specified arg, and store the remainder in t1,
            # and quotient in t0:
            $asm.putc "{"
            $asm.putc "    int64_t dividend = (int64_t(t1.u32) << 32) | t0.u32;"
            $asm.putc "    int64_t divisor = #{operands[0].clValue(:int)};"
            $asm.putc "    t1.i32 = dividend % divisor; // remainder"
            $asm.putc "    t1.clearHighWord();"
            $asm.putc "    t0.i32 = dividend / divisor; // quotient"
            $asm.putc "    t0.clearHighWord();"
            $asm.putc "}"

        # 32-bit instruction: fii2d int32LoOp int32HiOp dblOp (based on ARMv7)
        # Decode 2 32-bit ints (low and high) into a 64-bit double.
        when "fii2d"
            $asm.putc "#{operands[2].clValue(:double)} = Ints2Double(#{operands[0].clValue(:uint32)}, #{operands[1].clValue(:uint32)});"

        # 32-bit instruction: fd2ii dblOp int32LoOp int32HiOp (based on ARMv7)
        # Encode a 64-bit double into 2 32-bit ints (low and high).
        when "fd2ii"
            $asm.putc "Double2Ints(#{operands[0].clValue(:double)}, #{operands[1].clValue(:uint32)}, #{operands[2].clValue(:uint32)});"

        # 64-bit instruction: fq2d int64Op dblOp (based on X64)
        # Copy a bit-encoded double in a 64-bit int register to a double register.
        when "fq2d"
            $asm.putc "#{operands[1].clValue(:double)} = #{operands[0].clValue(:castToDouble)};"

        # 64-bit instruction: fd2q dblOp int64Op (based on X64 instruction set)
        # Copy a double as a bit-encoded double into a 64-bit int register.
        when "fd2q"
            $asm.putc "#{operands[1].clValue(:int64)} = #{operands[0].clValue(:castToInt64)};"

        when "leai"
            operands[0].cloopEmitLea(operands[1], :int32)
        when "leap"
            operands[0].cloopEmitLea(operands[1], :int)

        when "baddio"
            cloopEmitOpAndBranchIfOverflow(operands, "+", :int32)
        when "bsubio"
            cloopEmitOpAndBranchIfOverflow(operands, "-", :int32)
        when "bmulio"
            cloopEmitOpAndBranchIfOverflow(operands, "*", :int32)

        when "baddis"
            cloopEmitOpAndBranch(operands, "+", :int32, "< 0")
        when "baddiz"
            cloopEmitOpAndBranch(operands, "+", :int32, "== 0")
        when "baddinz"
            cloopEmitOpAndBranch(operands, "+", :int32, "!= 0")

        when "baddqs"
            cloopEmitOpAndBranch(operands, "+", :int64, "< 0")
        when "baddqz"
            cloopEmitOpAndBranch(operands, "+", :int64, "== 0")
        when "baddqnz"
            cloopEmitOpAndBranch(operands, "+", :int64, "!= 0")

        when "baddps"
            cloopEmitOpAndBranch(operands, "+", :int, "< 0")
        when "baddpz"
            cloopEmitOpAndBranch(operands, "+", :int, "== 0")
        when "baddpnz"
            cloopEmitOpAndBranch(operands, "+", :int, "!= 0")

        when "bsubis"
            cloopEmitOpAndBranch(operands, "-", :int32, "< 0")
        when "bsubiz"
            cloopEmitOpAndBranch(operands, "-", :int32, "== 0")
        when "bsubinz"
            cloopEmitOpAndBranch(operands, "-", :int32, "!= 0")

        when "borris"
            cloopEmitOpAndBranch(operands, "|", :int32, "< 0")
        when "borriz"
            cloopEmitOpAndBranch(operands, "|", :int32, "== 0")
        when "borrinz"
            cloopEmitOpAndBranch(operands, "|", :int32, "!= 0")

        # memfence lowers to nothing in the C loop backend (no code emitted).
        when "memfence"

        when "push"
            operands.each {
                | op |
                $asm.putc "PUSH(#{op.clDump});"
            }
        when "pop"
            operands.each {
                | op |
                $asm.putc "POP(#{op.clDump});"
            }


        # A convenience and compact call to crash because we don't want to use
        # the generic llint crash mechanism which relies on the availability
        # of the call instruction (which cannot be implemented in a generic
        # way, and can be abused if we made it just work for this special case).
        # Using a special cloopCrash instruction is cleaner.
        when "cloopCrash"
            $asm.putc "CRASH();"

        # We can't rely on the llint JS call mechanism which actually makes
        # use of the call instruction. Instead, we just implement JS calls
        # as an opcode dispatch.
        when "cloopCallJSFunction"
            @@didReturnFromJSLabelCounter += 1
            $asm.putc "lr.opcode = getOpcode(llint_cloop_did_return_from_js_#{@@didReturnFromJSLabelCounter});"
            $asm.putc "opcode = #{operands[0].clValue(:opcode)};"
            $asm.putc "DISPATCH_OPCODE();"
            $asm.putsLabel("llint_cloop_did_return_from_js_#{@@didReturnFromJSLabelCounter}", false)

        # We can't do generic function calls with an arbitrary set of args, but
        # fortunately we don't have to here. All native function calls always
        # have a fixed prototype of 1 arg: the passed ExecState.
        when "cloopCallNative"
            $asm.putc "nativeFunc = #{operands[0].clValue(:nativeFunc)};"
            $asm.putc "functionReturnValue = JSValue::decode(nativeFunc(t0.execState));"
            $asm.putc "#if USE(JSVALUE32_64)"
            $asm.putc "    t1.i = functionReturnValue.tag();"
            $asm.putc "    t0.i = functionReturnValue.payload();"
            $asm.putc "#else // USE_JSVALUE64)"
            $asm.putc "    t0.encodedJSValue = JSValue::encode(functionReturnValue);"
            $asm.putc "#endif // USE_JSVALUE64)"

        # We can't do generic function calls with an arbitrary set of args, but
        # fortunately we don't have to here. All slow path function calls always
        # have a fixed prototype too. See cloopEmitCallSlowPath() for details.
        when "cloopCallSlowPath"
            cloopEmitCallSlowPath(operands)

        when "cloopCallSlowPathVoid"
            cloopEmitCallSlowPathVoid(operands)

        # For debugging only. This is used to insert instrumentation into the
        # generated LLIntAssembly.h during llint development only. Do not use
        # for production code.
        when "cloopDo"
            $asm.putc "#{annotation}"

        else
            lowerDefault
        end
    end
end
diff --git a/Source/JavaScriptCore/offlineasm/config.rb b/Source/JavaScriptCore/offlineasm/config.rb
new file mode 100644
index 000000000..4c86eeceb
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/config.rb
@@ -0,0 +1,57 @@
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+$preferredCommentStartColumn = 60
+
+
+# Turns on dumping of the count of labels.
+# For example, the output will look like this:
+#
+# ...
+# OFFLINE_ASM_LOCAL_LABEL(_offlineasm_4_functionArityCheck__continue) // Local Label 24 .
+# ...
+# OFFLINE_ASM_GLOBAL_LABEL(llint_op_enter) // Global Label 8 .
+# ...
+#
+$enableLabelCountComments = false
+
+# Turns on dumping of source file and line numbers in the output file.
+# For example, the output will look like this:
+#
+# ...
+# "\tmovq -8(%r13), %rcx\n" // JavaScriptCore/llint/LowLevelInterpreter64.asm:185
+# "\tmovl 52(%rcx), %ecx\n" // JavaScriptCore/llint/LowLevelInterpreter64.asm:186
+# ...
+#
+$enableCodeOriginComments = true
+
+# Turns on recording and dumping of annotations in the generated output file.
+# An annotations can be specified for each instruction in the source asm files.
+# For example, the output will look like this:
+#
+# ...
+# "\tmovq -8(%r13), %rcx\n" // t2<CodeBlock> = cfr.CodeBlock
+# "\tmovl 52(%rcx), %ecx\n" // t2<size_t> = t2<CodeBlock>.m_numVars
+# ...
+#
+$enableInstrAnnotations = false
diff --git a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
new file mode 100644
index 000000000..fc4579c17
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
@@ -0,0 +1,154 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
# Make this script's directory requirable so the sibling offlineasm modules
# can be loaded by bare name.
$: << File.dirname(__FILE__)

require "config"
require "backends"
require "digest/sha1"
require "offsets"
require "parser"
require "self_hash"
require "settings"
require "transform"

IncludeFile.processIncludeOptions()

# Command line: <input asm file> <output file> [comma-separated backend list]
inputFlnm = ARGV.shift
outputFlnm = ARGV.shift

# Optionally restrict generation to a subset of backends.
validBackends = ARGV.shift
if validBackends
    $stderr.puts "Only dealing with backends: #{validBackends}"
    includeOnlyBackends(validBackends.split(","))
end

$stderr.puts "offlineasm: Parsing #{inputFlnm} and creating offset extractor #{outputFlnm}."
+
# Writes each offset magic number to $output as an "unsigned(N)," table
# entry, in the order they appear in OFFSET_MAGIC_NUMBERS.
def emitMagicNumber
    OFFSET_MAGIC_NUMBERS.each do |magic|
        $output.puts "unsigned(#{magic}),"
    end
end
+
# The first line of the generated file records a hash of the input asm and of
# this tool itself. If the existing output already starts with the same hash,
# nothing changed and we can skip regeneration.
inputHash = "// offlineasm input hash: #{parseHash(inputFlnm)} #{selfHash}"

if FileTest.exist? outputFlnm
    File.open(outputFlnm, "r") {
        | inp |
        firstLine = inp.gets
        if firstLine and firstLine.chomp == inputHash
            $stderr.puts "offlineasm: Nothing changed."
            exit 0
        end
    }
end
+
originalAST = parse(inputFlnm)

#
# Optimize the AST to make configuration extraction faster. This reduces the AST to a form
# that only contains the things that matter for our purposes: offsets, sizes, and if
# statements.
#

class Node
    # Default pruning: recurse into children. Node kinds that matter
    # (StructOffset, Sizeof, IfThenElse) override offsetsPruneTo to append
    # themselves (or a pruned copy) to the sequence instead.
    def offsetsPruneTo(sequence)
        children.each {
            | child |
            child.offsetsPruneTo(sequence)
        }
    end

    # Returns a new Sequence containing only the pruned offset/size/if nodes
    # reachable from this node.
    def offsetsPrune
        result = Sequence.new(codeOrigin, [])
        offsetsPruneTo(result)
        result
    end
end
+
class IfThenElse
    # Keep the if-statement structure (the predicate decides which offsets
    # apply per configuration), but prune both branches recursively.
    def offsetsPruneTo(sequence)
        ifThenElse = IfThenElse.new(codeOrigin, predicate, thenCase.offsetsPrune)
        ifThenElse.elseCase = elseCase.offsetsPrune
        sequence.list << ifThenElse
    end
end
+
# A StructOffset node is one of the leaves the offset extractor cares about:
# pruning keeps it by appending it to the pruned sequence as-is.
class StructOffset
    def offsetsPruneTo(sequence)
        sequence.list.push(self)
    end
end
+
class Sizeof
    # A Sizeof node is the other leaf kind the extractor cares about: pruning
    # keeps it by appending it to the pruned sequence as-is.
    def offsetsPruneTo(sequence)
        sequence.list << self
    end
end
+
prunedAST = originalAST.offsetsPrune

File.open(outputFlnm, "w") {
    | outp |
    $output = outp
    outp.puts inputHash
    # First pass: compute the total table length across all configurations so
    # the C array can be declared with an exact size.
    length = 0
    emitCodeInAllConfigurations(prunedAST) {
        | settings, ast, backend, index |
        offsetsList = ast.filter(StructOffset).uniq.sort
        sizesList = ast.filter(Sizeof).uniq.sort
        length += OFFSET_HEADER_MAGIC_NUMBERS.size + (OFFSET_MAGIC_NUMBERS.size + 1) * (1 + offsetsList.size + sizesList.size)
    }
    # Second pass: emit the table. Each configuration section is introduced
    # by header magic numbers and its configuration index; every entry is
    # preceded by the magic-number separator so the reader can resynchronize.
    outp.puts "static const unsigned extractorTable[#{length}] = {"
    emitCodeInAllConfigurations(prunedAST) {
        | settings, ast, backend, index |
        OFFSET_HEADER_MAGIC_NUMBERS.each {
            | number |
            $output.puts "unsigned(#{number}),"
        }

        offsetsList = ast.filter(StructOffset).uniq.sort
        sizesList = ast.filter(Sizeof).uniq.sort

        emitMagicNumber
        outp.puts "#{index},"
        offsetsList.each {
            | offset |
            emitMagicNumber
            outp.puts "OFFLINE_ASM_OFFSETOF(#{offset.struct}, #{offset.field}),"
        }
        sizesList.each {
            | offset |
            emitMagicNumber
            outp.puts "sizeof(#{offset.struct}),"
        }
    }
    outp.puts "};"
}

$stderr.puts "offlineasm: offset extractor #{outputFlnm} successfully generated."
+
diff --git a/Source/JavaScriptCore/offlineasm/instructions.rb b/Source/JavaScriptCore/offlineasm/instructions.rb
new file mode 100644
index 000000000..1d0d8676a
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/instructions.rb
@@ -0,0 +1,337 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "set"
+
+# Interesting invariant, which we take advantage of: branching instructions
+# always begin with "b", and no non-branching instructions begin with "b".
+# Terminal instructions are "jmp" and "ret".
+
+MACRO_INSTRUCTIONS =
+ [
+ "emit",
+ "addi",
+ "andi",
+ "lshifti",
+ "lshiftp",
+ "lshiftq",
+ "muli",
+ "negi",
+ "negp",
+ "negq",
+ "noti",
+ "ori",
+ "rshifti",
+ "urshifti",
+ "rshiftp",
+ "urshiftp",
+ "rshiftq",
+ "urshiftq",
+ "subi",
+ "xori",
+ "loadi",
+ "loadis",
+ "loadb",
+ "loadbs",
+ "loadh",
+ "loadhs",
+ "storei",
+ "storeb",
+ "loadd",
+ "moved",
+ "stored",
+ "addd",
+ "divd",
+ "subd",
+ "muld",
+ "sqrtd",
+ "ci2d",
+ "fii2d", # usage: fii2d <gpr with least significant bits>, <gpr with most significant bits>, <fpr>
+ "fd2ii", # usage: fd2ii <fpr>, <gpr with least significant bits>, <gpr with most significant bits>
+ "fq2d",
+ "fd2q",
+ "bdeq",
+ "bdneq",
+ "bdgt",
+ "bdgteq",
+ "bdlt",
+ "bdlteq",
+ "bdequn",
+ "bdnequn",
+ "bdgtun",
+ "bdgtequn",
+ "bdltun",
+ "bdltequn",
+ "btd2i",
+ "td2i",
+ "bcd2i",
+ "movdz",
+ "pop",
+ "push",
+ "move",
+ "sxi2q",
+ "zxi2q",
+ "nop",
+ "bieq",
+ "bineq",
+ "bia",
+ "biaeq",
+ "bib",
+ "bibeq",
+ "bigt",
+ "bigteq",
+ "bilt",
+ "bilteq",
+ "bbeq",
+ "bbneq",
+ "bba",
+ "bbaeq",
+ "bbb",
+ "bbbeq",
+ "bbgt",
+ "bbgteq",
+ "bblt",
+ "bblteq",
+ "btis",
+ "btiz",
+ "btinz",
+ "btbs",
+ "btbz",
+ "btbnz",
+ "jmp",
+ "baddio",
+ "baddis",
+ "baddiz",
+ "baddinz",
+ "bsubio",
+ "bsubis",
+ "bsubiz",
+ "bsubinz",
+ "bmulio",
+ "bmulis",
+ "bmuliz",
+ "bmulinz",
+ "borio",
+ "boris",
+ "boriz",
+ "borinz",
+ "break",
+ "call",
+ "ret",
+ "cbeq",
+ "cbneq",
+ "cba",
+ "cbaeq",
+ "cbb",
+ "cbbeq",
+ "cbgt",
+ "cbgteq",
+ "cblt",
+ "cblteq",
+ "cieq",
+ "cineq",
+ "cia",
+ "ciaeq",
+ "cib",
+ "cibeq",
+ "cigt",
+ "cigteq",
+ "cilt",
+ "cilteq",
+ "tis",
+ "tiz",
+ "tinz",
+ "tbs",
+ "tbz",
+ "tbnz",
+ "tps",
+ "tpz",
+ "tpnz",
+ "peek",
+ "poke",
+ "bpeq",
+ "bpneq",
+ "bpa",
+ "bpaeq",
+ "bpb",
+ "bpbeq",
+ "bpgt",
+ "bpgteq",
+ "bplt",
+ "bplteq",
+ "addp",
+ "mulp",
+ "andp",
+ "orp",
+ "subp",
+ "xorp",
+ "loadp",
+ "cpeq",
+ "cpneq",
+ "cpa",
+ "cpaeq",
+ "cpb",
+ "cpbeq",
+ "cpgt",
+ "cpgteq",
+ "cplt",
+ "cplteq",
+ "storep",
+ "btps",
+ "btpz",
+ "btpnz",
+ "baddpo",
+ "baddps",
+ "baddpz",
+ "baddpnz",
+ "tqs",
+ "tqz",
+ "tqnz",
+ "bqeq",
+ "bqneq",
+ "bqa",
+ "bqaeq",
+ "bqb",
+ "bqbeq",
+ "bqgt",
+ "bqgteq",
+ "bqlt",
+ "bqlteq",
+ "addq",
+ "mulq",
+ "andq",
+ "orq",
+ "subq",
+ "xorq",
+ "loadq",
+ "cqeq",
+ "cqneq",
+ "cqa",
+ "cqaeq",
+ "cqb",
+ "cqbeq",
+ "cqgt",
+ "cqgteq",
+ "cqlt",
+ "cqlteq",
+ "storeq",
+ "btqs",
+ "btqz",
+ "btqnz",
+ "baddqo",
+ "baddqs",
+ "baddqz",
+ "baddqnz",
+ "bo",
+ "bs",
+ "bz",
+ "bnz",
+ "leai",
+ "leap",
+ "memfence"
+ ]
+
+X86_INSTRUCTIONS =
+ [
+ "cdqi",
+ "idivi"
+ ]
+
+ARM_INSTRUCTIONS =
+ [
+ "clrbp",
+ "mvlbl"
+ ]
+
+ARM64_INSTRUCTIONS =
+ [
+ "pcrtoaddr", # Address from PC relative offset - adr instruction
+ "nopFixCortexA53Err835769" # nop on Cortex-A53 (nothing otherwise)
+ ]
+
+RISC_INSTRUCTIONS =
+ [
+ "smulli", # Multiply two 32-bit words and produce a 64-bit word
+ "addis", # Add integers and set a flag.
+ "subis", # Same, but for subtraction.
+ "oris", # Same, but for bitwise or.
+ "addps" # addis but for pointers.
+ ]
+
+MIPS_INSTRUCTIONS =
+ [
+ "la",
+ "movz",
+ "movn",
+ "slt",
+ "sltu",
+ "pichdr"
+ ]
+
+SH4_INSTRUCTIONS =
+ [
+ "flushcp",
+ "alignformova",
+ "mova",
+ "shllx",
+ "shlrx",
+ "shld",
+ "shad",
+ "bdnan",
+ "loaddReversedAndIncrementAddress",
+ "storedReversedAndDecrementAddress",
+ "ldspr",
+ "stspr",
+ "setargs"
+ ]
+
+CXX_INSTRUCTIONS =
+ [
+ "cloopCrash", # no operands
+ "cloopCallJSFunction", # operands: callee
+ "cloopCallNative", # operands: callee
+ "cloopCallSlowPath", # operands: callTarget, currentFrame, currentPC
+ "cloopCallSlowPathVoid", # operands: callTarget, currentFrame, currentPC
+
+ # For debugging only:
+ # Takes no operands but simply emits whatever follows in // comments as
+ # a line of C++ code in the generated LLIntAssembly.h file. This can be
+ # used to insert instrumentation into the interpreter loop to inspect
+ # variables of interest. Do not leave these instructions in production
+ # code.
+ "cloopDo", # no operands
+ ]
+
+INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARM_INSTRUCTIONS + ARM64_INSTRUCTIONS + RISC_INSTRUCTIONS + MIPS_INSTRUCTIONS + SH4_INSTRUCTIONS + CXX_INSTRUCTIONS
+
+INSTRUCTION_SET = INSTRUCTIONS.to_set
+
+# True-ish when the opcode is a branch.  Relies on the invariant stated at
+# the top of this file: branching instructions, and only branching
+# instructions, begin with "b".
+def isBranch(instruction)
+    instruction =~ /^b/
+end
+
+# True unless the instruction is terminal: "jmp" and "ret" never fall
+# through to the following instruction.
+def hasFallThrough(instruction)
+    instruction != "ret" and instruction != "jmp"
+end
+
diff --git a/Source/JavaScriptCore/offlineasm/mips.rb b/Source/JavaScriptCore/offlineasm/mips.rb
new file mode 100644
index 000000000..cc107ec37
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/mips.rb
@@ -0,0 +1,1017 @@
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY MIPS TECHNOLOGIES, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL MIPS TECHNOLOGIES, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'risc'
+
+class Assembler
+    # Write a raw line straight to the output assembly file.
+    def putStr(str)
+        @outp.puts str
+    end
+end
+
+class Node
+    # Name of the single-precision register overlapping the high half of
+    # this double register, i.e. the next-numbered "$f" register
+    # (e.g. "$f0" -> "$f1").  Raises unless this operand is an FPR.
+    def mipsSingleHi
+        doubleOperand = mipsOperand
+        raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^\$f/
+        "$f" + ($~.post_match.to_i + 1).to_s
+    end
+    # Name of the single-precision register overlapping the low half of this
+    # double register (same name as the double register itself).
+    def mipsSingleLo
+        doubleOperand = mipsOperand
+        raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^\$f/
+        doubleOperand
+    end
+end
+
+# A fixed MIPS register reserved by this backend (scratch temporaries,
+# $zero, $gp, the call register, ...), referenced by its literal name.
+class SpecialRegister < NoChildren
+    def mipsOperand
+        @name
+    end
+
+    def dump
+        @name
+    end
+
+    def register?
+        true
+    end
+end
+
+MIPS_TEMP_GPRS = [SpecialRegister.new("$t5"), SpecialRegister.new("$t6"), SpecialRegister.new("$t7"),
+ SpecialRegister.new("$t8")]
+MIPS_ZERO_REG = SpecialRegister.new("$zero")
+MIPS_GP_REG = SpecialRegister.new("$gp")
+MIPS_GPSAVE_REG = SpecialRegister.new("$s4")
+MIPS_CALL_REG = SpecialRegister.new("$t9")
+MIPS_TEMP_FPRS = [SpecialRegister.new("$f16")]
+MIPS_SCRATCH_FPR = SpecialRegister.new("$f18")
+
+# Emit code that loads an immediate into +register+.  Zero is materialized
+# from $zero; anything else uses the "li" pseudo-instruction.
+def mipsMoveImmediate(value, register)
+    if value == 0
+        $asm.puts "add #{register.mipsOperand}, $zero, $zero"
+    else
+        $asm.puts "li #{register.mipsOperand}, #{value}"
+    end
+end
+
+class RegisterID
+    # Map an offlineasm GPR name to its MIPS assembly name.  Note that t3
+    # and t4 map to callee-saved $s3/$s2 (t4 holds the LLInt PC).
+    def mipsOperand
+        case name
+        when "a0"
+            "$a0"
+        when "a1"
+            "$a1"
+        when "a2"
+            "$a2"
+        when "a3"
+            "$a3"
+        when "r0", "t0"
+            "$v0"
+        when "r1", "t1"
+            "$v1"
+        when "t2"
+            "$t2"
+        when "t3"
+            "$s3"
+        when "t4" # PC reg in llint
+            "$s2"
+        when "t5"
+            "$t5"
+        when "t6"
+            "$t6"
+        when "t7"
+            "$t7"
+        when "t8"
+            "$t8"
+        when "cfr"
+            "$fp"
+        when "lr"
+            "$ra"
+        when "sp"
+            "$sp"
+        else
+            raise "Bad register #{name} for MIPS at #{codeOriginString}"
+        end
+    end
+end
+
+class FPRegisterID
+    # Map an offlineasm FPR name to an even-numbered MIPS double register
+    # (the odd-numbered halves are derived via mipsSingleHi above).
+    def mipsOperand
+        case name
+        when "ft0", "fr"
+            "$f0"
+        when "ft1"
+            "$f2"
+        when "ft2"
+            "$f4"
+        when "ft3"
+            "$f6"
+        when "ft4"
+            "$f8"
+        when "ft5"
+            "$f10"
+        when "fa0"
+            "$f12"
+        when "fa1"
+            "$f14"
+        else
+            raise "Bad register #{name} for MIPS at #{codeOriginString}"
+        end
+    end
+end
+
+class Immediate
+    # Render an immediate operand.  NOTE(review): the accepted range is
+    # asymmetric (-0x7fff..0xffff); whether a given value actually encodes
+    # depends on the instruction consuming it (signed vs. zero-extended
+    # 16-bit fields) — confirm against the emitters before tightening.
+    def mipsOperand
+        raise "Invalid immediate #{value} at #{codeOriginString}" if value < -0x7fff or value > 0xffff
+        "#{value}"
+    end
+end
+
+class Address
+    # Render as the MIPS "offset(base)" form; the offset must fit the
+    # signed 16-bit displacement field.
+    def mipsOperand
+        raise "Bad offset at #{codeOriginString}" if offset.value < -0x7fff or offset.value > 0x7fff
+        "#{offset.value}(#{base.mipsOperand})"
+    end
+end
+
+class AbsoluteAddress
+    # Absolute addresses must have been lowered away before emission; hitting
+    # this at emit time is a lowering bug.
+    def mipsOperand
+        raise "Unconverted absolute address at #{codeOriginString}"
+    end
+end
+
+#
+# Negate condition of branches to labels.
+#
+
+class Instruction
+    # Rewrite this conditional branch (whose target may be out of the short
+    # branch range) as the negated condition branching over an absolute jump
+    # through MIPS_CALL_REG:
+    #     b<negated>  ..., .nobranch
+    #     la          target, $t9
+    #     jmp         $t9
+    #   .nobranch:
+    def mipsNegateCondition(list)
+        /^(b(add|sub|or|mul|t)?)([ipb])/.match(opcode)
+        # $~.post_match is the condition suffix; map it to its negation.
+        case $~.post_match
+        when "eq"
+            op = "neq"
+        when "neq"
+            op = "eq"
+        when "z"
+            op = "nz"
+        when "nz"
+            op = "z"
+        when "gt"
+            op = "lteq"
+        when "gteq"
+            op = "lt"
+        when "lt"
+            op = "gteq"
+        when "lteq"
+            op = "gt"
+        when "a"
+            op = "beq"
+        when "b"
+            op = "aeq"
+        when "aeq"
+            op = "b"
+        when "beq"
+            op = "a"
+        else
+            raise "Can't negate #{opcode} branch."
+        end
+        noBranch = LocalLabel.unique("nobranch")
+        noBranchRef = LocalLabelReference.new(codeOrigin, noBranch)
+        toRef = operands[-1]
+        list << Instruction.new(codeOrigin, "#{$1}#{$3}#{op}", operands[0..-2].push(noBranchRef), annotation)
+        list << Instruction.new(codeOrigin, "la", [toRef, MIPS_CALL_REG])
+        list << Instruction.new(codeOrigin, "jmp", [MIPS_CALL_REG])
+        list << noBranch
+    end
+end
+
+# Convert branches whose target is a global LabelReference into negated
+# local branches around an absolute jump (see mipsNegateCondition above).
+def mipsLowerFarBranchOps(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            # NOTE(review): annotation is captured but never used here.
+            annotation = node.annotation
+            case node.opcode
+            when /^b(add|sub|or|mul|t)?([ipb])/
+                if node.operands[-1].is_a? LabelReference
+                    node.mipsNegateCondition(newList)
+                    next
+                end
+            end
+        end
+        newList << node
+    }
+    newList
+end
+
+#
+# Lower 'and' masked branches
+#
+
+# Lower a masked test-and-branch (bt*).  With two operands the value is
+# compared against $zero directly; with three, the mask is and-ed in first
+# and the temporary is tested.
+def lowerMIPSCondBranch(list, condOp, node)
+    if node.operands.size == 2
+        list << Instruction.new(node.codeOrigin,
+                                condOp,
+                                [node.operands[0], MIPS_ZERO_REG, node.operands[-1]],
+                                node.annotation)
+    elsif node.operands.size == 3
+        tmp = Tmp.new(node.codeOrigin, :gpr)
+        list << Instruction.new(node.codeOrigin,
+                                "andi",
+                                [node.operands[0], node.operands[1], tmp],
+                                node.annotation)
+        list << Instruction.new(node.codeOrigin,
+                                condOp,
+                                [tmp, MIPS_ZERO_REG, node.operands[-1]])
+    else
+        raise "Expected 2 or 3 operands but got #{node.operands.size} at #{node.codeOriginString}"
+    end
+end
+
+#
+# Lowering of branch ops. For example:
+#
+# baddiz foo, bar, baz
+#
+# will become:
+#
+# addi foo, bar
+# bz baz
+#
+
+# Lower branch-and-op instructions ("baddiz a, b, t" becomes the op followed
+# by a branch on the result register).  Overflow ("o") variants expand into
+# explicit sign-bit comparison sequences; unsigned compares (a/b forms) are
+# lowered via sltu; masked tests go through lowerMIPSCondBranch.
+def mipsLowerSimpleBranchOps(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            annotation = node.annotation
+            case node.opcode
+            when /^b(addi|subi|ori|addp)/
+                op = $1
+                bc = $~.post_match
+                branch = "b" + bc
+
+                case op
+                when "addi", "addp"
+                    op = "addi"
+                when "subi"
+                    op = "subi"
+                when "ori"
+                    op = "ori"
+                end
+
+                if bc == "o"
+                    case op
+                    when "addi"
+                        # addu $s0, $s1, $s2
+                        # xor $t0, $s1, $s2
+                        # blt $t0, $zero, no overflow
+                        # xor $t0, $s0, $s1
+                        # blt $t0, $zero, overflow
+                        # no overflow:
+                        #
+                        tr = Tmp.new(node.codeOrigin, :gpr)
+                        tmp = Tmp.new(node.codeOrigin, :gpr)
+                        noFlow = LocalLabel.unique("noflow")
+                        noFlowRef = LocalLabelReference.new(node.codeOrigin, noFlow)
+                        newList << Instruction.new(node.codeOrigin, op, [node.operands[0], node.operands[1], tr], annotation)
+                        newList << Instruction.new(node.codeOrigin, "xori", [node.operands[0], node.operands[1], tmp])
+                        newList << Instruction.new(node.codeOrigin, "bilt", [tmp, MIPS_ZERO_REG, noFlowRef])
+                        newList << Instruction.new(node.codeOrigin, "xori", [tr, node.operands[0], tmp])
+                        newList << Instruction.new(node.codeOrigin, "bilt", [tmp, MIPS_ZERO_REG, node.operands[2]])
+                        newList << noFlow
+                        newList << Instruction.new(node.codeOrigin, "move", [tr, node.operands[1]])
+                    when "subi"
+                        # subu $s0, $s1, $s2
+                        # xor $t0, $s1, $s2
+                        # bge $t0, $zero, no overflow
+                        # xor $t0, $s0, $s1
+                        # blt $t0, $zero, overflow
+                        # no overflow:
+                        #
+                        tr = Tmp.new(node.codeOrigin, :gpr)
+                        tmp = Tmp.new(node.codeOrigin, :gpr)
+                        noFlow = LocalLabel.unique("noflow")
+                        noFlowRef = LocalLabelReference.new(node.codeOrigin, noFlow)
+                        newList << Instruction.new(node.codeOrigin, op, [node.operands[1], node.operands[0], tr], annotation)
+                        newList << Instruction.new(node.codeOrigin, "xori", [node.operands[1], node.operands[0], tmp])
+                        newList << Instruction.new(node.codeOrigin, "bigteq", [tmp, MIPS_ZERO_REG, noFlowRef])
+                        newList << Instruction.new(node.codeOrigin, "xori", [tr, node.operands[1], tmp])
+                        newList << Instruction.new(node.codeOrigin, "bilt", [tmp, MIPS_ZERO_REG, node.operands[2]])
+                        newList << noFlow
+                        newList << Instruction.new(node.codeOrigin, "move", [tr, node.operands[1]])
+                    when "ori"
+                        # ori cannot overflow, so no overflow check is emitted.
+                        newList << Instruction.new(node.codeOrigin, op, node.operands[0..1], annotation)
+                    end
+                else
+                    # Non-overflow variant: perform the op (via a load/store
+                    # round-trip when the destination is in memory), then
+                    # branch on the result against $zero.
+                    if node.operands[1].is_a? Address
+                        addr = node.operands[1]
+                        tr = Tmp.new(node.codeOrigin, :gpr)
+                        newList << Instruction.new(node.codeOrigin, "loadp", [addr, tr], annotation)
+                        newList << Instruction.new(node.codeOrigin, op, [node.operands[0], tr])
+                        newList << Instruction.new(node.codeOrigin, "storep", [tr, addr])
+                    else
+                        tr = node.operands[1]
+                        newList << Instruction.new(node.codeOrigin, op, node.operands[0..-2], annotation)
+                    end
+                    newList << Instruction.new(node.codeOrigin, branch, [tr, MIPS_ZERO_REG, node.operands[-1]])
+                end
+            when "bia", "bpa", "bba"
+                tmp = Tmp.new(node.codeOrigin, :gpr)
+                comp = node.opcode[1] == ?b ? "sltub" : "sltu"
+                newList << Instruction.new(node.codeOrigin, comp, [tmp, node.operands[1], node.operands[0]], annotation)
+                newList << Instruction.new(node.codeOrigin, "bnz", [tmp, MIPS_ZERO_REG, node.operands[2]])
+            when "biaeq", "bpaeq", "bbaeq"
+                tmp = Tmp.new(node.codeOrigin, :gpr)
+                comp = node.opcode[1] == ?b ? "sltub" : "sltu"
+                newList << Instruction.new(node.codeOrigin, comp, [tmp, node.operands[0], node.operands[1]], annotation)
+                newList << Instruction.new(node.codeOrigin, "bz", [tmp, MIPS_ZERO_REG, node.operands[2]])
+            when "bib", "bpb", "bbb"
+                tmp = Tmp.new(node.codeOrigin, :gpr)
+                comp = node.opcode[1] == ?b ? "sltub" : "sltu"
+                newList << Instruction.new(node.codeOrigin, comp, [tmp, node.operands[0], node.operands[1]], annotation)
+                newList << Instruction.new(node.codeOrigin, "bnz", [tmp, MIPS_ZERO_REG, node.operands[2]])
+            when "bibeq", "bpbeq", "bbbeq"
+                tmp = Tmp.new(node.codeOrigin, :gpr)
+                comp = node.opcode[1] == ?b ? "sltub" : "sltu"
+                newList << Instruction.new(node.codeOrigin, comp, [tmp, node.operands[1], node.operands[0]], annotation)
+                newList << Instruction.new(node.codeOrigin, "bz", [tmp, MIPS_ZERO_REG, node.operands[2]])
+            when /^bt(i|p|b)/
+                lowerMIPSCondBranch(newList, "b" + $~.post_match + $1, node)
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+#
+# Specialization of lowering of malformed BaseIndex addresses.
+#
+
+class Node
+    # Default: recurse into children so BaseIndex operands (see below) get
+    # rewritten wherever they appear.
+    def mipsLowerMalformedAddressesRecurse(list)
+        mapChildren {
+            | subNode |
+            subNode.mipsLowerMalformedAddressesRecurse(list)
+        }
+    end
+
+    # Default: recurse into children for shifted-BaseIndex lowering.
+    def mipsLowerShiftedAddressesRecurse(list, isFirst, tmp)
+        mapChildren {
+            | subNode |
+            subNode.mipsLowerShiftedAddressesRecurse(list, isFirst, tmp)
+        }
+    end
+end
+
+class BaseIndex
+    # Replace an unscaled BaseIndex with base+index precomputed into a
+    # temporary, addressed by the original offset.
+    # NOTE(review): this returns nil when scaleShift != 0 (no else branch).
+    # The caller only reaches here after mipsHasShiftedBaseIndexAddress
+    # reported no scaled BaseIndex, so that case should be unreachable —
+    # confirm before relying on it.
+    def mipsLowerMalformedAddressesRecurse(list)
+        tmp = Tmp.new(codeOrigin, :gpr)
+        if scaleShift == 0
+            list << Instruction.new(codeOrigin, "addp", [base, index, tmp])
+            Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, offset.value));
+        end
+    end
+
+    # Compute (index << scaleShift) + base into tmp — only for the first of
+    # a pair of instructions sharing the same base/index/scale — and address
+    # relative to tmp.
+    def mipsLowerShiftedAddressesRecurse(list, isFirst, tmp)
+        if isFirst
+            list << Instruction.new(codeOrigin, "lshifti", [index, Immediate.new(codeOrigin, scaleShift), tmp]);
+            list << Instruction.new(codeOrigin, "addp", [base, tmp])
+        end
+        Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, offset.value));
+    end
+end
+
+#
+# Lowering of BaseIndex addresses with optimization for MIPS.
+#
+# offline asm instruction pair:
+# loadi 4[cfr, t0, 8], t2
+# loadi 0[cfr, t0, 8], t0
+#
+# lowered instructions:
+# lshifti t0, 3, tmp
+# addp cfr, tmp
+# loadi 4[tmp], t2
+# loadi 0[tmp], t0
+#
+
+# Index of the first operand that is a scaled (scaleShift != 0) BaseIndex,
+# or -1 if the instruction has none.
+def mipsHasShiftedBaseIndexAddress(instruction)
+    instruction.operands.each_with_index {
+        | operand, index |
+        if operand.is_a? BaseIndex and operand.scaleShift != 0
+            return index
+        end
+    }
+    -1
+end
+
+# True when two BaseIndex operands share base, index and scale, so a single
+# precomputed shifted base can serve both.
+def mipsScaleOfBaseIndexMatches(baseIndex0, baseIndex1)
+    baseIndex0.base == baseIndex1.base and
+        baseIndex0.index == baseIndex1.index and
+        baseIndex0.scale == baseIndex1.scale
+end
+
+def mipsLowerBaseIndexAddresses(list)
+ newList = [ list[0] ]
+ tmp = nil
+ list.each_cons(2) {
+ | nodes |
+ if nodes[1].is_a? Instruction
+ ind = mipsHasShiftedBaseIndexAddress(nodes[1])
+ if ind != -1
+ if nodes[0].is_a? Instruction and
+ nodes[0].opcode == nodes[1].opcode and
+ ind == mipsHasShiftedBaseIndexAddress(nodes[0]) and
+ mipsScaleOfBaseIndexMatches(nodes[0].operands[ind], nodes[1].operands[ind])
+
+ newList << nodes[1].mipsLowerShiftedAddressesRecurse(newList, false, tmp)
+ else
+ tmp = Tmp.new(codeOrigin, :gpr)
+ newList << nodes[1].mipsLowerShiftedAddressesRecurse(newList, true, tmp)
+ end
+ else
+ newList << nodes[1].mipsLowerMalformedAddressesRecurse(newList)
+ end
+ else
+ newList << nodes[1]
+ end
+ }
+ newList
+end
+
+#
+# Lowering of misplaced immediates of MIPS specific instructions. For example:
+#
+# sltu reg, 4, 2
+#
+# will become:
+#
+# move 4, tmp
+# sltu reg, tmp, 2
+#
+
+# Move immediates out of operand positions that the MIPS slt-family cannot
+# encode (the second source), and route addi/subi through the generic RISC
+# malformed-immediate lowering with a signed-16-bit-safe range.
+def mipsLowerMisplacedImmediates(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            case node.opcode
+            when "slt", "sltu", "sltb", "sltub"
+                if node.operands[1].is_a? Immediate
+                    tmp = Tmp.new(node.codeOrigin, :gpr)
+                    newList << Instruction.new(node.codeOrigin, "move", [node.operands[1], tmp], node.annotation)
+                    newList << Instruction.new(node.codeOrigin, node.opcode,
+                                               [node.operands[0], tmp, node.operands[2]],
+                                               node.annotation)
+                else
+                    newList << node
+                end
+            when /^(addi|subi)/
+                newList << node.riscLowerMalformedImmediatesRecurse(newList, -0x7fff..0x7fff)
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+#
+# Specialization of lowering of misplaced addresses.
+#
+
+class LocalLabelReference
+    # Label references are not registers; mipsAsRegister below relies on this
+    # predicate being defined.
+    def register?
+        false
+    end
+end
+
+# Return a register holding +operand+: addresses are loaded into
+# MIPS_CALL_REG, label references are materialized with "la", and other
+# registers are copied in; immediates/labels already usable pass through
+# unchanged.  When needRestore is true and the call register was used, a
+# $gp restore from MIPS_GPSAVE_REG is appended to postList (for after a
+# PIC call).
+def mipsAsRegister(preList, postList, operand, needRestore)
+    tmp = MIPS_CALL_REG
+    if operand.address?
+        preList << Instruction.new(operand.codeOrigin, "loadp", [operand, MIPS_CALL_REG])
+    elsif operand.is_a? LabelReference
+        preList << Instruction.new(operand.codeOrigin, "la", [operand, MIPS_CALL_REG])
+    elsif operand.register? and operand != MIPS_CALL_REG
+        preList << Instruction.new(operand.codeOrigin, "move", [operand, MIPS_CALL_REG])
+    else
+        # Operand usable as-is; the local reassignment only disables the
+        # restore below, it does not affect the caller's argument.
+        needRestore = false
+        tmp = operand
+    end
+    if needRestore
+        postList << Instruction.new(operand.codeOrigin, "move", [MIPS_GPSAVE_REG, MIPS_GP_REG])
+    end
+    tmp
+end
+
+# Force the operands of jumps, calls, and the MIPS-specific compare/branch
+# pseudo-ops into registers, inserting loads via mipsAsRegister /
+# riscAsRegisters as needed.  Calls additionally get a post-call $gp
+# restore appended.
+def mipsLowerMisplacedAddresses(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            postInstructions = []
+            # NOTE(review): annotation is captured but never used here.
+            annotation = node.annotation
+            case node.opcode
+            when "jmp"
+                newList << Instruction.new(node.codeOrigin,
+                                           node.opcode,
+                                           [mipsAsRegister(newList, [], node.operands[0], false)])
+            when "call"
+                newList << Instruction.new(node.codeOrigin,
+                                           node.opcode,
+                                           [mipsAsRegister(newList, postInstructions, node.operands[0], true)])
+            when "slt", "sltu"
+                newList << Instruction.new(node.codeOrigin,
+                                           node.opcode,
+                                           riscAsRegisters(newList, [], node.operands, "i"))
+            when "sltub", "sltb"
+                newList << Instruction.new(node.codeOrigin,
+                                           node.opcode,
+                                           riscAsRegisters(newList, [], node.operands, "b"))
+            when /^(bz|bnz|bs|bo)/
+                tl = $~.post_match == "" ? "i" : $~.post_match
+                newList << Instruction.new(node.codeOrigin,
+                                           node.opcode,
+                                           riscAsRegisters(newList, [], node.operands, tl))
+            else
+                newList << node
+            end
+            newList += postInstructions
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+#
+# Lowering compares and tests.
+#
+
+# Lower a compare/test to a 0/1 value in operands[2]:
+#     dest = 0; tmp0 = opCmp(b, a); tmp1 = 1; opMov(dest, tmp1, tmp0)
+# so dest becomes 1 exactly when the movz/movn condition on tmp0 holds.
+def mipsLowerCompareTemplate(list, node, opCmp, opMov)
+    tmp0 = Tmp.new(node.codeOrigin, :gpr)
+    tmp1 = Tmp.new(node.codeOrigin, :gpr)
+    list << Instruction.new(node.codeOrigin, "move", [Immediate.new(nil, 0), node.operands[2]])
+    list << Instruction.new(node.codeOrigin, opCmp, [node.operands[1], node.operands[0], tmp0])
+    list << Instruction.new(node.codeOrigin, "move", [Immediate.new(nil, 1), tmp1])
+    list << Instruction.new(node.codeOrigin, opMov, [node.operands[2], tmp1, tmp0])
+end
+
+def mipsLowerCompares(list)
+ newList = []
+ list.each {
+ | node |
+ if node.is_a? Instruction
+ case node.opcode
+ when "cieq", "cpeq", "cbeq"
+ mipsLowerCompareTemplate(newList, node, "subp", "movz")
+ when "cineq", "cpneq", "cbneq"
+ mipsLowerCompareTemplate(newList, node, "subp", "movn")
+ when "tiz", "tbz", "tpz"
+ mipsLowerCompareTemplate(newList, node, "andp", "movz")
+ when "tinz", "tbnz", "tpnz"
+ mipsLowerCompareTemplate(newList, node, "andp", "movn")
+ when "tio", "tbo", "tpo"
+ tmp = Tmp.new(node.codeOrigin, :gpr)
+ list << Instruction.new(node.codeOrigin, "andp", [node.operands[1], node.operands[0], tmp])
+ list << Instruction.new(node.codeOrigin, "slt", [node.operands[2], MIPS_ZERO_REG, tmp])
+ when "tis", "tbs", "tps"
+ tmp = Tmp.new(node.codeOrigin, :gpr)
+ list << Instruction.new(node.codeOrigin, "andp", [node.operands[1], node.operands[0], tmp])
+ list << Instruction.new(node.codeOrigin, "slt", [node.operands[2], tmp, MIPS_ZERO_REG])
+ else
+ newList << node
+ end
+ else
+ newList << node
+ end
+ }
+ newList
+end
+
+#
+# Lea support.
+#
+
+class Address
+    # Emit an address computation via addiu, using the two-operand form when
+    # the destination is also the base register.
+    def mipsEmitLea(destination)
+        if destination == base
+            $asm.puts "addiu #{destination.mipsOperand}, #{offset.value}"
+        else
+            $asm.puts "addiu #{destination.mipsOperand}, #{base.mipsOperand}, #{offset.value}"
+        end
+    end
+end
+
+#
+# Add PIC-compatible header code to all the LLInt routines.
+#
+
+# Insert a "pichdr" pseudo-instruction after every global Label so each
+# entry point emits the PIC header code.
+def mipsAddPICCode(list)
+    myList = []
+    list.each {
+        | node |
+        myList << node
+        if node.is_a? Label
+            myList << Instruction.new(node.codeOrigin, "pichdr", [])
+        end
+    }
+    myList
+end
+
+#
+# Actual lowering code follows.
+#
+
+class Sequence
+    # Run the full MIPS lowering pipeline over this sequence.  Pass order
+    # matters: MIPS-specific lowerings run interleaved with the generic RISC
+    # passes, and temporaries are bound to real registers last.
+    def getModifiedListMIPS
+        result = @list
+
+        # Verify that we will only see instructions and labels.
+        result.each {
+            | node |
+            unless node.is_a? Instruction or
+                node.is_a? Label or
+                node.is_a? LocalLabel or
+                node.is_a? Skip
+                raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
+            end
+        }
+
+        result = mipsAddPICCode(result)
+        result = mipsLowerFarBranchOps(result)
+        result = mipsLowerSimpleBranchOps(result)
+        result = riscLowerSimpleBranchOps(result)
+        result = riscLowerHardBranchOps(result)
+        result = riscLowerShiftOps(result)
+        result = mipsLowerBaseIndexAddresses(result)
+        # Offsets outside the signed 16-bit window get rebuilt by the generic
+        # RISC pass.
+        result = riscLowerMalformedAddresses(result) {
+            | node, address |
+            if address.is_a? Address
+                (-0xffff..0xffff).include? address.offset.value
+            else
+                false
+            end
+        }
+        result = riscLowerMalformedAddressesDouble(result)
+        result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep"])
+        result = mipsLowerMisplacedImmediates(result)
+        result = riscLowerMalformedImmediates(result, -0xffff..0xffff)
+        result = mipsLowerMisplacedAddresses(result)
+        result = riscLowerMisplacedAddresses(result)
+        result = riscLowerRegisterReuse(result)
+        result = mipsLowerCompares(result)
+        result = assignRegistersToTemporaries(result, :gpr, MIPS_TEMP_GPRS)
+        result = assignRegistersToTemporaries(result, :fpr, MIPS_TEMP_FPRS)
+
+        return result
+    end
+end
+
+# Render a comma-separated operand list in source order.
+def mipsOperands(operands)
+    operands.map{|v| v.mipsOperand}.join(", ")
+end
+
+# Render operands with the destination (last offlineasm operand) moved to
+# the front, matching the MIPS "dest, src..." convention.
+def mipsFlippedOperands(operands)
+    mipsOperands([operands[-1]] + operands[0..-2])
+end
+
+# NOTE(review): empty stub that always returns nil; it has no callers in
+# this file and looks like dead code — candidate for removal.
+def getMIPSOpcode(opcode, suffix)
+
+end
+
+# Emit a 2- or 3-operand ALU op, folding immediate operands into the
+# immediate ("i") form.  "sub" with an immediate is rewritten as "add" of
+# the negated immediate, and add/sub use the non-trapping "u" variants.
+# NOTE(review): the opcodei parameter is never used in this body.
+def emitMIPSCompact(opcode, opcodei, operands)
+    postfix = ""
+    if opcode == "sub"
+        if operands[0].is_a? Immediate
+            opcode = "add"
+            operands[0] = Immediate.new(operands[0].codeOrigin, -1 * operands[0].value)
+        elsif operands[1].is_a? Immediate
+            opcode = "add"
+            operands[1] = Immediate.new(operands[1].codeOrigin, -1 * operands[1].value)
+        end
+        postfix = "u"
+    elsif opcode == "add"
+        postfix = "u"
+    end
+    if operands.size == 3
+        if operands[0].is_a? Immediate
+            $asm.puts "#{opcode}i#{postfix} #{operands[2].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].value}"
+        elsif operands[1].is_a? Immediate
+            $asm.puts "#{opcode}i#{postfix} #{operands[2].mipsOperand}, #{operands[0].mipsOperand}, #{operands[1].value}"
+        else
+            $asm.puts "#{opcode}#{postfix} #{mipsFlippedOperands(operands)}"
+        end
+    else
+        raise unless operands.size == 2
+        raise unless operands[1].register?
+        if operands[0].is_a? Immediate
+            $asm.puts "#{opcode}i#{postfix} #{operands[1].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+        else
+            $asm.puts "#{opcode}#{postfix} #{operands[1].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+        end
+    end
+end
+
+# Emit a shift: immediate shift amounts use the plain opcode, register
+# amounts use the variable ("v") form.
+def emitMIPSShiftCompact(opcode, operands)
+    if operands.size == 3
+        if (operands[1].is_a? Immediate)
+            $asm.puts "#{opcode} #{operands[2].mipsOperand}, #{operands[0].mipsOperand}, #{operands[1].value}"
+        else
+            $asm.puts "#{opcode}v #{mipsFlippedOperands(operands)}"
+        end
+    else
+        raise unless operands.size == 2
+        if operands[0].register?
+            $asm.puts "#{opcode}v #{operands[1].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+        else
+            $asm.puts "#{opcode} #{operands[1].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].value}"
+        end
+    end
+end
+
+# Emit a plain 3-operand op, or a 2-operand form where the destination
+# doubles as a source.
+def emitMIPS(opcode, operands)
+    if operands.size == 3
+        $asm.puts "#{opcode} #{mipsFlippedOperands(operands)}"
+    else
+        raise unless operands.size == 2
+        $asm.puts "#{opcode} #{operands[1].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+    end
+end
+
+# Emit a double-precision compare (c.<cond>.d) followed by bc1t, or bc1f
+# when +neg+ requests the inverted condition.
+def emitMIPSDoubleBranch(branchOpcode, neg, operands)
+    $asm.puts "c.#{branchOpcode}.d #{mipsOperands(operands[0..1])}"
+    if (!neg)
+        $asm.puts "bc1t #{operands[2].asmLabel}"
+    else
+        $asm.puts "bc1f #{operands[2].asmLabel}"
+    end
+end
+
+# Emit j/jal to a local label, or the register form (opcode + "r", i.e.
+# jr/jalr) through MIPS_CALL_REG.  Non-local direct targets and other
+# registers must have been lowered away earlier.
+def emitMIPSJumpOrCall(opcode, operand)
+    if operand.label?
+        raise "Direct call/jump to a not local label." unless operand.is_a? LocalLabelReference
+        $asm.puts "#{opcode} #{operand.asmLabel}"
+    else
+        raise "Invalid call/jump register." unless operand == MIPS_CALL_REG
+        $asm.puts "#{opcode}r #{MIPS_CALL_REG.mipsOperand}"
+    end
+end
+
+class Instruction
+ def lowerMIPS
+ $asm.comment codeOriginString
+ case opcode
+ when "addi", "addp", "addis"
+ if operands.size == 3 and operands[0].is_a? Immediate
+ raise unless operands[1].register?
+ raise unless operands[2].register?
+ if operands[0].value == 0 #and suffix.empty?
+ unless operands[1] == operands[2]
+ $asm.puts "move #{operands[2].mipsOperand}, #{operands[1].mipsOperand}"
+ end
+ else
+ $asm.puts "addiu #{operands[2].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+ end
+ elsif operands.size == 3 and operands[0].register?
+ raise unless operands[1].register?
+ raise unless operands[2].register?
+ $asm.puts "addu #{mipsFlippedOperands(operands)}"
+ else
+ if operands[0].is_a? Immediate
+ unless Immediate.new(nil, 0) == operands[0]
+ $asm.puts "addiu #{operands[1].mipsOperand}, #{mipsFlippedOperands(operands)}"
+ end
+ else
+ $asm.puts "addu #{operands[1].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+ end
+ end
+ when "andi", "andp"
+ emitMIPSCompact("and", "and", operands)
+ when "ori", "orp"
+ emitMIPSCompact("or", "orr", operands)
+ when "oris"
+ emitMIPSCompact("or", "orrs", operands)
+ when "xori", "xorp"
+ emitMIPSCompact("xor", "eor", operands)
+ when "lshifti", "lshiftp"
+ emitMIPSShiftCompact("sll", operands)
+ when "rshifti", "rshiftp"
+ emitMIPSShiftCompact("sra", operands)
+ when "urshifti", "urshiftp"
+ emitMIPSShiftCompact("srl", operands)
+ when "muli", "mulp"
+ emitMIPS("mul", operands)
+ when "subi", "subp", "subis"
+ emitMIPSCompact("sub", "subs", operands)
+ when "negi", "negp"
+ $asm.puts "negu #{operands[0].mipsOperand}, #{operands[0].mipsOperand}"
+ when "noti"
+ $asm.puts "nor #{operands[0].mipsOperand}, #{operands[0].mipsOperand}, $zero"
+ when "loadi", "loadis", "loadp"
+ $asm.puts "lw #{mipsFlippedOperands(operands)}"
+ when "storei", "storep"
+ $asm.puts "sw #{mipsOperands(operands)}"
+ when "loadb"
+ $asm.puts "lbu #{mipsFlippedOperands(operands)}"
+ when "loadbs"
+ $asm.puts "lb #{mipsFlippedOperands(operands)}"
+ when "storeb"
+ $asm.puts "sb #{mipsOperands(operands)}"
+ when "loadh"
+ $asm.puts "lhu #{mipsFlippedOperands(operands)}"
+ when "loadhs"
+ $asm.puts "lh #{mipsFlippedOperands(operands)}"
+ when "storeh"
+ $asm.puts "shv #{mipsOperands(operands)}"
+ when "loadd"
+ $asm.puts "ldc1 #{mipsFlippedOperands(operands)}"
+ when "stored"
+ $asm.puts "sdc1 #{mipsOperands(operands)}"
+ when "la"
+ $asm.puts "la #{operands[1].mipsOperand}, #{operands[0].asmLabel}"
+ when "addd"
+ emitMIPS("add.d", operands)
+ when "divd"
+ emitMIPS("div.d", operands)
+ when "subd"
+ emitMIPS("sub.d", operands)
+ when "muld"
+ emitMIPS("mul.d", operands)
+ when "sqrtd"
+ $asm.puts "sqrt.d #{mipsFlippedOperands(operands)}"
+ when "ci2d"
+ raise "invalid ops of #{self.inspect} at #{codeOriginString}" unless operands[1].is_a? FPRegisterID and operands[0].register?
+ $asm.puts "mtc1 #{operands[0].mipsOperand}, #{operands[1].mipsOperand}"
+ $asm.puts "cvt.d.w #{operands[1].mipsOperand}, #{operands[1].mipsOperand}"
+ when "bdeq"
+ emitMIPSDoubleBranch("eq", false, operands)
+ when "bdneq"
+ emitMIPSDoubleBranch("ueq", true, operands)
+ when "bdgt"
+ emitMIPSDoubleBranch("ule", true, operands)
+ when "bdgteq"
+ emitMIPSDoubleBranch("ult", true, operands)
+ when "bdlt"
+ emitMIPSDoubleBranch("olt", false, operands)
+ when "bdlteq"
+ emitMIPSDoubleBranch("ole", false, operands)
+ when "bdequn"
+ emitMIPSDoubleBranch("ueq", false, operands)
+ when "bdnequn"
+ emitMIPSDoubleBranch("eq", true, operands)
+ when "bdgtun"
+ emitMIPSDoubleBranch("ole", true, operands)
+ when "bdgtequn"
+ emitMIPSDoubleBranch("olt", true, operands)
+ when "bdltun"
+ emitMIPSDoubleBranch("ult", false, operands)
+ when "bdltequn"
+ emitMIPSDoubleBranch("ule", false, operands)
+ when "btd2i"
+ # FIXME: may be a good idea to just get rid of this instruction, since the interpreter
+ # currently does not use it.
+ raise "MIPS does not support this opcode yet, #{codeOrigin}"
+ when "td2i"
+ $asm.puts "cvt.w.d #{MIPS_SCRATCH_FPR.mipsSingleLo}, #{operands[0].mipsOperand}"
+ $asm.puts "mfc1 #{operands[1].mipsOperand}, #{MIPS_SCRATCH_FPR.mipsSingleLo}"
+ when "bcd2i"
+ $asm.puts "cvt.w.d #{MIPS_SCRATCH_FPR.mipsSingleLo}, #{operands[0].mipsOperand}"
+ $asm.puts "mfc1 #{operands[1].mipsOperand}, #{MIPS_SCRATCH_FPR.mipsSingleLo}"
+ $asm.puts "cvt.d.w #{MIPS_SCRATCH_FPR.mipsOperand}, #{MIPS_SCRATCH_FPR.mipsSingleLo}"
+ emitMIPSDoubleBranch("eq", true, [MIPS_SCRATCH_FPR, operands[0], operands[2]])
+ $asm.puts "beq #{operands[1].mipsOperand}, $zero, #{operands[2].asmLabel}"
+ when "movdz"
+ # FIXME: either support this or remove it.
+ raise "MIPS does not support this opcode yet, #{codeOrigin}"
+ when "pop"
+ operands.each {
+ | op |
+ $asm.puts "lw #{op.mipsOperand}, 0($sp)"
+ $asm.puts "addiu $sp, $sp, 4"
+ }
+ when "push"
+ operands.each {
+ | op |
+ $asm.puts "addiu $sp, $sp, -4"
+ $asm.puts "sw #{op.mipsOperand}, 0($sp)"
+ }
+ when "move", "sxi2p", "zxi2p"
+ if operands[0].is_a? Immediate
+ mipsMoveImmediate(operands[0].value, operands[1])
+ else
+ $asm.puts "move #{mipsFlippedOperands(operands)}"
+ end
+ when "nop"
+ $asm.puts "nop"
+ when "bieq", "bpeq", "bbeq"
+ $asm.puts "beq #{mipsOperands(operands[0..1])}, #{operands[2].asmLabel}"
+ when "bineq", "bpneq", "bbneq"
+ $asm.puts "bne #{mipsOperands(operands[0..1])}, #{operands[2].asmLabel}"
+ when "bigt", "bpgt", "bbgt"
+ $asm.puts "bgt #{mipsOperands(operands[0..1])}, #{operands[2].asmLabel}"
+ when "bigteq", "bpgteq", "bbgteq"
+ $asm.puts "bge #{mipsOperands(operands[0..1])}, #{operands[2].asmLabel}"
+ when "bilt", "bplt", "bblt"
+ $asm.puts "blt #{mipsOperands(operands[0..1])}, #{operands[2].asmLabel}"
+ when "bilteq", "bplteq", "bblteq"
+ $asm.puts "ble #{mipsOperands(operands[0..1])}, #{operands[2].asmLabel}"
+ when "jmp"
+ emitMIPSJumpOrCall("j", operands[0])
+ when "call"
+ emitMIPSJumpOrCall("jal", operands[0])
+ when "break"
+ $asm.puts "break"
+ when "ret"
+ $asm.puts "jr $ra"
+ when "cia", "cpa", "cba"
+ $asm.puts "sltu #{operands[2].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+ when "ciaeq", "cpaeq", "cbaeq"
+ $asm.puts "sltu #{operands[2].mipsOperand}, #{operands[0].mipsOperand}, #{operands[1].mipsOperand}"
+ $asm.puts "xori #{operands[2].mipsOperand}, 1"
+ when "cib", "cpb", "cbb"
+ $asm.puts "sltu #{operands[2].mipsOperand}, #{operands[0].mipsOperand}, #{operands[1].mipsOperand}"
+ when "cibeq", "cpbeq", "cbbeq"
+ $asm.puts "sltu #{operands[2].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+ $asm.puts "xori #{operands[2].mipsOperand}, 1"
+ when "cigt", "cpgt", "cbgt"
+ $asm.puts "slt #{operands[2].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+ when "cigteq", "cpgteq", "cbgteq"
+ $asm.puts "slt #{operands[2].mipsOperand}, #{operands[0].mipsOperand}, #{operands[1].mipsOperand}"
+ $asm.puts "xori #{operands[2].mipsOperand}, 1"
+ when "cilt", "cplt", "cblt"
+ $asm.puts "slt #{operands[2].mipsOperand}, #{operands[0].mipsOperand}, #{operands[1].mipsOperand}"
+ when "cilteq", "cplteq", "cblteq"
+ $asm.puts "slt #{operands[2].mipsOperand}, #{operands[1].mipsOperand}, #{operands[0].mipsOperand}"
+ $asm.puts "xori #{operands[2].mipsOperand}, 1"
+ when "peek"
+ $asm.puts "lw #{operands[1].mipsOperand}, #{operands[0].value * 4}($sp)"
+ when "poke"
+ $asm.puts "sw #{operands[1].mipsOperand}, #{operands[0].value * 4}($sp)"
+ when "fii2d"
+ $asm.puts "mtc1 #{operands[0].mipsOperand}, #{operands[2].mipsSingleLo}"
+ $asm.puts "mtc1 #{operands[1].mipsOperand}, #{operands[2].mipsSingleHi}"
+ when "fd2ii"
+ $asm.puts "mfc1 #{operands[1].mipsOperand}, #{operands[0].mipsSingleLo}"
+ $asm.puts "mfc1 #{operands[2].mipsOperand}, #{operands[0].mipsSingleHi}"
+ when /^bo/
+ $asm.puts "bgt #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].asmLabel}"
+ when /^bs/
+ $asm.puts "blt #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].asmLabel}"
+ when /^bz/
+ $asm.puts "beq #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].asmLabel}"
+ when /^bnz/
+ $asm.puts "bne #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].asmLabel}"
+ when "leai", "leap"
+ operands[0].mipsEmitLea(operands[1])
+ when "smulli"
+ raise "Wrong number of arguments to smull in #{self.inspect} at #{codeOriginString}" unless operands.length == 4
+ $asm.puts "mult #{operands[0].mipsOperand}, #{operands[1].mipsOperand}"
+ $asm.puts "mflo #{operands[2].mipsOperand}"
+ $asm.puts "mfhi #{operands[3].mipsOperand}"
+ when "movz"
+ $asm.puts "movz #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
+ when "movn"
+ $asm.puts "movn #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
+ when "slt", "sltb"
+ $asm.puts "slt #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
+ when "sltu", "sltub"
+ $asm.puts "sltu #{operands[0].mipsOperand}, #{operands[1].mipsOperand}, #{operands[2].mipsOperand}"
+ when "pichdr"
+ $asm.putStr("OFFLINE_ASM_CPLOAD(#{MIPS_CALL_REG.mipsOperand})")
+ $asm.puts "move #{MIPS_GPSAVE_REG.mipsOperand}, #{MIPS_GP_REG.mipsOperand}"
+ when "memfence"
+ $asm.puts "sync"
+ else
+ lowerDefault
+ end
+ end
+end
diff --git a/Source/JavaScriptCore/offlineasm/offsets.rb b/Source/JavaScriptCore/offlineasm/offsets.rb
new file mode 100644
index 000000000..50b963b72
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/offsets.rb
@@ -0,0 +1,194 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+
# Reinterprets an unsigned 32-bit quantity as a signed one (two's
# complement); values already representable as a signed int32 pass through.
def to32Bit(value)
    value > 0x7fffffff ? value - (1 << 32) : value
end

# Sentinel values compiled into the offset-extractor binary; the extractor
# scans the binary for these byte patterns to locate the offset tables.
OFFSET_HEADER_MAGIC_NUMBERS = [ to32Bit(0x9e43fd66), to32Bit(0x4379bfba) ]
OFFSET_MAGIC_NUMBERS = [ to32Bit(0xec577ac7), to32Bit(0x0ff5e755) ]
+
+#
+# MissingMagicValuesException
+#
+# Thrown when magic values are missing from the binary.
+#
+
# Raised by offsetsAndConfigurationIndex when the magic byte patterns cannot
# be found in the scanned binary (e.g. it is not an offset-extractor build).
# NOTE(review): this subclasses Exception rather than StandardError, so a
# bare `rescue` will NOT catch it — callers must rescue it by name.
class MissingMagicValuesException < Exception
end
+
+#
+# offsetsList(ast)
+# sizesList(ast)
+#
+# Returns a list of offsets and sizes used by the AST.
+#
+
# Every distinct StructOffset node referenced by the AST, in sorted order.
def offsetsList(ast)
    offsetNodes = ast.filter(StructOffset)
    offsetNodes.uniq.sort
end
+
# Every distinct Sizeof node referenced by the AST, in sorted order.
def sizesList(ast)
    sizeNodes = ast.filter(Sizeof)
    sizeNodes.uniq.sort
end
+
+#
+# offsetsAndConfigurationIndex(ast, file) ->
+# [[offsets, index], ...]
+#
+# Parses the offsets from a file and returns a list of offsets and the
+# index of the configuration that is valid in this build target.
+#
+
# Scans the compiled offset-extractor binary +file+ for the magic byte
# patterns, trying both endiannesses. For each settings configuration found
# it decodes the list of 32-bit offset values that follows.
#
# Returns [[offsets, configurationIndex], ...].
# Raises MissingMagicValuesException if no magic values are present at all.
def offsetsAndConfigurationIndex(file)
    endiannessMarkerBytes = nil
    result = {}
    
    # Decodes 4 bytes into a 32-bit integer with the given byte order.
    # (These nested defs are defined as plain methods when the enclosing
    # method first runs; they are helpers, not closures over locals.)
    def readInt(endianness, bytes)
        if endianness == :little
            # Little endian
            (bytes[0] << 0 |
             bytes[1] << 8 |
             bytes[2] << 16 |
             bytes[3] << 24)
        else
            # Big endian
            (bytes[0] << 24 |
             bytes[1] << 16 |
             bytes[2] << 8 |
             bytes[3] << 0)
        end
    end
    
    # Encodes a list of 32-bit magic numbers as the byte sequence they
    # occupy in the binary under the given endianness.
    def prepareMagic(endianness, numbers)
        magicBytes = []
        numbers.each {
            | number |
            currentBytes = []
            4.times {
                currentBytes << (number & 0xff)
                number >>= 8
            }
            if endianness == :big
                currentBytes.reverse!
            end
            magicBytes += currentBytes
        }
        magicBytes
    end
    
    fileBytes = []
    
    # Slurp the whole binary as an array of byte values.
    File.open(file, "rb") {
        | inp |
        loop {
            byte = inp.getbyte
            break unless byte
            fileBytes << byte
        }
    }
    
    # Splits byteArray at each occurrence of +pattern+, dropping the pattern
    # itself — like String#split, but for byte arrays.
    def sliceByteArrays(byteArray, pattern)
        result = []
        lastSlicePoint = 0
        (byteArray.length - pattern.length + 1).times {
            | index |
            foundOne = true
            pattern.length.times {
                | subIndex |
                if byteArray[index + subIndex] != pattern[subIndex]
                    foundOne = false
                    break
                end
            }
            if foundOne
                result << byteArray[lastSlicePoint...index]
                lastSlicePoint = index + pattern.length
            end
        }
        
        result << byteArray[lastSlicePoint...(byteArray.length)]
        
        result
    end
    
    [:little, :big].each {
        | endianness |
        headerMagicBytes = prepareMagic(endianness, OFFSET_HEADER_MAGIC_NUMBERS)
        magicBytes = prepareMagic(endianness, OFFSET_MAGIC_NUMBERS)
        
        # Every chunk after the first header magic holds one configuration:
        # array[1] (between the first two offset magics) is the configuration
        # index, and each subsequent chunk is one offset value.
        bigArray = sliceByteArrays(fileBytes, headerMagicBytes)
        unless bigArray.size <= 1
            bigArray[1..-1].each {
                | configArray |
                array = sliceByteArrays(configArray, magicBytes)
                index = readInt(endianness, array[1])
                offsets = []
                array[2..-1].each {
                    | data |
                    offsets << readInt(endianness, data)
                }
                result[index] = offsets
            }
        end
    }
    
    raise MissingMagicValuesException unless result.length >= 1
    
    # result is {index1=>offsets1, index2=>offsets2} but we want to return
    # [[offsets1, index1], [offsets2, index2]].
    return result.map {
        | pair |
        pair.reverse
    }
end
+
+#
+# buildOffsetsMap(ast, offsetsList) -> [offsets, sizes]
+#
+# Builds a mapping between StructOffset nodes and their values.
+#
+
# Builds hashes mapping each StructOffset node (and each Sizeof node) in the
# AST to its numeric value, taken in order from +offsetsList+.
#
# ast         - the parsed AST, scanned via offsetsList(ast)/sizesList(ast).
# offsetsList - flat list of integers extracted from the binary; it must hold
#               exactly one value per unique offset plus one per unique
#               sizeof. NOTE: it is consumed destructively via shift (the
#               original behavior), in case callers rely on that side effect.
#
# Returns [offsetsMap, sizesMap].
def buildOffsetsMap(ast, offsetsList)
    offsetsMap = {}
    sizesMap = {}
    astOffsetsList = offsetsList(ast)
    astSizesList = sizesList(ast)
    # Fail loudly, with a diagnostic, if the binary supplied the wrong number
    # of values (the original code raised with no message here).
    unless astOffsetsList.size + astSizesList.size == offsetsList.size
        raise "Offset list size mismatch: AST needs #{astOffsetsList.size} offsets + #{astSizesList.size} sizes, but #{offsetsList.size} values were provided"
    end
    # The extracted values appear in the same deterministic (sorted) order
    # that offsetsList(ast)/sizesList(ast) produce, so pair them off the
    # front of the list. Reuse the lists computed above rather than filtering
    # the AST a second time.
    astOffsetsList.each {
        | structOffset |
        offsetsMap[structOffset] = offsetsList.shift
    }
    astSizesList.each {
        | sizeof |
        sizesMap[sizeof] = offsetsList.shift
    }
    [offsetsMap, sizesMap]
end
+
diff --git a/Source/JavaScriptCore/offlineasm/opt.rb b/Source/JavaScriptCore/offlineasm/opt.rb
new file mode 100644
index 000000000..c721758f7
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/opt.rb
@@ -0,0 +1,135 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+
+#
+# "Optimization" passes. These are used to lower the representation for
+# backends that cannot handle some of our higher-level instructions.
+#
+
+#
+# A temporary - a variable that will be allocated to a register after we're
+# done.
+#
+
# Default traversal: replacing temporaries in an arbitrary node means
# rebuilding it with each child processed recursively. Tmp overrides this to
# substitute its allocated register.
class Node
    def replaceTemporariesWithRegisters(kind)
        mapChildren {
            | child |
            child.replaceTemporariesWithRegisters(kind)
        }
    end
end
+
# A temporary: a placeholder operand of a particular kind that the allocator
# later binds to a concrete register. It quacks like a register for the rest
# of the backend (register? is true; it is not an address, label or
# immediate).
class Tmp < NoChildren
    attr_reader :firstMention, :lastMention, :kind
    attr_accessor :register

    def initialize(codeOrigin, kind)
        super(codeOrigin)
        @kind = kind
    end

    def dump
        "$tmp#{object_id}"
    end

    # Records a use of this temporary at instruction index +position+,
    # widening its [firstMention, lastMention] live range as needed.
    def mention!(position)
        @firstMention = position if not @firstMention or position < @firstMention
        @lastMention = position if not @lastMention or position > @lastMention
    end

    # Substitutes the allocated register when the kinds match; temporaries of
    # other kinds pass through untouched.
    def replaceTemporariesWithRegisters(kind)
        return self unless @kind == kind
        raise "Did not allocate register to temporary at #{codeOriginString}" unless @register
        @register
    end

    def address?
        false
    end

    def label?
        false
    end

    def immediate?
        false
    end

    def register?
        true
    end
end
+
# Assign registers to temporaries, by finding which temporaries interfere
# with each other. Note that this relies on temporary live ranges not crossing
# basic block boundaries.
#
# list      - the instruction list for one basic block.
# kind      - which class of temporaries to allocate on this pass.
# registers - the pool of concrete registers available for that kind.
#
# Returns a new list with every matching Tmp replaced by its register.

def assignRegistersToTemporaries(list, kind, registers)
    # Pass 1: record each temporary's first and last mention, i.e. its live
    # range in instruction indices.
    list.each_with_index {
        | node, index |
        node.filter(Tmp).uniq.each {
            | tmp |
            if tmp.kind == kind
                tmp.mention! index
            end
        }
    }
    
    # Pass 2: linear scan over the block. Pop a free register when a live
    # range starts and push it back when the range ends. Allocation is
    # processed before liberation at each index, so a range that starts and
    # ends on the same instruction still gets (and releases) a register.
    freeRegisters = registers.dup
    list.each_with_index {
        | node, index |
        tmpList = node.filter(Tmp).uniq
        tmpList.each {
            | tmp |
            if tmp.kind == kind and tmp.firstMention == index
                raise "Could not allocate register to temporary at #{node.codeOriginString}" if freeRegisters.empty?
                tmp.register = freeRegisters.pop
            end
        }
        tmpList.each {
            | tmp |
            if tmp.kind == kind and tmp.lastMention == index
                freeRegisters.push tmp.register
                # Sanity check: the free pool can never exceed the original
                # pool; if it does, a register was freed twice.
                raise "Register allocation inconsistency at #{node.codeOriginString}" if freeRegisters.size > registers.size
            end
        }
    }
    
    # Finally rewrite the instructions, substituting registers for Tmps.
    list.map {
        | node |
        node.replaceTemporariesWithRegisters(kind)
    }
end
+
diff --git a/Source/JavaScriptCore/offlineasm/parser.rb b/Source/JavaScriptCore/offlineasm/parser.rb
new file mode 100644
index 000000000..a122a68c4
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/parser.rb
@@ -0,0 +1,794 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+require "instructions"
+require "pathname"
+require "registers"
+require "self_hash"
+
# A source position (file name + line number) attached to every token and
# AST node so diagnostics can point back at the original input.
class CodeOrigin
    attr_reader :fileName, :lineNumber

    def initialize(fileName, lineNumber)
        @fileName = fileName
        @lineNumber = lineNumber
    end

    # Renders as "file:line", the conventional compiler-diagnostic format.
    def to_s
        "#{@fileName}:#{@lineNumber}"
    end
end
+
# Resolves "include <module>" to a concrete .asm path. Directories
# registered via -I options are consulted first (the LAST matching directory
# wins, matching the original scan order); otherwise the including file's
# own directory is used.
class IncludeFile
    @@includeDirs = []

    attr_reader :fileName

    def initialize(moduleName, defaultDir)
        asmName = moduleName + ".asm"
        found = @@includeDirs.select {
            | includePath |
            File.file?(includePath + asmName)
        }.last
        directory = found || defaultDir
        @fileName = directory + asmName
    end

    # Pops leading -I options off ARGV, registering each directory for
    # include resolution.
    def self.processIncludeOptions()
        while ARGV[0][/-I/]
            path = ARGV.shift[2..-1]
            path = ARGV.shift unless path
            @@includeDirs << (path + "/")
        end
    end
end
+
# A single lexical token plus the CodeOrigin it came from. Tokens compare
# equal to their string payload (or to another Token with the same payload),
# which lets the parser match against plain string and regexp literals.
class Token
    attr_reader :codeOrigin, :string

    def initialize(codeOrigin, string)
        @codeOrigin = codeOrigin
        @string = string
    end

    def ==(other)
        other.is_a?(Token) ? @string == other.string : @string == other
    end

    def =~(other)
        @string =~ other
    end

    def to_s
        "#{@string.inspect} at #{codeOrigin}"
    end

    # Raises a RuntimeError pinpointing this token; the optional comment adds
    # context about what the parser was doing at the time.
    def parseError(*comment)
        message = "Parse error: #{to_s}"
        message += ": #{comment[0]}" unless comment.empty?
        raise message
    end
end
+
# A source annotation ("// ..." comment) captured by the lexer and carried
# through the token stream. type is :global when the comment started at
# column 0 and :local otherwise (see lex).
class Annotation
    attr_reader :codeOrigin, :type, :string

    def initialize(codeOrigin, type, string)
        @codeOrigin = codeOrigin
        @type = type
        @string = string
    end
end
+
+#
+# The lexer. Takes a string and returns an array of tokens.
+#
+
# Tokenizes offlineasm source text into a flat array of Token and Annotation
# objects, each tagged with a CodeOrigin (file + line). The order of the
# regex cases matters: each anchors at the start of the remaining string,
# and e.g. "0x..." must be tried before the plain decimal case.
def lex(str, fileName)
    fileName = Pathname.new(fileName)
    result = []
    lineNumber = 1
    annotation = nil
    whitespaceFound = false
    while not str.empty?
        case str
        when /\A\#([^\n]*)/
            # comment, ignore
        when /\A\/\/\ ?([^\n]*)/
            # annotation: buffered until the newline so it can be classified
            # as :global (column 0) or :local (preceded by whitespace).
            annotation = $1
            annotationType = whitespaceFound ? :local : :global
        when /\A\n/
            # We've found a '\n'. Emit the last comment recorded if appropriate:
            # We need to parse annotations regardless of whether the backend does
            # anything with them or not. This is because the C++ backend may make
            # use of this for its cloopDo debugging utility even if
            # enableInstrAnnotations is not enabled.
            if annotation
                result << Annotation.new(CodeOrigin.new(fileName, lineNumber),
                                         annotationType, annotation)
                annotation = nil
            end
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
            lineNumber += 1
        when /\A[a-zA-Z]([a-zA-Z0-9_.]*)/
            # identifier or keyword
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
        when /\A\.([a-zA-Z0-9_]*)/
            # local label (leading dot)
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
        when /\A_([a-zA-Z0-9_]*)/
            # global label (leading underscore)
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
        when /\A([ \t]+)/
            # whitespace, ignore
            whitespaceFound = true
            str = $~.post_match
            next
        when /\A0x([0-9a-fA-F]+)/
            # hex literal, canonicalized to decimal text
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&.hex.to_s)
        when /\A0([0-7]+)/
            # octal literal, canonicalized to decimal text
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&.oct.to_s)
        when /\A([0-9]+)/
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
        when /\A::/
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
        when /\A[:,\(\)\[\]=\+\-~\|&^*]/
            # single-character punctuation/operators
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
        when /\A".*"/
            # string literal. NOTE(review): the greedy ".*" would fuse two
            # strings appearing on one line into one token — confirm inputs
            # never do that.
            result << Token.new(CodeOrigin.new(fileName, lineNumber), $&)
        else
            raise "Lexer error at #{CodeOrigin.new(fileName, lineNumber).to_s}, unexpected sequence #{str[0..20].inspect}"
        end
        whitespaceFound = false
        str = $~.post_match
    end
    result
end
+
+#
+# Token identification.
+#
+
# True iff the token names a GPR or FPR (REGISTER_PATTERN, registers.rb).
def isRegister(token)
    token =~ REGISTER_PATTERN
end

# True iff the token is an opcode in INSTRUCTION_SET (instructions.rb).
def isInstruction(token)
    INSTRUCTION_SET.member? token.string
end

# Keywords are the reserved control words plus register names and opcodes;
# none of these may be used as identifiers.
def isKeyword(token)
    token =~ /\A((true)|(false)|(if)|(then)|(else)|(elsif)|(end)|(and)|(or)|(not)|(global)|(macro)|(const)|(sizeof)|(error)|(include))\Z/ or
        token =~ REGISTER_PATTERN or
        isInstruction(token)
end

# Identifier: a letter followed by alphanumerics/underscores/dots, excluding
# keywords.
def isIdentifier(token)
    token =~ /\A[a-zA-Z]([a-zA-Z0-9_.]*)\Z/ and not isKeyword(token)
end

# Global label: leading underscore.
def isLabel(token)
    token =~ /\A_([a-zA-Z0-9_]*)\Z/
end

# Local label: leading dot.
def isLocalLabel(token)
    token =~ /\A\.([a-zA-Z0-9_]*)\Z/
end

def isVariable(token)
    isIdentifier(token) or isRegister(token)
end

# Numbers were canonicalized to decimal text by the lexer, so a leading
# digit suffices.
def isInteger(token)
    token =~ /\A[0-9]/
end

def isString(token)
    token =~ /\A".*"/
end
+
+#
+# The parser. Takes an array of tokens and returns an AST. Methods
+# other than parse(tokens) are not for public consumption.
+#
+
+class Parser
    # data     - the source text being parsed.
    # fileName - used for CodeOrigins in diagnostics.
    # Lexes the whole input up front; @idx is the cursor into @tokens and
    # @annotation buffers a pending instruction annotation.
    def initialize(data, fileName)
        @tokens = lex(data, fileName)
        @idx = 0
        @annotation = nil
    end
+
+ def parseError(*comment)
+ if @tokens[@idx]
+ @tokens[@idx].parseError(*comment)
+ else
+ if comment.empty?
+ raise "Parse error at end of file"
+ else
+ raise "Parse error at end of file: #{comment[0]}"
+ end
+ end
+ end
+
+ def consume(regexp)
+ if regexp
+ parseError unless @tokens[@idx] =~ regexp
+ else
+ parseError unless @idx == @tokens.length
+ end
+ @idx += 1
+ end
+
+ def skipNewLine
+ while @tokens[@idx] == "\n"
+ @idx += 1
+ end
+ end
+
    # Predicate atom: "not" <atom>, "(" <predicate> ")", the literals
    # true/false, or the name of a build setting.
    def parsePredicateAtom
        if @tokens[@idx] == "not"
            codeOrigin = @tokens[@idx].codeOrigin
            @idx += 1
            Not.new(codeOrigin, parsePredicateAtom)
        elsif @tokens[@idx] == "("
            @idx += 1
            skipNewLine
            result = parsePredicate
            parseError unless @tokens[@idx] == ")"
            @idx += 1
            result
        elsif @tokens[@idx] == "true"
            result = True.instance
            @idx += 1
            result
        elsif @tokens[@idx] == "false"
            result = False.instance
            @idx += 1
            result
        elsif isIdentifier @tokens[@idx]
            result = Setting.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
            @idx += 1
            result
        else
            parseError
        end
    end

    # "and" binds tighter than "or".
    def parsePredicateAnd
        result = parsePredicateAtom
        while @tokens[@idx] == "and"
            codeOrigin = @tokens[@idx].codeOrigin
            @idx += 1
            skipNewLine
            right = parsePredicateAtom
            result = And.new(codeOrigin, result, right)
        end
        result
    end

    # Entry point for compile-time predicates (used by "if" directives).
    def parsePredicate
        # some examples of precedence:
        # not a and b -> (not a) and b
        # a and b or c -> (a and b) or c
        # a or b and c -> a or (b and c)
        
        result = parsePredicateAnd
        while @tokens[@idx] == "or"
            codeOrigin = @tokens[@idx].codeOrigin
            @idx += 1
            skipNewLine
            right = parsePredicateAnd
            result = Or.new(codeOrigin, result, right)
        end
        result
    end
+
    # Parses a register, FP register, or identifier at the cursor and
    # advances past it.
    def parseVariable
        if isRegister(@tokens[@idx])
            if @tokens[@idx] =~ FPR_PATTERN
                result = FPRegisterID.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
            else
                result = RegisterID.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
            end
        elsif isIdentifier(@tokens[@idx])
            result = Variable.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
        else
            parseError
        end
        @idx += 1
        result
    end

    # Parses a bracketed address form; +offset+ is the displacement
    # expression parsed before the "[".
    def parseAddress(offset)
        parseError unless @tokens[@idx] == "["
        codeOrigin = @tokens[@idx].codeOrigin
        
        # Four possibilities:
        # [] -> AbsoluteAddress
        # [a] -> Address
        # [a,b] -> BaseIndex with scale = 1
        # [a,b,c] -> BaseIndex
        
        @idx += 1
        if @tokens[@idx] == "]"
            @idx += 1
            return AbsoluteAddress.new(codeOrigin, offset)
        end
        a = parseVariable
        if @tokens[@idx] == "]"
            result = Address.new(codeOrigin, a, offset)
        else
            parseError unless @tokens[@idx] == ","
            @idx += 1
            b = parseVariable
            if @tokens[@idx] == "]"
                result = BaseIndex.new(codeOrigin, a, b, 1, offset)
            else
                parseError unless @tokens[@idx] == ","
                @idx += 1
                # The scale must be a literal 1, 2, 4 or 8.
                parseError unless ["1", "2", "4", "8"].member? @tokens[@idx].string
                c = @tokens[@idx].string.to_i
                @idx += 1
                parseError unless @tokens[@idx] == "]"
                result = BaseIndex.new(codeOrigin, a, b, c, offset)
            end
        end
        @idx += 1
        result
    end

    # Parses a "::"-separated identifier chain. Returns [codeOrigin, names].
    def parseColonColon
        skipNewLine
        codeOrigin = @tokens[@idx].codeOrigin
        parseError unless isIdentifier @tokens[@idx]
        names = [@tokens[@idx].string]
        @idx += 1
        while @tokens[@idx] == "::"
            @idx += 1
            parseError unless isIdentifier @tokens[@idx]
            names << @tokens[@idx].string
            @idx += 1
        end
        raise if names.empty?
        [codeOrigin, names]
    end
+
    # Primary expressions: unary - and ~, parenthesized expressions, integer
    # and string literals, identifier chains (a::b -> StructOffset, a plain
    # name -> Variable), registers, sizeof, and label references.
    def parseExpressionAtom
        skipNewLine
        if @tokens[@idx] == "-"
            @idx += 1
            NegImmediate.new(@tokens[@idx - 1].codeOrigin, parseExpressionAtom)
        elsif @tokens[@idx] == "~"
            @idx += 1
            BitnotImmediate.new(@tokens[@idx - 1].codeOrigin, parseExpressionAtom)
        elsif @tokens[@idx] == "("
            @idx += 1
            result = parseExpression
            parseError unless @tokens[@idx] == ")"
            @idx += 1
            result
        elsif isInteger @tokens[@idx]
            result = Immediate.new(@tokens[@idx].codeOrigin, @tokens[@idx].string.to_i)
            @idx += 1
            result
        elsif isString @tokens[@idx]
            result = StringLiteral.new(@tokens[@idx].codeOrigin, @tokens[@idx].string)
            @idx += 1
            result
        elsif isIdentifier @tokens[@idx]
            codeOrigin, names = parseColonColon
            if names.size > 1
                StructOffset.forField(codeOrigin, names[0..-2].join('::'), names[-1])
            else
                Variable.forName(codeOrigin, names[0])
            end
        elsif isRegister @tokens[@idx]
            parseVariable
        elsif @tokens[@idx] == "sizeof"
            @idx += 1
            codeOrigin, names = parseColonColon
            Sizeof.forName(codeOrigin, names.join('::'))
        elsif isLabel @tokens[@idx]
            result = LabelReference.new(@tokens[@idx].codeOrigin, Label.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
            @idx += 1
            result
        elsif isLocalLabel @tokens[@idx]
            result = LocalLabelReference.new(@tokens[@idx].codeOrigin, LocalLabel.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
            @idx += 1
            result
        else
            parseError
        end
    end

    # "*" (the tightest binary operator). The inner if/else mirrors the
    # other precedence levels below; its else branch is unreachable.
    def parseExpressionMul
        skipNewLine
        result = parseExpressionAtom
        while @tokens[@idx] == "*"
            if @tokens[@idx] == "*"
                @idx += 1
                result = MulImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionAtom)
            else
                raise
            end
        end
        result
    end

    # True if the current token could begin an expression.
    def couldBeExpression
        @tokens[@idx] == "-" or @tokens[@idx] == "~" or @tokens[@idx] == "sizeof" or isInteger(@tokens[@idx]) or isString(@tokens[@idx]) or isVariable(@tokens[@idx]) or @tokens[@idx] == "("
    end

    # "+" and "-" (bind looser than "*").
    def parseExpressionAdd
        skipNewLine
        result = parseExpressionMul
        while @tokens[@idx] == "+" or @tokens[@idx] == "-"
            if @tokens[@idx] == "+"
                @idx += 1
                result = AddImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionMul)
            elsif @tokens[@idx] == "-"
                @idx += 1
                result = SubImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionMul)
            else
                raise
            end
        end
        result
    end

    # "&" (binds looser than "+"/"-").
    def parseExpressionAnd
        skipNewLine
        result = parseExpressionAdd
        while @tokens[@idx] == "&"
            @idx += 1
            result = AndImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionAdd)
        end
        result
    end

    # Entry point: "|" and "^" bind loosest of all binary operators.
    def parseExpression
        skipNewLine
        result = parseExpressionAnd
        while @tokens[@idx] == "|" or @tokens[@idx] == "^"
            if @tokens[@idx] == "|"
                @idx += 1
                result = OrImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionAnd)
            elsif @tokens[@idx] == "^"
                @idx += 1
                result = XorImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionAnd)
            else
                raise
            end
        end
        result
    end
+
    # Parses one operand: an expression (optionally used as the displacement
    # of a following bracketed address), a bare bracketed address (implicit
    # zero displacement), or a label reference. +comment+ gives context for
    # error messages.
    def parseOperand(comment)
        skipNewLine
        if couldBeExpression
            expr = parseExpression
            if @tokens[@idx] == "["
                parseAddress(expr)
            else
                expr
            end
        elsif @tokens[@idx] == "["
            parseAddress(Immediate.new(@tokens[@idx].codeOrigin, 0))
        elsif isLabel @tokens[@idx]
            result = LabelReference.new(@tokens[@idx].codeOrigin, Label.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
            @idx += 1
            result
        elsif isLocalLabel @tokens[@idx]
            result = LocalLabelReference.new(@tokens[@idx].codeOrigin, LocalLabel.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
            @idx += 1
            result
        else
            parseError(comment)
        end
    end

    # Parses a macro's parenthesized formal-parameter list, e.g. "(a, b)".
    # Returns the (possibly empty) list of Variable nodes.
    def parseMacroVariables
        skipNewLine
        consume(/\A\(\Z/)
        variables = []
        loop {
            skipNewLine
            if @tokens[@idx] == ")"
                @idx += 1
                break
            elsif isIdentifier(@tokens[@idx])
                variables << Variable.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
                @idx += 1
                skipNewLine
                if @tokens[@idx] == ")"
                    @idx += 1
                    break
                elsif @tokens[@idx] == ","
                    @idx += 1
                else
                    parseError
                end
            else
                parseError
            end
        }
        variables
    end
+
    # Parses a sequence of statements until the terminator regexp +final+
    # matches the current token (or until end-of-file when +final+ is nil).
    # Handles annotations, const declarations, error, if/elsif/else, macro
    # definitions, global label declarations, instructions, macro calls,
    # labels, and includes. +comment+ describes the enclosing construct for
    # error messages. Returns a Sequence node.
    def parseSequence(final, comment)
        firstCodeOrigin = @tokens[@idx].codeOrigin
        list = []
        loop {
            if (@idx == @tokens.length and not final) or (final and @tokens[@idx] =~ final)
                break
            elsif @tokens[@idx].is_a? Annotation
                # This is the only place where we can encounter a global
                # annotation, and hence need to be able to distinguish between
                # them.
                # globalAnnotations are the ones that start from column 0. All
                # others are considered localAnnotations. The only reason to
                # distinguish between them is so that we can format the output
                # nicely as one would expect.

                codeOrigin = @tokens[@idx].codeOrigin
                annotationOpcode = (@tokens[@idx].type == :global) ? "globalAnnotation" : "localAnnotation"
                list << Instruction.new(codeOrigin, annotationOpcode, [], @tokens[@idx].string)
                @annotation = nil
                @idx += 2 # Consume the newline as well.
            elsif @tokens[@idx] == "\n"
                # ignore
                @idx += 1
            elsif @tokens[@idx] == "const"
                # const <name> = <operand>
                @idx += 1
                parseError unless isVariable @tokens[@idx]
                variable = Variable.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
                @idx += 1
                parseError unless @tokens[@idx] == "="
                @idx += 1
                value = parseOperand("while inside of const #{variable.name}")
                list << ConstDecl.new(@tokens[@idx].codeOrigin, variable, value)
            elsif @tokens[@idx] == "error"
                list << Error.new(@tokens[@idx].codeOrigin)
                @idx += 1
            elsif @tokens[@idx] == "if"
                # Compile-time conditional: if <predicate> [then] ... [elsif
                # ...]* [else ...] end. elsif chains are built by threading
                # each new IfThenElse through the previous one's elseCase.
                codeOrigin = @tokens[@idx].codeOrigin
                @idx += 1
                skipNewLine
                predicate = parsePredicate
                consume(/\A((then)|(\n))\Z/)
                skipNewLine
                ifThenElse = IfThenElse.new(codeOrigin, predicate, parseSequence(/\A((else)|(end)|(elsif))\Z/, "while inside of \"if #{predicate.dump}\""))
                list << ifThenElse
                while @tokens[@idx] == "elsif"
                    codeOrigin = @tokens[@idx].codeOrigin
                    @idx += 1
                    skipNewLine
                    predicate = parsePredicate
                    consume(/\A((then)|(\n))\Z/)
                    skipNewLine
                    elseCase = IfThenElse.new(codeOrigin, predicate, parseSequence(/\A((else)|(end)|(elsif))\Z/, "while inside of \"if #{predicate.dump}\""))
                    ifThenElse.elseCase = elseCase
                    ifThenElse = elseCase
                end
                if @tokens[@idx] == "else"
                    @idx += 1
                    ifThenElse.elseCase = parseSequence(/\Aend\Z/, "while inside of else case for \"if #{predicate.dump}\"")
                    @idx += 1
                else
                    parseError unless @tokens[@idx] == "end"
                    @idx += 1
                end
            elsif @tokens[@idx] == "macro"
                # macro <name>(<formals>) ... end
                codeOrigin = @tokens[@idx].codeOrigin
                @idx += 1
                skipNewLine
                parseError unless isIdentifier(@tokens[@idx])
                name = @tokens[@idx].string
                @idx += 1
                variables = parseMacroVariables
                body = parseSequence(/\Aend\Z/, "while inside of macro #{name}")
                @idx += 1
                list << Macro.new(codeOrigin, name, variables, body)
            elsif @tokens[@idx] == "global"
                # global _label: marks the label as externally visible.
                codeOrigin = @tokens[@idx].codeOrigin
                @idx += 1
                skipNewLine
                parseError unless isLabel(@tokens[@idx])
                name = @tokens[@idx].string
                @idx += 1
                Label.setAsGlobal(codeOrigin, name)
            elsif isInstruction @tokens[@idx]
                codeOrigin = @tokens[@idx].codeOrigin
                name = @tokens[@idx].string
                @idx += 1
                if (not final and @idx == @tokens.size) or (final and @tokens[@idx] =~ final)
                    # Zero operand instruction, and it's the last one.
                    list << Instruction.new(codeOrigin, name, [], @annotation)
                    @annotation = nil
                    break
                elsif @tokens[@idx].is_a? Annotation
                    list << Instruction.new(codeOrigin, name, [], @tokens[@idx].string)
                    @annotation = nil
                    @idx += 2 # Consume the newline as well.
                elsif @tokens[@idx] == "\n"
                    # Zero operand instruction.
                    list << Instruction.new(codeOrigin, name, [], @annotation)
                    @annotation = nil
                    @idx += 1
                else
                    # It's definitely an instruction, and it has at least one operand.
                    operands = []
                    endOfSequence = false
                    loop {
                        operands << parseOperand("while inside of instruction #{name}")
                        if (not final and @idx == @tokens.size) or (final and @tokens[@idx] =~ final)
                            # The end of the instruction and of the sequence.
                            endOfSequence = true
                            break
                        elsif @tokens[@idx] == ","
                            # Has another operand.
                            @idx += 1
                        elsif @tokens[@idx].is_a? Annotation
                            @annotation = @tokens[@idx].string
                            @idx += 2 # Consume the newline as well.
                            break
                        elsif @tokens[@idx] == "\n"
                            # The end of the instruction.
                            @idx += 1
                            break
                        else
                            parseError("Expected a comma, newline, or #{final} after #{operands.last.dump}")
                        end
                    }
                    list << Instruction.new(codeOrigin, name, operands, @annotation)
                    @annotation = nil
                    if endOfSequence
                        break
                    end
                end

            # Check for potential macro invocation:
            elsif isIdentifier @tokens[@idx]
                codeOrigin = @tokens[@idx].codeOrigin
                name = @tokens[@idx].string
                @idx += 1
                if @tokens[@idx] == "("
                    # Macro invocation.
                    @idx += 1
                    operands = []
                    skipNewLine
                    if @tokens[@idx] == ")"
                        @idx += 1
                    else
                        loop {
                            skipNewLine
                            if @tokens[@idx] == "macro"
                                # It's a macro lambda!
                                codeOriginInner = @tokens[@idx].codeOrigin
                                @idx += 1
                                variables = parseMacroVariables
                                body = parseSequence(/\Aend\Z/, "while inside of anonymous macro passed as argument to #{name}")
                                @idx += 1
                                operands << Macro.new(codeOriginInner, nil, variables, body)
                            else
                                operands << parseOperand("while inside of macro call to #{name}")
                            end
                            skipNewLine
                            if @tokens[@idx] == ")"
                                @idx += 1
                                break
                            elsif @tokens[@idx] == ","
                                @idx += 1
                            else
                                parseError "Unexpected #{@tokens[@idx].string.inspect} while parsing invocation of macro #{name}"
                            end
                        }
                    end
                    # Check if there's a trailing annotation after the macro invoke:
                    if @tokens[@idx].is_a? Annotation
                        @annotation = @tokens[@idx].string
                        @idx += 2 # Consume the newline as well.
                    end
                    list << MacroCall.new(codeOrigin, name, operands, @annotation)
                    @annotation = nil
                else
                    parseError "Expected \"(\" after #{name}"
                end
            elsif isLabel @tokens[@idx] or isLocalLabel @tokens[@idx]
                codeOrigin = @tokens[@idx].codeOrigin
                name = @tokens[@idx].string
                @idx += 1
                parseError unless @tokens[@idx] == ":"
                # It's a label.
                if isLabel name
                    list << Label.forName(codeOrigin, name, true)
                else
                    list << LocalLabel.forName(codeOrigin, name)
                end
                @idx += 1
            elsif @tokens[@idx] == "include"
                # include <module>: recursively parse the included file and
                # splice its Sequence into this one.
                @idx += 1
                parseError unless isIdentifier(@tokens[@idx])
                moduleName = @tokens[@idx].string
                fileName = IncludeFile.new(moduleName, @tokens[@idx].codeOrigin.fileName.dirname).fileName
                @idx += 1
                $stderr.puts "offlineasm: Including file #{fileName}"
                list << parse(fileName)
            else
                parseError "Expecting terminal #{final} #{comment}"
            end
        }
        Sequence.new(firstCodeOrigin, list)
    end
+
    # Cheap scan used for hashing (see parseHash): collects the name of this
    # file plus every file it directly includes, skipping all other tokens
    # and building no AST.
    def parseIncludes(final, comment)
        firstCodeOrigin = @tokens[@idx].codeOrigin
        fileList = []
        fileList << @tokens[@idx].codeOrigin.fileName
        loop {
            if (@idx == @tokens.length and not final) or (final and @tokens[@idx] =~ final)
                break
            elsif @tokens[@idx] == "include"
                @idx += 1
                parseError unless isIdentifier(@tokens[@idx])
                moduleName = @tokens[@idx].string
                fileName = IncludeFile.new(moduleName, @tokens[@idx].codeOrigin.fileName.dirname).fileName
                @idx += 1

                fileList << fileName
            else
                # Any other token is ignored.
                @idx += 1
            end
        }

        return fileList
    end
+end
+
# Parses a string of offlineasm source into a Sequence AST.
def parseData(data, fileName)
    parser = Parser.new(data, fileName)
    parser.parseSequence(nil, "")
end

# Parses the named file into a Sequence AST.
def parse(fileName)
    parseData(IO::read(fileName), fileName)
end

# Computes the combined hash of a file and the files it directly includes
# (via fileListHash, self_hash.rb), used to detect stale generated output.
def parseHash(fileName)
    parser = Parser.new(IO::read(fileName), fileName)
    fileList = parser.parseIncludes(nil, "")
    fileListHash(fileList)
end
+
diff --git a/Source/JavaScriptCore/offlineasm/registers.rb b/Source/JavaScriptCore/offlineasm/registers.rb
new file mode 100644
index 000000000..168667e0c
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/registers.rb
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+
# Symbolic general-purpose register names understood by offlineasm sources.
GPRS = %w[t0 t1 t2 t3 t4 t5 t6 t7 t8 t9 cfr a0 a1 a2 a3 r0 r1 sp lr pc] +
       # 64-bit only registers: csr1 is the tag type number register,
       # csr2 is the tag mask register.
       %w[csr1 csr2]

# Symbolic floating-point register names.
FPRS = %w[ft0 ft1 ft2 ft3 ft4 ft5 fa0 fa1 fa2 fa3 fr]

REGISTERS = GPRS + FPRS

# Anchored alternations matching exactly one symbolic register name.
GPR_PATTERN = Regexp.new("\\A((#{GPRS.join(')|(')}))\\Z")
FPR_PATTERN = Regexp.new("\\A((#{FPRS.join(')|(')}))\\Z")

REGISTER_PATTERN = Regexp.new("\\A((#{REGISTERS.join(')|(')}))\\Z")
diff --git a/Source/JavaScriptCore/offlineasm/risc.rb b/Source/JavaScriptCore/offlineasm/risc.rb
new file mode 100644
index 000000000..3fbc07d0b
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/risc.rb
@@ -0,0 +1,730 @@
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'config'
+require 'ast'
+require 'opt'
+
+# This file contains utilities that are useful for implementing a backend
+# for RISC-like processors (ARM, MIPS, etc).
+
+#
+# Lowering of simple branch ops. For example:
+#
+# baddiz foo, bar, baz
+#
+# will become:
+#
+# addi foo, bar
+# bz baz
+#
+
# Lowers fused arithmetic-and-branch ops into an arithmetic op that sets
# flags followed by a flag branch, e.g. "baddiz a, b, l" becomes
# "addis a, b" + "bz l".
def riscLowerSimpleBranchOps(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            annotation = node.annotation
            case node.opcode
            when /^b(addi|subi|ori|addp)/
                op = $1
                branch = "b" + $~.post_match

                # Use the flag-setting form of the arithmetic op.
                case op
                when "addi"
                    op = "addis"
                when "addp"
                    op = "addps"
                when "subi"
                    op = "subis"
                when "ori"
                    op = "oris"
                end

                newList << Instruction.new(node.codeOrigin, op, node.operands[0..-2], annotation)
                newList << Instruction.new(node.codeOrigin, branch, [node.operands[-1]])
            when "bmulis", "bmulz", "bmulnz"
                # FIX: this arm previously read $~.post_match, but $~ is nil
                # here — the preceding regexp `when` fails for bmul* opcodes
                # and a failed match sets $~ to nil, so this crashed with
                # NoMethodError. Derive the condition suffix ("s"/"z"/"nz")
                # from the opcode directly instead.
                condition = node.opcode.sub(/\Abmuli?/, "")
                newList << Instruction.new(node.codeOrigin, "muli", node.operands[0..-2], annotation)
                newList << Instruction.new(node.codeOrigin, "bti" + condition, [node.operands[-2], node.operands[-1]])
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowing of complex branch ops. For example:
+#
+# bmulio foo, bar, baz
+#
+# becomes:
+#
+# smulli foo, bar, bar, tmp1
+# rshifti bar, 31, tmp2
+# bineq tmp1, tmp2, baz
+#
+
# Lowers bmulio (branch on multiply overflow, 32-bit) into an explicit
# overflow test: smulli leaves the high 32 product bits in a temporary, and
# the branch fires when those differ from the sign-extension of the low half.
def riscLowerHardBranchOps(list)
    newList = []
    list.each do |node|
        unless node.is_a?(Instruction) && node.opcode == "bmulio"
            newList << node
            next
        end
        highBits = Tmp.new(node.codeOrigin, :gpr)
        signBits = Tmp.new(node.codeOrigin, :gpr)
        newList << Instruction.new(node.codeOrigin, "smulli", [node.operands[0], node.operands[1], node.operands[1], highBits], node.annotation)
        newList << Instruction.new(node.codeOrigin, "rshifti", [node.operands[-2], Immediate.new(node.codeOrigin, 31), signBits])
        newList << Instruction.new(node.codeOrigin, "bineq", [highBits, signBits, node.operands[-1]])
    end
    newList
end
+
+#
+# Lowering of shift ops. For example:
+#
+# lshifti foo, bar
+#
+# will become:
+#
+# andi foo, 31, tmp
+# lshifti tmp, bar
+#
+
# Masks a non-immediate shift amount to the 0..31 range, appending the
# masking instruction to list and returning the temporary holding the result.
# Immediates are returned untouched.
def riscSanitizeShift(operand, list)
    unless operand.immediate?
        masked = Tmp.new(operand.codeOrigin, :gpr)
        list << Instruction.new(operand.codeOrigin, "andi", [operand, Immediate.new(operand.codeOrigin, 31), masked])
        operand = masked
    end
    operand
end
+
# Lowers shift instructions so that the shift amount is always a valid
# operand (an immediate, or a register masked to 0..31 via riscSanitizeShift).
#
# FIX: the operand-count check used to run *after* an instruction had
# already been constructed (reading node.operands[2], possibly nil) and
# appended; validate the count up front instead.
def riscLowerShiftOps(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "lshifti", "rshifti", "urshifti", "lshiftp", "rshiftp", "urshiftp"
                case node.operands.size
                when 2
                    # Two-operand form: shift amount, destination.
                    newList << Instruction.new(node.codeOrigin, node.opcode, [riscSanitizeShift(node.operands[0], newList), node.operands[1]], node.annotation)
                when 3
                    # Three-operand form: source, shift amount, destination.
                    newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], riscSanitizeShift(node.operands[1], newList), node.operands[2]], node.annotation)
                else
                    raise "Wrong number of operands for shift at #{node.codeOriginString}"
                end
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of malformed addresses. For example:
+#
+# loadp 10000[foo], bar
+#
+# will become:
+#
+# move 10000, tmp
+# addp foo, tmp
+# loadp 0[tmp], bar
+#
+# Note that you use this lowering phase by passing it a block that returns true
+# if you don't want to lower the address, or false if you do. For example to get
+# the effect of the example above, the block would have to say something like:
+#
+# riscLowerMalformedAddresses(thingy) {
+# | node, address |
+# if address.is_a? Address
+# address.offset.value > 1000
+# else
+# true # don't lower anything else, in this example
+# end
+# }
+#
+# See arm.rb for a different example, in which we lower all BaseIndex addresses
+# that have non-zero offset, all Address addresses that have large offsets, and
+# all other addresses (like AbsoluteAddress).
+#
+
class Node
    # Default lowering: this node is not itself an address, so just recurse
    # into the children and rebuild the node with any lowered replacements.
    def riscLowerMalformedAddressesRecurse(list, topLevelNode, &block)
        mapChildren {
            | subNode |
            subNode.riscLowerMalformedAddressesRecurse(list, topLevelNode, &block)
        }
    end
end
+
class Address
    # If the backend's block approves this address, keep it. Otherwise rewrite
    # "offset[base]" as "move offset, tmp; addp base, tmp" and return the
    # well-formed "0[tmp]" address.
    def riscLowerMalformedAddressesRecurse(list, node, &block)
        return self if yield node, self

        tmp = Tmp.new(codeOrigin, :gpr)
        list << Instruction.new(codeOrigin, "move", [offset, tmp])
        list << Instruction.new(codeOrigin, "addp", [base, tmp])
        Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
    end
end
+
class BaseIndex
    # If disapproved, compute base + index * scale into a temporary with leap,
    # then recurse so the remaining "offset[tmp]" form gets vetted (and, if
    # still malformed, lowered) as a plain Address.
    def riscLowerMalformedAddressesRecurse(list, node, &block)
        return self if yield node, self

        tmp = Tmp.new(codeOrigin, :gpr)
        list << Instruction.new(codeOrigin, "leap", [BaseIndex.new(codeOrigin, base, index, scale, Immediate.new(codeOrigin, 0)), tmp])
        Address.new(codeOrigin, tmp, offset).riscLowerMalformedAddressesRecurse(list, node, &block)
    end
end
+
class AbsoluteAddress
    # If disapproved, materialize the absolute address into a temporary and
    # return a "0[tmp]" address in its place.
    def riscLowerMalformedAddressesRecurse(list, node, &block)
        return self if yield node, self

        tmp = Tmp.new(codeOrigin, :gpr)
        list << Instruction.new(codeOrigin, "move", [address, tmp])
        Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
    end
end
+
# Walks every node in list, lowering any address operand the given block
# rejects; lowering instructions are inserted before the node that uses them.
def riscLowerMalformedAddresses(list, &block)
    lowered = []
    list.each do |node|
        lowered << node.riscLowerMalformedAddressesRecurse(lowered, node, &block)
    end
    lowered
end
+
+#
+# Lowering of malformed addresses in double loads and stores. For example:
+#
+# loadd [foo, bar, 8], baz
+#
+# becomes:
+#
+# leap [foo, bar, 8], tmp
+# loadd [tmp], baz
+#
+
class Node
    # Most operands are already usable by double loads/stores; return unchanged.
    def riscDoubleAddress(list)
        self
    end
end
+
class BaseIndex
    # Double loads/stores cannot address base-index forms directly: compute the
    # effective address with leap and return a plain "0[tmp]" Address.
    def riscDoubleAddress(list)
        tmp = Tmp.new(codeOrigin, :gpr)
        list << Instruction.new(codeOrigin, "leap", [self, tmp])
        Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
    end
end
+
# Rewrites the address operand of double loads/stores via riscDoubleAddress,
# which turns base-index forms into a leap followed by a simple address.
def riscLowerMalformedAddressesDouble(list)
    newList = []
    list.each do |node|
        unless node.is_a? Instruction
            newList << node
            next
        end
        case node.opcode
        when "loadd"
            source, destination = node.operands
            newList << Instruction.new(node.codeOrigin, "loadd", [source.riscDoubleAddress(newList), destination], node.annotation)
        when "stored"
            source, destination = node.operands
            newList << Instruction.new(node.codeOrigin, "stored", [source, destination.riscDoubleAddress(newList)], node.annotation)
        else
            newList << node
        end
    end
    newList
end
+
+#
+# Lowering of misplaced immediates for opcodes in opcodeList. For example, if storei is in opcodeList:
+#
+# storei 0, [foo]
+#
+# will become:
+#
+# move 0, tmp
+# storei tmp, [foo]
+#
+
# For each instruction whose opcode is in opcodeList, moves every immediate
# operand into a fresh temporary first (emitting a "move"), since those
# opcodes cannot encode immediates directly.
def riscLowerMisplacedImmediates(list, opcodeList)
    newList = []
    list.each do |node|
        if node.is_a?(Instruction) && opcodeList.include?(node.opcode)
            rewritten = node.operands.map do |operand|
                if operand.is_a? Immediate
                    tmp = Tmp.new(operand.codeOrigin, :gpr)
                    newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp])
                    tmp
                else
                    operand
                end
            end
            newList << Instruction.new(node.codeOrigin, node.opcode, rewritten, node.annotation)
        else
            newList << node
        end
    end
    newList
end
+
+#
+# Lowering of malformed immediates except when used in a "move" instruction.
+# For example:
+#
+# addp 642641, foo
+#
+# will become:
+#
+# move 642641, tmp
+# addp tmp, foo
+#
+
class Node
    # Default: recursively lower malformed immediates in all children.
    def riscLowerMalformedImmediatesRecurse(list, validImmediates)
        mapChildren {
            | node |
            node.riscLowerMalformedImmediatesRecurse(list, validImmediates)
        }
    end
end
+
class Address
    # Immediates inside an Address are offsets, not ALU operands; leave as-is.
    def riscLowerMalformedImmediatesRecurse(list, validImmediates)
        self
    end
end

class BaseIndex
    # Likewise: the immediates here encode scale/offset and need no lowering.
    def riscLowerMalformedImmediatesRecurse(list, validImmediates)
        self
    end
end

class AbsoluteAddress
    # An absolute address is not an ALU immediate; leave as-is.
    def riscLowerMalformedImmediatesRecurse(list, validImmediates)
        self
    end
end
+
class Immediate
    # If the value cannot be encoded by the target's instructions, materialize
    # it into a temporary GPR via "move" and substitute the temporary.
    def riscLowerMalformedImmediatesRecurse(list, validImmediates)
        unless validImmediates.include? value
            tmp = Tmp.new(codeOrigin, :gpr)
            list << Instruction.new(codeOrigin, "move", [self, tmp])
            tmp
        else
            self
        end
    end
end
+
# Lowers immediates that the target cannot encode. "move" can materialize
# anything and is left alone; add/sub with an unencodable immediate whose
# negation is encodable get flipped to the opposite op; multiplies always
# materialize immediate operands; everything else recursively rewrites
# unencodable immediates into temporaries.
#
# FIXES: (1) the muli/mulp/mulq path wrote Tmp.new(codeOrigin, :gpr), but
# codeOrigin is undefined at this scope (NameError at runtime); it must be
# node.codeOrigin. (2) Removed the stray no-op expression
# "node.operands.size == 2" in the add/sub path, whose result was discarded.
def riscLowerMalformedImmediates(list, validImmediates)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            annotation = node.annotation
            case node.opcode
            when "move"
                # move can materialize any immediate; leave it alone.
                newList << node
            when "addi", "addp", "addq", "addis", "subi", "subp", "subq", "subis"
                if node.operands[0].is_a? Immediate and
                        (not validImmediates.include? node.operands[0].value) and
                        validImmediates.include? -node.operands[0].value
                    # The immediate is invalid but its negation is valid:
                    # flip add<->sub and negate the immediate.
                    if node.opcode =~ /add/
                        newOpcode = "sub" + $~.post_match
                    else
                        newOpcode = "add" + $~.post_match
                    end
                    newList << Instruction.new(node.codeOrigin, newOpcode,
                                               [Immediate.new(node.codeOrigin, -node.operands[0].value)] + node.operands[1..-1],
                                               annotation)
                else
                    newList << node.riscLowerMalformedImmediatesRecurse(newList, validImmediates)
                end
            when "muli", "mulp", "mulq"
                if node.operands[0].is_a? Immediate
                    # Multiplies cannot take immediates; materialize first.
                    tmp = Tmp.new(node.codeOrigin, :gpr)
                    newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp], annotation)
                    newList << Instruction.new(node.codeOrigin, node.opcode, [tmp] + node.operands[1..-1])
                else
                    newList << node.riscLowerMalformedImmediatesRecurse(newList, validImmediates)
                end
            else
                newList << node.riscLowerMalformedImmediatesRecurse(newList, validImmediates)
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of misplaced addresses. For example:
+#
+# addi foo, [bar]
+#
+# will become:
+#
+# loadi [bar], tmp
+# addi foo, tmp
+# storei tmp, [bar]
+#
+# Another example:
+#
+# addi [foo], bar
+#
+# will become:
+#
+# loadi [foo], tmp
+# addi tmp, bar
+#
+
# If operand is a memory address, loads it into a fresh temporary (appending
# the load to preList) and, when needStore is set, arranges for the temporary
# to be written back (appending the store to postList). Registers and
# immediates pass through untouched. suffix selects the access width; "d"
# means double, which uses an FPR temporary.
def riscAsRegister(preList, postList, operand, suffix, needStore)
    return operand unless operand.address?

    registerKind = (suffix == "d") ? :fpr : :gpr
    tmp = Tmp.new(operand.codeOrigin, registerKind)
    preList << Instruction.new(operand.codeOrigin, "load" + suffix, [operand, tmp])
    postList << Instruction.new(operand.codeOrigin, "store" + suffix, [tmp, operand]) if needStore
    tmp
end

# Applies riscAsRegister to every operand; only the last operand (the
# destination by offlineasm convention) gets a write-back store.
def riscAsRegisters(preList, postList, operands, suffix)
    lastIndex = operands.size - 1
    operands.each_with_index.map {
        | operand, index |
        riscAsRegister(preList, postList, operand, suffix, index == lastIndex)
    }
end
+
# Rewrites instructions whose operands are memory addresses into
# load/operate/store sequences, choosing the access width ("i", "p", "q",
# "b", "bs", "d") from the opcode. postInstructions holds the write-back
# stores emitted after the operation. NOTE: the when-clause ordering matters —
# the regex patterns (/^bi/, /^bti/, ...) must not shadow the later literal
# byte opcodes; they are disjoint as written.
def riscLowerMisplacedAddresses(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            postInstructions = []
            annotation = node.annotation
            case node.opcode
            # 32-bit integer ops and int branches/compares/tests.
            when "addi", "addis", "andi", "lshifti", "muli", "negi", "noti", "ori", "oris",
                "rshifti", "urshifti", "subi", "subis", "xori", /^bi/, /^bti/, /^ci/, /^ti/
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           riscAsRegisters(newList, postInstructions, node.operands, "i"),
                                           annotation)
            # Pointer-width ops.
            when "addp", "andp", "lshiftp", "mulp", "negp", "orp", "rshiftp", "urshiftp",
                "subp", "xorp", /^bp/, /^btp/, /^cp/
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           riscAsRegisters(newList, postInstructions, node.operands, "p"),
                                           annotation)
            # 64-bit ops.
            when "addq", "andq", "lshiftq", "mulq", "negq", "orq", "rshiftq", "urshiftq",
                "subq", "xorq", /^bq/, /^btq/, /^cq/
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           riscAsRegisters(newList, postInstructions, node.operands, "q"),
                                           annotation)
            # Unsigned byte ops.
            when "bbeq", "bbneq", "bba", "bbaeq", "bbb", "bbbeq", "btbz", "btbnz", "tbz", "tbnz",
                "cbeq", "cbneq", "cba", "cbaeq", "cbb", "cbbeq"
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           riscAsRegisters(newList, postInstructions, node.operands, "b"),
                                           annotation)
            # Signed byte ops.
            when "bbgt", "bbgteq", "bblt", "bblteq", "btbs", "tbs", "cbgt", "cbgteq", "cblt", "cblteq"
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           riscAsRegisters(newList, postInstructions, node.operands, "bs"),
                                           annotation)
            # Double-precision ops.
            when "addd", "divd", "subd", "muld", "sqrtd", /^bd/
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           riscAsRegisters(newList, postInstructions, node.operands, "d"),
                                           annotation)
            # Control transfers: the target may be loaded but is never stored back.
            when "jmp", "call"
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           [riscAsRegister(newList, postInstructions, node.operands[0], "p", false)],
                                           annotation)
            else
                newList << node
            end
            newList += postInstructions
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of register reuse in compare instructions. For example:
+#
+# cieq t0, t1, t0
+#
+# will become:
+#
+# mov tmp, t0
+# cieq tmp, t1, t0
+#
+
# Compare/test instructions on RISC targets cannot reuse a source register as
# the destination; when they do, the clobbered source is first copied into a
# fresh temporary and the temporary is used instead.
def riscLowerRegisterReuse(list)
    newList = []
    list.each {
        | node |
        unless node.is_a? Instruction
            newList << node
            next
        end
        annotation = node.annotation
        case node.opcode
        when "cieq", "cineq", "cia", "ciaeq", "cib", "cibeq", "cigt", "cigteq", "cilt", "cilteq",
            "cpeq", "cpneq", "cpa", "cpaeq", "cpb", "cpbeq", "cpgt", "cpgteq", "cplt", "cplteq",
            "tis", "tiz", "tinz", "tbs", "tbz", "tbnz", "tps", "tpz", "tpnz", "cbeq", "cbneq",
            "cba", "cbaeq", "cbb", "cbbeq", "cbgt", "cbgteq", "cblt", "cblteq"
            # Copies the operand at sourceIndex into a temporary, emitting the
            # move, and returns the temporary.
            spill = lambda { |sourceIndex|
                tmp = Tmp.new(node.codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "move", [node.operands[sourceIndex], tmp], annotation)
                tmp
            }
            if node.operands.size == 2
                if node.operands[0] == node.operands[1]
                    newList << Instruction.new(node.codeOrigin, node.opcode, [spill.call(0), node.operands[1]])
                else
                    newList << node
                end
            else
                raise "Wrong number of arguments at #{node.codeOriginString}" unless node.operands.size == 3
                if node.operands[0] == node.operands[2]
                    newList << Instruction.new(node.codeOrigin, node.opcode, [spill.call(0), node.operands[1], node.operands[2]])
                elsif node.operands[1] == node.operands[2]
                    newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], spill.call(1), node.operands[2]])
                else
                    newList << node
                end
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of the not instruction. The following:
+#
+# noti t0
+#
+# becomes:
+#
+# xori -1, t0
+#
+
# Lowers bitwise-not instructions to an xor with -1 of the matching width
# ("noti t0" becomes "xori -1, t0").
#
# FIX: corrected the error-message typo "nubmer" -> "number".
def riscLowerNot(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "noti", "notp"
                raise "Wrong number of operands at #{node.codeOriginString}" unless node.operands.size == 1
                # Reuse the width suffix ("i" or "p") of the original opcode.
                suffix = node.opcode[-1..-1]
                newList << Instruction.new(node.codeOrigin, "xor" + suffix,
                                           [Immediate.new(node.codeOrigin, -1), node.operands[0]])
            else
                newList << node
            end
        else
            newList << node
        end
    }
    return newList
end
+
+#
+# Lowing of complex branch ops on 64-bit. For example:
+#
+# bmulio foo, bar, baz
+#
+# becomes:
+#
+# smulli foo, bar, bar
+# rshiftp bar, 32, tmp1
+# rshifti bar, 31, tmp2
+# zxi2p bar, bar
+# bineq tmp1, tmp2, baz
+#
+
# 64-bit lowering of bmulio: the full 64-bit product lands in the second
# operand; overflow occurred iff bits 32..63 differ from the sign-extension
# of bit 31. The low 32 bits are then zero-extended back into the operand.
def riscLowerHardBranchOps64(list)
    newList = []
    list.each do |node|
        unless node.is_a?(Instruction) && node.opcode == "bmulio"
            newList << node
            next
        end
        left, right, target = node.operands
        highBits = Tmp.new(node.codeOrigin, :gpr)
        signBits = Tmp.new(node.codeOrigin, :gpr)
        newList << Instruction.new(node.codeOrigin, "smulli", [left, right, right])
        newList << Instruction.new(node.codeOrigin, "rshiftp", [right, Immediate.new(node.codeOrigin, 32), highBits])
        newList << Instruction.new(node.codeOrigin, "rshifti", [right, Immediate.new(node.codeOrigin, 31), signBits])
        newList << Instruction.new(node.codeOrigin, "zxi2p", [right, right])
        newList << Instruction.new(node.codeOrigin, "bineq", [highBits, signBits, target])
    end
    newList
end
+
+#
+# Lowering of test instructions. For example:
+#
+# btiz t0, t1, .foo
+#
+# becomes:
+#
+# andi t0, t1, tmp
+# bieq tmp, 0, .foo
+#
+# and another example:
+#
+# tiz t0, t1, t2
+#
+# becomes:
+#
+# andi t0, t1, tmp
+# cieq tmp, 0, t2
+#
+
# Lowers test instructions (bt*/t*) into an and followed by a compare or
# branch against zero, with shortcuts for the two-operand form and for a -1
# immediate mask (which is a no-op mask).
#
# FIXES: (1) the wrong-operand-count raise interpolated a bare
# `codeOriginString`, which is undefined at that scope and produced a
# NameError instead of the diagnostic; it must be node.codeOriginString.
# (2) The nested `def emit` redefined a top-level method on every call;
# replaced with a local lambda, and the long when-chain with a lookup table.
def riscLowerTest(list)
    # opcode => [and opcode, compare-or-branch opcode]
    lowerings = {
        "btis" => ["andi", "bilt"], "btiz" => ["andi", "bieq"], "btinz" => ["andi", "bineq"],
        "btps" => ["andp", "bplt"], "btpz" => ["andp", "bpeq"], "btpnz" => ["andp", "bpneq"],
        "btqs" => ["andq", "bqlt"], "btqz" => ["andq", "bqeq"], "btqnz" => ["andq", "bqneq"],
        "btbs" => ["andi", "bblt"], "btbz" => ["andi", "bbeq"], "btbnz" => ["andi", "bbneq"],
        "tis"  => ["andi", "cilt"], "tiz"  => ["andi", "cieq"], "tinz"  => ["andi", "cineq"],
        "tps"  => ["andp", "cplt"], "tpz"  => ["andp", "cpeq"], "tpnz"  => ["andp", "cpneq"],
        "tqs"  => ["andq", "cqlt"], "tqz"  => ["andq", "cqeq"], "tqnz"  => ["andq", "cqneq"],
        "tbs"  => ["andi", "cblt"], "tbz"  => ["andi", "cbeq"], "tbnz"  => ["andi", "cbneq"]
    }

    emit = lambda { |newList, andOpcode, branchOpcode, node|
        operands = node.operands
        if operands.size == 2
            # Two-operand form: test a single value directly against zero.
            newList << Instruction.new(node.codeOrigin, branchOpcode, [operands[0], Immediate.new(node.codeOrigin, 0), operands[1]])
        else
            raise "Incorrect number of operands at #{node.codeOriginString}" unless operands.size == 3
            if operands[0].immediate? and operands[0].value == -1
                # Masking with -1 is the identity; skip the and.
                newList << Instruction.new(node.codeOrigin, branchOpcode, [operands[1], Immediate.new(node.codeOrigin, 0), operands[2]])
            elsif operands[1].immediate? and operands[1].value == -1
                newList << Instruction.new(node.codeOrigin, branchOpcode, [operands[0], Immediate.new(node.codeOrigin, 0), operands[2]])
            else
                tmp = Tmp.new(node.codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, andOpcode, [operands[0], operands[1], tmp])
                newList << Instruction.new(node.codeOrigin, branchOpcode, [tmp, Immediate.new(node.codeOrigin, 0), operands[2]])
            end
        end
    }

    newList = []
    list.each {
        | node |
        lowering = node.is_a?(Instruction) ? lowerings[node.opcode] : nil
        if lowering
            emit.call(newList, lowering[0], lowering[1], node)
        else
            newList << node
        end
    }
    return newList
end
diff --git a/Source/JavaScriptCore/offlineasm/self_hash.rb b/Source/JavaScriptCore/offlineasm/self_hash.rb
new file mode 100644
index 000000000..6c736ff5b
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/self_hash.rb
@@ -0,0 +1,73 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "digest/sha1"
+require "pathname"
+
+#
+# dirHash(directory, regexp) -> SHA1 hexdigest
+#
+# Returns a hash of all files in the given directory that fit the given
+# pattern.
+#
+
# Returns the SHA1 hexdigest of the concatenated contents of every entry in
# directory whose name matches regexp. NOTE(review): concatenation follows
# directory-listing order, which is what the original did as well — confirm
# callers never rely on a stable order across filesystems.
def dirHash(directory, regexp)
    dirPath = Pathname.new(directory)
    matching = Dir.entries(dirPath).select { |entry| entry =~ regexp }
    contents = matching.map { |entry| IO::read(dirPath + entry) }.join
    Digest::SHA1.hexdigest(contents)
end
+
+#
+# fileListHash(fileList) -> SHA1 hexdigest
+#
+# Returns a hash of all files in the list.
+#
+
# Returns the SHA1 hexdigest of the concatenated contents of the listed
# files, in list order.
def fileListHash(fileList)
    Digest::SHA1.hexdigest(fileList.map { |fileName| IO::read(fileName) }.join)
end
+
+#
+# selfHash -> SHA1 hexdigest
+#
+# Returns a hash of the offlineasm source code. This allows dependency
+# tracking for not just changes in input, but also changes in the assembler
+# itself.
+#
+
# Hashes every .rb file in the directory containing this script, so that a
# change to the assembler itself invalidates previously generated output.
def selfHash
    dirHash(Pathname.new(__FILE__).dirname, /\.rb$/)
end
+
diff --git a/Source/JavaScriptCore/offlineasm/settings.rb b/Source/JavaScriptCore/offlineasm/settings.rb
new file mode 100644
index 000000000..eec092584
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/settings.rb
@@ -0,0 +1,249 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+require "backends"
+require "parser"
+require "transform"
+
+#
+# computeSettingsCombinations(ast) -> settingsCombiations
+#
+# Computes an array of settings maps, where a settings map constitutes
+# a configuration for the assembly code being generated. The map
+# contains key value pairs where keys are settings names (strings) and
+# the values are booleans (true for enabled, false for disabled).
+#
+
# Builds one settings map per (backend, non-backend-setting assignment)
# combination: for each backend, that backend is true, all other backends are
# false, and every non-backend setting found in the AST is expanded into all
# true/false combinations.
def computeSettingsCombinations(ast)
    settingsCombinations = []

    # Recursively expands the remaining setting names into every true/false
    # combination, appending each completed map to settingsCombinations.
    # NOTE(review): this nested def re-defines a top-level helper on every
    # call; harmless as used, but worth confirming before refactoring.
    def settingsCombinator(settingsCombinations, mapSoFar, remaining)
        if remaining.empty?
            settingsCombinations << mapSoFar
            return
        end

        newMap = mapSoFar.dup
        newMap[remaining[0]] = true
        settingsCombinator(settingsCombinations, newMap, remaining[1..-1])

        newMap = mapSoFar.dup
        newMap[remaining[0]] = false
        settingsCombinator(settingsCombinations, newMap, remaining[1..-1])
    end

    # All settings referenced by the AST that do not name a backend.
    nonBackendSettings = ast.filter(Setting).uniq.collect{ |v| v.name }
    nonBackendSettings.delete_if {
        | setting |
        isBackend? setting
    }

    allBackendsFalse = {}
    BACKENDS.each {
        | backend |
        allBackendsFalse[backend] = false
    }

    # This will create entries for invalid backends. That's fine. It's necessary
    # because it ensures that generate_offsets_extractor (which knows about valid
    # backends) has settings indices that are compatible with what asm will see
    # (asm doesn't know about valid backends).
    BACKENDS.each {
        | backend |
        map = allBackendsFalse.clone
        map[backend] = true
        settingsCombinator(settingsCombinations, map, nonBackendSettings)
    }

    settingsCombinations
end
+
+#
+# forSettings(concreteSettings, ast) {
+# | concreteSettings, lowLevelAST, backend | ... }
+#
+# Determines if the settings combination is valid, and if so, calls
+# the block with the information you need to generate code.
+#
+
# Checks that the settings combination selects exactly one backend; if that
# backend is valid, resolves the AST against the settings and yields
# (concreteSettings, lowLevelAST, backend). Invalid backends simply return
# without yielding.
#
# FIX: removed the dead local `numClaimedBackends`, which was initialized
# but never incremented or read.
def forSettings(concreteSettings, ast)
    # Determine which backend this combination claims; more than one enabled
    # backend is a generator bug.
    selectedBackend = nil
    BACKENDS.each {
        | backend |
        if concreteSettings[backend]
            raise "More than one backend enabled in settings combination" if selectedBackend
            selectedBackend = backend
        end
    }

    return unless isValidBackend? selectedBackend

    # Resolve the AST down to a low-level form (no macros or conditionals).
    lowLevelAST = ast.resolveSettings(concreteSettings)

    yield concreteSettings, lowLevelAST, selectedBackend
end
+
+#
+# forEachValidSettingsCombination(ast) {
+# | concreteSettings, ast, backend, index | ... }
+#
+# forEachValidSettingsCombination(ast, settingsCombinations) {
+# | concreteSettings, ast, backend, index | ... }
+#
+# Executes the given block for each valid settings combination in the
+# settings map. The ast passed into the block is resolved
+# (ast.resolve) against the settings.
+#
+# The first form will call computeSettingsCombinations(ast) for you.
+#
+
# Iterates every valid settings combination, yielding
# (concreteSettings, resolvedAST, backend, index) for each. When no
# combinations array is supplied, computeSettingsCombinations(ast) is used.
#
# FIX: the explicit-combinations branch misspelled the parameter as
# "optionalSettingsCombiations", raising NameError whenever a combinations
# array was actually passed.
def forEachValidSettingsCombination(ast, *optionalSettingsCombinations)
    raise "Expected at most one settings-combinations array" if optionalSettingsCombinations.size > 1

    if optionalSettingsCombinations.empty?
        settingsCombinations = computeSettingsCombinations(ast)
    else
        settingsCombinations = optionalSettingsCombinations[0]
    end

    settingsCombinations.each_with_index {
        | concreteSettings, index |
        forSettings(concreteSettings, ast) {
            | concreteSettings_, lowLevelAST, backend |
            yield concreteSettings, lowLevelAST, backend, index
        }
    }
end
+
+#
+# cppSettingsTest(concreteSettings)
+#
+# Returns the C++ code used to test if we are in a configuration that
+# corresponds to the given concrete settings.
+#
+
# Returns the C preprocessor "#if" line that is true exactly when the build
# configuration matches the given concrete settings: enabled settings appear
# as OFFLINE_ASM_<name>, disabled ones negated.
def cppSettingsTest(concreteSettings)
    clauses = concreteSettings.map { |name, enabled|
        (enabled ? "" : "!") + "OFFLINE_ASM_" + name
    }
    "#if " + clauses.join(" && ")
end
+
+#
+# isASTErroneous(ast)
+#
+# Tests to see if the AST claims that there is an error - i.e. if the
+# user's code, after settings resolution, has Error nodes.
+#
+
# True if the resolved AST contains any Error nodes, i.e. the user's code
# reports an error under the current settings.
def isASTErroneous(ast)
    !ast.filter(Error).empty?
end
+
+#
+# assertConfiguration(concreteSettings)
+#
+# Emits a check that asserts that we're using the given configuration.
+#
+
# Emits a preprocessor guard that fails compilation unless the build is in
# the given configuration.
def assertConfiguration(concreteSettings)
    [
        cppSettingsTest(concreteSettings),
        "#else",
        "#error \"Configuration mismatch.\"",
        "#endif"
    ].each { |line| $output.puts line }
end
+
+#
+# emitCodeInConfiguration(concreteSettings, ast, backend) {
+# | concreteSettings, ast, backend | ... }
+#
+# Emits all relevant guards to see if the configuration holds and
+# calls the block if the configuration is not erroneous.
+#
+
# Wraps the block's output in the guards appropriate for the output flavor:
# a cppSettingsTest "#if"/"#endif" pair normally, or MASM segment directives
# when $emitWinAsm is set. Emits an #error instead of calling the block when
# the AST is erroneous or the backend is not in WORKING_BACKENDS.
def emitCodeInConfiguration(concreteSettings, ast, backend)
    Label.resetReferenced

    if !$emitWinAsm
        $output.puts cppSettingsTest(concreteSettings)
    else
        # MASM-style prologue for Windows assembly output.
        if backend == "X86_WIN"
            $output.puts ".MODEL FLAT, C"
        end
        $output.puts "INCLUDE #{File.basename($output.path)}.sym"
        $output.puts "_TEXT SEGMENT"
    end

    if isASTErroneous(ast)
        $output.puts "#error \"Invalid configuration.\""
    elsif not WORKING_BACKENDS.include? backend
        $output.puts "#error \"This backend is not supported yet.\""
    else
        yield concreteSettings, ast, backend
    end

    if !$emitWinAsm
        $output.puts "#endif"
    else
        $output.puts "_TEXT ENDS"
        $output.puts "END"

        # Write symbols needed by MASM
        File.open("#{File.basename($output.path)}.sym", "w") {
            | outp |
            Label.forReferencedExtern {
                | name |
                outp.puts "EXTERN #{name[1..-1]} : near"
            }
        }
    end
end
+
+#
+# emitCodeInAllConfigurations(ast) {
+# | concreteSettings, ast, backend, index | ... }
+#
+# Emits guard codes for all valid configurations, and calls the block
+# for those configurations that are valid and not erroneous.
+#
+
# For every valid settings combination, emits the "#if"/"#endif" guard pair
# around whatever the block writes for that configuration.
def emitCodeInAllConfigurations(ast)
    forEachValidSettingsCombination(ast) do |settings, loweredAST, backend, index|
        $output.puts cppSettingsTest(settings)
        yield settings, loweredAST, backend, index
        $output.puts "#endif"
    end
end
+
+
+
diff --git a/Source/JavaScriptCore/offlineasm/sh4.rb b/Source/JavaScriptCore/offlineasm/sh4.rb
new file mode 100644
index 000000000..0241f38d8
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/sh4.rb
@@ -0,0 +1,1102 @@
+# Copyright (C) 2013 Apple Inc. All rights reserved.
+# Copyright (C) 2013 Cisco Systems, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY CISCO SYSTEMS, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CISCO SYSTEMS, INC. OR ITS
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'risc'
+
+class Node
+    # "drN" -> "frN": the even single-precision register of the pair backing
+    # double register drN (holds the high word — see the little-endian
+    # comments in loaddReversedAndIncrementAddress below).
+    def sh4SingleHi
+        doubleOperand = sh4Operand
+        raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^dr/
+        "fr" + ($~.post_match.to_i).to_s
+    end
+    # "drN" -> "frN+1": the odd single-precision register of the pair
+    # (holds the low word).
+    def sh4SingleLo
+        doubleOperand = sh4Operand
+        raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^dr/
+        "fr" + ($~.post_match.to_i + 1).to_s
+    end
+end
+
+# A raw SH4 register reserved for the offlineasm's own use (the scratch
+# lists SH4_TMP_GPRS / SH4_TMP_FPRS); its operand text is simply the native
+# register name it was constructed with.
+class SpecialRegister < NoChildren
+    def sh4Operand
+        @name
+    end
+
+    def dump
+        @name
+    end
+
+    def register?
+        true
+    end
+end
+
+SH4_TMP_GPRS = [ SpecialRegister.new("r3"), SpecialRegister.new("r11"), SpecialRegister.new("r13") ]
+SH4_TMP_FPRS = [ SpecialRegister.new("dr10") ]
+
+class RegisterID
+    # Map offlineasm symbolic registers onto concrete SH4 GPRs. r4-r7 double
+    # as the argument registers a0-a3, r14 is the call frame register, r15
+    # the stack pointer, and "lr" maps to the pr special register (which
+    # sh4LowerMisplacedSpecialRegisters later rewrites into lds/sts forms).
+    def sh4Operand
+        case name
+        when "t0"
+            "r0"
+        when "t1"
+            "r1"
+        when "t2"
+            "r2"
+        when "t3"
+            "r10"
+        when "t4", "a0"
+            "r4"
+        when "t5", "a1"
+            "r5"
+        when "t6", "a2"
+            "r6"
+        when "t7", "a3"
+            "r7"
+        when "t8"
+            "r8"
+        when "t9"
+            "r9"
+        when "cfr"
+            "r14"
+        when "sp"
+            "r15"
+        when "lr"
+            "pr"
+        else
+            raise "Bad register #{name} for SH4 at #{codeOriginString}"
+        end
+    end
+end
+
+class FPRegisterID
+    # Map offlineasm FP registers onto SH4 double registers (even-numbered
+    # single-precision pairs: dr0, dr2, ...). dr10 is absent here because it
+    # is reserved as a scratch register (see SH4_TMP_FPRS).
+    def sh4Operand
+        case name
+        when "ft0", "fr"
+            "dr0"
+        when "ft1"
+            "dr2"
+        when "ft2"
+            "dr4"
+        when "ft3"
+            "dr6"
+        when "ft4"
+            "dr8"
+        when "fa0"
+            "dr12"
+        else
+            raise "Bad register #{name} for SH4 at #{codeOriginString}"
+        end
+    end
+end
+
+class Immediate
+    # SH4 instruction immediates are 8-bit signed, hence the -128..127
+    # restriction; anything wider must have been lowered through a constant
+    # pool (sh4LowerConstPool) before emission.
+    def sh4Operand
+        raise "Invalid immediate #{value} at #{codeOriginString}" if value < -128 or value > 127
+        "##{value}"
+    end
+end
+
+class Address
+    # Displacement addressing "@(disp, Rn)". For 4-byte accesses SH4 encodes
+    # a 4-bit displacement scaled by 4, hence the 0..60 limit enforced here
+    # (narrower accesses are constrained earlier, in getModifiedListSH4).
+    def sh4Operand
+        raise "Bad offset #{offset.value} at #{codeOriginString}" if offset.value < 0 or offset.value > 60
+        if offset.value == 0
+            "@#{base.sh4Operand}"
+        else
+            "@(#{offset.value}, #{base.sh4Operand})"
+        end
+    end
+
+    # Post-increment form "@Rn+"; only legal with a zero displacement.
+    def sh4OperandPostInc
+        raise "Bad offset #{offset.value} for post inc at #{codeOriginString}" unless offset.value == 0
+        "@#{base.sh4Operand}+"
+    end
+
+    # Pre-decrement form "@-Rn"; only legal with a zero displacement.
+    def sh4OperandPreDec
+        raise "Bad offset #{offset.value} for pre dec at #{codeOriginString}" unless offset.value == 0
+        "@-#{base.sh4Operand}"
+    end
+end
+
+class BaseIndex
+    # Base+index addressing has no direct SH4 encoding;
+    # riscLowerMalformedAddresses must have rewritten every instance into
+    # leap + plain Address before emission, so reaching here is a bug.
+    def sh4Operand
+        raise "Unconverted base index at #{codeOriginString}"
+    end
+end
+
+class AbsoluteAddress
+    # Absolute addresses must likewise have been lowered away before
+    # emission; reaching here is a bug in the lowering pipeline.
+    def sh4Operand
+        raise "Unconverted absolute address at #{codeOriginString}"
+    end
+end
+
+class LabelReference
+    # A label reference is emitted as its raw symbol name.
+    def sh4Operand
+        value
+    end
+end
+
+class SubImmediates
+    # Difference of two immediates, emitted as an expression for the
+    # assembler to evaluate (pooled as a 32-bit entry by sh4LowerConstPool).
+    def sh4Operand
+        "#{@left.sh4Operand} - #{@right.sh4Operand}"
+    end
+end
+
+# A pool of constants to be dumped into the instruction stream (placed after
+# an unconditional jmp/ret by sh4LowerConstPool so the data is never
+# executed). All entries in one pool share a size of 16 or 32 bits.
+class ConstPool < Node
+    attr_reader :size
+    attr_reader :entries
+
+    def initialize(codeOrigin, entries, size)
+        super(codeOrigin)
+        raise "Invalid size #{size} for ConstPool" unless size == 16 or size == 32
+        @size = size
+        @entries = entries
+    end
+
+    def dump
+        "#{size}: #{entries}"
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        false
+    end
+
+    def register?
+        false
+    end
+
+    def lowerSH4
+        # Align to the entry size so PC-relative mov.w/mov.l loads are legal.
+        if size == 16
+            $asm.puts ".balign 2"
+        else
+            $asm.puts ".balign 4"
+        end
+        entries.map {
+            |e|
+            # Emit each entry as its label followed by the raw data directive.
+            e.label.lower("SH4")
+            if e.size == 16
+                $asm.puts ".word #{e.value}"
+            else
+                $asm.puts ".long #{e.value}"
+            end
+        }
+    end
+end
+
+# One pooled constant: its value plus a unique local label through which the
+# pooled data is referenced by PC-relative mov.w/mov.l loads.
+class ConstPoolEntry < Node
+    attr_reader :size
+    attr_reader :value
+    attr_reader :label
+    attr_reader :labelref
+
+    def initialize(codeOrigin, value, size)
+        super(codeOrigin)
+        raise "Invalid size #{size} for ConstPoolEntry" unless size == 16 or size == 32
+        @size = size
+        @value = value
+        @label = LocalLabel.unique("constpool#{size}")
+        @labelref = LocalLabelReference.new(codeOrigin, label)
+    end
+
+    def dump
+        "#{value} (#{size} @ #{label})"
+    end
+
+    # Entries compare equal by value only; sizes never mix because
+    # sh4LowerConstPool keeps separate 16- and 32-bit pools.
+    def ==(other)
+        other.is_a? ConstPoolEntry and other.value == @value
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        false
+    end
+
+    def register?
+        false
+    end
+end
+
+
+#
+# Lowering of shift ops for SH4. For example:
+#
+# rshifti foo, bar
+#
+# becomes:
+#
+# negi foo, tmp
+# shad tmp, bar
+#
+
+# See the header comment above: rewrite the generic shift opcodes into
+# SH4-encodable forms (fixed-count shl*/sha* variants, or shad/shld with the
+# amount in a register — negative amounts shift right).
+def sh4LowerShiftOps(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            case node.opcode
+            when "ulshifti", "ulshiftp", "urshifti", "urshiftp", "lshifti", "lshiftp", "rshifti", "rshiftp"
+                # Unsigned opcodes use logical shifts ("l"), signed use
+                # arithmetic ("a"); direction is "l" or "r".
+                if node.opcode[0, 1] == "u"
+                    type = "l"
+                    direction = node.opcode[1, 1]
+                else
+                    type = "a"
+                    direction = node.opcode[0, 1]
+                end
+                if node.operands[0].is_a? Immediate
+                    # Shift amounts are taken modulo 32.
+                    maskedImm = Immediate.new(node.operands[0].codeOrigin, node.operands[0].value & 31)
+                    if maskedImm.value == 0
+                        # There is nothing to do here.
+                    elsif maskedImm.value == 1 or (type == "l" and [2, 8, 16].include? maskedImm.value)
+                        # Directly encodable as a fixed-count shift
+                        # (handled by the "shllx"/"shlrx"/"shalx"/"sharx" cases).
+                        newList << Instruction.new(node.codeOrigin, "sh#{type}#{direction}x", [maskedImm, node.operands[1]])
+                    else
+                        # General case: load the amount into a temp and use
+                        # shad/shld (negated amount for right shifts).
+                        tmp = Tmp.new(node.codeOrigin, :gpr)
+                        if direction == "l"
+                            newList << Instruction.new(node.codeOrigin, "move", [maskedImm, tmp])
+                        else
+                            newList << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, -1 * maskedImm.value), tmp])
+                        end
+                        newList << Instruction.new(node.codeOrigin, "sh#{type}d", [tmp, node.operands[1]])
+                    end
+                else
+                    # Register amount: mask to 0..31, then negate for right shifts.
+                    tmp = Tmp.new(node.codeOrigin, :gpr)
+                    newList << Instruction.new(node.codeOrigin, "move", [Immediate.new(node.operands[0].codeOrigin, 31), tmp])
+                    newList << Instruction.new(node.codeOrigin, "andi", [node.operands[0], tmp])
+                    if direction == "r"
+                        newList << Instruction.new(node.codeOrigin, "negi", [tmp, tmp])
+                    end
+                    newList << Instruction.new(node.codeOrigin, "sh#{type}d", [tmp, node.operands[1]])
+                end
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+
+#
+# Lowering of simple branch ops for SH4. For example:
+#
+# baddis foo, bar, baz
+#
+# will become:
+#
+# addi foo, bar
+# bs bar, baz
+#
+
+# See the header comment above: split arithmetic-and-branch opcodes into the
+# arithmetic op followed by a separate condition branch, and widen
+# bmulio/bmulpo with the two scratch registers lowerSH4 expects.
+def sh4LowerSimpleBranchOps(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            # NOTE(review): annotation is captured but never used below.
+            annotation = node.annotation
+            case node.opcode
+            when /^b(addi|subi|ori|addp)/
+                op = $1
+                # bc is the branch-condition suffix ("s", "nz", ...).
+                bc = $~.post_match
+
+                case op
+                when "addi", "addp"
+                    op = "addi"
+                when "subi", "subp"
+                    # NOTE(review): "subp" and "orp" cannot be produced by the
+                    # regex above (it only captures addi|subi|ori|addp) —
+                    # confirm whether the regex was meant to include them.
+                    op = "subi"
+                when "ori", "orp"
+                    op = "ori"
+                end
+
+                if bc == "s"
+                    raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
+                    if node.operands[1].is_a? RegisterID or node.operands[1].is_a? SpecialRegister
+                        newList << Instruction.new(node.codeOrigin, op, node.operands[0..1])
+                        newList << Instruction.new(node.codeOrigin, "bs", node.operands[1..2])
+                    else
+                        # Destination is in memory: load, operate, store back,
+                        # then branch on the sign of the computed value.
+                        tmpVal = Tmp.new(node.codeOrigin, :gpr)
+                        tmpPtr = Tmp.new(node.codeOrigin, :gpr)
+                        addr = Address.new(node.codeOrigin, tmpPtr, Immediate.new(node.codeOrigin, 0))
+                        newList << Instruction.new(node.codeOrigin, "leap", [node.operands[1], tmpPtr])
+                        newList << Instruction.new(node.codeOrigin, "loadi", [addr, tmpVal])
+                        newList << Instruction.new(node.codeOrigin, op, [node.operands[0], tmpVal])
+                        newList << Instruction.new(node.codeOrigin, "storei", [tmpVal, addr])
+                        newList << Instruction.new(node.codeOrigin, "bs", [tmpVal, node.operands[2]])
+                    end
+                elsif bc == "nz"
+                    raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
+                    newList << Instruction.new(node.codeOrigin, op, node.operands[0..1])
+                    newList << Instruction.new(node.codeOrigin, "btinz", node.operands[1..2])
+                else
+                    newList << node
+                end
+            when "bmulio", "bmulpo"
+                raise "Invalid operands number (#{node.operands.size})" unless node.operands.size == 3
+                # Prepend two scratch registers; lowerSH4's overflow-checked
+                # multiply needs 5 operands.
+                tmp1 = Tmp.new(node.codeOrigin, :gpr)
+                tmp2 = Tmp.new(node.codeOrigin, :gpr)
+                newList << Instruction.new(node.codeOrigin, node.opcode, [tmp1, tmp2].concat(node.operands))
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+
+#
+# Lowering of double accesses for SH4. For example:
+#
+# loadd [foo, bar, 8], baz
+#
+# becomes:
+#
+# leap [foo, bar, 8], tmp
+# loaddReversedAndIncrementAddress [tmp], baz
+#
+
+# See the header comment above: double load/store is split into an address
+# materialization (leap) plus a word-pair access with post-inc/pre-dec
+# addressing, since SH4 fmov double forms don't fit the little-endian layout.
+def sh4LowerDoubleAccesses(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            case node.opcode
+            when "loadd"
+                # NOTE(review): bare codeOrigin (rather than node.codeOrigin)
+                # is used for several of these nodes — confirm it resolves in
+                # this scope.
+                tmp = Tmp.new(codeOrigin, :gpr)
+                addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
+                newList << Instruction.new(codeOrigin, "leap", [node.operands[0], tmp])
+                newList << Instruction.new(node.codeOrigin, "loaddReversedAndIncrementAddress", [addr, node.operands[1]], node.annotation)
+            when "stored"
+                tmp = Tmp.new(codeOrigin, :gpr)
+                addr = Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
+                # The store walks downward with pre-decrement, so start one
+                # double (8 bytes) past the target address.
+                newList << Instruction.new(codeOrigin, "leap", [node.operands[1].withOffset(8), tmp])
+                newList << Instruction.new(node.codeOrigin, "storedReversedAndDecrementAddress", [node.operands[0], addr], node.annotation)
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+
+#
+# Lowering of double specials for SH4.
+#
+
+# Rewrite unordered/ordered double comparison branches in terms of explicit
+# NaN checks (bdnan) plus the plain ordered comparison.
+def sh4LowerDoubleSpecials(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            case node.opcode
+            when "bdltun", "bdgtun"
+                # Handle specific floating point unordered opcodes.
+                # Unordered branch: taken if either operand is NaN, otherwise
+                # fall through to the ordered comparison (suffix "un" dropped).
+                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], node.operands[2]])
+                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], node.operands[2]])
+                newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
+            when "bdnequn", "bdgtequn", "bdltequn"
+                # These negated comparisons already branch on NaN inputs, so
+                # just drop the "un" suffix.
+                newList << Instruction.new(codeOrigin, node.opcode[0..-3], node.operands)
+            when "bdneq", "bdgteq", "bdlteq"
+                # Handle specific floating point ordered opcodes.
+                # Ordered branch: skip the comparison entirely when either
+                # operand is NaN.
+                outlabel = LocalLabel.unique("out_#{node.opcode}")
+                outref = LocalLabelReference.new(codeOrigin, outlabel)
+                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[0], outref])
+                newList << Instruction.new(codeOrigin, "bdnan", [node.operands[1], outref])
+                newList << Instruction.new(codeOrigin, node.opcode, node.operands)
+                newList << outlabel
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+
+#
+# Lowering of misplaced labels for SH4.
+#
+
+# Materialize label-reference operands into GPR temporaries, since only the
+# mova instruction takes a label operand natively on SH4.
+def sh4LowerMisplacedLabels(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            operands = node.operands
+            newOperands = []
+            operands.each {
+                | operand |
+                if operand.is_a? LabelReference and node.opcode != "mova"
+                    # Load the label's address first, then use the temp.
+                    tmp = Tmp.new(operand.codeOrigin, :gpr)
+                    newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp])
+                    newOperands << tmp
+                else
+                    newOperands << operand
+                end
+            }
+            newList << Instruction.new(node.codeOrigin, node.opcode, newOperands, node.annotation)
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+
+#
+# Lowering of misplaced special registers for SH4. For example:
+#
+# storep pr, foo
+#
+# becomes:
+#
+# stspr tmp
+# storep tmp, foo
+#
+
+# See the header comment above: the pr register can only be accessed via
+# lds/sts, so moves, loads and stores touching pr are routed through those
+# forms (with a GPR temporary for the memory cases).
+def sh4LowerMisplacedSpecialRegisters(list)
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            case node.opcode
+            when "move"
+                if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr"
+                    # Read pr: sts pr, dst.
+                    newList << Instruction.new(codeOrigin, "stspr", [node.operands[1]])
+                elsif node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "pr"
+                    # Write pr: lds src, pr.
+                    newList << Instruction.new(codeOrigin, "ldspr", [node.operands[0]])
+                else
+                    newList << node
+                end
+            when "loadi", "loadis", "loadp"
+                # Load into pr: go through a GPR temporary, then lds.
+                if node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "pr"
+                    tmp = Tmp.new(codeOrigin, :gpr)
+                    newList << Instruction.new(codeOrigin, node.opcode, [node.operands[0], tmp])
+                    newList << Instruction.new(codeOrigin, "ldspr", [tmp])
+                else
+                    newList << node
+                end
+            when "storei", "storep"
+                # Store from pr: sts into a GPR temporary, then store that.
+                if node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "pr"
+                    tmp = Tmp.new(codeOrigin, :gpr)
+                    newList << Instruction.new(codeOrigin, "stspr", [tmp])
+                    newList << Instruction.new(codeOrigin, node.opcode, [tmp, node.operands[1]])
+                else
+                    newList << node
+                end
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+
+#
+# Group immediate values outside -128..127 range into constant pools for SH4.
+# These constant pools will be placed behind non-return opcodes jmp and ret, for example:
+#
+# move 1024, foo
+# ...
+# ret
+#
+# becomes:
+#
+# move [label], foo
+# ...
+# ret
+# label: 1024
+#
+
+# See the header comment above: wide immediates, label addresses, and
+# immediate differences are collected into 16-/32-bit constant pools which
+# are flushed after unconditional control transfers (jmp/ret) or at an
+# explicit "flushcp", so the pooled data is never executed.
+def sh4LowerConstPool(list)
+    newList = []
+    currentPool16 = []
+    currentPool32 = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            case node.opcode
+            when "jmp", "ret", "flushcp"
+                # "flushcp" forces a flush mid-stream by synthesizing a jump
+                # over the emitted pools.
+                if node.opcode == "flushcp"
+                    outlabel = LocalLabel.unique("flushcp")
+                    newList << Instruction.new(codeOrigin, "jmp", [LocalLabelReference.new(codeOrigin, outlabel)])
+                else
+                    newList << node
+                end
+                if not currentPool16.empty?
+                    newList << ConstPool.new(codeOrigin, currentPool16, 16)
+                    currentPool16 = []
+                end
+                if not currentPool32.empty?
+                    newList << ConstPool.new(codeOrigin, currentPool32, 32)
+                    currentPool32 = []
+                end
+                if node.opcode == "flushcp"
+                    newList << outlabel
+                end
+            when "move"
+                if node.operands[0].is_a? Immediate and not (-128..127).include? node.operands[0].value
+                    # Immediate too wide for an 8-bit mov: pool it, reusing an
+                    # existing entry with the same value when possible.
+                    poolEntry = nil
+                    if (-32768..32767).include? node.operands[0].value
+                        currentPool16.each { |e|
+                            if e.value == node.operands[0].value
+                                poolEntry = e
+                            end
+                        }
+                        if !poolEntry
+                            poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].value, 16)
+                            currentPool16 << poolEntry
+                        end
+                    else
+                        currentPool32.each { |e|
+                            if e.value == node.operands[0].value
+                                poolEntry = e
+                            end
+                        }
+                        if !poolEntry
+                            poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].value, 32)
+                            currentPool32 << poolEntry
+                        end
+                    end
+                    newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
+                elsif node.operands[0].is_a? LabelReference
+                    # Label addresses are link-time constants: always 32-bit.
+                    poolEntry = nil
+                    currentPool32.each { |e|
+                        if e.value == node.operands[0].asmLabel
+                            poolEntry = e
+                        end
+                    }
+                    if !poolEntry
+                        poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].asmLabel, 32)
+                        currentPool32 << poolEntry
+                    end
+                    newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
+                elsif node.operands[0].is_a? SubImmediates
+                    # Assembler-evaluated difference expression; not deduplicated.
+                    poolEntry = ConstPoolEntry.new(codeOrigin, node.operands[0].sh4Operand, 32)
+                    currentPool32 << poolEntry
+                    newList << Instruction.new(codeOrigin, "move", [poolEntry, node.operands[1]])
+                else
+                    newList << node
+                end
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    # Flush whatever is still pending at the end of the list.
+    if not currentPool16.empty?
+        newList << ConstPool.new(codeOrigin, currentPool16, 16)
+    end
+    if not currentPool32.empty?
+        newList << ConstPool.new(codeOrigin, currentPool32, 32)
+    end
+    newList
+end
+
+
+#
+# Lowering of argument setup for SH4.
+# This phase avoids argument register trampling. For example, if a0 == t4:
+#
+# setargs t1, t4
+#
+# becomes:
+#
+# move t4, a1
+# move t1, a0
+#
+
+# See the header comment above: expand "setargs" into moves to a0..a3,
+# ordered so no argument register is trampled before it is read.
+def sh4LowerArgumentSetup(list)
+    a0 = RegisterID.forName(codeOrigin, "a0")
+    a1 = RegisterID.forName(codeOrigin, "a1")
+    a2 = RegisterID.forName(codeOrigin, "a2")
+    a3 = RegisterID.forName(codeOrigin, "a3")
+    newList = []
+    list.each {
+        | node |
+        if node.is_a? Instruction
+            case node.opcode
+            when "setargs"
+                if node.operands.size == 2
+                    if node.operands[1].sh4Operand != a0.sh4Operand
+                        # Second source is not a0: filling a0 first is safe.
+                        newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
+                        newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
+                    elsif node.operands[0].sh4Operand != a1.sh4Operand
+                        # First source is not a1: filling a1 first is safe.
+                        newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
+                        newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
+                    else
+                        # As (operands[0] == a1) and (operands[1] == a0), we just need to swap a0 and a1.
+                        # Three-xor swap avoids needing a scratch register.
+                        newList << Instruction.new(codeOrigin, "xori", [a0, a1])
+                        newList << Instruction.new(codeOrigin, "xori", [a1, a0])
+                        newList << Instruction.new(codeOrigin, "xori", [a0, a1])
+                    end
+                elsif node.operands.size == 4
+                    # FIXME: We just raise an error if something is likely to go wrong for now.
+                    # It would be better to implement a recovering algorithm.
+                    if (node.operands[0].sh4Operand == a1.sh4Operand) or
+                        (node.operands[0].sh4Operand == a2.sh4Operand) or
+                        (node.operands[0].sh4Operand == a3.sh4Operand) or
+                        (node.operands[1].sh4Operand == a0.sh4Operand) or
+                        (node.operands[1].sh4Operand == a2.sh4Operand) or
+                        (node.operands[1].sh4Operand == a3.sh4Operand) or
+                        (node.operands[2].sh4Operand == a0.sh4Operand) or
+                        (node.operands[2].sh4Operand == a1.sh4Operand) or
+                        (node.operands[2].sh4Operand == a3.sh4Operand) or
+                        (node.operands[3].sh4Operand == a0.sh4Operand) or
+                        (node.operands[3].sh4Operand == a1.sh4Operand) or
+                        (node.operands[3].sh4Operand == a2.sh4Operand)
+                        raise "Potential argument register trampling detected."
+                    end
+
+                    newList << Instruction.new(codeOrigin, "move", [node.operands[0], a0])
+                    newList << Instruction.new(codeOrigin, "move", [node.operands[1], a1])
+                    newList << Instruction.new(codeOrigin, "move", [node.operands[2], a2])
+                    newList << Instruction.new(codeOrigin, "move", [node.operands[3], a3])
+                else
+                    raise "Invalid operands number (#{node.operands.size}) for setargs"
+                end
+            else
+                newList << node
+            end
+        else
+            newList << node
+        end
+    }
+    newList
+end
+
+
+class Sequence
+ def getModifiedListSH4
+ result = @list
+
+ # Verify that we will only see instructions and labels.
+ result.each {
+ | node |
+ unless node.is_a? Instruction or
+ node.is_a? Label or
+ node.is_a? LocalLabel or
+ node.is_a? Skip
+ raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
+ end
+ }
+
+ result = sh4LowerShiftOps(result)
+ result = sh4LowerSimpleBranchOps(result)
+ result = riscLowerMalformedAddresses(result) {
+ | node, address |
+ if address.is_a? Address
+ case node.opcode
+ when "btbz", "btbnz", "cbeq", "bbeq", "bbneq", "bbb", "loadb", "storeb"
+ (0..15).include? address.offset.value and
+ ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
+ (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
+ when "loadh"
+ (0..30).include? address.offset.value and
+ ((node.operands[0].is_a? RegisterID and node.operands[0].sh4Operand == "r0") or
+ (node.operands[1].is_a? RegisterID and node.operands[1].sh4Operand == "r0"))
+ else
+ (0..60).include? address.offset.value
+ end
+ else
+ false
+ end
+ }
+ result = sh4LowerDoubleAccesses(result)
+ result = sh4LowerDoubleSpecials(result)
+ result = riscLowerMisplacedImmediates(result, ["storeb", "storei", "storep", "muli", "mulp", "andi", "ori", "xori",
+ "cbeq", "cieq", "cpeq", "cineq", "cpneq", "cib", "baddio", "bsubio", "bmulio", "baddis",
+ "bbeq", "bbneq", "bbb", "bieq", "bpeq", "bineq", "bpneq", "bia", "bpa", "biaeq", "bpaeq", "bib", "bpb",
+ "bigteq", "bpgteq", "bilt", "bplt", "bigt", "bpgt", "bilteq", "bplteq", "btiz", "btpz", "btinz", "btpnz", "btbz", "btbnz"])
+ result = riscLowerMalformedImmediates(result, -128..127)
+ result = riscLowerMisplacedAddresses(result)
+ result = sh4LowerMisplacedLabels(result)
+ result = sh4LowerMisplacedSpecialRegisters(result)
+
+ result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_GPRS)
+ result = assignRegistersToTemporaries(result, :gpr, SH4_TMP_FPRS)
+
+ result = sh4LowerConstPool(result)
+ result = sh4LowerArgumentSetup(result)
+
+ return result
+ end
+end
+
+# Render an operand list as comma-separated SH4 assembly text.
+def sh4Operands(operands)
+    operands.map{|v| v.sh4Operand}.join(", ")
+end
+
+# Emit a register-indirect branch ("jmp @Rn" / "jsr @Rn"). SH4 branches
+# have a delay slot, filled here with a nop.
+def emitSH4Branch(sh4opcode, operand)
+    raise "Invalid operand #{operand}" unless operand.is_a? RegisterID or operand.is_a? SpecialRegister
+    $asm.puts "#{sh4opcode} @#{operand.sh4Operand}"
+    $asm.puts "nop"
+end
+
+# Emit a shift of `operand` by immediate `val` using the fixed-count forms
+# (16, 8, 2, 1 bits), greedily decomposing the amount.
+def emitSH4ShiftImm(val, operand, direction)
+    tmp = val
+    while tmp > 0
+        if tmp >= 16
+            $asm.puts "shl#{direction}16 #{operand.sh4Operand}"
+            tmp -= 16
+        elsif tmp >= 8
+            $asm.puts "shl#{direction}8 #{operand.sh4Operand}"
+            tmp -= 8
+        elsif tmp >= 2
+            $asm.puts "shl#{direction}2 #{operand.sh4Operand}"
+            tmp -= 2
+        else
+            $asm.puts "shl#{direction} #{operand.sh4Operand}"
+            tmp -= 1
+        end
+    end
+end
+
+# Branch to dest when the T bit is set (cleared, when neg is true). A
+# reversed short bt/bf hops over an unconditional bra/jmp, so arbitrarily
+# far targets are reachable despite bt/bf's short range.
+def emitSH4BranchIfT(dest, neg)
+    outlabel = LocalLabel.unique("branchIfT")
+    # Reversed condition: fall into the long branch only when taken.
+    # NOTE(review): bare codeOrigin is referenced below — confirm it
+    # resolves in this scope.
+    sh4opcode = neg ? "bt" : "bf"
+    $asm.puts "#{sh4opcode} #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
+    if dest.is_a? LocalLabelReference
+        $asm.puts "bra #{dest.asmLabel}"
+        $asm.puts "nop"
+    else
+        emitSH4Branch("jmp", dest)
+    end
+    outlabel.lower("SH4")
+end
+
+# Set the T bit from an integer comparison. Operands are swapped because
+# SH4's "cmp/<op> Rm, Rn" compares Rn against Rm.
+def emitSH4IntCompare(cmpOpcode, operands)
+    $asm.puts "cmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
+end
+
+# Compare two integers and branch to operands[2] on the (possibly negated)
+# T bit.
+def emitSH4CondBranch(cmpOpcode, neg, operands)
+    emitSH4IntCompare(cmpOpcode, operands)
+    emitSH4BranchIfT(operands[2], neg)
+end
+
+# Compare two integers and materialize the (possibly negated) T bit as 0/1
+# in operands[2]. The negated form seeds 0 and skips the "mov #1" when T is
+# set, yielding !T.
+def emitSH4CompareSet(cmpOpcode, neg, operands)
+    emitSH4IntCompare(cmpOpcode, operands)
+    if !neg
+        $asm.puts "movt #{operands[2].sh4Operand}"
+    else
+        outlabel = LocalLabel.unique("compareSet")
+        $asm.puts "mov #0, #{operands[2].sh4Operand}"
+        $asm.puts "bt #{LocalLabelReference.new(codeOrigin, outlabel).asmLabel}"
+        $asm.puts "mov #1, #{operands[2].sh4Operand}"
+        outlabel.lower("SH4")
+    end
+end
+
+# Branch to operands[1] when the double operands[0] is NaN: "fcmp/eq x, x"
+# leaves T clear exactly when x is NaN, so bf takes the branch then.
+def emitSH4BranchIfNaN(operands)
+    raise "Invalid operands number (#{operands.size})" unless operands.size == 2
+    $asm.puts "fcmp/eq #{sh4Operands([operands[0], operands[0]])}"
+    $asm.puts "bf #{operands[1].asmLabel}"
+end
+
+# Double compare-and-branch. SH4 only provides fcmp/eq and fcmp/gt, so "lt"
+# is synthesized by emitting fcmp/gt with the operands swapped.
+def emitSH4DoubleCondBranch(cmpOpcode, neg, operands)
+    if cmpOpcode == "lt"
+        $asm.puts "fcmp/gt #{sh4Operands([operands[0], operands[1]])}"
+    else
+        $asm.puts "fcmp/#{cmpOpcode} #{sh4Operands([operands[1], operands[0]])}"
+    end
+    emitSH4BranchIfT(operands[2], neg)
+end
+
+class Instruction
+    # Emit SH4 assembly for one (already fully lowered) offlineasm
+    # instruction; anything unrecognized falls through to lowerDefault.
+    def lowerSH4
+        $asm.comment codeOriginString
+        case opcode
+        when "addi", "addp"
+            if operands.size == 3
+                # Fold the three-operand form into SH4's two-operand add,
+                # reusing the destination when it aliases a source.
+                if operands[0].sh4Operand == operands[2].sh4Operand
+                    $asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
+                elsif operands[1].sh4Operand == operands[2].sh4Operand
+                    $asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
+                else
+                    $asm.puts "mov #{sh4Operands([operands[0], operands[2]])}"
+                    $asm.puts "add #{sh4Operands([operands[1], operands[2]])}"
+                end
+            else
+                $asm.puts "add #{sh4Operands(operands)}"
+            end
+        when "subi", "subp"
+            if operands.size == 3
+                if operands[1].is_a? Immediate
+                    # a - imm computed as mov(-imm) + add.
+                    $asm.puts "mov #{sh4Operands([Immediate.new(codeOrigin, -1 * operands[1].value), operands[2]])}"
+                    $asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
+                elsif operands[1].sh4Operand == operands[2].sh4Operand
+                    # Destination aliases the subtrahend: a - b == (-b) + a.
+                    $asm.puts "neg #{sh4Operands([operands[2], operands[2]])}"
+                    $asm.puts "add #{sh4Operands([operands[0], operands[2]])}"
+                else
+                    $asm.puts "mov #{sh4Operands([operands[0], operands[2]])}"
+                    $asm.puts "sub #{sh4Operands([operands[1], operands[2]])}"
+                end
+            else
+                if operands[0].is_a? Immediate
+                    $asm.puts "add #{sh4Operands([Immediate.new(codeOrigin, -1 * operands[0].value), operands[1]])}"
+                else
+                    $asm.puts "sub #{sh4Operands(operands)}"
+                end
+            end
+        when "muli", "mulp"
+            # 32x32->32 multiply; the result is read back from macl.
+            $asm.puts "mul.l #{sh4Operands(operands[0..1])}"
+            $asm.puts "sts macl, #{operands[-1].sh4Operand}"
+        when "negi", "negp"
+            if operands.size == 2
+                $asm.puts "neg #{sh4Operands(operands)}"
+            else
+                $asm.puts "neg #{sh4Operands([operands[0], operands[0]])}"
+            end
+        when "andi", "andp", "ori", "orp", "xori", "xorp"
+            raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands.size == 2
+            # Drop the type suffix to get the native mnemonic (and/or/xor).
+            sh4opcode = opcode[0..-2]
+            $asm.puts "#{sh4opcode} #{sh4Operands(operands)}"
+        when "shllx", "shlrx"
+            # Fixed-count logical shifts (1, 2, 8 or 16 bits only).
+            raise "Unhandled parameters for opcode #{opcode}" unless operands[0].is_a? Immediate
+            if operands[0].value == 1
+                $asm.puts "shl#{opcode[3, 1]} #{operands[1].sh4Operand}"
+            else
+                $asm.puts "shl#{opcode[3, 1]}#{operands[0].value} #{operands[1].sh4Operand}"
+            end
+        when "shalx", "sharx"
+            # Arithmetic shifts only exist as single-bit forms.
+            raise "Unhandled parameters for opcode #{opcode}" unless operands[0].is_a? Immediate and operands[0].value == 1
+            $asm.puts "sha#{opcode[3, 1]} #{operands[1].sh4Operand}"
+        when "shld", "shad"
+            $asm.puts "#{opcode} #{sh4Operands(operands)}"
+        when "loaddReversedAndIncrementAddress"
+            # As we are little endian, we don't use "fmov @Rm, DRn" here.
+            $asm.puts "fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleLo}"
+            $asm.puts "fmov.s #{operands[0].sh4OperandPostInc}, #{operands[1].sh4SingleHi}"
+        when "storedReversedAndDecrementAddress"
+            # As we are little endian, we don't use "fmov DRm, @Rn" here.
+            $asm.puts "fmov.s #{operands[0].sh4SingleHi}, #{operands[1].sh4OperandPreDec}"
+            $asm.puts "fmov.s #{operands[0].sh4SingleLo}, #{operands[1].sh4OperandPreDec}"
+        when "ci2d"
+            # int -> double conversion staged through the fpul register.
+            $asm.puts "lds #{operands[0].sh4Operand}, fpul"
+            $asm.puts "float fpul, #{operands[1].sh4Operand}"
+        when "fii2d"
+            # Assemble a double from two 32-bit halves (low, high).
+            $asm.puts "lds #{operands[0].sh4Operand}, fpul"
+            $asm.puts "fsts fpul, #{operands[2].sh4SingleLo}"
+            $asm.puts "lds #{operands[1].sh4Operand}, fpul"
+            $asm.puts "fsts fpul, #{operands[2].sh4SingleHi}"
+        when "fd2ii"
+            # Split a double into two 32-bit halves (low, high).
+            $asm.puts "flds #{operands[0].sh4SingleLo}, fpul"
+            $asm.puts "sts fpul, #{operands[1].sh4Operand}"
+            $asm.puts "flds #{operands[0].sh4SingleHi}, fpul"
+            $asm.puts "sts fpul, #{operands[2].sh4Operand}"
+        when "addd", "subd", "muld", "divd"
+            sh4opcode = opcode[0..-2]
+            $asm.puts "f#{sh4opcode} #{sh4Operands(operands)}"
+        when "bcd2i"
+            # Truncate to int; branch if the round-trip back to double does
+            # not compare equal (value not exactly representable) or if the
+            # truncated result is zero.
+            $asm.puts "ftrc #{operands[0].sh4Operand}, fpul"
+            $asm.puts "sts fpul, #{operands[1].sh4Operand}"
+            $asm.puts "float fpul, #{SH4_TMP_FPRS[0].sh4Operand}"
+            $asm.puts "fcmp/eq #{sh4Operands([operands[0], SH4_TMP_FPRS[0]])}"
+            $asm.puts "bf #{operands[2].asmLabel}"
+            $asm.puts "tst #{sh4Operands([operands[1], operands[1]])}"
+            $asm.puts "bt #{operands[2].asmLabel}"
+        when "bdnan"
+            emitSH4BranchIfNaN(operands)
+        when "bdneq"
+            emitSH4DoubleCondBranch("eq", true, operands)
+        when "bdgteq"
+            emitSH4DoubleCondBranch("lt", true, operands)
+        when "bdlt"
+            emitSH4DoubleCondBranch("lt", false, operands)
+        when "bdlteq"
+            emitSH4DoubleCondBranch("gt", true, operands)
+        when "bdgt"
+            emitSH4DoubleCondBranch("gt", false, operands)
+        when "baddio", "baddpo", "bsubio", "bsubpo"
+            raise "#{opcode} with #{operands.size} operands is not handled yet" unless operands.size == 3
+            # addv/subv set T on signed overflow.
+            $asm.puts "#{opcode[1, 3]}v #{sh4Operands([operands[0], operands[1]])}"
+            $asm.puts "bt #{operands[2].asmLabel}"
+        when "bmulio", "bmulpo"
+            # 5 operands: two scratch registers prepended by
+            # sh4LowerSimpleBranchOps. Branch to operands[4] unless the
+            # 64-bit product's high word equals the sign extension of the
+            # low word (i.e. on 32-bit overflow).
+            raise "Invalid operands number (#{operands.size})" unless operands.size == 5
+            $asm.puts "dmuls.l #{sh4Operands([operands[2], operands[3]])}"
+            $asm.puts "sts macl, #{operands[3].sh4Operand}"
+            $asm.puts "cmp/pz #{operands[3].sh4Operand}"
+            $asm.puts "movt #{operands[1].sh4Operand}"
+            $asm.puts "add #-1, #{operands[1].sh4Operand}"
+            $asm.puts "sts mach, #{operands[0].sh4Operand}"
+            $asm.puts "cmp/eq #{sh4Operands([operands[0], operands[1]])}"
+            $asm.puts "bf #{operands[4].asmLabel}"
+        when "btiz", "btpz", "btbz", "btinz", "btpnz", "btbnz"
+            if operands.size == 3
+                $asm.puts "tst #{sh4Operands([operands[0], operands[1]])}"
+            else
+                if operands[0].sh4Operand == "r0"
+                    # Shorter encoding available when testing r0.
+                    $asm.puts "cmp/eq #0, r0"
+                else
+                    $asm.puts "tst #{sh4Operands([operands[0], operands[0]])}"
+                end
+            end
+            emitSH4BranchIfT(operands[-1], (opcode[-2, 2] == "nz"))
+        when "cieq", "cpeq", "cbeq"
+            emitSH4CompareSet("eq", false, operands)
+        when "cineq", "cpneq", "cbneq"
+            emitSH4CompareSet("eq", true, operands)
+        when "cib", "cpb", "cbb"
+            # unsigned below == not (unsigned higher-or-same).
+            emitSH4CompareSet("hs", true, operands)
+        when "bieq", "bpeq", "bbeq"
+            emitSH4CondBranch("eq", false, operands)
+        when "bineq", "bpneq", "bbneq"
+            emitSH4CondBranch("eq", true, operands)
+        when "bib", "bpb", "bbb"
+            emitSH4CondBranch("hs", true, operands)
+        when "bia", "bpa", "bba"
+            emitSH4CondBranch("hi", false, operands)
+        when "bibeq", "bpbeq"
+            emitSH4CondBranch("hi", true, operands)
+        when "biaeq", "bpaeq"
+            emitSH4CondBranch("hs", false, operands)
+        when "bigteq", "bpgteq", "bbgteq"
+            emitSH4CondBranch("ge", false, operands)
+        when "bilt", "bplt", "bblt"
+            emitSH4CondBranch("ge", true, operands)
+        when "bigt", "bpgt", "bbgt"
+            emitSH4CondBranch("gt", false, operands)
+        when "bilteq", "bplteq", "bblteq"
+            emitSH4CondBranch("gt", true, operands)
+        when "bs"
+            # Branch if the value is negative: cmp/pz sets T for >= 0, and bf
+            # branches when T is clear.
+            $asm.puts "cmp/pz #{operands[0].sh4Operand}"
+            $asm.puts "bf #{operands[1].asmLabel}"
+        when "call"
+            if operands[0].is_a? LocalLabelReference
+                $asm.puts "bsr #{operands[0].asmLabel}"
+                $asm.puts "nop"
+            elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
+                emitSH4Branch("jsr", operands[0])
+            else
+                raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
+            end
+        when "jmp"
+            if operands[0].is_a? LocalLabelReference
+                $asm.puts "bra #{operands[0].asmLabel}"
+                $asm.puts "nop"
+            elsif operands[0].is_a? RegisterID or operands[0].is_a? SpecialRegister
+                emitSH4Branch("jmp", operands[0])
+            else
+                raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
+            end
+        when "ret"
+            # rts has a delay slot; fill it with a nop.
+            $asm.puts "rts"
+            $asm.puts "nop"
+        when "loadb"
+            # mov.b sign-extends; extu.b zero-extends the loaded byte.
+            $asm.puts "mov.b #{sh4Operands(operands)}"
+            $asm.puts "extu.b #{sh4Operands([operands[1], operands[1]])}"
+        when "storeb"
+            $asm.puts "mov.b #{sh4Operands(operands)}"
+        when "loadh"
+            # mov.w sign-extends; extu.w zero-extends the loaded halfword.
+            $asm.puts "mov.w #{sh4Operands(operands)}"
+            $asm.puts "extu.w #{sh4Operands([operands[1], operands[1]])}"
+        when "loadi", "loadis", "loadp", "storei", "storep"
+            $asm.puts "mov.l #{sh4Operands(operands)}"
+        when "alignformova"
+            $asm.puts ".balign 4" # As balign directive is in a code section, fill value is 'nop' instruction.
+        when "mova"
+            $asm.puts "mova #{sh4Operands(operands)}"
+        when "move"
+            if operands[0].is_a? ConstPoolEntry
+                # PC-relative load from the constant pool.
+                if operands[0].size == 16
+                    $asm.puts "mov.w #{operands[0].labelref.asmLabel}, #{operands[1].sh4Operand}"
+                else
+                    $asm.puts "mov.l #{operands[0].labelref.asmLabel}, #{operands[1].sh4Operand}"
+                end
+            elsif operands[0].sh4Operand != operands[1].sh4Operand
+                # Self-moves are elided entirely.
+                $asm.puts "mov #{sh4Operands(operands)}"
+            end
+        when "leap"
+            if operands[0].is_a? BaseIndex
+                # dst = base + (index << scaleShift) + offset
+                biop = operands[0]
+                $asm.puts "mov #{sh4Operands([biop.index, operands[1]])}"
+                if biop.scaleShift > 0
+                    emitSH4ShiftImm(biop.scaleShift, operands[1], "l")
+                end
+                $asm.puts "add #{sh4Operands([biop.base, operands[1]])}"
+                if biop.offset.value != 0
+                    $asm.puts "add #{sh4Operands([biop.offset, operands[1]])}"
+                end
+            elsif operands[0].is_a? Address
+                if operands[0].base != operands[1]
+                    $asm.puts "mov #{sh4Operands([operands[0].base, operands[1]])}"
+                end
+                if operands[0].offset.value != 0
+                    $asm.puts "add #{sh4Operands([operands[0].offset, operands[1]])}"
+                end
+            else
+                raise "Unhandled parameters for opcode #{opcode} at #{codeOriginString}"
+            end
+        when "ldspr"
+            $asm.puts "lds #{sh4Operands(operands)}, pr"
+        when "stspr"
+            $asm.puts "sts pr, #{sh4Operands(operands)}"
+        when "memfence"
+            $asm.puts "synco"
+        when "pop"
+            # pr must be popped with lds.l; plain GPRs use mov.l.
+            if operands[0].sh4Operand == "pr"
+                $asm.puts "lds.l @r15+, #{sh4Operands(operands)}"
+            else
+                $asm.puts "mov.l @r15+, #{sh4Operands(operands)}"
+            end
+        when "push"
+            # pr must be pushed with sts.l; plain GPRs use mov.l.
+            if operands[0].sh4Operand == "pr"
+                $asm.puts "sts.l #{sh4Operands(operands)}, @-r15"
+            else
+                $asm.puts "mov.l #{sh4Operands(operands)}, @-r15"
+            end
+        when "break"
+            # This special opcode always generates an illegal instruction exception.
+            $asm.puts ".word 0xfffd"
+        else
+            lowerDefault
+        end
+    end
+end
+
diff --git a/Source/JavaScriptCore/offlineasm/transform.rb b/Source/JavaScriptCore/offlineasm/transform.rb
new file mode 100644
index 000000000..84dd0413b
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/transform.rb
@@ -0,0 +1,501 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+require "ast"
+
+#
+# node.resolveSettings(settings)
+#
+# Construct a new AST that does not have any IfThenElse nodes by
+# substituting concrete boolean values for each Setting.
+#
+
class Node
    # Default behavior: rebuild this node with every child resolved
    # against the given settings mapping.
    def resolveSettings(settings)
        mapChildren { |child| child.resolveSettings(settings) }
    end
end
+
class True
    # A literal true is already concrete; nothing to resolve.
    def resolveSettings(_settings)
        self
    end
end
+
class False
    # A literal false is already concrete; nothing to resolve.
    def resolveSettings(_settings)
        self
    end
end
+
class Setting
    # Replace this named setting with the AST node for its configured
    # boolean value (asNode presumably wraps true/false — defined in ast.rb).
    def resolveSettings(settings)
        value = settings[@name]
        value.asNode
    end
end
+
class And
    # Resolve both sides and fold the conjunction to a boolean literal node.
    # Short-circuit evaluation of the right side is preserved.
    def resolveSettings(settings)
        result = @left.resolveSettings(settings).value && @right.resolveSettings(settings).value
        result.asNode
    end
end
+
class Or
    # Resolve both sides and fold the disjunction to a boolean literal node.
    # Short-circuit evaluation of the right side is preserved.
    def resolveSettings(settings)
        result = @left.resolveSettings(settings).value || @right.resolveSettings(settings).value
        result.asNode
    end
end
+
class Not
    # Resolve the child and fold the negation to a boolean literal node.
    def resolveSettings(settings)
        (!@child.resolveSettings(settings).value).asNode
    end
end
+
class IfThenElse
    # Collapse the conditional: evaluate the predicate under the settings and
    # resolve only the branch that is taken.
    def resolveSettings(settings)
        taken = @predicate.resolveSettings(settings).value ? @thenCase : @elseCase
        taken.resolveSettings(settings)
    end
end
+
class Sequence
    # Resolve each element; any nested Sequence produced by resolution (e.g.
    # from a collapsed IfThenElse) is spliced inline so the result stays flat.
    def resolveSettings(settings)
        flattened = @list.flat_map {
            | item |
            resolved = item.resolveSettings(settings)
            resolved.is_a?(Sequence) ? resolved.list : [resolved]
        }
        Sequence.new(codeOrigin, flattened)
    end
end
+
+#
+# node.demacroify(macros)
+# node.substitute(mapping)
+#
+# demacroify() constructs a new AST that does not have any Macro
+# nodes, while substitute() replaces Variable nodes with the given
+# nodes in the mapping.
+#
+
class Node
    # Generic traversals: each pass rebuilds the node with the same pass
    # applied to every child.
    def demacroify(macros)
        mapChildren { |child| child.demacroify(macros) }
    end

    def substitute(mapping)
        mapChildren { |child| child.substitute(mapping) }
    end

    def substituteLabels(mapping)
        mapChildren { |child| child.substituteLabels(mapping) }
    end
end
+
class Macro
    # Substitution must not capture names the macro itself binds: drop the
    # macro's own parameters from the mapping before recursing into the body.
    def substitute(mapping)
        pruned = mapping.reject { |key, _value| @variables.include?(key) }
        mapChildren { |child| child.substitute(pruned) }
    end
end
+
class Variable
    # A variable is replaced when the mapping has an entry for it; otherwise
    # it stands for itself. (Mapped values are AST nodes, never false/nil.)
    def substitute(mapping)
        mapping[self] || self
    end
end
+
class LocalLabel
    # A local label is replaced when the mapping has an entry for it;
    # otherwise it stands for itself.
    def substituteLabels(mapping)
        mapping[self] || self
    end
end
+
class Sequence
    # Substitute variables throughout the list. ConstDecls are consumed here:
    # the declaration's value (itself substituted against the constants seen
    # so far) is added to the mapping and the declaration is dropped from the
    # output list.
    def substitute(constants)
        newList = []
        myConstants = constants.dup
        @list.each {
            | item |
            if item.is_a? ConstDecl
                myConstants[item.variable] = item.value.substitute(myConstants)
            else
                newList << item.substitute(myConstants)
            end
        }
        Sequence.new(codeOrigin, newList)
    end
    
    # Give every LocalLabel defined directly in this sequence a fresh unique
    # name (prefixed with +comment+ when given — typically the name of the
    # macro being expanded) and rewrite all references accordingly.
    def renameLabels(comment)
        mapping = {}
        
        @list.each {
            | item |
            if item.is_a? LocalLabel
                mapping[item] = LocalLabel.unique(if comment then comment + "_" else "" end + item.cleanName)
            end
        }
        
        substituteLabels(mapping)
    end
    
    # Expand macro calls. Macro definitions in this sequence are collected
    # into scope first (so a macro may be called before its textual
    # definition), then dropped from the output. For each MacroCall, operands
    # that are themselves macros (or variables naming macros) are passed as
    # macro bindings; all other operands are substituted as values. The
    # expanded body has its labels renamed to keep them unique per expansion.
    def demacroify(macros)
        myMacros = macros.dup
        @list.each {
            | item |
            if item.is_a? Macro
                myMacros[item.name] = item
            end
        }
        newList = []
        @list.each {
            | item |
            if item.is_a? Macro
                # Ignore.
            elsif item.is_a? MacroCall
                mapping = {}
                myMyMacros = myMacros.dup
                raise "Could not find macro #{item.name} at #{item.codeOriginString}" unless myMacros[item.name]
                raise "Argument count mismatch for call to #{item.name} at #{item.codeOriginString}" unless item.operands.size == myMacros[item.name].variables.size
                item.operands.size.times {
                    | idx |
                    if item.operands[idx].is_a? Variable and myMacros[item.operands[idx].name]
                        # Variable argument that names a macro: bind the macro.
                        myMyMacros[myMacros[item.name].variables[idx].name] = myMacros[item.operands[idx].name]
                        mapping[myMacros[item.name].variables[idx].name] = nil
                    elsif item.operands[idx].is_a? Macro
                        # Anonymous macro argument: bind it directly.
                        myMyMacros[myMacros[item.name].variables[idx].name] = item.operands[idx]
                        mapping[myMacros[item.name].variables[idx].name] = nil
                    else
                        # Plain value argument: substitute it into the body.
                        myMyMacros[myMacros[item.name].variables[idx]] = nil
                        mapping[myMacros[item.name].variables[idx]] = item.operands[idx]
                    end
                }
                if item.annotation
                    newList << Instruction.new(item.codeOrigin, "localAnnotation", [], item.annotation)
                end
                newList += myMacros[item.name].body.substitute(mapping).demacroify(myMyMacros).renameLabels(item.name).list
            else
                newList << item.demacroify(myMacros)
            end
        }
        Sequence.new(codeOrigin, newList).substitute({})
    end
end
+
+#
+# node.resolveOffsets(offsets, sizes)
+#
+# Construct a new AST that has offset values instead of symbolic
+# offsets.
+#
+
class Node
    # Default: rebuild the node with offsets/sizes resolved in every child.
    def resolveOffsets(offsets, sizes)
        mapChildren { |child| child.resolveOffsets(offsets, sizes) }
    end
end
+
class StructOffset
    # Replace a known symbolic offset with its numeric Immediate; an unknown
    # offset is left in place (it will fail validation later). Offset values
    # are numbers, so the truthiness test means "entry present".
    def resolveOffsets(offsets, sizes)
        resolved = offsets[self]
        resolved ? Immediate.new(codeOrigin, resolved) : self
    end
end
+
class Sizeof
    # Replace a known sizeof with its numeric Immediate. A missing entry
    # spews diagnostics and leaves the node unresolved for later validation.
    def resolveOffsets(offsets, sizes)
        size = sizes[self]
        return Immediate.new(codeOrigin, size) if size
        puts "Could not find #{self.inspect} in #{sizes.keys.inspect}"
        puts "sizes = #{sizes.inspect}"
        self
    end
end
+
+#
+# node.fold
+#
+# Resolve constant references and compute arithmetic expressions.
+#
+
class Node
    # Default folding: fold every child and rebuild this node.
    def fold
        mapChildren(&:fold)
    end
end
+
# Constant folding for arithmetic nodes: fold the children in place first,
# then collapse to a single Immediate when every child folded to an
# Immediate; otherwise return the (partially folded) node unchanged.

class AddImmediates
    def fold
        @left = @left.fold
        @right = @right.fold
        if @left.is_a?(Immediate) && @right.is_a?(Immediate)
            Immediate.new(codeOrigin, @left.value + @right.value)
        else
            self
        end
    end
end

class SubImmediates
    def fold
        @left = @left.fold
        @right = @right.fold
        if @left.is_a?(Immediate) && @right.is_a?(Immediate)
            Immediate.new(codeOrigin, @left.value - @right.value)
        else
            self
        end
    end
end

class MulImmediates
    def fold
        @left = @left.fold
        @right = @right.fold
        if @left.is_a?(Immediate) && @right.is_a?(Immediate)
            Immediate.new(codeOrigin, @left.value * @right.value)
        else
            self
        end
    end
end

class NegImmediate
    def fold
        @child = @child.fold
        @child.is_a?(Immediate) ? Immediate.new(codeOrigin, -@child.value) : self
    end
end

class OrImmediates
    def fold
        @left = @left.fold
        @right = @right.fold
        if @left.is_a?(Immediate) && @right.is_a?(Immediate)
            Immediate.new(codeOrigin, @left.value | @right.value)
        else
            self
        end
    end
end

class AndImmediates
    def fold
        @left = @left.fold
        @right = @right.fold
        if @left.is_a?(Immediate) && @right.is_a?(Immediate)
            Immediate.new(codeOrigin, @left.value & @right.value)
        else
            self
        end
    end
end

class XorImmediates
    def fold
        @left = @left.fold
        @right = @right.fold
        if @left.is_a?(Immediate) && @right.is_a?(Immediate)
            Immediate.new(codeOrigin, @left.value ^ @right.value)
        else
            self
        end
    end
end

class BitnotImmediate
    def fold
        @child = @child.fold
        @child.is_a?(Immediate) ? Immediate.new(codeOrigin, ~@child.value) : self
    end
end
+
+#
+# node.resolveAfterSettings(offsets, sizes)
+#
+# Compile assembly against a set of offsets.
+#
+
class Node
    # Full resolution pipeline for a parsed tree: expand macros, replace
    # symbolic struct offsets/sizes with immediates, then constant-fold.
    def resolve(offsets, sizes)
        demacroify({}).resolveOffsets(offsets, sizes).fold
    end
end
+
+#
+# node.validate
+#
+# Checks that the node is ready for backend compilation.
+#
+
class Node
    # Backends may only see fully-resolved nodes; any node type that still
    # hits this default validate is a lowering bug.
    def validate
        raise "Unresolved #{dump} at #{codeOriginString}"
    end

    def validateChildren
        children.each(&:validate)
    end
end
+
class Sequence
    def validate
        validateChildren

        # Further verify that this list contains only instructions, labels, and skips.
        @list.each {
            | node |
            next if node.is_a?(Instruction) or node.is_a?(Label) or
                    node.is_a?(LocalLabel) or node.is_a?(Skip)
            raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
        }
    end
end
+
# Leaf operands are always valid once lowering reaches the backend, so their
# validate is a no-op.

class Immediate
    def validate
    end
end

class StringLiteral
    def validate
    end
end

class RegisterID
    def validate
    end
end

class FPRegisterID
    def validate
    end
end

# Compound operands are valid when all of their children are.

class Address
    def validate
        validateChildren
    end
end

class BaseIndex
    def validate
        validateChildren
    end
end

class AbsoluteAddress
    def validate
        validateChildren
    end
end

class Instruction
    def validate
        validateChildren
    end
end
+
class SubImmediates
    # A SubImmediates may survive folding (e.g. when a side is a link-time
    # quantity), but only if both sides are still immediate operands.
    def validate
        raise "Invalid operand #{left.dump} to immediate subtraction" unless left.immediateOperand?
        raise "Invalid operand #{right.dump} to immediate subtraction" unless right.immediateOperand?
    end
end
+
# Errors, labels, label references, and skips are emitted as-is by the
# backends; nothing to check.

class Error
    def validate
    end
end

class Label
    def validate
    end
end

class LocalLabel
    def validate
    end
end

class LabelReference
    def validate
    end
end

class LocalLabelReference
    def validate
    end
end

class Skip
    def validate
    end
end
+
diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb
new file mode 100644
index 000000000..8830e3d41
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/x86.rb
@@ -0,0 +1,1526 @@
+# Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+# Copyright (C) 2013 Digia Plc. and/or its subsidiary(-ies)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "config"
+
# True when the active backend targets 64-bit x86.
def isX64
    case $activeBackend
    when "X86", "X86_WIN"
        false
    when "X86_64", "X86_64_WIN"
        true
    else
        raise "bad value for $activeBackend: #{$activeBackend}"
    end
end
+
# True when floating point must be lowered to x87 (32-bit backends);
# the 64-bit backends use SSE2.
def useX87
    case $activeBackend
    when "X86", "X86_WIN"
        true
    when "X86_64", "X86_64_WIN"
        false
    else
        raise "bad value for $activeBackend: #{$activeBackend}"
    end
end
+
# Windows implies the MSVC toolchain and MASM (Intel) assembler syntax;
# everything else is assumed GCC-compatible with AT&T syntax.
def isWindows
    ENV['OS'] == 'Windows_NT'
end

def isGCC
    not isWindows
end

def isMSVC
    isWindows
end

def isIntelSyntax
    isWindows
end

# AT&T spells registers %reg; Intel uses the bare name.
def register(name)
    if isIntelSyntax
        name
    else
        "%" + name
    end
end

# AT&T: off(reg). Intel: [off + reg].
def offsetRegister(off, register)
    if isIntelSyntax
        "[#{off} + #{register}]"
    else
        "#{off}(#{register})"
    end
end

# Indirect call/jump targets carry a "*" prefix in AT&T syntax only.
def callPrefix
    if isIntelSyntax
        ""
    else
        "*"
    end
end

# AT&T operand order is source, destination; Intel is destination, source.
def orderOperands(opA, opB)
    if isIntelSyntax
        "#{opB}, #{opA}"
    else
        "#{opA}, #{opB}"
    end
end

# Immediate constants carry a "$" prefix in AT&T syntax only.
def const(c)
    if isIntelSyntax
        "#{c}"
    else
        "$#{c}"
    end
end
+
# Intel syntax needs an explicit operand-size qualifier ("dword ptr " etc.)
# on memory operands; AT&T encodes the size in the mnemonic suffix instead,
# so this returns "" there.
def getSizeString(kind)
    return "" unless isIntelSyntax

    size =
        case kind
        when :byte
            "byte"
        when :half
            "word"
        when :int
            "dword"
        when :ptr
            isX64 ? "qword" : "dword"
        when :double, :quad
            "qword"
        else
            raise "Invalid kind #{kind}"
        end

    "#{size} ptr "
end
+
# A raw x86-64 register used internally by the lowering (not an offlineasm
# register name). Only r8..r15-style names are expected (@name must start
# with "r") and only on 64-bit targets.
# NOTE(review): relies on an initializer defined elsewhere (ast.rb) to set
# @name from the constructor argument — confirm against NoChildren.
class SpecialRegister < NoChildren
    def x86Operand(kind)
        raise unless @name =~ /^r/
        raise unless isX64
        case kind
        when :half
            register(@name + "w")
        when :int
            register(@name + "d")
        when :ptr
            register(@name)
        when :quad
            register(@name)
        else
            raise
        end
    end
    def x86CallOperand(kind)
        # Call operands are not allowed to be partial registers.
        "#{callPrefix}#{x86Operand(:quad)}"
    end
end

# Scratch register reserved for materializing out-of-range immediates on
# x86-64 (see Sequence#getModifiedListX86_64 below).
X64_SCRATCH_REGISTER = SpecialRegister.new("r11")
+
class RegisterID
    # x86 encodings for each offlineasm register name, as
    # [8-bit, 16-bit, 32-bit, 64-bit]; nil means that form does not exist.
    X86_GPR_FORMS = {
        "t0" => ["al", "ax", "eax", "rax"],
        "a0" => ["al", "ax", "eax", "rax"],
        "r0" => ["al", "ax", "eax", "rax"],
        "t1" => ["dl", "dx", "edx", "rdx"],
        "a1" => ["dl", "dx", "edx", "rdx"],
        "r1" => ["dl", "dx", "edx", "rdx"],
        "t2" => ["cl", "cx", "ecx", "rcx"],
        "t3" => ["bl", "bx", "ebx", "rbx"],
        "t4" => ["dil", "di", "edi", "rdi"],
        "t5" => ["sil", "si", "esi", "rsi"],
        "cfr" => [nil, "bp", "ebp", "rbp"],
        "sp" => ["spl", "sp", "esp", "rsp"],
        "t6" => [nil, "r8w", "r8d", "r8"],
        "t7" => [nil, "r9w", "r9d", "r9"],
        "csr1" => [nil, "r14w", "r14d", "r14"],
        "csr2" => [nil, "r15w", "r15d", "r15"],
    }.freeze

    # Register names that only exist on 64-bit x86.
    X86_64_ONLY = ["t6", "t7", "csr1", "csr2"].freeze

    # Whether setcc-style 8-bit accesses are possible for this register
    # without going through a scratch (see handleX86Set).
    def supports8BitOnX86
        case name
        when "t0", "a0", "r0", "t1", "a1", "r1", "t2", "t3", "t4", "t5"
            true
        when "cfr", "ttnr", "tmr"
            false
        when "t6"
            isX64
        else
            raise
        end
    end

    # Returns the assembler spelling of this register for the given access
    # kind (:byte, :half, :int, :ptr, :quad). :ptr picks the natural width of
    # the target; :quad is 64-bit only.
    #
    # BUG FIX: the t5/t6/t7/csr1/csr2 cases previously had no else clause, so
    # an unsupported kind (e.g. :byte of t6) silently evaluated to nil and was
    # interpolated as empty text into the emitted assembly. All unsupported
    # kinds now raise, matching the behavior of the other registers.
    def x86Operand(kind)
        if X86_64_ONLY.include?(name) and not isX64
            raise "Cannot use #{name} in 32-bit X86 at #{codeOriginString}"
        end
        forms = X86_GPR_FORMS[name]
        raise "Bad register #{name} for X86 at #{codeOriginString}" unless forms
        byteForm, halfForm, intForm, quadForm = forms
        result =
            case kind
            when :byte
                byteForm
            when :half
                halfForm
            when :int
                intForm
            when :ptr
                isX64 ? quadForm : intForm
            when :quad
                isX64 ? quadForm : nil
            end
        raise "Invalid kind #{kind} for name #{name}" unless result
        register(result)
    end

    # Call operands always use the full pointer-width register.
    def x86CallOperand(kind)
        isX64 ? "#{callPrefix}#{x86Operand(:quad)}" : "#{callPrefix}#{x86Operand(:ptr)}"
    end
end
+
class FPRegisterID
    # SSE2 spelling of this FP register. Only :double accesses exist, and the
    # x87 backend never uses this path.
    def x86Operand(kind)
        raise unless kind == :double
        raise if useX87
        case name
        when "ft0", "fa0", "fr" then register("xmm0")
        when "ft1", "fa1" then register("xmm1")
        when "ft2", "fa2" then register("xmm2")
        when "ft3", "fa3" then register("xmm3")
        when "ft4" then register("xmm4")
        when "ft5" then register("xmm5")
        else raise "Bad register #{name} for X86 at #{codeOriginString}"
        end
    end

    # x87 stack slot this register nominally occupies when nothing has been
    # pushed; only the first two slots are implemented.
    def x87DefaultStackPosition
        case name
        when "ft0", "fr" then 0
        when "ft1" then 1
        when "ft2", "ft3", "ft4", "ft5"
            raise "Unimplemented register #{name} for X86 at #{codeOriginString}"
        else
            raise "Bad register #{name} for X86 at #{codeOriginString}"
        end
    end

    # st(N) operand for the x87 backend; offset is the number of values
    # pushed since the default stack layout (0 or 1).
    def x87Operand(offset)
        raise unless useX87
        raise unless offset == 0 or offset == 1
        "#{register("st")}(#{x87DefaultStackPosition + offset})"
    end

    def x86CallOperand(kind)
        "#{callPrefix}#{x86Operand(kind)}"
    end
end
+
class Immediate
    # An immediate is directly encodable on x86-64 only when it fits in a
    # sign-extended 32-bit field; 32-bit x86 accepts any immediate.
    def validX86Immediate?
        return true unless isX64
        value.between?(-0x80000000, 0x7fffffff)
    end
    def x86Operand(kind)
        const(value)
    end
    def x86CallOperand(kind)
        value.to_s
    end
end
+
class Address
    def supports8BitOnX86
        true
    end

    # base-plus-offset addressing in the active assembler syntax.
    def x86AddressOperand(addressKind)
        offsetRegister(offset.value, base.x86Operand(addressKind))
    end
    def x86Operand(kind)
        getSizeString(kind) + x86AddressOperand(:ptr)
    end
    def x86CallOperand(kind)
        callPrefix + x86Operand(kind)
    end
end
+
class BaseIndex
    def supports8BitOnX86
        true
    end
    
    # Scaled-index addressing.
    # AT&T: offset(base, index, scale). Intel: size [offset + base + index * scale].
    def x86AddressOperand(addressKind)
        if !isIntelSyntax
            "#{offset.value}(#{base.x86Operand(addressKind)}, #{index.x86Operand(addressKind)}, #{scale})"
        else
            "#{getSizeString(addressKind)}[#{offset.value} + #{base.x86Operand(addressKind)} + #{index.x86Operand(addressKind)} * #{scale}]"
        end
    end
    
    # As a plain operand, the address components are always pointer-width;
    # the Intel form also carries the access-size qualifier for +kind+.
    def x86Operand(kind)
        if !isIntelSyntax
            x86AddressOperand(:ptr)
        else
            "#{getSizeString(kind)}[#{offset.value} + #{base.x86Operand(:ptr)} + #{index.x86Operand(:ptr)} * #{scale}]"
        end
    end

    def x86CallOperand(kind)
        "#{callPrefix}#{x86Operand(kind)}"
    end
end
+
class AbsoluteAddress
    def supports8BitOnX86
        true
    end

    # An absolute address is emitted as its literal numeric value.
    def x86AddressOperand(addressKind)
        address.value.to_s
    end

    def x86Operand(kind)
        address.value.to_s
    end

    def x86CallOperand(kind)
        "#{callPrefix}#{address.value}"
    end
end
+
# Label references used as jump/call targets are emitted by their asm name.

class LabelReference
    def x86CallOperand(kind)
        asmLabel
    end
end

class LocalLabelReference
    def x86Operand(kind)
        asmLabel
    end
    def x86CallOperand(kind)
        asmLabel
    end
end
+
class Sequence
    # Rewrite pass for the 64-bit backends: any immediate operand that does
    # not fit a sign-extended 32-bit field is first moved into the scratch
    # register (r11), and the instruction uses the scratch instead. "move"
    # itself is exempt — presumably its lowering accepts full 64-bit
    # immediates (confirm in handleMove). At most one oversized immediate is
    # supported per instruction, since there is only one scratch.
    def getModifiedListX86_64
        newList = []
        
        @list.each {
            | node |
            newNode = node
            if node.is_a? Instruction
                unless node.opcode == "move"
                    usedScratch = false
                    newOperands = node.operands.map {
                        | operand |
                        if operand.immediate? and not operand.validX86Immediate?
                            if usedScratch
                                raise "Attempt to use scratch register twice at #{operand.codeOriginString}"
                            end
                            newList << Instruction.new(operand.codeOrigin, "move", [operand, X64_SCRATCH_REGISTER])
                            usedScratch = true
                            X64_SCRATCH_REGISTER
                        else
                            operand
                        end
                    }
                    newNode = Instruction.new(node.codeOrigin, node.opcode, newOperands, node.annotation)
                end
            else
                unless node.is_a? Label or
                        node.is_a? LocalLabel or
                        node.is_a? Skip
                    raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
                end
            end
            if newNode
                newList << newNode
            end
        }
        
        return newList
    end
    def getModifiedListX86_64_WIN
        getModifiedListX86_64
    end
end
+
+class Instruction
+ @@floatingPointCompareImplicitOperand = isIntelSyntax ? "st(0), " : ""
+
+ def x86Operands(*kinds)
+ raise unless kinds.size == operands.size
+ result = []
+ kinds.size.times {
+ | idx |
+ i = isIntelSyntax ? (kinds.size - idx - 1) : idx
+ result << operands[i].x86Operand(kinds[i])
+ }
+ result.join(", ")
+ end
+
+ def x86Suffix(kind)
+ if isIntelSyntax
+ return ""
+ end
+
+ case kind
+ when :byte
+ "b"
+ when :half
+ "w"
+ when :int
+ "l"
+ when :ptr
+ isX64 ? "q" : "l"
+ when :quad
+ isX64 ? "q" : raise
+ when :double
+ not useX87 ? "sd" : raise
+ else
+ raise
+ end
+ end
+
+ def x86Bytes(kind)
+ case kind
+ when :byte
+ 1
+ when :half
+ 2
+ when :int
+ 4
+ when :ptr
+ isX64 ? 8 : 4
+ when :quad
+ isX64 ? 8 : raise
+ when :double
+ 8
+ else
+ raise
+ end
+ end
+
    # Emit a two-address x86 op for an offlineasm op with 2 or 3 operands.
    # For 3 operands (a, b, dest): reuse dest when it aliases a or b,
    # otherwise copy a into dest first.
    # NOTE(review): when dest aliases operand b this emits "b op= a", which
    # assumes the op is commutative — sub is special-cased elsewhere; verify
    # for any new caller.
    def handleX86OpWithNumOperands(opcode, kind, numOperands)
        if numOperands == 3
            if operands[0] == operands[2]
                $asm.puts "#{opcode} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
            elsif operands[1] == operands[2]
                $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
            else
                $asm.puts "mov#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
                $asm.puts "#{opcode} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
            end
        else
            $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(kind), operands[1].x86Operand(kind))}"
        end
    end
    
    def handleX86Op(opcode, kind)
        handleX86OpWithNumOperands(opcode, kind, operands.size)
    end
    
    # Shifts: x86 requires a variable shift amount to live in CL. If the
    # amount is an immediate or already in t2 (ecx — see RegisterID), emit
    # directly; otherwise xchg the amount into ecx around the shift so no
    # register is clobbered.
    def handleX86Shift(opcode, kind)
        if operands[0].is_a? Immediate or operands[0] == RegisterID.forName(nil, "t2")
            $asm.puts "#{opcode} #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(kind))}"
        else
            cx = RegisterID.forName(nil, "t2")
            $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{cx.x86Operand(:ptr)}"
            $asm.puts "#{opcode} #{orderOperands(register("cl"), operands[1].x86Operand(kind))}"
            $asm.puts "xchg#{x86Suffix(:ptr)} #{operands[0].x86Operand(:ptr)}, #{cx.x86Operand(:ptr)}"
        end
    end
+
    # Double compare + conditional branch. The SSE2 path uses ucomisd;
    # :normal compares operands[0] against operands[1], :reverse swaps them.
    # operands[2] is the branch target.
    def handleX86DoubleBranch(branchOpcode, mode)
        if useX87
            handleX87Compare(mode)
        else
            case mode
            when :normal
                $asm.puts "ucomisd #{orderOperands(operands[1].x86Operand(:double), operands[0].x86Operand(:double))}"
            when :reverse
                $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
            else
                raise mode.inspect
            end
        end
        $asm.puts "#{branchOpcode} #{operands[2].asmLabel}"
    end
    
    # Integer compare. Peephole: comparing a register against 0 for
    # (in)equality is emitted as "test reg, reg" instead of cmp-immediate.
    def handleX86IntCompare(opcodeSuffix, kind)
        if operands[0].is_a? Immediate and operands[0].value == 0 and operands[1].is_a? RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne")
            $asm.puts "test#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[1].x86Operand(kind))}"
        elsif operands[1].is_a? Immediate and operands[1].value == 0 and operands[0].is_a? RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne")
            $asm.puts "test#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[0].x86Operand(kind))}"
        else
            $asm.puts "cmp#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[0].x86Operand(kind))}"
        end
    end
    
    # Compare then branch; the condition is branchOpcode minus its leading
    # "j" (e.g. "jle" -> "le").
    def handleX86IntBranch(branchOpcode, kind)
        handleX86IntCompare(branchOpcode[1..-1], kind)
        $asm.puts "#{branchOpcode} #{operands[2].asmLabel}"
    end
+
    # Materialize a condition flag into +operand+ as 0/1. setcc writes only
    # an 8-bit register; if the operand has no 8-bit form, bounce the value
    # through eax (xchg in, setcc %al, zero-extend, xchg back).
    # NOTE(review): the fallback path hardcodes AT&T-style %al/%eax, so it
    # cannot be correct under Intel syntax — presumably unreachable there;
    # confirm. Also note RegisterID.new is used here while handleX86Shift
    # uses RegisterID.forName — confirm the interning difference is benign.
    def handleX86Set(setOpcode, operand)
        if operand.supports8BitOnX86
            $asm.puts "#{setOpcode} #{operand.x86Operand(:byte)}"
            if !isIntelSyntax
                $asm.puts "movzbl #{orderOperands(operand.x86Operand(:byte), operand.x86Operand(:int))}"
            else
                $asm.puts "movzx #{orderOperands(operand.x86Operand(:byte), operand.x86Operand(:int))}"
            end
        else
            ax = RegisterID.new(nil, "t0")
            $asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}"
            $asm.puts "#{setOpcode} %al"
            $asm.puts "movzbl %al, %eax"
            $asm.puts "xchg#{x86Suffix(:ptr)} #{operand.x86Operand(:ptr)}, #{ax.x86Operand(:ptr)}"
        end
    end
    
    # Compare then set; the condition is setOpcode minus its leading "set".
    def handleX86IntCompareSet(setOpcode, kind)
        handleX86IntCompare(setOpcode[3..-1], kind)
        handleX86Set(setOpcode, operands[2])
    end
    
    # Set flags for a bit test. With 2 operands the mask is implicitly -1
    # ("test the whole value"); registers get "test reg, reg", other operands
    # get "cmp $0"; an explicit mask gets a plain test.
    def handleX86Test(kind)
        value = operands[0]
        case operands.size
        when 2
            mask = Immediate.new(codeOrigin, -1)
        when 3
            mask = operands[1]
        else
            raise "Expected 2 or 3 operands, but got #{operands.size} at #{codeOriginString}"
        end
        
        if mask.is_a? Immediate and mask.value == -1
            if value.is_a? RegisterID
                $asm.puts "test#{x86Suffix(kind)} #{value.x86Operand(kind)}, #{value.x86Operand(kind)}"
            else
                $asm.puts "cmp#{x86Suffix(kind)} #{orderOperands(const(0), value.x86Operand(kind))}"
            end
        else
            $asm.puts "test#{x86Suffix(kind)} #{orderOperands(mask.x86Operand(kind), value.x86Operand(kind))}"
        end
    end
    
    # Bit test then branch to the last operand.
    def handleX86BranchTest(branchOpcode, kind)
        handleX86Test(kind)
        $asm.puts "#{branchOpcode} #{operands.last.asmLabel}"
    end
    
    # Bit test then materialize the condition into the last operand.
    def handleX86SetTest(setOpcode, kind)
        handleX86Test(kind)
        handleX86Set(setOpcode, operands.last)
    end
+
    # Arithmetic op followed by a conditional branch on the resulting flags
    # (e.g. branch-on-overflow). The last operand is the jump target; the op
    # itself sees the remaining operands.
    def handleX86OpBranch(opcode, branchOpcode, kind)
        handleX86OpWithNumOperands(opcode, kind, operands.size - 1)
        case operands.size
        when 4
            jumpTarget = operands[3]
        when 3
            jumpTarget = operands[2]
        else
            raise self.inspect
        end
        $asm.puts "#{branchOpcode} #{jumpTarget.asmLabel}"
    end
    
    # Subtract-and-branch. Special case "a - b -> b": two-address x86 cannot
    # express it directly, so emit neg b; add a, b.
    # NOTE(review): neg+add sets some flags differently from sub — presumably
    # the branch conditions used with this opcode are unaffected; confirm.
    def handleX86SubBranch(branchOpcode, kind)
        if operands.size == 4 and operands[1] == operands[2]
            $asm.puts "neg#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}"
            $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
        else
            handleX86OpWithNumOperands("sub#{x86Suffix(kind)}", kind, operands.size - 1)
        end
        case operands.size
        when 4
            jumpTarget = operands[3]
        when 3
            jumpTarget = operands[2]
        else
            raise self.inspect
        end
        $asm.puts "#{branchOpcode} #{jumpTarget.asmLabel}"
    end
+
    # addX with peepholes: adding 0 is dropped; imm + reg -> other reg uses
    # lea off(reg), dest; reg + reg -> other reg uses lea (a, b), dest;
    # everything else is a plain add.
    def handleX86Add(kind)
        if operands.size == 3 and operands[1] == operands[2]
            unless Immediate.new(nil, 0) == operands[0]
                $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
            end
        elsif operands.size == 3 and operands[0].is_a? Immediate
            raise unless operands[1].is_a? RegisterID
            raise unless operands[2].is_a? RegisterID
            if operands[0].value == 0
                unless operands[1] == operands[2]
                    $asm.puts "mov#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
                end
            else
                $asm.puts "lea#{x86Suffix(kind)} #{orderOperands(offsetRegister(operands[0].value, operands[1].x86Operand(kind)), operands[2].x86Operand(kind))}"
            end
        elsif operands.size == 3 and operands[0].is_a? RegisterID
            raise unless operands[1].is_a? RegisterID
            raise unless operands[2].is_a? RegisterID
            if operands[0] == operands[2]
                $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[1].x86Operand(kind), operands[2].x86Operand(kind))}"
            else
                if !isIntelSyntax
                    $asm.puts "lea#{x86Suffix(kind)} (#{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}"
                else
                    $asm.puts "lea#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}, [#{operands[0].x86Operand(kind)} + #{operands[1].x86Operand(kind)}]"
                end
            end
        else
            unless Immediate.new(nil, 0) == operands[0]
                $asm.puts "add#{x86Suffix(kind)} #{x86Operands(kind, kind)}"
            end
        end
    end
    
    # subX: "a - b -> b" is emitted as neg b; add a, b (see handleX86SubBranch).
    def handleX86Sub(kind)
        if operands.size == 3 and operands[1] == operands[2]
            $asm.puts "neg#{x86Suffix(kind)} #{operands[2].x86Operand(kind)}"
            $asm.puts "add#{x86Suffix(kind)} #{orderOperands(operands[0].x86Operand(kind), operands[2].x86Operand(kind))}"
        else
            handleX86Op("sub#{x86Suffix(kind)}", kind)
        end
    end
    
    # mulX: the three-operand imul form handles an immediate first operand.
    def handleX86Mul(kind)
        if operands.size == 3 and operands[0].is_a? Immediate
            $asm.puts "imul#{x86Suffix(kind)} #{x86Operands(kind, kind, kind)}"
        else
            # FIXME: could do some peephole in case the left operand is immediate and it's
            # a power of two.
            handleX86Op("imul#{x86Suffix(kind)}", kind)
        end
    end
+
    # peek n, reg: load the n'th pointer-sized slot above the stack pointer.
    def handleX86Peek()
        sp = RegisterID.new(nil, "sp")
        opA = offsetRegister(operands[0].value * x86Bytes(:ptr), sp.x86Operand(:ptr))
        opB = operands[1].x86Operand(:ptr)
        $asm.puts "mov#{x86Suffix(:ptr)} #{orderOperands(opA, opB)}"
    end
    
    # poke reg, n: store into the n'th pointer-sized slot above the stack
    # pointer.
    def handleX86Poke()
        sp = RegisterID.new(nil, "sp")
        opA = operands[0].x86Operand(:ptr)
        opB = offsetRegister(operands[1].value * x86Bytes(:ptr), sp.x86Operand(:ptr))
        $asm.puts "mov#{x86Suffix(:ptr)} #{orderOperands(opA, opB)}"
    end
    
    # move: zeroing a register becomes xor reg, reg; self-moves are dropped;
    # everything else is a full-width mov (:quad on 64-bit, :ptr on 32-bit).
    def handleMove
        if Immediate.new(nil, 0) == operands[0] and operands[1].is_a? RegisterID
            if isX64
                $asm.puts "xor#{x86Suffix(:quad)} #{operands[1].x86Operand(:quad)}, #{operands[1].x86Operand(:quad)}"
            else
                $asm.puts "xor#{x86Suffix(:ptr)} #{operands[1].x86Operand(:ptr)}, #{operands[1].x86Operand(:ptr)}"
            end
        elsif operands[0] != operands[1]
            if isX64
                $asm.puts "mov#{x86Suffix(:quad)} #{x86Operands(:quad, :quad)}"
            else
                $asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}"
            end
        end
    end
+
    # x87 double compare. When the interesting register already sits at
    # st(0), fucomi compares it against the other stack slot; otherwise the
    # register is pushed with fld and compared-and-popped with fucomip so the
    # stack depth is restored.
    def handleX87Compare(mode)
        case mode
        when :normal
            if (operands[0].x87DefaultStackPosition == 0)
                $asm.puts "fucomi #{@@floatingPointCompareImplicitOperand}#{operands[1].x87Operand(0)}"
            else
                $asm.puts "fld #{operands[0].x87Operand(0)}"
                $asm.puts "fucomip #{@@floatingPointCompareImplicitOperand}#{operands[1].x87Operand(1)}"
            end
        when :reverse
            if (operands[1].x87DefaultStackPosition == 0)
                $asm.puts "fucomi #{@@floatingPointCompareImplicitOperand}#{operands[0].x87Operand(0)}"
            else
                $asm.puts "fld #{operands[1].x87Operand(0)}"
                $asm.puts "fucomip #{@@floatingPointCompareImplicitOperand}#{operands[0].x87Operand(1)}"
            end
        else
            raise mode.inspect
        end
    end
    
    # x87 two-operand arithmetic. +opcodereverse+ is the reversed-operand
    # mnemonic, used when the destination is not at st(0) (AT&T spelling);
    # if neither operand is at st(0), push one and use the pop form.
    def handleX87BinOp(opcode, opcodereverse)
        if (operands[1].x87DefaultStackPosition == 0)
            $asm.puts "#{opcode} #{orderOperands(operands[0].x87Operand(0), register("st"))}"
        elsif (operands[0].x87DefaultStackPosition == 0)
            if !isIntelSyntax
                $asm.puts "#{opcodereverse} #{register("st")}, #{operands[1].x87Operand(0)}"
            else
                $asm.puts "#{opcode} #{operands[1].x87Operand(0)}, #{register("st")}"
            end
        else
            $asm.puts "fld #{operands[0].x87Operand(0)}"
            $asm.puts "#{opcodereverse}p #{orderOperands(register("st"), operands[1].x87Operand(1))}"
        end
    end
+
    # Per-backend entry points: assert the matching backend is active, then
    # share a single lowering path.
    def lowerX86
        raise unless $activeBackend == "X86"
        lowerX86Common
    end

    def lowerX86_WIN
        raise unless $activeBackend == "X86_WIN" 
        lowerX86Common
    end
    
    def lowerX86_64
        raise unless $activeBackend == "X86_64"
        lowerX86Common
    end

    def lowerX86_64_WIN
        raise unless $activeBackend == "X86_64_WIN"
        lowerX86Common
    end
+
+ def lowerX86Common
+ $asm.codeOrigin codeOriginString if $enableCodeOriginComments
+ $asm.annotation annotation if $enableInstrAnnotations
+
+ case opcode
+ when "addi"
+ handleX86Add(:int)
+ when "addp"
+ handleX86Add(:ptr)
+ when "addq"
+ handleX86Add(:quad)
+ when "andi"
+ handleX86Op("and#{x86Suffix(:int)}", :int)
+ when "andp"
+ handleX86Op("and#{x86Suffix(:ptr)}", :ptr)
+ when "andq"
+ handleX86Op("and#{x86Suffix(:quad)}", :quad)
+ when "lshifti"
+ handleX86Shift("sal#{x86Suffix(:int)}", :int)
+ when "lshiftp"
+ handleX86Shift("sal#{x86Suffix(:ptr)}", :ptr)
+ when "lshiftq"
+ handleX86Shift("sal#{x86Suffix(:quad)}", :quad)
+ when "muli"
+ handleX86Mul(:int)
+ when "mulp"
+ handleX86Mul(:ptr)
+ when "mulq"
+ handleX86Mul(:quad)
+ when "negi"
+ $asm.puts "neg#{x86Suffix(:int)} #{x86Operands(:int)}"
+ when "negp"
+ $asm.puts "neg#{x86Suffix(:ptr)} #{x86Operands(:ptr)}"
+ when "negq"
+ $asm.puts "neg#{x86Suffix(:quad)} #{x86Operands(:quad)}"
+ when "noti"
+ $asm.puts "not#{x86Suffix(:int)} #{x86Operands(:int)}"
+ when "ori"
+ handleX86Op("or#{x86Suffix(:int)}", :int)
+ when "orp"
+ handleX86Op("or#{x86Suffix(:ptr)}", :ptr)
+ when "orq"
+ handleX86Op("or#{x86Suffix(:quad)}", :quad)
+ when "rshifti"
+ handleX86Shift("sar#{x86Suffix(:int)}", :int)
+ when "rshiftp"
+ handleX86Shift("sar#{x86Suffix(:ptr)}", :ptr)
+ when "rshiftq"
+ handleX86Shift("sar#{x86Suffix(:quad)}", :quad)
+ when "urshifti"
+ handleX86Shift("shr#{x86Suffix(:int)}", :int)
+ when "urshiftp"
+ handleX86Shift("shr#{x86Suffix(:ptr)}", :ptr)
+ when "urshiftq"
+ handleX86Shift("shr#{x86Suffix(:quad)}", :quad)
+ when "subi"
+ handleX86Sub(:int)
+ when "subp"
+ handleX86Sub(:ptr)
+ when "subq"
+ handleX86Sub(:quad)
+ when "xori"
+ handleX86Op("xor#{x86Suffix(:int)}", :int)
+ when "xorp"
+ handleX86Op("xor#{x86Suffix(:ptr)}", :ptr)
+ when "xorq"
+ handleX86Op("xor#{x86Suffix(:quad)}", :quad)
+ when "loadi", "storei"
+ $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}"
+ when "loadis"
+ if isX64
+ if !isIntelSyntax
+ $asm.puts "movslq #{x86Operands(:int, :quad)}"
+ else
+ $asm.puts "movsxd #{x86Operands(:int, :quad)}"
+ end
+ else
+ $asm.puts "mov#{x86Suffix(:int)} #{x86Operands(:int, :int)}"
+ end
+ when "loadp", "storep"
+ $asm.puts "mov#{x86Suffix(:ptr)} #{x86Operands(:ptr, :ptr)}"
+ when "loadq", "storeq"
+ $asm.puts "mov#{x86Suffix(:quad)} #{x86Operands(:quad, :quad)}"
+ when "loadb"
+ if !isIntelSyntax
+ $asm.puts "movzbl #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+ else
+ $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:byte), operands[1].x86Operand(:int))}"
+ end
+ when "loadbs"
+ $asm.puts "movsbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}"
+ when "loadh"
+ if !isIntelSyntax
+ $asm.puts "movzwl #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+ else
+ $asm.puts "movzx #{orderOperands(operands[0].x86Operand(:half), operands[1].x86Operand(:int))}"
+ end
+ when "loadhs"
+ $asm.puts "movswl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}"
+ when "storeb"
+ $asm.puts "mov#{x86Suffix(:byte)} #{x86Operands(:byte, :byte)}"
+ when "loadd"
+ if useX87
+ if !isIntelSyntax
+ $asm.puts "fldl #{operands[0].x86Operand(:double)}"
+ else
+ $asm.puts "fld #{operands[0].x86Operand(:double)}"
+ end
+ $asm.puts "fstp #{operands[1].x87Operand(1)}"
+ else
+ $asm.puts "movsd #{x86Operands(:double, :double)}"
+ end
+ when "moved"
+ if useX87
+ if (operands[0].x87DefaultStackPosition == 0)
+ $asm.puts "fst #{operands[1].x87Operand(0)}"
+ else
+ $asm.puts "fld #{operands[0].x87Operand(0)}"
+ $asm.puts "fstp #{operands[1].x87Operand(1)}"
+ end
+ else
+ $asm.puts "movsd #{x86Operands(:double, :double)}"
+ end
+ when "stored"
+ if useX87
+ if (operands[0].x87DefaultStackPosition == 0)
+ $asm.puts "fst#{x86Suffix(:int)} #{operands[1].x86Operand(:double)}"
+ else
+ $asm.puts "fld #{operands[0].x87Operand(0)}"
+ if !isIntelSyntax
+ $asm.puts "fstpl #{operands[1].x86Operand(:double)}"
+ else
+ $asm.puts "fstp #{operands[1].x86Operand(:double)}"
+ end
+ end
+ else
+ $asm.puts "movsd #{x86Operands(:double, :double)}"
+ end
+ when "addd"
+ if useX87
+ handleX87BinOp("fadd", "fadd")
+ else
+ $asm.puts "addsd #{x86Operands(:double, :double)}"
+ end
+ when "muld"
+ if useX87
+ handleX87BinOp("fmul", "fmul")
+ else
+ $asm.puts "mulsd #{x86Operands(:double, :double)}"
+ end
+ when "subd"
+ if useX87
+ handleX87BinOp("fsub", "fsubr")
+ else
+ $asm.puts "subsd #{x86Operands(:double, :double)}"
+ end
+ when "divd"
+ if useX87
+ handleX87BinOp("fdiv", "fdivr")
+ else
+ $asm.puts "divsd #{x86Operands(:double, :double)}"
+ end
+ when "sqrtd"
+ if useX87
+ $asm.puts "fld #{operands[0].x87Operand(0)}"
+ $asm.puts "fsqrtl"
+ $asm.puts "fstp #{operands[1].x87Operand(1)}"
+ else
+ $asm.puts "sqrtsd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ end
+ when "ci2d"
+ if useX87
+ sp = RegisterID.new(nil, "sp")
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), offsetRegister(-4, sp.x86Operand(:ptr)))}"
+ $asm.puts "fild#{x86Suffix(:ptr)} #{getSizeString(:ptr)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
+ $asm.puts "fstp #{operands[1].x87Operand(1)}"
+ else
+ $asm.puts "cvtsi2sd #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:double))}"
+ end
+ when "bdeq"
+ if useX87
+ handleX87Compare(:normal)
+ else
+ $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
+ end
+ if operands[0] == operands[1]
+ # This is just a jump ordered, which is a jnp.
+ $asm.puts "jnp #{operands[2].asmLabel}"
+ else
+ isUnordered = LocalLabel.unique("bdeq")
+ $asm.puts "jp #{LabelReference.new(codeOrigin, isUnordered).asmLabel}"
+ $asm.puts "je #{LabelReference.new(codeOrigin, operands[2]).asmLabel}"
+ isUnordered.lower("X86")
+ end
+ when "bdneq"
+ handleX86DoubleBranch("jne", :normal)
+ when "bdgt"
+ handleX86DoubleBranch("ja", :normal)
+ when "bdgteq"
+ handleX86DoubleBranch("jae", :normal)
+ when "bdlt"
+ handleX86DoubleBranch("ja", :reverse)
+ when "bdlteq"
+ handleX86DoubleBranch("jae", :reverse)
+ when "bdequn"
+ handleX86DoubleBranch("je", :normal)
+ when "bdnequn"
+ if useX87
+ handleX87Compare(:normal)
+ else
+ $asm.puts "ucomisd #{orderOperands(operands[0].x86Operand(:double), operands[1].x86Operand(:double))}"
+ end
+ if operands[0] == operands[1]
+ # This is just a jump unordered, which is a jp.
+ $asm.puts "jp #{operands[2].asmLabel}"
+ else
+ isUnordered = LocalLabel.unique("bdnequn")
+ isEqual = LocalLabel.unique("bdnequn")
+ $asm.puts "jp #{LabelReference.new(codeOrigin, isUnordered).asmLabel}"
+ $asm.puts "je #{LabelReference.new(codeOrigin, isEqual).asmLabel}"
+ isUnordered.lower("X86")
+ $asm.puts "jmp #{operands[2].asmLabel}"
+ isEqual.lower("X86")
+ end
+ when "bdgtun"
+ handleX86DoubleBranch("jb", :reverse)
+ when "bdgtequn"
+ handleX86DoubleBranch("jbe", :reverse)
+ when "bdltun"
+ handleX86DoubleBranch("jb", :normal)
+ when "bdltequn"
+ handleX86DoubleBranch("jbe", :normal)
+ when "btd2i"
+ # FIXME: unused and unimplemented for x87
+ raise if useX87
+ $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "cmpl $0x80000000 #{operands[1].x86Operand(:int)}"
+ $asm.puts "je #{operands[2].asmLabel}"
+ when "td2i"
+ # FIXME: unused and unimplemented for x87
+ raise if useX87
+ $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+ when "bcd2i"
+ if useX87
+ sp = RegisterID.new(nil, "sp")
+ if (operands[0].x87DefaultStackPosition == 0)
+ $asm.puts "fistl -4(#{sp.x86Operand(:ptr)})"
+ else
+ $asm.puts "fld #{operands[0].x87Operand(0)}"
+ $asm.puts "fistp#{x86Suffix(:ptr)} #{getSizeString(:ptr)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
+ end
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-4, sp.x86Operand(:ptr)), operands[1].x86Operand(:int))}"
+ $asm.puts "test#{x86Suffix(:int)} #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "je #{operands[2].asmLabel}"
+ $asm.puts "fild#{x86Suffix(:int)} #{getSizeString(:int)}#{offsetRegister(-4, sp.x86Operand(:ptr))}"
+ $asm.puts "fucomip #{@@floatingPointCompareImplicitOperand}#{operands[0].x87Operand(1)}"
+ $asm.puts "jp #{operands[2].asmLabel}"
+ $asm.puts "jne #{operands[2].asmLabel}"
+ else
+ $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "test#{x86Suffix(:int)} #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "je #{operands[2].asmLabel}"
+ $asm.puts "cvtsi2sd #{operands[1].x86Operand(:int)}, %xmm7"
+ $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, %xmm7"
+ $asm.puts "jp #{operands[2].asmLabel}"
+ $asm.puts "jne #{operands[2].asmLabel}"
+ end
+ when "movdz"
+ if useX87
+ $asm.puts "fldzl"
+ $asm.puts "fstp #{operands[0].x87Operand(1)}"
+ else
+ $asm.puts "xorpd #{operands[0].x86Operand(:double)}, #{operands[0].x86Operand(:double)}"
+ end
+ when "pop"
+ operands.each {
+ | op |
+ $asm.puts "pop #{op.x86Operand(:ptr)}"
+ }
+ when "push"
+ operands.each {
+ | op |
+ $asm.puts "push #{op.x86Operand(:ptr)}"
+ }
+ when "move"
+ handleMove
+ when "sxi2q"
+ if !isIntelSyntax
+ $asm.puts "movslq #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:quad)}"
+ else
+ $asm.puts "movsxd #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:quad))}"
+ end
+ when "zxi2q"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), operands[1].x86Operand(:int))}"
+ when "nop"
+ $asm.puts "nop"
+ when "bieq"
+ handleX86IntBranch("je", :int)
+ when "bpeq"
+ handleX86IntBranch("je", :ptr)
+ when "bqeq"
+ handleX86IntBranch("je", :quad)
+ when "bineq"
+ handleX86IntBranch("jne", :int)
+ when "bpneq"
+ handleX86IntBranch("jne", :ptr)
+ when "bqneq"
+ handleX86IntBranch("jne", :quad)
+ when "bia"
+ handleX86IntBranch("ja", :int)
+ when "bpa"
+ handleX86IntBranch("ja", :ptr)
+ when "bqa"
+ handleX86IntBranch("ja", :quad)
+ when "biaeq"
+ handleX86IntBranch("jae", :int)
+ when "bpaeq"
+ handleX86IntBranch("jae", :ptr)
+ when "bqaeq"
+ handleX86IntBranch("jae", :quad)
+ when "bib"
+ handleX86IntBranch("jb", :int)
+ when "bpb"
+ handleX86IntBranch("jb", :ptr)
+ when "bqb"
+ handleX86IntBranch("jb", :quad)
+ when "bibeq"
+ handleX86IntBranch("jbe", :int)
+ when "bpbeq"
+ handleX86IntBranch("jbe", :ptr)
+ when "bqbeq"
+ handleX86IntBranch("jbe", :quad)
+ when "bigt"
+ handleX86IntBranch("jg", :int)
+ when "bpgt"
+ handleX86IntBranch("jg", :ptr)
+ when "bqgt"
+ handleX86IntBranch("jg", :quad)
+ when "bigteq"
+ handleX86IntBranch("jge", :int)
+ when "bpgteq"
+ handleX86IntBranch("jge", :ptr)
+ when "bqgteq"
+ handleX86IntBranch("jge", :quad)
+ when "bilt"
+ handleX86IntBranch("jl", :int)
+ when "bplt"
+ handleX86IntBranch("jl", :ptr)
+ when "bqlt"
+ handleX86IntBranch("jl", :quad)
+ when "bilteq"
+ handleX86IntBranch("jle", :int)
+ when "bplteq"
+ handleX86IntBranch("jle", :ptr)
+ when "bqlteq"
+ handleX86IntBranch("jle", :quad)
+ when "bbeq"
+ handleX86IntBranch("je", :byte)
+ when "bbneq"
+ handleX86IntBranch("jne", :byte)
+ when "bba"
+ handleX86IntBranch("ja", :byte)
+ when "bbaeq"
+ handleX86IntBranch("jae", :byte)
+ when "bbb"
+ handleX86IntBranch("jb", :byte)
+ when "bbbeq"
+ handleX86IntBranch("jbe", :byte)
+ when "bbgt"
+ handleX86IntBranch("jg", :byte)
+ when "bbgteq"
+ handleX86IntBranch("jge", :byte)
+ when "bblt"
+ handleX86IntBranch("jl", :byte)
+ when "bblteq"
+ handleX86IntBranch("jlteq", :byte)
+ when "btis"
+ handleX86BranchTest("js", :int)
+ when "btps"
+ handleX86BranchTest("js", :ptr)
+ when "btqs"
+ handleX86BranchTest("js", :quad)
+ when "btiz"
+ handleX86BranchTest("jz", :int)
+ when "btpz"
+ handleX86BranchTest("jz", :ptr)
+ when "btqz"
+ handleX86BranchTest("jz", :quad)
+ when "btinz"
+ handleX86BranchTest("jnz", :int)
+ when "btpnz"
+ handleX86BranchTest("jnz", :ptr)
+ when "btqnz"
+ handleX86BranchTest("jnz", :quad)
+ when "btbs"
+ handleX86BranchTest("js", :byte)
+ when "btbz"
+ handleX86BranchTest("jz", :byte)
+ when "btbnz"
+ handleX86BranchTest("jnz", :byte)
+ when "jmp"
+ $asm.puts "jmp #{operands[0].x86CallOperand(:ptr)}"
+ when "baddio"
+ handleX86OpBranch("add#{x86Suffix(:int)}", "jo", :int)
+ when "baddpo"
+ handleX86OpBranch("add#{x86Suffix(:ptr)}", "jo", :ptr)
+ when "baddqo"
+ handleX86OpBranch("add#{x86Suffix(:quad)}", "jo", :quad)
+ when "baddis"
+ handleX86OpBranch("add#{x86Suffix(:int)}", "js", :int)
+ when "baddps"
+ handleX86OpBranch("add#{x86Suffix(:ptr)}", "js", :ptr)
+ when "baddqs"
+ handleX86OpBranch("add#{x86Suffix(:quad)}", "js", :quad)
+ when "baddiz"
+ handleX86OpBranch("add#{x86Suffix(:int)}", "jz", :int)
+ when "baddpz"
+ handleX86OpBranch("add#{x86Suffix(:ptr)}", "jz", :ptr)
+ when "baddqz"
+ handleX86OpBranch("add#{x86Suffix(:quad)}", "jz", :quad)
+ when "baddinz"
+ handleX86OpBranch("add#{x86Suffix(:int)}", "jnz", :int)
+ when "baddpnz"
+ handleX86OpBranch("add#{x86Suffix(:ptr)}", "jnz", :ptr)
+ when "baddqnz"
+ handleX86OpBranch("add#{x86Suffix(:quad)}", "jnz", :quad)
+ when "bsubio"
+ handleX86SubBranch("jo", :int)
+ when "bsubis"
+ handleX86SubBranch("js", :int)
+ when "bsubiz"
+ handleX86SubBranch("jz", :int)
+ when "bsubinz"
+ handleX86SubBranch("jnz", :int)
+ when "bmulio"
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "jo", :int)
+ when "bmulis"
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "js", :int)
+ when "bmuliz"
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "jz", :int)
+ when "bmulinz"
+ handleX86OpBranch("imul#{x86Suffix(:int)}", "jnz", :int)
+ when "borio"
+ handleX86OpBranch("orl", "jo", :int)
+ when "boris"
+ handleX86OpBranch("orl", "js", :int)
+ when "boriz"
+ handleX86OpBranch("orl", "jz", :int)
+ when "borinz"
+ handleX86OpBranch("orl", "jnz", :int)
+ when "break"
+ $asm.puts "int #{const(3)}"
+ when "call"
+ if useX87
+ 2.times {
+ | offset |
+ $asm.puts "ffree #{register("st")}(#{offset})"
+ }
+ end
+ op = operands[0].x86CallOperand(:ptr)
+ if operands[0].is_a? LabelReference
+ operands[0].used
+ end
+ $asm.puts "call #{op}"
+ when "ret"
+ $asm.puts "ret"
+ when "cieq"
+ handleX86IntCompareSet("sete", :int)
+ when "cbeq"
+ handleX86IntCompareSet("sete", :byte)
+ when "cpeq"
+ handleX86IntCompareSet("sete", :ptr)
+ when "cqeq"
+ handleX86IntCompareSet("sete", :quad)
+ when "cineq"
+ handleX86IntCompareSet("setne", :int)
+ when "cbneq"
+ handleX86IntCompareSet("setne", :byte)
+ when "cpneq"
+ handleX86IntCompareSet("setne", :ptr)
+ when "cqneq"
+ handleX86IntCompareSet("setne", :quad)
+ when "cia"
+ handleX86IntCompareSet("seta", :int)
+ when "cba"
+ handleX86IntCompareSet("seta", :byte)
+ when "cpa"
+ handleX86IntCompareSet("seta", :ptr)
+ when "cqa"
+ handleX86IntCompareSet("seta", :quad)
+ when "ciaeq"
+ handleX86IntCompareSet("setae", :int)
+ when "cbaeq"
+ handleX86IntCompareSet("setae", :byte)
+ when "cpaeq"
+ handleX86IntCompareSet("setae", :ptr)
+ when "cqaeq"
+ handleX86IntCompareSet("setae", :quad)
+ when "cib"
+ handleX86IntCompareSet("setb", :int)
+ when "cbb"
+ handleX86IntCompareSet("setb", :byte)
+ when "cpb"
+ handleX86IntCompareSet("setb", :ptr)
+ when "cqb"
+ handleX86IntCompareSet("setb", :quad)
+ when "cibeq"
+ handleX86IntCompareSet("setbe", :int)
+ when "cbbeq"
+ handleX86IntCompareSet("setbe", :byte)
+ when "cpbeq"
+ handleX86IntCompareSet("setbe", :ptr)
+ when "cqbeq"
+ handleX86IntCompareSet("setbe", :quad)
+ when "cigt"
+ handleX86IntCompareSet("setg", :int)
+ when "cbgt"
+ handleX86IntCompareSet("setg", :byte)
+ when "cpgt"
+ handleX86IntCompareSet("setg", :ptr)
+ when "cqgt"
+ handleX86IntCompareSet("setg", :quad)
+ when "cigteq"
+ handleX86IntCompareSet("setge", :int)
+ when "cbgteq"
+ handleX86IntCompareSet("setge", :byte)
+ when "cpgteq"
+ handleX86IntCompareSet("setge", :ptr)
+ when "cqgteq"
+ handleX86IntCompareSet("setge", :quad)
+ when "cilt"
+ handleX86IntCompareSet("setl", :int)
+ when "cblt"
+ handleX86IntCompareSet("setl", :byte)
+ when "cplt"
+ handleX86IntCompareSet("setl", :ptr)
+ when "cqlt"
+ handleX86IntCompareSet("setl", :quad)
+ when "cilteq"
+ handleX86IntCompareSet("setle", :int)
+ when "cblteq"
+ handleX86IntCompareSet("setle", :byte)
+ when "cplteq"
+ handleX86IntCompareSet("setle", :ptr)
+ when "cqlteq"
+ handleX86IntCompareSet("setle", :quad)
+ when "tis"
+ handleX86SetTest("sets", :int)
+ when "tiz"
+ handleX86SetTest("setz", :int)
+ when "tinz"
+ handleX86SetTest("setnz", :int)
+ when "tps"
+ handleX86SetTest("sets", :ptr)
+ when "tpz"
+ handleX86SetTest("setz", :ptr)
+ when "tpnz"
+ handleX86SetTest("setnz", :ptr)
+ when "tqs"
+ handleX86SetTest("sets", :quad)
+ when "tqz"
+ handleX86SetTest("setz", :quad)
+ when "tqnz"
+ handleX86SetTest("setnz", :quad)
+ when "tbs"
+ handleX86SetTest("sets", :byte)
+ when "tbz"
+ handleX86SetTest("setz", :byte)
+ when "tbnz"
+ handleX86SetTest("setnz", :byte)
+ when "peek"
+ handleX86Peek()
+ when "poke"
+ handleX86Poke()
+ when "cdqi"
+ $asm.puts "cdq"
+ when "idivi"
+ $asm.puts "idiv#{x86Suffix(:int)} #{operands[0].x86Operand(:int)}"
+ when "fii2d"
+ if useX87
+ sp = RegisterID.new(nil, "sp")
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[0].x86Operand(:int), offsetRegister(-8, sp.x86Operand(:ptr)))}"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(operands[1].x86Operand(:int), offsetRegister(-4, sp.x86Operand(:ptr)))}"
+ $asm.puts "fld#{x86Suffix(:ptr)} #{getSizeString(:double)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
+ $asm.puts "fstp #{operands[2].x87Operand(1)}"
+ else
+ $asm.puts "movd #{operands[0].x86Operand(:int)}, #{operands[2].x86Operand(:double)}"
+ $asm.puts "movd #{operands[1].x86Operand(:int)}, %xmm7"
+ $asm.puts "psllq $32, %xmm7"
+ $asm.puts "por %xmm7, #{operands[2].x86Operand(:double)}"
+ end
+ when "fd2ii"
+ if useX87
+ sp = RegisterID.new(nil, "sp")
+ if (operands[0].x87DefaultStackPosition == 0)
+ $asm.puts "fst#{x86Suffix(:ptr)} #{getSizeString(:double)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
+ else
+ $asm.puts "fld #{operands[0].x87Operand(0)}"
+ $asm.puts "fstpl -8(#{sp.x86Operand(:ptr)})"
+ end
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-8, sp.x86Operand(:ptr)), operands[1].x86Operand(:int))}"
+ $asm.puts "mov#{x86Suffix(:int)} #{orderOperands(offsetRegister(-4, sp.x86Operand(:ptr)), operands[2].x86Operand(:int))}"
+ else
+ $asm.puts "movd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "movsd #{operands[0].x86Operand(:double)}, %xmm7"
+ $asm.puts "psrlq $32, %xmm7"
+ $asm.puts "movd %xmm7, #{operands[2].x86Operand(:int)}"
+ end
+ when "fq2d"
+ if useX87
+ sp = RegisterID.new(nil, "sp")
+ $asm.puts "movq #{operands[0].x86Operand(:quad)}, -8(#{sp.x86Operand(:ptr)})"
+ $asm.puts "fldl -8(#{sp.x86Operand(:ptr)})"
+ $asm.puts "fstp #{operands[1].x87Operand(1)}"
+ else
+ if !isIntelSyntax
+ $asm.puts "movq #{operands[0].x86Operand(:quad)}, #{operands[1].x86Operand(:double)}"
+ else
+ # MASM does not accept register operands with movq.
+ # Debugging shows that movd actually moves a qword when using MASM.
+ $asm.puts "movd #{operands[1].x86Operand(:double)}, #{operands[0].x86Operand(:quad)}"
+ end
+ end
+ when "fd2q"
+ if useX87
+ sp = RegisterID.new(nil, "sp")
+ if (operands[0].x87DefaultStackPosition == 0)
+ $asm.puts "fst#{x86Suffix(:int)} #{getSizeString(:int)}#{offsetRegister(-8, sp.x86Operand(:ptr))}"
+ else
+ $asm.puts "fld #{operands[0].x87Operand(0)}"
+ $asm.puts "fstpl -8(#{sp.x86Operand(:ptr)})"
+ end
+ $asm.puts "movq -8(#{sp.x86Operand(:ptr)}), #{operands[1].x86Operand(:quad)}"
+ else
+ if !isIntelSyntax
+ $asm.puts "movq #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:quad)}"
+ else
+ # MASM does not accept register operands with movq.
+ # Debugging shows that movd actually moves a qword when using MASM.
+ $asm.puts "movd #{operands[1].x86Operand(:quad)}, #{operands[0].x86Operand(:double)}"
+ end
+ end
+ when "bo"
+ $asm.puts "jo #{operands[0].asmLabel}"
+ when "bs"
+ $asm.puts "js #{operands[0].asmLabel}"
+ when "bz"
+ $asm.puts "jz #{operands[0].asmLabel}"
+ when "bnz"
+ $asm.puts "jnz #{operands[0].asmLabel}"
+ when "leai"
+ $asm.puts "lea#{x86Suffix(:int)} #{orderOperands(operands[0].x86AddressOperand(:int), operands[1].x86Operand(:int))}"
+ when "leap"
+ $asm.puts "lea#{x86Suffix(:ptr)} #{orderOperands(operands[0].x86AddressOperand(:ptr), operands[1].x86Operand(:ptr))}"
+ when "memfence"
+ $asm.puts "mfence"
+ else
+ lowerDefault
+ end
+ end
+end
+