author    Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/b3/air/AirOpcode.opcodes
parent    32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/b3/air/AirOpcode.opcodes')
-rw-r--r--  Source/JavaScriptCore/b3/air/AirOpcode.opcodes  943
1 file changed, 943 insertions, 0 deletions
diff --git a/Source/JavaScriptCore/b3/air/AirOpcode.opcodes b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes
new file mode 100644
index 000000000..e82c9f5bf
--- /dev/null
+++ b/Source/JavaScriptCore/b3/air/AirOpcode.opcodes
@@ -0,0 +1,943 @@
+# Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Syllabus:
+#
+# Examples of some roles, types, and widths:
+# U:G:32 => use of the low 32 bits of a general-purpose register or value
+# D:G:32 => def of the low 32 bits of a general-purpose register or value
+# UD:G:32 => use and def of the low 32 bits of a general-purpose register or value
+# U:G:64 => use of the low 64 bits of a general-purpose register or value
+# ZD:G:32 => def of all bits of a general-purpose register, where all but the low 32 bits are guaranteed to be zeroed.
+# UA:G:Ptr => UseAddr (see comment in Arg.h)
+# U:F:32 => use of a float register or value
+# U:F:64 => use of a double register or value
+# D:F:32 => def of a float register or value
+# UD:F:32 => use and def of a float register or value
+# S:F:32 => scratch float register.
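+#
+# As a concrete illustration of ZD: Add32 below defs ZD:G:32 rather than D:G:32 because 32-bit
+# arithmetic on both x86-64 and arm64 zeroes the upper 32 bits of the destination register.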
+#
+# Argument kinds:
+# Tmp => temporary or register
+# Imm => 32-bit immediate int
+# BigImm => TrustedImm64
+# Addr => address as temporary/register+offset
+# Index => BaseIndex address
+# Abs => AbsoluteAddress
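+#
+# (An Addr computes base + offset; an Index, being a BaseIndex, computes base + index * scale +
+# offset; an Abs names a constant absolute address.)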
+#
+# The parser views these as keywords and understands that they fall into two distinct classes:
+# role/type/width descriptors and argument kinds. So, although this file uses a particular
+# indentation style, none of the whitespace or even the newlines is meaningful to the parser.
+# For example, you could write:
+#
+# Foo42 U:G:32, UD:F:32 Imm, Tmp Addr, Tmp
+#
+# And the parser would know that this is the same as:
+#
+# Foo42 U:G:32, UD:F:32
+# Imm, Tmp
+# Addr, Tmp
+#
+# I.e. a two-form instruction that uses a GPR or an int immediate and uses+defs a float register.
+#
+# Any opcode or opcode form can be preceded by an architecture list, which restricts the opcode to the
+# union of those architectures. For example, if this is the only overload of the opcode, then it makes the
+# opcode only available on x86_64:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+# Tmp, Tmp
+# Tmp, Addr
+#
+# But this only restricts the two-operand form; the other form is allowed on all architectures:
+#
+# x86_64: Fuzz UD:G:64, D:G:64
+# Tmp, Tmp
+# Tmp, Addr
+# Fuzz UD:G:Ptr, D:G:Ptr, U:F:Ptr
+# Tmp, Tmp, Tmp
+# Tmp, Addr, Tmp
+#
+# And you can also restrict individual forms:
+#
+# Thingy UD:G:32, D:G:32
+# Tmp, Tmp
+# arm64: Tmp, Addr
+#
+# Additionally, you can have an intersection between the architectures of the opcode overload and the
+# form. In this example, the version that takes an address is only available on armv7 while the other
+# versions are available on armv7 or x86_64:
+#
+# x86_64 armv7: Buzz U:G:32, UD:F:32
+# Tmp, Tmp
+# Imm, Tmp
+# armv7: Addr, Tmp
+#
+# Finally, you can specify architectures using helpful architecture groups. Here are all of the
+# architecture keywords that we support:
+#
+# x86: means x86-32 or x86-64.
+# x86_32: means just x86-32.
+# x86_64: means just x86-64.
+# arm: means armv7 or arm64.
+# armv7: means just armv7.
+# arm64: means just arm64.
+# 32: means x86-32 or armv7.
+# 64: means x86-64 or arm64.
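+#
+# For example, prefixing an opcode with the "64" group makes it available on x86-64 and arm64 but
+# not on any 32-bit target (the opcode name here is hypothetical):
+#
+# 64: Frobnicate U:G:64, D:G:64
+#     Tmp, Tmp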
+
+# Note that the opcodes here have a leading capital (Add32) but must correspond to a MacroAssembler
+# method with a leading lower-case name (add32).
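+#
+# As an illustrative sketch (the generator derives the exact call from the operand kinds), the
+# "Tmp, Tmp" form of "Add32 U:G:32, UZD:G:32" below is expected to be emitted as something like
+# add32(srcGPR, destGPR) on the MacroAssembler.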
+
+Nop
+
+Add32 U:G:32, U:G:32, ZD:G:32
+ Imm, Tmp, Tmp
+ Tmp, Tmp, Tmp
+
+Add32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Addr
+ x86: Imm, Index
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+ x86: Tmp, Index
+
+x86: Add8 U:G:8, UD:G:8
+ Imm, Addr
+ Imm, Index
+ Tmp, Addr
+ Tmp, Index
+
+x86: Add16 U:G:16, UD:G:16
+ Imm, Addr
+ Imm, Index
+ Tmp, Addr
+ Tmp, Index
+
+64: Add64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Addr
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+
+64: Add64 U:G:64, U:G:64, D:G:64
+ Imm, Tmp, Tmp
+ Tmp, Tmp, Tmp
+
+AddDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: AddDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+AddFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: AddFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+Sub32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Addr
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+
+arm64: Sub32 U:G:32, U:G:32, D:G:32
+ Tmp, Tmp, Tmp
+
+64: Sub64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Addr
+ Imm, Tmp
+ x86: Addr, Tmp
+ x86: Tmp, Addr
+
+arm64: Sub64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+SubDouble U:F:64, U:F:64, D:F:64
+ arm64: Tmp, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Tmp, Index, Tmp
+
+x86: SubDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+SubFloat U:F:32, U:F:32, D:F:32
+ arm64: Tmp, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Tmp, Index, Tmp
+
+x86: SubFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+Neg32 UZD:G:32
+ Tmp
+ x86: Addr
+
+64: Neg64 UD:G:64
+ Tmp
+
+arm64: NegateDouble U:F:64, D:F:64
+ Tmp, Tmp
+
+arm64: NegateFloat U:F:32, D:F:32
+ Tmp, Tmp
+
+Mul32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+Mul32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Imm, Tmp, Tmp
+
+64: Mul64 U:G:64, UD:G:64
+ Tmp, Tmp
+
+Mul64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyAdd64 U:G:64, U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplySub64 U:G:64, U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+
+arm64: MultiplyNeg64 U:G:64, U:G:64, ZD:G:64
+ Tmp, Tmp, Tmp
+
+arm64: Div32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+
+arm64: UDiv32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+
+arm64: Div64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+arm64: UDiv64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+
+MulDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: MulDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+MulFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+ x86: Addr, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Index, Tmp, Tmp
+
+x86: MulFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+arm64: DivDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: DivDouble U:F:64, UD:F:64
+ Tmp, Tmp
+ Addr, Tmp
+
+arm64: DivFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: DivFloat U:F:32, UD:F:32
+ Tmp, Tmp
+ Addr, Tmp
+
+x86: X86ConvertToDoubleWord32 U:G:32, ZD:G:32
+ Tmp*, Tmp*
+
+x86_64: X86ConvertToQuadWord64 U:G:64, D:G:64
+ Tmp*, Tmp*
+
+x86: X86Div32 UZD:G:32, UZD:G:32, U:G:32
+ Tmp*, Tmp*, Tmp
+
+x86: X86UDiv32 UZD:G:32, UZD:G:32, U:G:32
+ Tmp*, Tmp*, Tmp
+
+x86_64: X86Div64 UZD:G:64, UZD:G:64, U:G:64
+ Tmp*, Tmp*, Tmp
+
+x86_64: X86UDiv64 UZD:G:64, UZD:G:64, U:G:64
+ Tmp*, Tmp*, Tmp
+
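+# A form can name, with "as", the MacroAssembler method used to generate that particular form. For
+# example, the Index form of Lea32 below is generated with x86Lea32() rather than the default
+# lea32() implied by the opcode name. (See also the comment on Move further down.)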
+Lea32 UA:G:32, D:G:32
+ Addr, Tmp
+ x86: Index, Tmp as x86Lea32
+
+Lea64 UA:G:64, D:G:64
+ Addr, Tmp
+ x86: Index, Tmp as x86Lea64
+
+And32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Addr, Tmp, Tmp
+
+And32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Tmp
+ x86: Tmp, Addr
+ x86: Addr, Tmp
+ x86: Imm, Addr
+
+64: And64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
+
+x86_64: And64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Tmp
+
+AndDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: AndDouble U:F:64, UD:F:64
+ Tmp, Tmp
+
+AndFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: AndFloat U:F:32, UD:F:32
+ Tmp, Tmp
+
+OrDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: OrDouble U:F:64, UD:F:64
+ Tmp, Tmp
+
+OrFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: OrFloat U:F:32, UD:F:32
+ Tmp, Tmp
+
+x86: XorDouble U:F:64, U:F:64, D:F:64
+ Tmp, Tmp, Tmp
+
+x86: XorDouble U:F:64, UD:F:64
+ Tmp, Tmp
+
+x86: XorFloat U:F:32, U:F:32, D:F:32
+ Tmp, Tmp, Tmp
+
+x86: XorFloat U:F:32, UD:F:32
+ Tmp, Tmp
+
+arm64: Lshift32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86: Lshift32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Lshift64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: Lshift64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Rshift32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86: Rshift32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Rshift64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: Rshift64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Urshift32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86: Urshift32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: Urshift64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: Urshift64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+x86_64: RotateRight32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: RotateRight32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: RotateRight64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+arm64: RotateRight64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ Tmp, Imm, Tmp
+
+x86_64: RotateLeft32 U:G:32, UZD:G:32
+ Tmp*, Tmp
+ Imm, Tmp
+
+x86_64: RotateLeft64 U:G:64, UD:G:64
+ Tmp*, Tmp
+ Imm, Tmp
+
+Or32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Addr, Tmp, Tmp
+
+Or32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Tmp
+ x86: Tmp, Addr
+ x86: Addr, Tmp
+ x86: Imm, Addr
+
+64: Or64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
+
+64: Or64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Imm, Tmp
+
+Xor32 U:G:32, U:G:32, ZD:G:32
+ Tmp, Tmp, Tmp
+ arm64: BitImm, Tmp, Tmp
+ x86: Tmp, Addr, Tmp
+ x86: Addr, Tmp, Tmp
+
+Xor32 U:G:32, UZD:G:32
+ Tmp, Tmp
+ x86: Imm, Tmp
+ x86: Tmp, Addr
+ x86: Addr, Tmp
+ x86: Imm, Addr
+
+64: Xor64 U:G:64, U:G:64, D:G:64
+ Tmp, Tmp, Tmp
+ arm64: BitImm64, Tmp, Tmp
+
+64: Xor64 U:G:64, UD:G:64
+ Tmp, Tmp
+ x86: Tmp, Addr
+ x86: Imm, Tmp
+
+arm64: Not32 U:G:32, ZD:G:32
+ Tmp, Tmp
+
+x86: Not32 UZD:G:32
+ Tmp
+ Addr
+
+arm64: Not64 U:G:64, D:G:64
+ Tmp, Tmp
+
+x86: Not64 UD:G:64
+ Tmp
+ Addr
+
+arm64: AbsDouble U:F:64, D:F:64
+ Tmp, Tmp
+
+arm64: AbsFloat U:F:32, D:F:32
+ Tmp, Tmp
+
+CeilDouble U:F:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+CeilFloat U:F:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+FloorDouble U:F:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+FloorFloat U:F:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+SqrtDouble U:F:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+SqrtFloat U:F:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+ConvertInt32ToDouble U:G:32, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+64: ConvertInt64ToDouble U:G:64, D:F:64
+ Tmp, Tmp
+ x86_64: Addr, Tmp
+
+ConvertInt32ToFloat U:G:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+64: ConvertInt64ToFloat U:G:64, D:F:32
+ Tmp, Tmp
+ x86_64: Addr, Tmp
+
+CountLeadingZeros32 U:G:32, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+64: CountLeadingZeros64 U:G:64, D:G:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+ConvertDoubleToFloat U:F:64, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+ConvertFloatToDouble U:F:32, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp
+
+# Note that Move operates over the full register size, which is either 32-bit or 64-bit depending on
+# the platform. I'm not entirely sure that this is a good thing; it might be better to just have a
+# Move64 instruction. OTOH, our MacroAssemblers already have this notion of "move()" that basically
+# means movePtr.
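+# Note the consequence for immediates: Move with a 32-bit Imm sign-extends it to pointer width
+# (hence "as signExtend32ToPtr" below), whereas Move32 zero-extends (hence "as zeroExtend32ToPtr").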
+Move U:G:Ptr, D:G:Ptr
+ Tmp, Tmp
+ Imm, Tmp as signExtend32ToPtr
+ BigImm, Tmp
+ Addr, Tmp as loadPtr # This means that "Move Addr, Tmp" is code-generated as "load" not "move".
+ Index, Tmp as loadPtr
+ Tmp, Addr as storePtr
+ Tmp, Index as storePtr
+ x86: Imm, Addr as storePtr
+
+x86: Swap32 UD:G:32, UD:G:32
+ Tmp, Tmp
+ Tmp, Addr
+
+x86_64: Swap64 UD:G:64, UD:G:64
+ Tmp, Tmp
+ Tmp, Addr
+
+Move32 U:G:32, ZD:G:32
+ Tmp, Tmp as zeroExtend32ToPtr
+ Addr, Tmp as load32
+ Index, Tmp as load32
+ Tmp, Addr as store32
+ Tmp, Index as store32
+ x86: Imm, Tmp as zeroExtend32ToPtr
+ x86: Imm, Addr as store32
+ x86: Imm, Index as store32
+
+StoreZero32 U:G:32
+ Addr
+ Index
+
+SignExtend32ToPtr U:G:32, D:G:Ptr
+ Tmp, Tmp
+
+ZeroExtend8To32 U:G:8, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load8
+ x86: Index, Tmp as load8
+
+SignExtend8To32 U:G:8, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load8SignedExtendTo32
+ x86: Index, Tmp as load8SignedExtendTo32
+
+ZeroExtend16To32 U:G:16, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load16
+ x86: Index, Tmp as load16
+
+SignExtend16To32 U:G:16, ZD:G:32
+ Tmp, Tmp
+ x86: Addr, Tmp as load16SignedExtendTo32
+ x86: Index, Tmp as load16SignedExtendTo32
+
+MoveFloat U:F:32, D:F:32
+ Tmp, Tmp as moveDouble
+ Addr, Tmp as loadFloat
+ Index, Tmp as loadFloat
+ Tmp, Addr as storeFloat
+ Tmp, Index as storeFloat
+
+MoveDouble U:F:64, D:F:64
+ Tmp, Tmp
+ Addr, Tmp as loadDouble
+ Index, Tmp as loadDouble
+ Tmp, Addr as storeDouble
+ Tmp, Index as storeDouble
+
+MoveZeroToDouble D:F:64
+ Tmp
+
+64: Move64ToDouble U:G:64, D:F:64
+ Tmp, Tmp
+ x86: Addr, Tmp as loadDouble
+ Index, Tmp as loadDouble
+
+Move32ToFloat U:G:32, D:F:32
+ Tmp, Tmp
+ x86: Addr, Tmp as loadFloat
+ Index, Tmp as loadFloat
+
+64: MoveDoubleTo64 U:F:64, D:G:64
+ Tmp, Tmp
+ Addr, Tmp as load64
+ Index, Tmp as load64
+
+MoveFloatTo32 U:F:32, D:G:32
+ Tmp, Tmp
+ Addr, Tmp as load32
+ Index, Tmp as load32
+
+Load8 U:G:8, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Store8 U:G:8, D:G:8
+ Tmp, Index
+ Tmp, Addr
+ x86: Imm, Index
+ x86: Imm, Addr
+
+Load8SignedExtendTo32 U:G:8, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Load16 U:G:16, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Load16SignedExtendTo32 U:G:16, ZD:G:32
+ Addr, Tmp
+ Index, Tmp
+
+Store16 U:G:16, D:G:16
+ Tmp, Index
+ Tmp, Addr
+
+Compare32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ RelCond, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp
+
+64: Compare64 U:G:32, U:G:64, U:G:64, ZD:G:32
+ RelCond, Tmp, Tmp, Tmp
+ x86: RelCond, Tmp, Imm, Tmp
+
+Test32 U:G:32, U:G:32, U:G:32, ZD:G:32
+ x86: ResCond, Addr, Imm, Tmp
+ ResCond, Tmp, Tmp, Tmp
+ ResCond, Tmp, BitImm, Tmp
+
+64: Test64 U:G:32, U:G:64, U:G:64, ZD:G:32
+ x86: ResCond, Tmp, Imm, Tmp
+ ResCond, Tmp, Tmp, Tmp
+
+CompareDouble U:G:32, U:F:64, U:F:64, ZD:G:32
+ DoubleCond, Tmp, Tmp, Tmp
+
+CompareFloat U:G:32, U:F:32, U:F:32, ZD:G:32
+ DoubleCond, Tmp, Tmp, Tmp
+
+# Note that branches have some logic in AirOptimizeBlockOrder.cpp. If you add new branches, please make sure
+# you opt them into the block order optimizations.
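+#
+# The "/branch" annotation marks an opcode as a branch, i.e. a block terminator; later in this file,
+# "/return" marks returns, "/terminal" marks other block terminators, and "/effects" marks
+# instructions whose side effects go beyond their arguments (the fences). These annotations are
+# consumed by the opcode generator.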
+
+Branch8 U:G:32, U:G:8, U:G:8 /branch
+ x86: RelCond, Addr, Imm
+ x86: RelCond, Index, Imm
+
+Branch32 U:G:32, U:G:32, U:G:32 /branch
+ x86: RelCond, Addr, Imm
+ RelCond, Tmp, Tmp
+ RelCond, Tmp, Imm
+ x86: RelCond, Tmp, Addr
+ x86: RelCond, Addr, Tmp
+ x86: RelCond, Index, Imm
+
+64: Branch64 U:G:32, U:G:64, U:G:64 /branch
+ RelCond, Tmp, Tmp
+ RelCond, Tmp, Imm
+ x86: RelCond, Tmp, Addr
+ x86: RelCond, Addr, Tmp
+ x86: RelCond, Addr, Imm
+ x86: RelCond, Index, Tmp
+
+BranchTest8 U:G:32, U:G:8, U:G:8 /branch
+ x86: ResCond, Addr, BitImm
+ x86: ResCond, Index, BitImm
+
+BranchTest32 U:G:32, U:G:32, U:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Tmp, BitImm
+ x86: ResCond, Addr, BitImm
+ x86: ResCond, Index, BitImm
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# BranchTest32 in most cases where you use an immediate.
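+# For example, a BitImm of 0x80000000 would be sign-extended to 0xFFFFFFFF80000000 before the test.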
+64: BranchTest64 U:G:32, U:G:64, U:G:64 /branch
+ ResCond, Tmp, Tmp
+ arm64: ResCond, Tmp, BitImm64
+ x86: ResCond, Tmp, BitImm
+ x86: ResCond, Addr, BitImm
+ x86: ResCond, Addr, Tmp
+ x86: ResCond, Index, BitImm
+
+BranchDouble U:G:32, U:F:64, U:F:64 /branch
+ DoubleCond, Tmp, Tmp
+
+BranchFloat U:G:32, U:F:32, U:F:32 /branch
+ DoubleCond, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+ ResCond, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Addr, Tmp
+ x86: ResCond, Addr, Tmp, Tmp
+
+BranchAdd32 U:G:32, U:G:32, UZD:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Imm, Tmp
+ x86: ResCond, Imm, Addr
+ x86: ResCond, Tmp, Addr
+ x86: ResCond, Addr, Tmp
+
+BranchAdd64 U:G:32, U:G:64, U:G:64, ZD:G:64 /branch
+ ResCond, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Addr, Tmp
+ x86: ResCond, Addr, Tmp, Tmp
+
+64: BranchAdd64 U:G:32, U:G:64, UD:G:64 /branch
+ ResCond, Imm, Tmp
+ ResCond, Tmp, Tmp
+ x86: ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, UZD:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Addr, Tmp
+
+x86: BranchMul32 U:G:32, U:G:32, U:G:32, ZD:G:32 /branch
+ ResCond, Tmp, Imm, Tmp
+
+arm64: BranchMul32 U:G:32, U:G:32, U:G:32, S:G:32, S:G:32, ZD:G:32 /branch
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+x86_64: BranchMul64 U:G:32, U:G:64, UZD:G:64 /branch
+ ResCond, Tmp, Tmp
+
+arm64: BranchMul64 U:G:32, U:G:64, U:G:64, S:G:64, S:G:64, ZD:G:64 /branch
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+BranchSub32 U:G:32, U:G:32, UZD:G:32 /branch
+ ResCond, Tmp, Tmp
+ ResCond, Imm, Tmp
+ x86: ResCond, Imm, Addr
+ x86: ResCond, Tmp, Addr
+ x86: ResCond, Addr, Tmp
+
+64: BranchSub64 U:G:32, U:G:64, UD:G:64 /branch
+ ResCond, Imm, Tmp
+ ResCond, Tmp, Tmp
+
+BranchNeg32 U:G:32, UZD:G:32 /branch
+ ResCond, Tmp
+
+64: BranchNeg64 U:G:32, UZD:G:64 /branch
+ ResCond, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionally32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp
+
+64: MoveConditionally64 U:G:32, U:G:64, U:G:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, UD:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+MoveConditionallyTest32 U:G:32, U:G:32, U:G:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:64, U:G:64, U:G:Ptr, UD:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp
+ x86: ResCond, Tmp, Imm, Tmp, Tmp
+
+64: MoveConditionallyTest64 U:G:32, U:G:64, U:G:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyDouble U:G:32, U:F:64, U:F:64, U:G:Ptr, UD:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, U:G:Ptr, D:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveConditionallyFloat U:G:32, U:F:32, U:F:32, U:G:Ptr, UD:G:Ptr
+ DoubleCond, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionally32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+ x86: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+ x86: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+ x86: RelCond, Index, Imm, Tmp, Tmp, Tmp
+
+64: MoveDoubleConditionally64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+ RelCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ RelCond, Tmp, Imm, Tmp, Tmp, Tmp
+ x86_64: RelCond, Tmp, Addr, Tmp, Tmp, Tmp
+ x86_64: RelCond, Addr, Tmp, Tmp, Tmp, Tmp
+ x86_64: RelCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86_64: RelCond, Index, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyTest32 U:G:32, U:G:32, U:G:32, U:F:64, U:F:64, D:F:64
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ ResCond, Tmp, BitImm, Tmp, Tmp, Tmp
+ x86: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+# Warning: forms that take an immediate will sign-extend their immediate. You probably want
+# MoveDoubleConditionallyTest32 in most cases where you use an immediate.
+64: MoveDoubleConditionallyTest64 U:G:32, U:G:64, U:G:64, U:F:64, U:F:64, D:F:64
+ ResCond, Tmp, Tmp, Tmp, Tmp, Tmp
+ x86_64: ResCond, Tmp, Imm, Tmp, Tmp, Tmp
+ x86_64: ResCond, Addr, Imm, Tmp, Tmp, Tmp
+ x86_64: ResCond, Addr, Tmp, Tmp, Tmp, Tmp
+ x86_64: ResCond, Index, Imm, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyDouble U:G:32, U:F:64, U:F:64, U:F:64, U:F:64, D:F:64
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MoveDoubleConditionallyFloat U:G:32, U:F:32, U:F:32, U:F:64, U:F:64, D:F:64
+ DoubleCond, Tmp, Tmp, Tmp, Tmp, Tmp
+
+MemoryFence /effects
+StoreFence /effects
+LoadFence /effects
+
+Jump /branch
+
+RetVoid /return
+
+Ret32 U:G:32 /return
+ Tmp
+
+64: Ret64 U:G:64 /return
+ Tmp
+
+RetFloat U:F:32 /return
+ Tmp
+
+RetDouble U:F:64 /return
+ Tmp
+
+Oops /terminal
+
+# This is a terminal but we express it as a Custom because we don't want it to have a code
+# generator.
+custom EntrySwitch
+
+# A Shuffle is a multi-source, multi-destination move: it performs multiple moves as if they all
+# happened at once. The moves are specified as triplets of src, dst, and width. For example, you can
+# request a swap this way:
+# Shuffle %tmp1, %tmp2, 64, %tmp2, %tmp1, 64
+custom Shuffle
+
+# Air allows for exotic behavior. A Patch's behavior is determined entirely by the Special operand,
+# which must be the first operand.
+custom Patch
+
+# Instructions used for lowering C calls. These don't make it to Air code generation; they get
+# lowered to something else first. The origin Value must be a CCallValue.
+custom CCall
+custom ColdCCall
+
+# This is a special wasm opcode that branches to a trap handler. It uses the generator located in
+# Air::Code to produce the side-exit code.
+custom WasmBoundsCheck
+