summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorbwilson <bwilson@138bc75d-0d04-0410-961f-82ee72b054a4>2002-01-23 21:03:53 +0000
committerbwilson <bwilson@138bc75d-0d04-0410-961f-82ee72b054a4>2002-01-23 21:03:53 +0000
commitf6b7ba2b65b368412f985d9e5fef16185bf7a249 (patch)
tree474190a92649d5af6ccd7a6f3ed51d377009630f
parentf97f597e410e178e838560cf99b8e840478b3bcb (diff)
downloadgcc-f6b7ba2b65b368412f985d9e5fef16185bf7a249.tar.gz
* config/xtensa/elf.h: New file.
* config/xtensa/lib1funcs.asm: New file. * config/xtensa/lib2funcs.S: New file. * config/xtensa/linux.h: New file. * config/xtensa/t-xtensa: New file. * config/xtensa/xtensa-config.h: New file. * config/xtensa/xtensa-protos.h: New file. * config/xtensa/xtensa.c: New file. * config/xtensa/xtensa.h: New file. * config/xtensa/xtensa.md: New file. * config.gcc (xtensa-*-elf*): New target. (xtensa-*-linux*): New target. * cse.c (canon_hash): Compare rtx pointers instead of register numbers. This is required for the Xtensa port. * integrate.c (copy_insn_list): Handle case where the static chain is in memory and the memory address has to be copied to a register. * doc/invoke.texi (Option Summary): Add Xtensa options. (Xtensa Options): New node. * doc/md.texi (Machine Constraints): Add Xtensa machine constraints. * gcc.c-torture/compile/20001226-1.x: xfail for Xtensa. git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@49155 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--gcc/ChangeLog23
-rw-r--r--gcc/config.gcc16
-rw-r--r--gcc/config/xtensa/elf.h138
-rw-r--r--gcc/config/xtensa/lib1funcs.asm419
-rw-r--r--gcc/config/xtensa/lib2funcs.S205
-rw-r--r--gcc/config/xtensa/linux.h94
-rw-r--r--gcc/config/xtensa/t-xtensa28
-rw-r--r--gcc/config/xtensa/xtensa-config.h50
-rw-r--r--gcc/config/xtensa/xtensa-protos.h116
-rw-r--r--gcc/config/xtensa/xtensa.c2658
-rw-r--r--gcc/config/xtensa/xtensa.h1701
-rw-r--r--gcc/config/xtensa/xtensa.md2415
-rw-r--r--gcc/cse.c8
-rw-r--r--gcc/doc/invoke.texi192
-rw-r--r--gcc/doc/md.texi25
-rw-r--r--gcc/integrate.c50
-rw-r--r--gcc/testsuite/ChangeLog4
-rw-r--r--gcc/testsuite/gcc.c-torture/compile/20001226-1.x6
18 files changed, 8137 insertions, 11 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 984781aca48..624aa13e1cb 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,26 @@
+2002-01-23 Bob Wilson <bob.wilson@acm.org>
+
+ * config/xtensa/elf.h: New file.
+ * config/xtensa/lib1funcs.asm: New file.
+ * config/xtensa/lib2funcs.S: New file.
+ * config/xtensa/linux.h: New file.
+ * config/xtensa/t-xtensa: New file.
+ * config/xtensa/xtensa-config.h: New file.
+ * config/xtensa/xtensa-protos.h: New file.
+ * config/xtensa/xtensa.c: New file.
+ * config/xtensa/xtensa.h: New file.
+ * config/xtensa/xtensa.md: New file.
+ * config.gcc (xtensa-*-elf*): New target.
+ (xtensa-*-linux*): New target.
+ * cse.c (canon_hash): Compare rtx pointers instead of register
+ numbers. This is required for the Xtensa port.
+ * integrate.c (copy_insn_list): Handle case where the static
+ chain is in memory and the memory address has to be copied to
+ a register.
+ * doc/invoke.texi (Option Summary): Add Xtensa options.
+ (Xtensa Options): New node.
+ * doc/md.texi (Machine Constraints): Add Xtensa machine constraints.
+
2002-01-23 Zack Weinberg <zack@codesourcery.com>
* diagnostic.c (internal_error): Do ICE suppression only
diff --git a/gcc/config.gcc b/gcc/config.gcc
index 9800e4be480..ad60a20a6a3 100644
--- a/gcc/config.gcc
+++ b/gcc/config.gcc
@@ -3260,6 +3260,22 @@ xstormy16-*-elf)
tmake_file="stormy16/t-stormy16"
extra_parts="crtbegin.o crtend.o"
;;
+xtensa-*-elf*)
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h xtensa/elf.h"
+ with_newlib=yes
+ tmake_file=xtensa/t-xtensa
+ extra_parts="crtbegin.o crtend.o"
+ fixincludes=Makefile.in # newlib headers should be OK
+ ;;
+xtensa-*-linux*)
+ tm_file="${tm_file} dbxelf.h elfos.h svr4.h linux.h xtensa/linux.h"
+ tmake_file="t-linux xtensa/t-xtensa"
+ extra_parts="crtbegin.o crtbeginS.o crtbeginT.o crtend.o crtendS.o"
+ gas=yes gnu_ld=yes
+ if test x$enable_threads = xyes; then
+ thread_file='posix'
+ fi
+ ;;
*)
echo "Configuration $machine not supported" 1>&2
exit 1
diff --git a/gcc/config/xtensa/elf.h b/gcc/config/xtensa/elf.h
new file mode 100644
index 00000000000..dea1e5f8414
--- /dev/null
+++ b/gcc/config/xtensa/elf.h
@@ -0,0 +1,138 @@
+/* Xtensa/Elf configuration.
+ Derived from the configuration for GCC for Intel i386 running Linux.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Xtensa/ELF)", stderr);
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{v} %{mno-density:--no-density} \
+ %{mtext-section-literals:--text-section-literals} \
+ %{mno-text-section-literals:--no-text-section-literals} \
+ %{mtarget-align:--target-align} \
+ %{mno-target-align:--no-target-align} \
+ %{mlongcalls:--longcalls} \
+ %{mno-longcalls:--no-longcalls}"
+
+#undef ASM_FINAL_SPEC
+
+#undef LIB_SPEC
+#define LIB_SPEC "-lc -lsim -lc -lhandlers-sim"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt1-sim%O%s crti%O%s crtbegin%O%s _vectors%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{shared:-shared} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{static:-static}}}"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__XTENSA__ -D__ELF__ -Acpu=xtensa -Amachine=xtensa"
+
+/* Local compiler-generated symbols must have a prefix that the assembler
+ understands. By default, this is $, although some targets (e.g.,
+ NetBSD-ELF) need to override this. */
+
+#ifndef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+#endif
+
+/* By default, external symbols do not have an underscore prepended. */
+
+#ifndef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+#endif
+
+/* Define this macro if the assembler does not accept the character
+ "." in label names. By default constructors and destructors in G++
+ have names that use ".". If this macro is defined, these names
+ are rewritten to avoid ".". */
+#define NO_DOT_IN_LABEL
+
+/* Define NO_DOLLAR_IN_LABEL in your favorite tm file if your assembler
+ doesn't allow $ in symbol names. */
+#undef NO_DOLLAR_IN_LABEL
+
+/* Don't switch sections in the middle of a literal pool! */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX,ALIGN)
+
+/* Do not force "-fpic" for this target. */
+#define XTENSA_ALWAYS_PIC 0
+
+/* Redefine the standard ELF version of ASM_DECLARE_FUNCTION_SIZE to
+ allow adding the ".end literal_prefix" directive at the end of the
+ function. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ \
+ labelno++; \
+ \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ \
+ fprintf (FILE, "%s", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ XTENSA_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL); \
+ } \
+ while (0)
diff --git a/gcc/config/xtensa/lib1funcs.asm b/gcc/config/xtensa/lib1funcs.asm
new file mode 100644
index 00000000000..acfb35769bf
--- /dev/null
+++ b/gcc/config/xtensa/lib1funcs.asm
@@ -0,0 +1,419 @@
+/* Assembly functions for the Xtensa version of libgcc1.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#include "xtensa/xtensa-config.h"
+
+#ifdef L_mulsi3
+ .align 4
+ .global __mulsi3
+ .type __mulsi3,@function
+__mulsi3:
+ entry sp, 16
+
+#if XCHAL_HAVE_MUL16
+ or a4, a2, a3
+ srai a4, a4, 16
+ bnez a4, .LMUL16
+ mul16u a2, a2, a3
+ retw
+.LMUL16:
+ srai a4, a2, 16
+ srai a5, a3, 16
+ mul16u a7, a4, a3
+ mul16u a6, a5, a2
+ mul16u a4, a2, a3
+ add a7, a7, a6
+ slli a7, a7, 16
+ add a2, a7, a4
+
+#elif XCHAL_HAVE_MAC16
+ mul.aa.hl a2, a3
+ mula.aa.lh a2, a3
+ rsr a5, 16 # ACCLO
+ umul.aa.ll a2, a3
+ rsr a4, 16 # ACCLO
+ slli a5, a5, 16
+ add a2, a4, a5
+
+#else /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
+
+ # Multiply one bit at a time, but unroll the loop 4x to better
+ # exploit the addx instructions.
+
+ # Peel the first iteration to save a cycle on init
+
+ # avoid negative numbers
+
+ xor a5, a2, a3 # top bit is 1 iff one of the inputs is negative
+ abs a3, a3
+ abs a2, a2
+
+ # swap so that second argument is smaller
+ sub a7, a2, a3
+ mov a4, a3
+ movgez a4, a2, a7 # a4 = max(a2, a3)
+ movltz a3, a2, a7 # a3 = min(a2, a3)
+
+ movi a2, 0
+ extui a6, a3, 0, 1
+ movnez a2, a4, a6
+
+ addx2 a7, a4, a2
+ extui a6, a3, 1, 1
+ movnez a2, a7, a6
+
+ addx4 a7, a4, a2
+ extui a6, a3, 2, 1
+ movnez a2, a7, a6
+
+ addx8 a7, a4, a2
+ extui a6, a3, 3, 1
+ movnez a2, a7, a6
+
+ bgeui a3, 16, .Lmult_main_loop
+ neg a3, a2
+ movltz a2, a3, a5
+ retw
+
+
+ .align 4
+.Lmult_main_loop:
+ srli a3, a3, 4
+ slli a4, a4, 4
+
+ add a7, a4, a2
+ extui a6, a3, 0, 1
+ movnez a2, a7, a6
+
+ addx2 a7, a4, a2
+ extui a6, a3, 1, 1
+ movnez a2, a7, a6
+
+ addx4 a7, a4, a2
+ extui a6, a3, 2, 1
+ movnez a2, a7, a6
+
+ addx8 a7, a4, a2
+ extui a6, a3, 3, 1
+ movnez a2, a7, a6
+
+
+ bgeui a3, 16, .Lmult_main_loop
+
+ neg a3, a2
+ movltz a2, a3, a5
+
+#endif /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
+
+ retw
+.Lfe0:
+ .size __mulsi3,.Lfe0-__mulsi3
+
+#endif /* L_mulsi3 */
+
+
+ # Some Xtensa configurations include the NSAU (unsigned
+ # normalize shift amount) instruction which computes the number
+ # of leading zero bits. For other configurations, the "nsau"
+ # operation is implemented as a macro.
+
+#if !XCHAL_HAVE_NSA
+ .macro nsau cnt, val, tmp, a
+ mov \a, \val
+ movi \cnt, 0
+ extui \tmp, \a, 16, 16
+ bnez \tmp, 0f
+ movi \cnt, 16
+ slli \a, \a, 16
+0:
+ extui \tmp, \a, 24, 8
+ bnez \tmp, 1f
+ addi \cnt, \cnt, 8
+ slli \a, \a, 8
+1:
+ movi \tmp, __nsau_data
+ extui \a, \a, 24, 8
+ add \tmp, \tmp, \a
+ l8ui \tmp, \tmp, 0
+ add \cnt, \cnt, \tmp
+ .endm
+#endif /* !XCHAL_HAVE_NSA */
+
+#ifdef L_nsau
+ .section .rodata
+ .align 4
+ .global __nsau_data
+ .type __nsau_data,@object
+__nsau_data:
+#if !XCHAL_HAVE_NSA
+ .byte 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4
+ .byte 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3
+ .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+ .byte 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ .byte 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+#endif /* !XCHAL_HAVE_NSA */
+.Lfe1:
+ .size __nsau_data,.Lfe1-__nsau_data
+ .hidden __nsau_data
+#endif /* L_nsau */
+
+
+#ifdef L_udivsi3
+ .align 4
+ .global __udivsi3
+ .type __udivsi3,@function
+__udivsi3:
+ entry sp, 16
+ bltui a3, 2, .Lle_one # check if the divisor <= 1
+
+ mov a6, a2 # keep dividend in a6
+#if XCHAL_HAVE_NSA
+ nsau a5, a6 # dividend_shift = nsau(dividend)
+ nsau a4, a3 # divisor_shift = nsau(divisor)
+#else /* !XCHAL_HAVE_NSA */
+ nsau a5, a6, a2, a7 # dividend_shift = nsau(dividend)
+ nsau a4, a3, a2, a7 # divisor_shift = nsau(divisor)
+#endif /* !XCHAL_HAVE_NSA */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 # count = divisor_shift - dividend_shift
+ ssl a4
+ sll a3, a3 # divisor <<= count
+ movi a2, 0 # quotient = 0
+
+ # test-subtract-and-shift loop; one quotient bit on each iteration
+ loopnez a4, .Lloopend
+ bltu a6, a3, .Lzerobit
+ sub a6, a6, a3
+ addi a2, a2, 1
+.Lzerobit:
+ slli a2, a2, 1
+ srli a3, a3, 1
+.Lloopend:
+
+ bltu a6, a3, .Lreturn
+ addi a2, a2, 1 # increment quotient if dividend >= divisor
+.Lreturn:
+ retw
+
+.Lspecial:
+ # return dividend >= divisor
+ movi a2, 0
+ bltu a6, a3, .Lreturn2
+ movi a2, 1
+.Lreturn2:
+ retw
+
+.Lle_one:
+ beqz a3, .Lerror # if divisor == 1, return the dividend
+ retw
+.Lerror:
+ movi a2, 0 # just return 0; could throw an exception
+ retw
+.Lfe2:
+ .size __udivsi3,.Lfe2-__udivsi3
+
+#endif /* L_udivsi3 */
+
+
+#ifdef L_divsi3
+ .align 4
+ .global __divsi3
+ .type __divsi3,@function
+__divsi3:
+ entry sp, 16
+ xor a7, a2, a3 # sign = dividend ^ divisor
+ abs a6, a2 # udividend = abs(dividend)
+ abs a3, a3 # udivisor = abs(divisor)
+ bltui a3, 2, .Lle_one # check if udivisor <= 1
+#if XCHAL_HAVE_NSA
+ nsau a5, a6 # udividend_shift = nsau(udividend)
+ nsau a4, a3 # udivisor_shift = nsau(udivisor)
+#else /* !XCHAL_HAVE_NSA */
+ nsau a5, a6, a2, a8 # udividend_shift = nsau(udividend)
+ nsau a4, a3, a2, a8 # udivisor_shift = nsau(udivisor)
+#endif /* !XCHAL_HAVE_NSA */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 # count = udivisor_shift - udividend_shift
+ ssl a4
+ sll a3, a3 # udivisor <<= count
+ movi a2, 0 # quotient = 0
+
+ # test-subtract-and-shift loop; one quotient bit on each iteration
+ loopnez a4, .Lloopend
+ bltu a6, a3, .Lzerobit
+ sub a6, a6, a3
+ addi a2, a2, 1
+.Lzerobit:
+ slli a2, a2, 1
+ srli a3, a3, 1
+.Lloopend:
+
+ bltu a6, a3, .Lreturn
+ addi a2, a2, 1 # increment quotient if udividend >= udivisor
+.Lreturn:
+ neg a5, a2
+ movltz a2, a5, a7 # return (sign < 0) ? -quotient : quotient
+ retw
+
+.Lspecial:
+ movi a2, 0
+ bltu a6, a3, .Lreturn2 # if dividend < divisor, return 0
+ movi a2, 1
+ movi a4, -1
+ movltz a2, a4, a7 # else return (sign < 0) ? -1 : 1
+.Lreturn2:
+ retw
+
+.Lle_one:
+ beqz a3, .Lerror
+ neg a2, a6 # if udivisor == 1, then return...
+ movgez a2, a6, a7 # (sign < 0) ? -udividend : udividend
+ retw
+.Lerror:
+ movi a2, 0 # just return 0; could throw an exception
+ retw
+.Lfe3:
+ .size __divsi3,.Lfe3-__divsi3
+
+#endif /* L_divsi3 */
+
+
+#ifdef L_umodsi3
+ .align 4
+ .global __umodsi3
+ .type __umodsi3,@function
+__umodsi3:
+ entry sp, 16
+ bltui a3, 2, .Lle_one # check if the divisor is <= 1
+
+#if XCHAL_HAVE_NSA
+ nsau a5, a2 # dividend_shift = nsau(dividend)
+ nsau a4, a3 # divisor_shift = nsau(divisor)
+#else /* !XCHAL_HAVE_NSA */
+ nsau a5, a2, a6, a7 # dividend_shift = nsau(dividend)
+ nsau a4, a3, a6, a7 # divisor_shift = nsau(divisor)
+#endif /* !XCHAL_HAVE_NSA */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 # count = divisor_shift - dividend_shift
+ ssl a4
+ sll a3, a3 # divisor <<= count
+
+ # test-subtract-and-shift loop
+ loopnez a4, .Lloopend
+ bltu a2, a3, .Lzerobit
+ sub a2, a2, a3
+.Lzerobit:
+ srli a3, a3, 1
+.Lloopend:
+
+ bltu a2, a3, .Lreturn
+ sub a2, a2, a3 # subtract once more if dividend >= divisor
+.Lreturn:
+ retw
+
+.Lspecial:
+ bltu a2, a3, .Lreturn2
+ sub a2, a2, a3 # subtract once if dividend >= divisor
+.Lreturn2:
+ retw
+
+.Lle_one:
+ # the divisor is either 0 or 1, so just return 0.
+ # someday we may want to throw an exception if the divisor is 0.
+ movi a2, 0
+ retw
+.Lfe4:
+ .size __umodsi3,.Lfe4-__umodsi3
+
+#endif /* L_umodsi3 */
+
+
+#ifdef L_modsi3
+ .align 4
+ .global __modsi3
+ .type __modsi3,@function
+__modsi3:
+ entry sp, 16
+ mov a7, a2 # save original (signed) dividend
+ abs a2, a2 # udividend = abs(dividend)
+ abs a3, a3 # udivisor = abs(divisor)
+ bltui a3, 2, .Lle_one # check if udivisor <= 1
+#if XCHAL_HAVE_NSA
+ nsau a5, a2 # udividend_shift = nsau(udividend)
+ nsau a4, a3 # udivisor_shift = nsau(udivisor)
+#else /* !XCHAL_HAVE_NSA */
+ nsau a5, a2, a6, a8 # udividend_shift = nsau(udividend)
+ nsau a4, a3, a6, a8 # udivisor_shift = nsau(udivisor)
+#endif /* !XCHAL_HAVE_NSA */
+ bgeu a5, a4, .Lspecial
+
+ sub a4, a4, a5 # count = udivisor_shift - udividend_shift
+ ssl a4
+ sll a3, a3 # udivisor <<= count
+
+ # test-subtract-and-shift loop
+ loopnez a4, .Lloopend
+ bltu a2, a3, .Lzerobit
+ sub a2, a2, a3
+.Lzerobit:
+ srli a3, a3, 1
+.Lloopend:
+
+ bltu a2, a3, .Lreturn
+ sub a2, a2, a3 # subtract once more if udividend >= udivisor
+.Lreturn:
+ bgez a7, .Lpositive
+ neg a2, a2 # if (dividend < 0), return -udividend
+.Lpositive:
+ retw
+
+.Lspecial:
+ bltu a2, a3, .Lreturn2
+ sub a2, a2, a3 # subtract once if dividend >= divisor
+.Lreturn2:
+ bgez a7, .Lpositive2
+ neg a2, a2 # if (dividend < 0), return -udividend
+.Lpositive2:
+ retw
+
+.Lle_one:
+ # udivisor is either 0 or 1, so just return 0.
+ # someday we may want to throw an exception if udivisor is 0.
+ movi a2, 0
+ retw
+.Lfe5:
+ .size __modsi3,.Lfe5-__modsi3
+
+#endif /* L_modsi3 */
diff --git a/gcc/config/xtensa/lib2funcs.S b/gcc/config/xtensa/lib2funcs.S
new file mode 100644
index 00000000000..82679e65733
--- /dev/null
+++ b/gcc/config/xtensa/lib2funcs.S
@@ -0,0 +1,205 @@
+/* Assembly functions for libgcc2.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#include "xtensa/xtensa-config.h"
+
+/* __xtensa_libgcc_window_spill: This function uses a series of nested
+ calls to flush out all but the current register window. This is
+ used to set up the stack so that arbitrary frames can be accessed.
+ The functions used for the nested calls are also reused by the
+ nonlocal goto function below. */
+
+ .align 4
+ .global __xtensa_libgcc_window_spill
+ .type __xtensa_libgcc_window_spill,@function
+__xtensa_libgcc_window_spill:
+ entry sp, 48
+ call4 .L__wdwspill_assist52 // called with call8, only need a call4
+ retw
+ .size __xtensa_libgcc_window_spill,.-__xtensa_libgcc_window_spill
+
+ .align 4
+.L__wdwspill_assist56:
+ entry sp, 16
+ call4 .L__wdwspill_assist52
+ retw
+ .align 4
+.L__wdwspill_assist52:
+ entry sp, 48
+ call12 .L__wdwspill_assist40
+ retw
+ .align 4
+.L__wdwspill_assist40:
+ entry sp, 48
+ call12 .L__wdwspill_assist28
+ retw
+ .align 4
+.L__wdwspill_assist28:
+ entry sp, 48
+ call12 .L__wdwspill_assist16
+ retw
+ .align 4
+.L__wdwspill_assist16:
+ entry sp, 16
+ movi a15, 0
+ retw
+
+
+/* __xtensa_nonlocal_goto: This code does all the hard work of a
+ nonlocal goto on Xtensa. It is here in the library to avoid the
+ code size bloat of generating it in-line. There are two
+ arguments:
+
+ a2 = frame pointer for the procedure containing the label
+ a3 = goto handler address
+
+ This function never returns to its caller but instead goes directly
+ to the address of the specified goto handler. */
+
+ .align 4
+ .global __xtensa_nonlocal_goto
+ .type __xtensa_nonlocal_goto,@function
+__xtensa_nonlocal_goto:
+ entry sp, 32
+
+ /* flush registers */
+ call8 .L__wdwspill_assist56
+
+ /* Because the save area for a0-a3 is stored one frame below
+ the one identified by a2, the only way to restore those
+ registers is to unwind the stack. If alloca() were never
+ called, we could just unwind until finding the sp value
+ matching a2. However, a2 is a frame pointer, not a stack
+ pointer, and may not be encountered during the unwinding.
+ The solution is to unwind until going _past_ the value
+ given by a2. This involves keeping three stack pointer
+ values during the unwinding:
+
+ next = sp of frame N-1
+ cur = sp of frame N
+ prev = sp of frame N+1
+
+ When next > a2, the desired save area is stored relative
+ to prev. At this point, cur will be the same as a2
+ except in the alloca() case.
+
+ Besides finding the values to be restored to a0-a3, we also
+ need to find the current window size for the target
+ function. This can be extracted from the high bits of the
+ return address, initially in a0. As the unwinding
+ proceeds, the window size is taken from the value of a0
+ saved _two_ frames below the current frame. */
+
+ addi a5, sp, -16 # a5 = prev - save area
+ l32i a6, a5, 4
+ addi a6, a6, -16 # a6 = cur - save area
+ mov a8, a0 # a8 = return address (for window size)
+ j .Lfirstframe
+
+.Lnextframe:
+ l32i a8, a5, 0 # next return address (for window size)
+ mov a5, a6 # advance prev
+ addi a6, a7, -16 # advance cur
+.Lfirstframe:
+ l32i a7, a6, 4 # a7 = next
+ bge a2, a7, .Lnextframe
+
+ /* At this point, prev (a5) points to the save area with the saved
+ values of a0-a3. Copy those values into the save area at the
+ current sp so they will be reloaded when the return from this
+ function underflows. We don't have to worry about exceptions
+ while updating the current save area, because the windows have
+ already been flushed. */
+
+ addi a4, sp, -16 # a4 = save area of this function
+ l32i a6, a5, 0
+ l32i a7, a5, 4
+ s32i a6, a4, 0
+ s32i a7, a4, 4
+ l32i a6, a5, 8
+ l32i a7, a5, 12
+ s32i a6, a4, 8
+ s32i a7, a4, 12
+
+ /* Set return address to goto handler. Use the window size bits
+ from the return address two frames below the target. */
+ extui a8, a8, 30, 2 # get window size from return addr.
+ slli a3, a3, 2 # get goto handler addr. << 2
+ ssai 2
+ src a0, a8, a3 # combine them with a funnel shift
+
+ retw
+ .size __xtensa_nonlocal_goto,.-__xtensa_nonlocal_goto
+
+
+/* __xtensa_sync_caches: This function is called after writing a trampoline
+ on the stack to force all the data writes to memory and invalidate the
+ instruction cache. a2 is the address of the new trampoline.
+
+ After the trampoline data is written out, it must be flushed out of
+ the data cache into memory. We use DHWB in case we have a writeback
+ cache. At least one DHWB instruction is needed for each data cache
+ line which may be touched by the trampoline. An ISYNC instruction
+ must follow the DHWBs.
+
+ We have to flush the i-cache to make sure that the new values get used.
+ At least one IHI instruction is needed for each i-cache line which may
+ be touched by the trampoline. An ISYNC instruction is also needed to
+ make sure that the modified instructions are loaded into the instruction
+ fetch buffer. */
+
+#define TRAMPOLINE_SIZE 49
+
+ .text
+ .align 4
+ .global __xtensa_sync_caches
+ .type __xtensa_sync_caches,@function
+__xtensa_sync_caches:
+ entry sp, 32
+#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
+ # Flush the trampoline from the data cache
+ extui a4, a2, 0, XCHAL_DCACHE_LINEWIDTH
+ addi a4, a4, TRAMPOLINE_SIZE
+ addi a4, a4, (1 << XCHAL_DCACHE_LINEWIDTH) - 1
+ srli a4, a4, XCHAL_DCACHE_LINEWIDTH
+ mov a3, a2
+.Ldcache_loop:
+ dhwb a3, 0
+ addi a3, a3, (1 << XCHAL_DCACHE_LINEWIDTH)
+ addi a4, a4, -1
+ bnez a4, .Ldcache_loop
+ isync
+#endif
+#if XCHAL_ICACHE_SIZE > 0
+ # Invalidate the corresponding lines in the instruction cache
+ extui a4, a2, 0, XCHAL_ICACHE_LINEWIDTH
+ addi a4, a4, TRAMPOLINE_SIZE
+ addi a4, a4, (1 << XCHAL_ICACHE_LINEWIDTH) - 1
+ srli a4, a4, XCHAL_ICACHE_LINEWIDTH
+.Licache_loop:
+ ihi a2, 0
+ addi a2, a2, (1 << XCHAL_ICACHE_LINEWIDTH)
+ addi a4, a4, -1
+ bnez a4, .Licache_loop
+ isync
+#endif
+ retw
+ .size __xtensa_sync_caches,.-__xtensa_sync_caches
diff --git a/gcc/config/xtensa/linux.h b/gcc/config/xtensa/linux.h
new file mode 100644
index 00000000000..04c6b7f3d6d
--- /dev/null
+++ b/gcc/config/xtensa/linux.h
@@ -0,0 +1,94 @@
+/* Xtensa Linux configuration.
+ Derived from the configuration for GCC for Intel i386 running Linux.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (Xtensa GNU/Linux with ELF)", stderr);
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{v} %{mno-density:--no-density} \
+ %{mtext-section-literals:--text-section-literals} \
+ %{mno-text-section-literals:--no-text-section-literals} \
+ %{mtarget-align:--target-align} \
+ %{mno-target-align:--no-target-align} \
+ %{mlongcalls:--longcalls} \
+ %{mno-longcalls:--no-longcalls}"
+
+#undef ASM_FINAL_SPEC
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{shared: -lc} \
+ %{!shared: %{pthread:-lpthread} \
+ %{profile:-lc_p} %{!profile: -lc}}"
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}} \
+ %{static:-static}}}"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-D__XTENSA__ -D__ELF__ -Acpu=xtensa -Amachine=xtensa \
+ -Dunix -Dlinux -Asystem=posix"
+
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX "."
+
+/* Don't switch sections in the middle of a literal pool! */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX,ALIGN)
+
+/* Always enable "-fpic" for Xtensa Linux. */
+#define XTENSA_ALWAYS_PIC 1
+
+/* Redefine the standard ELF version of ASM_DECLARE_FUNCTION_SIZE to
+ allow adding the ".end literal_prefix" directive at the end of the
+ function. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ \
+ labelno++; \
+ \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ \
+ fprintf (FILE, "%s", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ XTENSA_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL); \
+ } \
+ while (0)
diff --git a/gcc/config/xtensa/t-xtensa b/gcc/config/xtensa/t-xtensa
new file mode 100644
index 00000000000..76b8df65efe
--- /dev/null
+++ b/gcc/config/xtensa/t-xtensa
@@ -0,0 +1,28 @@
+# Use GCC's floating-point emulation code
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+########################################################################
+
+# Skip the libgcc1 test.
+LIBGCC1_TEST =
+
+# Don't run fixproto
+STMP_FIXPROTO =
+
+# Build crtbegin and crtend with the "longcalls" option
+CRTSTUFF_T_CFLAGS += -mlongcalls
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = xtensa/lib1funcs.asm
+LIB1ASMFUNCS = _mulsi3 _nsau _divsi3 _modsi3 _udivsi3 _umodsi3
+
+TARGET_LIBGCC2_CFLAGS += -mlongcalls
+
+LIB2FUNCS_EXTRA += $(srcdir)/config/xtensa/lib2funcs.S
diff --git a/gcc/config/xtensa/xtensa-config.h b/gcc/config/xtensa/xtensa-config.h
new file mode 100644
index 00000000000..277efb2a822
--- /dev/null
+++ b/gcc/config/xtensa/xtensa-config.h
@@ -0,0 +1,50 @@
+/* Xtensa configuration settings.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+This program is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; if not, write to the Free Software
+Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+*/
+
+#ifndef XTENSA_CONFIG_H
+#define XTENSA_CONFIG_H
+
+#define XCHAL_HAVE_BE 1
+#define XCHAL_HAVE_DENSITY 1
+#define XCHAL_HAVE_MAC16 0
+#define XCHAL_HAVE_MUL16 0
+#define XCHAL_HAVE_MUL32 0
+#define XCHAL_HAVE_DIV32 0
+#define XCHAL_HAVE_NSA 1
+#define XCHAL_HAVE_MINMAX 0
+#define XCHAL_HAVE_SEXT 0
+#define XCHAL_HAVE_BOOLEANS 0
+#define XCHAL_HAVE_FP 0
+#define XCHAL_HAVE_FP_DIV 0
+#define XCHAL_HAVE_FP_RECIP 0
+#define XCHAL_HAVE_FP_SQRT 0
+#define XCHAL_HAVE_FP_RSQRT 0
+
+#define XCHAL_ICACHE_SIZE 8192
+#define XCHAL_DCACHE_SIZE 8192
+#define XCHAL_ICACHE_LINESIZE 16
+#define XCHAL_DCACHE_LINESIZE 16
+#define XCHAL_ICACHE_LINEWIDTH 4
+#define XCHAL_DCACHE_LINEWIDTH 4
+#define XCHAL_DCACHE_IS_WRITEBACK 0
+
+#define XCHAL_HAVE_MMU 1
+#define XCHAL_MMU_MIN_PTE_PAGE_SIZE 12
+
+#endif /* !XTENSA_CONFIG_H */
diff --git a/gcc/config/xtensa/xtensa-protos.h b/gcc/config/xtensa/xtensa-protos.h
new file mode 100644
index 00000000000..bb18e3ae84a
--- /dev/null
+++ b/gcc/config/xtensa/xtensa-protos.h
@@ -0,0 +1,116 @@
+/* Prototypes of target machine for GNU compiler for Xtensa.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#ifndef __XTENSA_PROTOS_H__
+#define __XTENSA_PROTOS_H__
+
+/* Functions to test whether an immediate fits in a given field. */
+extern int xtensa_simm7 PARAMS ((int));
+extern int xtensa_simm8 PARAMS ((int));
+extern int xtensa_simm8x256 PARAMS ((int));
+extern int xtensa_simm12b PARAMS ((int));
+extern int xtensa_uimm8 PARAMS ((int));
+extern int xtensa_uimm8x2 PARAMS ((int));
+extern int xtensa_uimm8x4 PARAMS ((int));
+extern int xtensa_ai4const PARAMS ((int));
+extern int xtensa_lsi4x4 PARAMS ((int));
+extern int xtensa_b4const PARAMS ((int));
+extern int xtensa_b4constu PARAMS ((int));
+extern int xtensa_tp7 PARAMS ((int));
+
+/* Functions within xtensa.c that we reference. */
+#ifdef RTX_CODE
+extern int xt_true_regnum PARAMS ((rtx));
+extern int add_operand PARAMS ((rtx, enum machine_mode));
+extern int arith_operand PARAMS ((rtx, enum machine_mode));
+extern int nonimmed_operand PARAMS ((rtx, enum machine_mode));
+extern int mem_operand PARAMS ((rtx, enum machine_mode));
+extern int non_acc_reg_operand PARAMS ((rtx, enum machine_mode));
+extern int mask_operand PARAMS ((rtx, enum machine_mode));
+extern int extui_fldsz_operand PARAMS ((rtx, enum machine_mode));
+extern int sext_operand PARAMS ((rtx, enum machine_mode));
+extern int sext_fldsz_operand PARAMS ((rtx, enum machine_mode));
+extern int lsbitnum_operand PARAMS ((rtx, enum machine_mode));
+extern int branch_operand PARAMS ((rtx, enum machine_mode));
+extern int ubranch_operand PARAMS ((rtx, enum machine_mode));
+extern int call_insn_operand PARAMS ((rtx, enum machine_mode));
+extern int move_operand PARAMS ((rtx, enum machine_mode));
+extern int smalloffset_mem_p PARAMS ((rtx));
+extern int smalloffset_double_mem_p PARAMS ((rtx));
+extern int constantpool_address_p PARAMS ((rtx));
+extern int constantpool_mem_p PARAMS ((rtx));
+extern int non_const_move_operand PARAMS ((rtx, enum machine_mode));
+extern int const_float_1_operand PARAMS ((rtx, enum machine_mode));
+extern int fpmem_offset_operand PARAMS ((rtx, enum machine_mode));
+extern void xtensa_extend_reg PARAMS ((rtx, rtx));
+extern void xtensa_load_constant PARAMS ((rtx, rtx));
+extern int branch_operator PARAMS ((rtx, enum machine_mode));
+extern int ubranch_operator PARAMS ((rtx, enum machine_mode));
+extern int boolean_operator PARAMS ((rtx, enum machine_mode));
+extern void xtensa_expand_conditional_branch PARAMS ((rtx *, enum rtx_code));
+extern int xtensa_expand_conditional_move PARAMS ((rtx *, int));
+extern int xtensa_expand_scc PARAMS ((rtx *));
+extern int xtensa_expand_block_move PARAMS ((rtx *));
+extern int xtensa_emit_move_sequence PARAMS ((rtx *, enum machine_mode));
+extern void xtensa_emit_block_move PARAMS ((rtx *, rtx *, int));
+extern void xtensa_expand_nonlocal_goto PARAMS ((rtx *));
+extern void xtensa_emit_loop_end PARAMS ((rtx, rtx *));
+extern char * xtensa_emit_call PARAMS ((int, rtx *));
+
+#ifdef TREE_CODE
+extern void init_cumulative_args PARAMS ((CUMULATIVE_ARGS *, tree, rtx));
+extern void xtensa_va_start PARAMS ((int, tree, rtx));
+extern rtx xtensa_va_arg PARAMS ((tree, tree));
+#endif /* TREE_CODE */
+
+extern void print_operand PARAMS ((FILE *, rtx, int));
+extern void print_operand_address PARAMS ((FILE *, rtx));
+extern void xtensa_output_literal
+ PARAMS ((FILE *, rtx, enum machine_mode, int labelno));
+extern void xtensa_reorg PARAMS ((rtx));
+extern rtx xtensa_builtin_saveregs PARAMS ((void));
+extern enum reg_class xtensa_secondary_reload_class
+ PARAMS ((enum reg_class, enum machine_mode, rtx, int));
+extern int a7_overlap_mentioned_p PARAMS ((rtx x));
+#endif /* RTX_CODE */
+
+#ifdef TREE_CODE
+extern void function_arg_advance
+ PARAMS ((CUMULATIVE_ARGS *, enum machine_mode, tree));
+extern struct rtx_def * function_arg
+ PARAMS ((CUMULATIVE_ARGS *, enum machine_mode, tree, int));
+extern tree xtensa_build_va_list PARAMS ((void));
+#endif /* TREE_CODE */
+
+extern int xtensa_mask_immediate PARAMS ((int));
+extern int xtensa_mem_offset PARAMS ((unsigned, enum machine_mode));
+extern void xtensa_setup_frame_addresses PARAMS ((void));
+extern int xtensa_dbx_register_number PARAMS ((int));
+extern void override_options PARAMS ((void));
+extern void xtensa_declare_object
+ PARAMS ((FILE *, char *, char *, char *, int));
+extern long compute_frame_size PARAMS ((int));
+extern int xtensa_frame_pointer_required PARAMS ((void));
+extern void xtensa_function_prologue PARAMS ((FILE *, int));
+extern void xtensa_function_epilogue PARAMS ((FILE *, int));
+extern void order_regs_for_local_alloc PARAMS ((void));
+
+#endif /* !__XTENSA_PROTOS_H__ */
diff --git a/gcc/config/xtensa/xtensa.c b/gcc/config/xtensa/xtensa.c
new file mode 100644
index 00000000000..979b3e216bd
--- /dev/null
+++ b/gcc/config/xtensa/xtensa.c
@@ -0,0 +1,2658 @@
+/* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "regs.h"
+#include "machmode.h"
+#include "hard-reg-set.h"
+#include "basic-block.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "insn-attr.h"
+#include "insn-codes.h"
+#include "recog.h"
+#include "output.h"
+#include "tree.h"
+#include "expr.h"
+#include "flags.h"
+#include "reload.h"
+#include "tm_p.h"
+#include "function.h"
+#include "toplev.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "target.h"
+#include "target-def.h"
+
+/* Enumeration for all of the relational tests, so that we can build
+ arrays indexed by the test type, and not worry about the order
+ of EQ, NE, etc. */
+
enum internal_test {
  ITEST_EQ,
  ITEST_NE,
  ITEST_GT,
  ITEST_GE,
  ITEST_LT,
  ITEST_LE,
  ITEST_GTU,			/* unsigned variants */
  ITEST_GEU,
  ITEST_LTU,
  ITEST_LEU,
  ITEST_MAX			/* table size; also "unrecognized code" */
  };
+
/* Cached operands, and operator to compare for use in set/branch on
   condition codes.  Filled in by the cmpsi/cmpsf expanders; consumed
   by xtensa_expand_conditional_branch and gen_conditional_move.  */
rtx branch_cmp[2];

/* What type of branch to use (integer, SF or DF compare).  */
enum cmp_type branch_type;

/* Array giving truth value on whether or not a given hard register
   can support a given mode.  */
char xtensa_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* Current frame size calculated by compute_frame_size.  */
unsigned xtensa_current_frame_size;

/* Tables of ld/st opcode names for block moves, indexed by mode.  */
const char *xtensa_ld_opcodes[(int) MAX_MACHINE_MODE];
const char *xtensa_st_opcodes[(int) MAX_MACHINE_MODE];
/* Largest number of pieces moved inline; see the BLKmode worst case
   in xtensa_mem_offset.  */
#define LARGEST_MOVE_RATIO 15

/* Define the structure for the machine field in struct function.  */
struct machine_function
{
  /* Nonzero if the function accesses the previous frame.
     NOTE(review): presumably set by xtensa_setup_frame_addresses --
     confirm against its definition.  */
  int accesses_prev_frame;
};
+
+/* Vector, indexed by hard register number, which contains 1 for a
+ register that is allowable in a candidate for leaf function
+ treatment. */
+
const char xtensa_leaf_regs[FIRST_PSEUDO_REGISTER] =
{
  /* Every hard register is acceptable (all entries are 1).  */
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1
};
+
+/* Map hard register number to register class */
const enum reg_class xtensa_regno_to_class[FIRST_PSEUDO_REGISTER] =
{
  /* Registers 0-15: general registers; register 1 is the stack
     pointer (its class is SP_REG).  */
  GR_REGS, SP_REG, GR_REGS, GR_REGS,
  GR_REGS, GR_REGS, GR_REGS, GR_REGS,
  GR_REGS, GR_REGS, GR_REGS, GR_REGS,
  GR_REGS, GR_REGS, GR_REGS, GR_REGS,
  /* 16-17: AR_REGS; 18: branch/boolean registers.  NOTE(review):
     16-17 are presumably the soft frame/arg pointers -- confirm
     against the register layout in xtensa.h.  */
  AR_REGS, AR_REGS, BR_REGS,
  /* 19-34: floating-point registers.  */
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  /* 35: the accumulator register.  */
  ACC_REG,
};
+
+/* Map register constraint character to register class. */
enum reg_class xtensa_char_to_class[256] =
{
  /* All 256 entries start out as NO_REGS.  NOTE(review): entries for
     the machine's constraint letters are presumably filled in at
     runtime (e.g. by override_options) -- confirm.  */
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
};
+
/* This hook generates the assembly code for function entry.
   FILE is a stdio stream to output the code to.
   SIZE is an int: how many units of temporary storage to allocate.
   Refer to the array 'regs_ever_live' to determine which registers
   to save; 'regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  The hook is responsible for
   knowing which registers should not be saved even if used.  */
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE xtensa_function_prologue
+
/* This hook generates the assembly code for function exit, on
   machines that need it.  If the hook does nothing, individual
   return instructions are generated for each return statement.
   Arguments are the same as for the prologue hook.  */
+
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE xtensa_function_epilogue
+
+/* These hooks specify assembly directives for creating certain kinds
+ of integer object. */
+
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+static int b4const_or_zero PARAMS ((int));
+static enum internal_test map_test_to_internal_test PARAMS ((enum rtx_code));
+static rtx gen_int_relational PARAMS ((enum rtx_code, rtx, rtx, int *));
+static rtx gen_float_relational PARAMS ((enum rtx_code, rtx, rtx));
+static rtx gen_conditional_move PARAMS ((rtx));
+static rtx fixup_subreg_mem PARAMS ((rtx x));
+static enum machine_mode xtensa_find_mode_for_size PARAMS ((unsigned));
+static void xtensa_init_machine_status PARAMS ((struct function *p));
+static void xtensa_free_machine_status PARAMS ((struct function *p));
+static void printx PARAMS ((FILE *, signed int));
+static rtx frame_size_const;
+static int current_function_arg_words;
+static const int reg_nonleaf_alloc_order[FIRST_PSEUDO_REGISTER] =
+ REG_ALLOC_ORDER;
+
+
+/*
+ * Functions to test Xtensa immediate operand validity.
+ */
+
int
xtensa_b4constu (v)
     int v;
{
  /* Values encodable in the unsigned "b4constu" 4-bit immediate
     field of conditional-branch instructions.  */
  static const int b4constu_table[] =
    { 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 };
  int i;

  for (i = 0; i < (int) (sizeof b4constu_table / sizeof b4constu_table[0]); i++)
    if (v == b4constu_table[i])
      return 1;

  return 0;
}
+
int
xtensa_simm8x256 (v)
     int v;
{
  /* A signed 8-bit value scaled by 256: a multiple of 256 in
     [-32768, 32512].  */
  if ((v & 255) != 0)
    return 0;
  return v >= -32768 && v <= 32512;
}
+
int
xtensa_ai4const (v)
     int v;
{
  /* The 4-bit "addi" immediate encodes -1 and 1..15 (zero excluded).  */
  if (v == -1)
    return 1;
  return v >= 1 && v <= 15;
}
+
int
xtensa_simm7 (v)
     int v;
{
  /* Asymmetric 7-bit signed range [-32, 95].  */
  if (v < -32 || v > 95)
    return 0;
  return 1;
}
+
int
xtensa_b4const (v)
     int v;
{
  /* Values encodable in the signed "b4const" 4-bit immediate field
     of conditional-branch instructions.  Note that 0 is NOT in the
     set; callers wanting zero use b4const_or_zero.  */
  static const int b4const_table[] =
    { -1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 };
  int i;

  for (i = 0; i < (int) (sizeof b4const_table / sizeof b4const_table[0]); i++)
    if (v == b4const_table[i])
      return 1;

  return 0;
}
+
int
xtensa_simm8 (v)
     int v;
{
  /* Standard signed 8-bit range.  */
  if (v < -128 || v > 127)
    return 0;
  return 1;
}
+
int
xtensa_tp7 (v)
     int v;
{
  /* Range [7, 22], the encodable field-size operand values.  */
  return !(v < 7 || v > 22);
}
+
int
xtensa_lsi4x4 (v)
     int v;
{
  /* An unsigned 4-bit offset scaled by 4: a multiple of 4 in [0, 60].  */
  if (v & 3)
    return 0;
  return v >= 0 && v <= 60;
}
+
int
xtensa_simm12b (v)
     int v;
{
  /* Standard signed 12-bit range.  */
  if (v < -2048 || v > 2047)
    return 0;
  return 1;
}
+
int
xtensa_uimm8 (v)
     int v;
{
  /* Unsigned 8-bit range.  */
  return !(v < 0 || v > 255);
}
+
int
xtensa_uimm8x2 (v)
     int v;
{
  /* An unsigned 8-bit offset scaled by 2: an even value in [0, 510].  */
  if (v & 1)
    return 0;
  return v >= 0 && v <= 510;
}
+
int
xtensa_uimm8x4 (v)
     int v;
{
  /* An unsigned 8-bit offset scaled by 4: a multiple of 4 in [0, 1020].  */
  if (v & 3)
    return 0;
  return v >= 0 && v <= 1020;
}
+
+
+/* This is just like the standard true_regnum() function except that it
+ works even when reg_renumber is not initialized. */
+
int
xt_true_regnum (x)
     rtx x;
{
  if (GET_CODE (x) == REG)
    {
      /* Use the post-allocation hard register when one has been
	 assigned to this pseudo.  */
      if (reg_renumber
	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
	  && reg_renumber[REGNO (x)] >= 0)
	return reg_renumber[REGNO (x)];
      return REGNO (x);
    }
  if (GET_CODE (x) == SUBREG)
    {
      int base = xt_true_regnum (SUBREG_REG (x));
      /* Adjust by the subreg offset only for hard registers.  */
      if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
	return base + subreg_regno_offset (REGNO (SUBREG_REG (x)),
					   GET_MODE (SUBREG_REG (x)),
					   SUBREG_BYTE (x), GET_MODE (x));
    }
  /* Not a (subreg of a) register.  */
  return -1;
}
+
+
/* Accept a register, or a constant valid as an immediate addend:
   a signed 8-bit value or a signed 8-bit multiple of 256.  */

int
add_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return (xtensa_simm8 (INTVAL (op)) ||
	    xtensa_simm8x256 (INTVAL (op)));

  return register_operand (op, mode);
}
+
+
/* Accept a register or a signed 8-bit constant.  */

int
arith_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return xtensa_simm8 (INTVAL (op));

  return register_operand (op, mode);
}
+
+
/* Accept a register or a memory operand that is not in the constant
   pool.  */

int
nonimmed_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* We cannot use the standard nonimmediate_operand() predicate because
     it includes constant pool memory operands.  */

  if (memory_operand (op, mode))
    return !constantpool_address_p (XEXP (op, 0));

  return register_operand (op, mode);
}
+
+
/* Accept a memory operand that is not in the constant pool.  */

int
mem_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* We cannot use the standard memory_operand() predicate because
     it includes constant pool memory operands.  */

  if (memory_operand (op, mode))
    return !constantpool_address_p (XEXP (op, 0));

  return FALSE;
}
+
+
/* Accept any register except the accumulator (ACC_REG_P).  */

int
non_acc_reg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return !ACC_REG_P (xt_true_regnum (op));
  return FALSE;
}
+
+
/* Accept a register, or a constant that is a contiguous low-bit mask
   of at most 16 bits (see xtensa_mask_immediate).  */

int
mask_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return xtensa_mask_immediate (INTVAL (op));

  return register_operand (op, mode);
}
+
+
/* Accept a constant field size N such that the mask (1 << N) - 1 is a
   valid immediate mask, i.e. N in [1, 16].  NOTE(review): values of
   N >= the width of int would make the shift undefined; presumably
   the insn patterns restrict the operand range first -- confirm.  */

int
extui_fldsz_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT)
	  && xtensa_mask_immediate ((1 << INTVAL (op)) - 1));
}
+
+
/* Operand for sign extension: with the SEXT option, a register or
   non-constant-pool memory; otherwise memory only.  */

int
sext_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (TARGET_SEXT)
    return nonimmed_operand (op, mode);
  return mem_operand (op, mode);
}
+
+
/* Accept a constant sign-extend field size: xtensa_tp7 (v - 1)
   restricts the value to the range [8, 23].  */

int
sext_fldsz_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT) && xtensa_tp7 (INTVAL (op) - 1));
}
+
+
/* Accept a constant naming the least-significant bit position in
   memory order: bit (BITS_PER_WORD - 1) when bits are numbered
   big-endian, bit 0 otherwise.  */

int
lsbitnum_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT)
    {
      return (BITS_BIG_ENDIAN
	      ? (INTVAL (op) == BITS_PER_WORD-1)
	      : (INTVAL (op) == 0));
    }
  return FALSE;
}
+
+
/* Like xtensa_b4const but also accepts zero (branch comparisons
   against zero have their own encodings).  */

static int
b4const_or_zero (v)
     int v;
{
  return (v == 0) || xtensa_b4const (v);
}
+
+
/* Operand for a signed conditional branch: a register or a
   b4const-or-zero constant.  */

int
branch_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return b4const_or_zero (INTVAL (op));

  return register_operand (op, mode);
}
+
+
/* Operand for an unsigned conditional branch: a register or a
   b4constu constant.  */

int
ubranch_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == CONST_INT)
    return xtensa_b4constu (INTVAL (op));

  return register_operand (op, mode);
}
+
+
/* Accept a call target: a register (excluding the arg pointer and
   the virtual-register range, which would need reloading), or a
   constant address.  With PIC, direct calls are allowed only to
   symbols marked local/static via SYMBOL_REF_FLAG.  */

int
call_insn_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if ((GET_CODE (op) == REG)
      && (op != arg_pointer_rtx)
      && ((REGNO (op) < FRAME_POINTER_REGNUM)
	  || (REGNO (op) > LAST_VIRTUAL_REGISTER)))
    return TRUE;

  if (CONSTANT_ADDRESS_P (op))
    {
      /* Direct calls only allowed to static functions with PIC.  */
      return (!flag_pic || (GET_CODE (op) == SYMBOL_REF
			    && SYMBOL_REF_FLAG (op)));
    }

  return FALSE;
}
+
+
/* General move-source operand: register, CONSTANT_P_RTX, a signed
   12-bit constant (the MOVI range), or memory with a valid address.  */

int
move_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return TRUE;

  /* Accept CONSTANT_P_RTX, since it will be gone by CSE1 and
     result in 0/1.  */
  if (GET_CODE (op) == CONSTANT_P_RTX)
    return TRUE;

  if (GET_CODE (op) == CONST_INT)
    return xtensa_simm12b (INTVAL (op));

  if (GET_CODE (op) == MEM)
    return memory_address_p (mode, XEXP (op, 0));

  return FALSE;
}
+
+
/* Accept a MEM whose address is a base register, or base plus a
   small scaled offset (xtensa_lsi4x4: multiple of 4 in [0, 60]).
   The constant may appear as either PLUS operand.  */

int
smalloffset_mem_p (op)
     rtx op;
{
  if (GET_CODE (op) == MEM)
    {
      rtx addr = XEXP (op, 0);
      if (GET_CODE (addr) == REG)
	return REG_OK_FOR_BASE_P (addr);
      if (GET_CODE (addr) == PLUS)
	{
	  /* Check both operands for the constant offset.  */
	  rtx offset = XEXP (addr, 0);
	  if (GET_CODE (offset) != CONST_INT)
	    offset = XEXP (addr, 1);
	  if (GET_CODE (offset) != CONST_INT)
	    return FALSE;
	  return xtensa_lsi4x4 (INTVAL (offset));
	}
    }
  return FALSE;
}
+
+
/* Like smalloffset_mem_p, but both words of a double-word access
   must be addressable with a small offset.  */

int
smalloffset_double_mem_p (op)
     rtx op;
{
  if (!smalloffset_mem_p (op))
    return FALSE;
  return smalloffset_mem_p (adjust_address (op, GET_MODE (op), 4));
}
+
+
/* Return nonzero if ADDR refers to the constant pool: either a
   constant-pool SYMBOL_REF directly, or (CONST (PLUS sym offset))
   with a word-aligned constant offset.  */

int
constantpool_address_p (addr)
     rtx addr;
{
  rtx sym = addr;

  if (GET_CODE (addr) == CONST)
    {
      rtx offset;

      /* only handle (PLUS (SYM, OFFSET)) form */
      addr = XEXP (addr, 0);
      if (GET_CODE (addr) != PLUS)
	return FALSE;

      /* make sure the address is word aligned */
      offset = XEXP (addr, 1);
      if ((GET_CODE (offset) != CONST_INT)
	  || ((INTVAL (offset) & 3) != 0))
	return FALSE;

      sym = XEXP (addr, 0);
    }

  if ((GET_CODE (sym) == SYMBOL_REF)
      && CONSTANT_POOL_ADDRESS_P (sym))
    return TRUE;
  return FALSE;
}
+
+
/* Return nonzero if OP is a MEM whose address is in the constant
   pool.  */

int
constantpool_mem_p (op)
     rtx op;
{
  if (GET_CODE (op) == MEM)
    return constantpool_address_p (XEXP (op, 0));
  return FALSE;
}
+
+
/* Move operand that is not a constant: a register, or (a subreg of)
   memory with a valid address.  */

int
non_const_move_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;
  /* Look through a subreg wrapper around memory.  */
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == MEM)
    return memory_address_p (mode, XEXP (op, 0));
  return FALSE;
}
+
+
+/* Accept the floating point constant 1 in the appropriate mode. */
+
int
const_float_1_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  REAL_VALUE_TYPE d;
  /* Cached representations of 1.0, built once on first use.  */
  static REAL_VALUE_TYPE onedf;
  static REAL_VALUE_TYPE onesf;
  static int one_initialized;

  /* Only SFmode/DFmode CONST_DOUBLEs of the requested mode qualify.  */
  if ((GET_CODE (op) != CONST_DOUBLE)
      || (mode != GET_MODE (op))
      || (mode != DFmode && mode != SFmode))
    return FALSE;

  REAL_VALUE_FROM_CONST_DOUBLE (d, op);

  if (! one_initialized)
    {
      onedf = REAL_VALUE_ATOF ("1.0", DFmode);
      onesf = REAL_VALUE_ATOF ("1.0", SFmode);
      one_initialized = TRUE;
    }

  if (mode == DFmode)
    return REAL_VALUES_EQUAL (d, onedf);
  else
    return REAL_VALUES_EQUAL (d, onesf);
}
+
+
/* Accept a constant that is a valid SFmode memory offset.  */

int
fpmem_offset_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  if (GET_CODE (op) == CONST_INT)
    return xtensa_mem_offset (INTVAL (op), SFmode);
  return 0;
}
+
+
/* Sign-extend SRC into DST by shifting left to the top of the word
   and arithmetic-shifting back down.  */

void
xtensa_extend_reg (dst, src)
     rtx dst;
     rtx src;
{
  rtx temp = gen_reg_rtx (SImode);
  rtx shift = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (GET_MODE (src)));

  /* generate paradoxical subregs as needed so that the modes match */
  src = simplify_gen_subreg (SImode, src, GET_MODE (src), 0);
  dst = simplify_gen_subreg (SImode, dst, GET_MODE (dst), 0);

  emit_insn (gen_ashlsi3 (temp, src, shift));
  emit_insn (gen_ashrsi3 (dst, temp, shift));
}
+
+
/* Load constant SRC into DST by placing it in the constant pool and
   emitting a (PC-relative, SImode) load.  */

void
xtensa_load_constant (dst, src)
     rtx dst;
     rtx src;
{
  enum machine_mode mode = GET_MODE (dst);
  src = force_const_mem (SImode, src);

  /* PC-relative loads are always SImode so we have to add a SUBREG if that
     is not the desired mode */

  if (mode != SImode)
    {
      if (register_operand (dst, mode))
	dst = simplify_gen_subreg (SImode, dst, mode, 0);
      else
	{
	  /* DST is not a register: load into a temporary and narrow.  */
	  src = force_reg (SImode, src);
	  src = gen_lowpart_SUBREG (mode, src);
	}
    }

  emit_move_insn (dst, src);
}
+
+
/* Accept the signed comparison operators that have direct branch
   encodings: EQ, NE, LT, GE.  (GT/LE are handled by swapping.)  */

int
branch_operator (x, mode)
     rtx x;
     enum machine_mode mode;
{
  if (GET_MODE (x) != mode)
    return FALSE;

  switch (GET_CODE (x))
    {
    case EQ:
    case NE:
    case LT:
    case GE:
      return TRUE;
    default:
      break;
    }
  return FALSE;
}
+
+
/* Accept the unsigned comparison operators with direct branch
   encodings: LTU, GEU.  */

int
ubranch_operator (x, mode)
     rtx x;
     enum machine_mode mode;
{
  if (GET_MODE (x) != mode)
    return FALSE;

  switch (GET_CODE (x))
    {
    case LTU:
    case GEU:
      return TRUE;
    default:
      break;
    }
  return FALSE;
}
+
+
/* Accept the operators usable on boolean results: EQ, NE.  */

int
boolean_operator (x, mode)
     rtx x;
     enum machine_mode mode;
{
  if (GET_MODE (x) != mode)
    return FALSE;

  switch (GET_CODE (x))
    {
    case EQ:
    case NE:
      return TRUE;
    default:
      break;
    }
  return FALSE;
}
+
+
/* Return nonzero if V is a contiguous mask of low-order bits, i.e.
   V == (1 << width) - 1 for some width in [1, MAX_MASK_SIZE].  */

int
xtensa_mask_immediate (v)
     int v;
{
#define MAX_MASK_SIZE 16
  int width;

  for (width = 1; width <= MAX_MASK_SIZE; width++)
    if (v == (1 << width) - 1)
      return 1;

  return 0;
}
+
+
/* Return nonzero if V is a legitimate memory offset for MODE:
   byte offsets for QImode, 2-byte-aligned for HImode, word-aligned
   (for both words) for DFmode, word-aligned otherwise.  */

int
xtensa_mem_offset (v, mode)
     unsigned v;
     enum machine_mode mode;
{
  switch (mode)
    {
    case BLKmode:
      /* Handle the worst case for block moves.  See xtensa_expand_block_move
	 where we emit an optimized block move operation if the block can be
	 moved in < "move_ratio" pieces.  The worst case is when the block is
	 aligned but has a size of (3 mod 4) (does this happen?) so that the
	 last piece requires a byte load/store.  */
      return (xtensa_uimm8 (v) &&
	      xtensa_uimm8 (v + MOVE_MAX * LARGEST_MOVE_RATIO));

    case QImode:
      return xtensa_uimm8 (v);

    case HImode:
      return xtensa_uimm8x2 (v);

    case DFmode:
      /* Both halves of the double must be addressable.  */
      return (xtensa_uimm8x4 (v) && xtensa_uimm8x4 (v + 4));

    default:
      break;
    }

  return xtensa_uimm8x4 (v);
}
+
+
+/* Make normal rtx_code into something we can index from an array */
+
/* Make normal rtx_code into something we can index from an array.
   Returns ITEST_MAX for codes that are not integer comparisons.  */

static enum internal_test
map_test_to_internal_test (test_code)
     enum rtx_code test_code;
{
  enum internal_test test = ITEST_MAX;

  switch (test_code)
    {
    default:			break;
    case EQ:  test = ITEST_EQ;  break;
    case NE:  test = ITEST_NE;  break;
    case GT:  test = ITEST_GT;  break;
    case GE:  test = ITEST_GE;  break;
    case LT:  test = ITEST_LT;  break;
    case LE:  test = ITEST_LE;  break;
    case GTU: test = ITEST_GTU; break;
    case GEU: test = ITEST_GEU; break;
    case LTU: test = ITEST_LTU; break;
    case LEU: test = ITEST_LEU; break;
    }

  return test;
}
+
+
+/* Generate the code to compare two integer values. The return value is
+ the comparison expression. */
+
static rtx
gen_int_relational (test_code, cmp0, cmp1, p_invert)
     enum rtx_code test_code;	/* relational test (EQ, etc) */
     rtx cmp0;			/* first operand to compare */
     rtx cmp1;			/* second operand to compare */
     int *p_invert;		/* whether branch needs to reverse its test */
{
  /* Per-test description of how to map the comparison onto the four
     directly-supported branch codes (EQ/NE/LT/GE and LTU/GEU).  */
  struct cmp_info {
    enum rtx_code test_code;	/* test code to use in insn */
    int (*const_range_p) PARAMS ((int)); /* predicate function to check range */
    int const_add;		/* constant to add (convert LE -> LT) */
    int reverse_regs;		/* reverse registers in test */
    int invert_const;		/* != 0 if invert value if cmp1 is constant */
    int invert_reg;		/* != 0 if invert value if cmp1 is register */
    int unsignedp;		/* != 0 for unsigned comparisons.  */
  };

  static struct cmp_info info[ (int)ITEST_MAX ] = {

    { EQ,	b4const_or_zero,	0, 0, 0, 0, 0 },	/* EQ  */
    { NE,	b4const_or_zero,	0, 0, 0, 0, 0 },	/* NE  */

    { LT,	b4const_or_zero,	1, 1, 1, 0, 0 },	/* GT  */
    { GE,	b4const_or_zero,	0, 0, 0, 0, 0 },	/* GE  */
    { LT,	b4const_or_zero,	0, 0, 0, 0, 0 },	/* LT  */
    { GE,	b4const_or_zero,	1, 1, 1, 0, 0 },	/* LE  */

    { LTU,	xtensa_b4constu,	1, 1, 1, 0, 1 },	/* GTU */
    { GEU,	xtensa_b4constu,	0, 0, 0, 0, 1 },	/* GEU */
    { LTU,	xtensa_b4constu,	0, 0, 0, 0, 1 },	/* LTU */
    { GEU,	xtensa_b4constu,	1, 1, 1, 0, 1 },	/* LEU */
  };

  enum internal_test test;
  enum machine_mode mode;
  struct cmp_info *p_info;

  test = map_test_to_internal_test (test_code);
  if (test == ITEST_MAX)
    abort ();

  p_info = &info[ (int)test ];

  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Make sure we can handle any constants given to us.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (cmp1);
      unsigned HOST_WIDE_INT uvalue = (unsigned HOST_WIDE_INT)value;

      /* if the immediate overflows or does not fit in the immediate field,
	 spill it to a register */

      if ((p_info->unsignedp ?
	   (uvalue + p_info->const_add > uvalue) :
	   (value + p_info->const_add > value)) != (p_info->const_add > 0))
	{
	  cmp1 = force_reg (mode, cmp1);
	}
      else if (!(p_info->const_range_p) (value + p_info->const_add))
	{
	  cmp1 = force_reg (mode, cmp1);
	}
    }
  else if ((GET_CODE (cmp1) != REG) && (GET_CODE (cmp1) != SUBREG))
    {
      cmp1 = force_reg (mode, cmp1);
    }

  /* See if we need to invert the result.  */
  *p_invert = ((GET_CODE (cmp1) == CONST_INT)
	       ? p_info->invert_const
	       : p_info->invert_reg);

  /* Comparison to constants, may involve adding 1 to change a LT into LE.
     Comparison between two registers, may involve switching operands.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      if (p_info->const_add != 0)
	cmp1 = GEN_INT (INTVAL (cmp1) + p_info->const_add);

    }
  else if (p_info->reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  return gen_rtx (p_info->test_code, VOIDmode, cmp0, cmp1);
}
+
+
+/* Generate the code to compare two float values. The return value is
+ the comparison expression. */
+
static rtx
gen_float_relational (test_code, cmp0, cmp1)
     enum rtx_code test_code;	/* relational test (EQ, etc) */
     rtx cmp0;			/* first operand to compare */
     rtx cmp1;			/* second operand to compare */
{
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx));
  rtx brtmp;
  int reverse_regs, invert;

  /* Only EQ/LE/LT compares exist; NE inverts EQ, and GT/GE swap the
     operands of LT/LE.  */
  switch (test_code)
    {
    case EQ: reverse_regs = 0; invert = 0; gen_fn = gen_seq_sf; break;
    case NE: reverse_regs = 0; invert = 1; gen_fn = gen_seq_sf; break;
    case LE: reverse_regs = 0; invert = 0; gen_fn = gen_sle_sf; break;
    case GT: reverse_regs = 1; invert = 0; gen_fn = gen_slt_sf; break;
    case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
    case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
    default:
      fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
      reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
    }

  if (reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  /* The boolean result of the compare lands in the FP condition-code
     register; branch on it being (un)set.  */
  brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
  emit_insn (gen_fn (brtmp, cmp0, cmp1));

  return gen_rtx (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
}
+
+
/* Expand a conditional branch to operands[0] using the comparison
   operands cached in branch_cmp[] / branch_type by the cmp expander.  */

void
xtensa_expand_conditional_branch (operands, test_code)
     rtx *operands;
     enum rtx_code test_code;
{
  enum cmp_type type = branch_type;
  rtx cmp0 = branch_cmp[0];
  rtx cmp1 = branch_cmp[1];
  rtx cmp;
  int invert;
  rtx label1, label2;

  switch (type)
    {
    case CMP_DF:		/* DFmode compares are not supported */
    default:
      fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));

    case CMP_SI:
      invert = FALSE;
      cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
      break;

    case CMP_SF:
      if (!TARGET_HARD_FLOAT)
	fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
      invert = FALSE;
      cmp = gen_float_relational (test_code, cmp0, cmp1);
      break;
    }

  /* Generate the branch.  */

  label1 = gen_rtx_LABEL_REF (VOIDmode, operands[0]);
  label2 = pc_rtx;

  /* An inverted test branches on the fall-through arm instead.  */
  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
						     label1,
						     label2)));
}
+
+
/* Convert the comparison in CMP (whose operands are cached in
   branch_cmp[] / branch_type) into a comparison rtx usable by the
   conditional-move patterns, or return 0 if it cannot be handled.  */

static rtx
gen_conditional_move (cmp)
     rtx cmp;
{
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = branch_cmp[0];
  rtx op1 = branch_cmp[1];

  if (branch_type == CMP_SI)
    {
      /* Jump optimization calls get_condition() which canonicalizes
	 comparisons like (GE x <const>) to (GT x <const-1>).
	 Transform those comparisons back to GE, since that is the
	 comparison supported in Xtensa.  We shouldn't have to
	 transform <LE x const> comparisons, because neither
	 xtensa_expand_conditional_branch() nor get_condition() will
	 produce them.  */

      if ((code == GT) && (op1 == constm1_rtx))
	{
	  code = GE;
	  op1 = const0_rtx;
	}
      cmp = gen_rtx (code, VOIDmode, cc0_rtx, const0_rtx);

      if (boolean_operator (cmp, VOIDmode))
	{
	  /* swap the operands to make const0 second */
	  if (op0 == const0_rtx)
	    {
	      op0 = op1;
	      op1 = const0_rtx;
	    }

	  /* if not comparing against zero, emit a comparison (subtract) */
	  if (op1 != const0_rtx)
	    {
	      op0 = expand_binop (SImode, sub_optab, op0, op1,
				  0, 0, OPTAB_LIB_WIDEN);
	      op1 = const0_rtx;
	    }
	}
      else if (branch_operator (cmp, VOIDmode))
	{
	  /* swap the operands to make const0 second */
	  if (op0 == const0_rtx)
	    {
	      op0 = op1;
	      op1 = const0_rtx;

	      /* Swapping reverses the sense of LT/GE.  */
	      switch (code)
		{
		case LT: code = GE; break;
		case GE: code = LT; break;
		default: abort ();
		}
	    }

	  /* LT/GE can only be used against zero here.  */
	  if (op1 != const0_rtx)
	    return 0;
	}
      else
	return 0;

      return gen_rtx (code, VOIDmode, op0, op1);
    }

  if (TARGET_HARD_FLOAT && (branch_type == CMP_SF))
    return gen_float_relational (code, op0, op1);

  return 0;
}
+
+
/* Expand a conditional move (movsicc/movsfcc).  ISFLT selects the
   floating-point destination pattern.  Return 1 on success, 0 if the
   comparison cannot be handled (caller falls back).  */

int
xtensa_expand_conditional_move (operands, isflt)
    rtx *operands;
    int isflt;
{
  rtx cmp;
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));

  if (!(cmp = gen_conditional_move (operands[1])))
    return 0;

  /* Pattern "0" tests an integer comparison, "1" a boolean (CC) one.  */
  if (isflt)
    gen_fn = (branch_type == CMP_SI
	      ? gen_movsfcc_internal0
	      : gen_movsfcc_internal1);
  else
    gen_fn = (branch_type == CMP_SI
	      ? gen_movsicc_internal0
	      : gen_movsicc_internal1);

  emit_insn (gen_fn (operands[0], XEXP (cmp, 0),
		     operands[2], operands[3], cmp));
  return 1;
}
+
+
+int
+xtensa_expand_scc (operands)
+ rtx *operands;
+{
+ rtx dest = operands[0];
+ rtx cmp = operands[1];
+ rtx one_tmp, zero_tmp;
+ rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));
+
+ if (!(cmp = gen_conditional_move (cmp)))
+ return 0;
+
+ one_tmp = gen_reg_rtx (SImode);
+ zero_tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (one_tmp, const_true_rtx));
+ emit_insn (gen_movsi (zero_tmp, const0_rtx));
+
+ gen_fn = (branch_type == CMP_SI
+ ? gen_movsicc_internal0
+ : gen_movsicc_internal1);
+ emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
+ return 1;
+}
+
+
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.  */

int
xtensa_emit_move_sequence (operands, mode)
     rtx *operands;
     enum machine_mode mode;
{
  /* Constants other than small immediates (per xtensa_simm12b) must
     be loaded from the constant pool.  CONSTANT_P_RTX is a
     placeholder that must be left for later passes to resolve.  */
  if (CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != CONSTANT_P_RTX
      && (GET_CODE (operands[1]) != CONST_INT
	  || !xtensa_simm12b (INTVAL (operands[1]))))
    {
      xtensa_load_constant (operands[0], operands[1]);
      return 1;
    }

  if (!(reload_in_progress | reload_completed))
    {
      /* Before reload, make sure at least one operand is a register
	 so the resulting move is recognizable.  */
      if (!non_acc_reg_operand (operands[0], mode)
	  && !non_acc_reg_operand (operands[1], mode))
	operands[1] = force_reg (mode, operands[1]);

      /* Check if this move is copying an incoming argument in a7.  If
	 so, emit the move, followed by the special "set_frame_ptr"
	 unspec_volatile insn, at the very beginning of the function.
	 This is necessary because the register allocator will ignore
	 conflicts with a7 and may assign some other pseudo to a7.  If
	 that pseudo was assigned prior to this move, it would clobber
	 the incoming argument in a7.  By copying the argument out of
	 a7 as the very first thing, and then immediately following
	 that with an unspec_volatile to keep the scheduler away, we
	 should avoid any problems.  */

      if (a7_overlap_mentioned_p (operands[1]))
	{
	  rtx mov;
	  /* Only QI/HI/SImode moves can mention a7 here; anything
	     else is unexpected.  */
	  switch (mode)
	    {
	    case SImode:
	      mov = gen_movsi_internal (operands[0], operands[1]);
	      break;
	    case HImode:
	      mov = gen_movhi_internal (operands[0], operands[1]);
	      break;
	    case QImode:
	      mov = gen_movqi_internal (operands[0], operands[1]);
	      break;
	    default:
	      abort ();
	    }

	  /* Insert the instructions before any other argument copies.
	     (The set_frame_ptr insn comes _after_ the move, so push it
	     out first.)  */
	  push_topmost_sequence ();
	  emit_insn_after (gen_set_frame_ptr (), get_insns ());
	  emit_insn_after (mov, get_insns ());
	  pop_topmost_sequence ();

	  return 1;
	}
    }

  /* During reload we don't want to emit (subreg:X (mem:Y)) since that
     instruction won't be recognized after reload.  So we remove the
     subreg and adjust mem accordingly.  */
  if (reload_in_progress)
    {
      operands[0] = fixup_subreg_mem (operands[0]);
      operands[1] = fixup_subreg_mem (operands[1]);
    }
  return 0;
}
+
+static rtx
+fixup_subreg_mem (x)
+ rtx x;
+{
+ if (GET_CODE (x) == SUBREG
+ && GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx temp =
+ gen_rtx_SUBREG (GET_MODE (x),
+ reg_equiv_mem [REGNO (SUBREG_REG (x))],
+ SUBREG_BYTE (x));
+ x = alter_subreg (&temp);
+ }
+ return x;
+}
+
+
+/* Try to expand a block move operation to an RTL block move instruction.
+ If not optimizing or if the block size is not a constant or if the
+ block is small, the expansion fails and GCC falls back to calling
+ memcpy().
+
+ operands[0] is the destination
+ operands[1] is the source
+ operands[2] is the length
+ operands[3] is the alignment */
+
+int
+xtensa_expand_block_move (operands)
+ rtx *operands;
+{
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ int bytes = INTVAL (operands[2]);
+ int align = XINT (operands[3], 0);
+ int num_pieces, move_ratio;
+
+ /* If this is not a fixed size move, just call memcpy */
+ if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
+ return 0;
+
+ /* Anything to move? */
+ if (bytes <= 0)
+ return 1;
+
+ if (align > MOVE_MAX)
+ align = MOVE_MAX;
+
+ /* decide whether to expand inline based on the optimization level */
+ move_ratio = 4;
+ if (optimize > 2)
+ move_ratio = LARGEST_MOVE_RATIO;
+ num_pieces = (bytes / align) + (bytes % align); /* close enough anyway */
+ if (num_pieces >= move_ratio)
+ return 0;
+
+ /* make sure the memory addresses are valid */
+ operands[0] = change_address (dest, VOIDmode, NULL);
+ operands[1] = change_address (src, VOIDmode, NULL);
+
+ emit_insn (gen_movstrsi_internal (operands[0], operands[1],
+ operands[2], operands[3]));
+ return 1;
+}
+
+
/* Emit a sequence of instructions to implement a block move, trying
   to hide load delay slots as much as possible.  Load N values into
   temporary registers, store those N values, and repeat until the
   complete block has been moved.  N=delay_slots+1 */

/* One buffered load or store instruction for the block-move expander:
   the assembler template plus its two operands.  */

struct meminsnbuf {
  char template[30];		/* e.g. "l32i\t%0, %1" or "s32i\t%0, %1" */
  rtx operands[2];		/* [0] = temp register, [1] = memory ref */
};
+
/* Output the assembly for a block move as groups of loads followed by
   stores (see the comment above struct meminsnbuf).  TMPREGS must
   provide at least DELAY_SLOTS+1 scratch registers.  */

void
xtensa_emit_block_move (operands, tmpregs, delay_slots)
     rtx *operands;		/* dest mem, src mem, length, alignment */
     rtx *tmpregs;
     int delay_slots;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  rtx from_addr = XEXP (src, 0);
  rtx to_addr = XEXP (dest, 0);
  int from_struct = MEM_IN_STRUCT_P (src);
  int to_struct = MEM_IN_STRUCT_P (dest);
  int offset = 0;
  int chunk_size, item_size;
  struct meminsnbuf *ldinsns, *stinsns;
  const char *ldname, *stname;
  enum machine_mode mode;

  if (align > MOVE_MAX)
    align = MOVE_MAX;
  item_size = align;		/* bytes moved per load/store pair */
  chunk_size = delay_slots + 1;	/* loads issued before the stores */

  /* Buffers holding one chunk's worth of deferred instructions.  */
  ldinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));
  stinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));

  /* Pick the widest mode that can be both loaded and stored.  */
  mode = xtensa_find_mode_for_size (item_size);
  item_size = GET_MODE_SIZE (mode);
  ldname = xtensa_ld_opcodes[(int) mode];
  stname = xtensa_st_opcodes[(int) mode];

  while (bytes > 0)
    {
      int n;

      /* Build up to CHUNK_SIZE load/store pairs.  */
      for (n = 0; n < chunk_size; n++)
	{
	  rtx addr, mem;

	  if (bytes == 0)
	    {
	      chunk_size = n;
	      break;
	    }

	  if (bytes < item_size)
	    {
	      /* find a smaller item_size which we can load & store */
	      item_size = bytes;
	      mode = xtensa_find_mode_for_size (item_size);
	      item_size = GET_MODE_SIZE (mode);
	      ldname = xtensa_ld_opcodes[(int) mode];
	      stname = xtensa_st_opcodes[(int) mode];
	    }

	  /* record the load instruction opcode and operands */
	  addr = plus_constant (from_addr, offset);
	  mem = gen_rtx_MEM (mode, addr);
	  if (! memory_address_p (mode, addr))
	    abort ();
	  MEM_IN_STRUCT_P (mem) = from_struct;
	  ldinsns[n].operands[0] = tmpregs[n];
	  ldinsns[n].operands[1] = mem;
	  sprintf (ldinsns[n].template, "%s\t%%0, %%1", ldname);

	  /* record the store instruction opcode and operands */
	  addr = plus_constant (to_addr, offset);
	  mem = gen_rtx_MEM (mode, addr);
	  if (! memory_address_p (mode, addr))
	    abort ();
	  MEM_IN_STRUCT_P (mem) = to_struct;
	  stinsns[n].operands[0] = tmpregs[n];
	  stinsns[n].operands[1] = mem;
	  sprintf (stinsns[n].template, "%s\t%%0, %%1", stname);

	  offset += item_size;
	  bytes -= item_size;
	}

      /* now output the loads followed by the stores, so that each
	 load's delay slot is covered by the loads that follow it */
      for (n = 0; n < chunk_size; n++)
	output_asm_insn (ldinsns[n].template, ldinsns[n].operands);
      for (n = 0; n < chunk_size; n++)
	output_asm_insn (stinsns[n].template, stinsns[n].operands);
    }
}
+
+
+static enum machine_mode
+xtensa_find_mode_for_size (item_size)
+ unsigned item_size;
+{
+ enum machine_mode mode, tmode;
+
+ while (1)
+ {
+ mode = VOIDmode;
+
+ /* find mode closest to but not bigger than item_size */
+ for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
+ tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
+ if (GET_MODE_SIZE (tmode) <= item_size)
+ mode = tmode;
+ if (mode == VOIDmode)
+ abort ();
+
+ item_size = GET_MODE_SIZE (mode);
+
+ if (xtensa_ld_opcodes[(int) mode]
+ && xtensa_st_opcodes[(int) mode])
+ break;
+
+ /* cannot load & store this mode; try something smaller */
+ item_size -= 1;
+ }
+
+ return mode;
+}
+
+
+void
+xtensa_expand_nonlocal_goto (operands)
+ rtx *operands;
+{
+ rtx goto_handler = operands[1];
+ rtx containing_fp = operands[3];
+
+ /* generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
+ is too big to generate in-line */
+
+ if (GET_CODE (containing_fp) != REG)
+ containing_fp = force_reg (Pmode, containing_fp);
+
+ goto_handler = replace_rtx (copy_rtx (goto_handler),
+ virtual_stack_vars_rtx,
+ containing_fp);
+
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_nonlocal_goto"),
+ 0, VOIDmode, 2,
+ containing_fp, Pmode,
+ goto_handler, Pmode);
+}
+
+
+static void
+xtensa_init_machine_status (p)
+ struct function *p;
+{
+ p->machine = (struct machine_function *)
+ xcalloc (1, sizeof (struct machine_function));
+}
+
+
+static void
+xtensa_free_machine_status (p)
+ struct function *p;
+{
+ free (p->machine);
+ p->machine = NULL;
+}
+
+
+void
+xtensa_setup_frame_addresses ()
+{
+ /* Set flag to cause FRAME_POINTER_REQUIRED to be set. */
+ cfun->machine->accesses_prev_frame = 1;
+
+ emit_library_call
+ (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_libgcc_window_spill"),
+ 0, VOIDmode, 0);
+}
+
+
+/* Emit the assembly for the end of a zero-cost loop. Normally we just emit
+ a comment showing where the end of the loop is. However, if there is a
+ label or a branch at the end of the loop then we need to place a nop
+ there. If the loop ends with a label we need the nop so that branches
+ targetting that label will target the nop (and thus remain in the loop),
+ instead of targetting the instruction after the loop (and thus exiting
+ the loop). If the loop ends with a branch, we need the nop in case the
+ branch is targetting a location inside the loop. When the branch
+ executes it will cause the loop count to be decremented even if it is
+ taken (because it is the last instruction in the loop), so we need to
+ nop after the branch to prevent the loop count from being decremented
+ when the branch is taken. */
+
+void
+xtensa_emit_loop_end (insn, operands)
+ rtx insn;
+ rtx *operands;
+{
+ char done = 0;
+
+ for (insn = PREV_INSN (insn); insn && !done; insn = PREV_INSN (insn))
+ {
+ switch (GET_CODE (insn))
+ {
+ case NOTE:
+ case BARRIER:
+ break;
+
+ case CODE_LABEL:
+ output_asm_insn ("nop.n", operands);
+ done = 1;
+ break;
+
+ default:
+ {
+ rtx body = PATTERN (insn);
+
+ if (GET_CODE (body) == JUMP_INSN)
+ {
+ output_asm_insn ("nop.n", operands);
+ done = 1;
+ }
+ else if ((GET_CODE (body) != USE)
+ && (GET_CODE (body) != CLOBBER))
+ done = 1;
+ }
+ break;
+ }
+ }
+
+ output_asm_insn ("# loop end for %0", operands);
+}
+
+
+char *
+xtensa_emit_call (callop, operands)
+ int callop;
+ rtx *operands;
+{
+ char *result = (char *) malloc (64);
+ rtx tgt = operands[callop];
+
+ if (GET_CODE (tgt) == CONST_INT)
+ sprintf (result, "call8\t0x%x", INTVAL (tgt));
+ else if (register_operand (tgt, VOIDmode))
+ sprintf (result, "callx8\t%%%d", callop);
+ else
+ sprintf (result, "call8\t%%%d", callop);
+
+ return result;
+}
+
+
+/* Return the stabs register number to use for 'regno'. */
+
+int
+xtensa_dbx_register_number (regno)
+ int regno;
+{
+ int first = -1;
+
+ if (GP_REG_P (regno)) {
+ regno -= GP_REG_FIRST;
+ first = 0;
+ }
+ else if (BR_REG_P (regno)) {
+ regno -= BR_REG_FIRST;
+ first = 16;
+ }
+ else if (FP_REG_P (regno)) {
+ regno -= FP_REG_FIRST;
+ /* The current numbering convention is that TIE registers are
+ numbered in libcc order beginning with 256. We can't guarantee
+ that the FP registers will come first, so the following is just
+ a guess. It seems like we should make a special case for FP
+ registers and give them fixed numbers < 256. */
+ first = 256;
+ }
+ else if (ACC_REG_P (regno))
+ {
+ first = 0;
+ regno = -1;
+ }
+
+ /* When optimizing, we sometimes get asked about pseudo-registers
+ that don't represent hard registers. Return 0 for these. */
+ if (first == -1)
+ return 0;
+
+ return first + regno;
+}
+
+
+/* Argument support functions. */
+
+/* Initialize CUMULATIVE_ARGS for a function. */
+
+void
+init_cumulative_args (cum, fntype, libname)
+ CUMULATIVE_ARGS *cum; /* argument info to initialize */
+ tree fntype ATTRIBUTE_UNUSED; /* tree ptr for function decl */
+ rtx libname ATTRIBUTE_UNUSED; /* SYMBOL_REF of library name or 0 */
+{
+ cum->arg_words = 0;
+}
+
+/* Advance the argument to the next argument position. */
+
+void
+function_arg_advance (cum, mode, type)
+ CUMULATIVE_ARGS *cum; /* current arg information */
+ enum machine_mode mode; /* current arg mode */
+ tree type; /* type of the argument or 0 if lib support */
+{
+ int words, max;
+ int *arg_words;
+
+ arg_words = &cum->arg_words;
+ max = MAX_ARGS_IN_REGISTERS;
+
+ words = (((mode != BLKmode)
+ ? (int) GET_MODE_SIZE (mode)
+ : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ if ((*arg_words + words > max) && (*arg_words < max))
+ *arg_words = max;
+
+ *arg_words += words;
+}
+
+
/* Return an RTL expression containing the register for the given mode,
   or 0 if the argument is to be passed on the stack.  */

rtx
function_arg (cum, mode, type, incoming_p)
     CUMULATIVE_ARGS *cum;	/* current arg information */
     enum machine_mode mode;	/* current arg mode */
     tree type;			/* type of the argument or 0 if lib support */
     int incoming_p;		/* computing the incoming registers? */
{
  int regbase, words, max;
  int *arg_words;
  int regno;
  enum machine_mode result_mode;

  arg_words = &cum->arg_words;
  regbase = (incoming_p ? GP_ARG_FIRST : GP_OUTGOING_ARG_FIRST);
  max = MAX_ARGS_IN_REGISTERS;

  /* Argument size rounded up to whole words (BLKmode args take their
     size from the type).  */
  words = (((mode != BLKmode)
	    ? (int) GET_MODE_SIZE (mode)
	    : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Doubleword-aligned types start at an even argument word.  */
  if (type && (TYPE_ALIGN (type) > BITS_PER_WORD))
    *arg_words += (*arg_words & 1);

  /* Arguments that do not fit in the remaining registers go entirely
     on the stack.  */
  if (*arg_words + words > max)
    return (rtx)0;

  regno = regbase + *arg_words;
  result_mode = (mode == BLKmode ? TYPE_MODE (type) : mode);

  /* We need to make sure that references to a7 are represented with
     rtx that is not equal to hard_frame_pointer_rtx.  For BLKmode and
     modes bigger than 2 words (because we only have patterns for
     modes of 2 words or smaller), we can't control the expansion
     unless we explicitly list the individual registers in a PARALLEL.  */

  if ((mode == BLKmode || words > 2)
      && regno < A7_REG
      && regno + words > A7_REG)
    {
      rtx result;
      int n;

      /* One EXPR_LIST entry per word, each a distinct raw REG so the
	 a7 word is never the shared hard_frame_pointer_rtx.  */
      result = gen_rtx_PARALLEL (result_mode, rtvec_alloc (words));
      for (n = 0; n < words; n++)
	{
	  XVECEXP (result, 0, n) =
	    gen_rtx_EXPR_LIST (VOIDmode,
			       gen_raw_REG (SImode, regno + n),
			       GEN_INT (n * UNITS_PER_WORD));
	}
      return result;
    }

  return gen_raw_REG (result_mode, regno);
}
+
+
+void
+override_options ()
+{
+ int regno;
+ enum machine_mode mode;
+
+ if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
+ error ("boolean registers required for the floating-point option");
+
+ /* set up the tables of ld/st opcode names for block moves */
+ xtensa_ld_opcodes[(int) SImode] = "l32i";
+ xtensa_ld_opcodes[(int) HImode] = "l16ui";
+ xtensa_ld_opcodes[(int) QImode] = "l8ui";
+ xtensa_st_opcodes[(int) SImode] = "s32i";
+ xtensa_st_opcodes[(int) HImode] = "s16i";
+ xtensa_st_opcodes[(int) QImode] = "s8i";
+
+ xtensa_char_to_class['q'] = SP_REG;
+ xtensa_char_to_class['a'] = GR_REGS;
+ xtensa_char_to_class['b'] = ((TARGET_BOOLEANS) ? BR_REGS : NO_REGS);
+ xtensa_char_to_class['f'] = ((TARGET_HARD_FLOAT) ? FP_REGS : NO_REGS);
+ xtensa_char_to_class['A'] = ((TARGET_MAC16) ? ACC_REG : NO_REGS);
+ xtensa_char_to_class['B'] = ((TARGET_SEXT) ? GR_REGS : NO_REGS);
+ xtensa_char_to_class['C'] = ((TARGET_MUL16) ? GR_REGS: NO_REGS);
+ xtensa_char_to_class['D'] = ((TARGET_DENSITY) ? GR_REGS: NO_REGS);
+ xtensa_char_to_class['d'] = ((TARGET_DENSITY) ? AR_REGS: NO_REGS);
+
+ /* Set up array giving whether a given register can hold a given mode. */
+ for (mode = VOIDmode;
+ mode != MAX_MACHINE_MODE;
+ mode = (enum machine_mode) ((int) mode + 1))
+ {
+ int size = GET_MODE_SIZE (mode);
+ enum mode_class class = GET_MODE_CLASS (mode);
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ int temp;
+
+ if (ACC_REG_P (regno))
+ temp = (TARGET_MAC16 &&
+ (class == MODE_INT) && (size <= UNITS_PER_WORD));
+ else if (GP_REG_P (regno))
+ temp = ((regno & 1) == 0 || (size <= UNITS_PER_WORD));
+ else if (FP_REG_P (regno))
+ temp = (TARGET_HARD_FLOAT && (mode == SFmode));
+ else if (BR_REG_P (regno))
+ temp = (TARGET_BOOLEANS && (mode == CCmode));
+ else
+ temp = FALSE;
+
+ xtensa_hard_regno_mode_ok[(int) mode][regno] = temp;
+ }
+ }
+
+ init_machine_status = xtensa_init_machine_status;
+ free_machine_status = xtensa_free_machine_status;
+
+ /* Check PIC settings. There's no need for -fPIC on Xtensa and
+ some targets need to always use PIC. */
+ if (XTENSA_ALWAYS_PIC)
+ {
+ if (flag_pic)
+ warning ("-f%s ignored (all code is position independent)",
+ (flag_pic > 1 ? "PIC" : "pic"));
+ flag_pic = 1;
+ }
+ if (flag_pic > 1)
+ flag_pic = 1;
+}
+
+
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand X. X is an RTL
+ expression.
+
+ CODE is a value that can be used to specify one of several ways
+ of printing the operand. It is used when identical operands
+ must be printed differently depending on the context. CODE
+ comes from the '%' specification that was used to request
+ printing of the operand. If the specification was just '%DIGIT'
+ then CODE is 0; if the specification was '%LTR DIGIT' then CODE
+ is the ASCII code for LTR.
+
+ If X is a register, this macro should print the register's name.
+ The names can be found in an array 'reg_names' whose type is
+ 'char *[]'. 'reg_names' is initialized from 'REGISTER_NAMES'.
+
+ When the machine description has a specification '%PUNCT' (a '%'
+ followed by a punctuation character), this macro is called with
+ a null pointer for X and the punctuation character for CODE.
+
+ 'a', 'c', 'l', and 'n' are reserved.
+
+ The Xtensa specific codes are:
+
+ 'd' CONST_INT, print as signed decimal
+ 'x' CONST_INT, print as signed hexadecimal
+ 'K' CONST_INT, print number of bits in mask for EXTUI
+ 'R' CONST_INT, print (X & 0x1f)
+ 'L' CONST_INT, print ((32 - X) & 0x1f)
+ 'D' REG, print second register of double-word register operand
+ 'N' MEM, print address of next word following a memory operand
+ 'v' MEM, if memory reference is volatile, output a MEMW before it
+*/
+
/* Print the integer VAL to FILE in a readable form: small magnitudes
   (|val| < 10) in decimal, everything else in hexadecimal with a
   leading minus sign for negative values.  */

static void
printx (file, val)
     FILE *file;
     signed int val;
{
  /* print a hexadecimal value in a nice way */
  if ((val > -0xa) && (val < 0xa))
    fprintf (file, "%d", val);
  else if (val < 0)
    /* Negate in unsigned arithmetic: plain -val is undefined behavior
       when val == INT_MIN.  */
    fprintf (file, "-0x%x", 0U - (unsigned int) val);
  else
    fprintf (file, "0x%x", val);
}
+
+
/* Implement PRINT_OPERAND: output operand OP to FILE, modified by the
   optional code LETTER.  See the large comment above for the list of
   Xtensa-specific codes.  */

void
print_operand (file, op, letter)
     FILE *file;		/* file to write to */
     rtx op;			/* operand to print */
     int letter;		/* %<letter> or 0 */
{
  enum rtx_code code;

  if (! op)
    error ("PRINT_OPERAND null pointer");

  code = GET_CODE (op);
  switch (code)
    {
    case REG:
    case SUBREG:
      {
	int regnum = xt_true_regnum (op);
	/* 'D': second register of a double-word register operand.  */
	if (letter == 'D')
	  regnum++;
	fprintf (file, "%s", reg_names[regnum]);
	break;
      }

    case MEM:
      /*
       * For a volatile memory reference, emit a MEMW before the
       * load or store.
       */
      if (letter == 'v')
	{
	  if (MEM_VOLATILE_P (op) && TARGET_SERIALIZE_VOLATILE)
	    fprintf (file, "memw\n\t");
	  break;
	}
      else if (letter == 'N')
	/* 'N': address of the word following this memory operand.  */
	op = adjust_address (op, GET_MODE (op), 4);

      output_address (XEXP (op, 0));
      break;

    case CONST_INT:
      switch (letter)
	{
	case 'K':
	  /* Number of low-order one bits; the mask must be a
	     contiguous run of 1-16 ones starting at bit 0 (the EXTUI
	     field width).  */
	  {
	    int num_bits = 0;
	    unsigned val = INTVAL (op);
	    while (val & 1)
	      {
		num_bits += 1;
		val = val >> 1;
	      }
	    if ((val != 0) || (num_bits == 0) || (num_bits > 16))
	      fatal_insn ("invalid mask", op);

	    fprintf (file, "%d", num_bits);
	    break;
	  }

	case 'L':
	  /* (32 - X) mod 32, e.g. for the complementary shift count.  */
	  fprintf (file, "%d", (32 - INTVAL (op)) & 0x1f);
	  break;

	case 'R':
	  /* X mod 32, a 5-bit shift amount.  */
	  fprintf (file, "%d", INTVAL (op) & 0x1f);
	  break;

	case 'x':
	  printx (file, INTVAL (op));
	  break;

	case 'd':
	default:
	  /* NOTE(review): INTVAL is a HOST_WIDE_INT; "%d" assumes it
	     fits in an int -- confirm for hosts with 64-bit
	     HOST_WIDE_INT.  */
	  fprintf (file, "%d", INTVAL (op));
	  break;

	}
      break;

    default:
      /* Symbolic constants, labels, etc.  */
      output_addr_const (file, op);
    }
}
+
+
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand that is a memory
+ reference whose address is ADDR. ADDR is an RTL expression.
+
+ On some machines, the syntax for a symbolic address depends on
+ the section that the address refers to. On these machines,
+ define the macro 'ENCODE_SECTION_INFO' to store the information
+ into the 'symbol_ref', and then check for it here. */
+
+void
+print_operand_address (file, addr)
+ FILE *file;
+ rtx addr;
+{
+ if (!addr)
+ error ("PRINT_OPERAND_ADDRESS, null pointer");
+
+ switch (GET_CODE (addr))
+ {
+ default:
+ fatal_insn ("invalid address", addr);
+ break;
+
+ case REG:
+ fprintf (file, "%s, 0", reg_names [REGNO (addr)]);
+ break;
+
+ case PLUS:
+ {
+ rtx reg = (rtx)0;
+ rtx offset = (rtx)0;
+ rtx arg0 = XEXP (addr, 0);
+ rtx arg1 = XEXP (addr, 1);
+
+ if (GET_CODE (arg0) == REG)
+ {
+ reg = arg0;
+ offset = arg1;
+ }
+ else if (GET_CODE (arg1) == REG)
+ {
+ reg = arg1;
+ offset = arg0;
+ }
+ else
+ fatal_insn ("no register in address", addr);
+
+ if (CONSTANT_P (offset))
+ {
+ fprintf (file, "%s, ", reg_names [REGNO (reg)]);
+ output_addr_const (file, offset);
+ }
+ else
+ fatal_insn ("address offset not a constant", addr);
+ }
+ break;
+
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_INT:
+ case CONST:
+ output_addr_const (file, addr);
+ break;
+ }
+}
+
+
/* Emit either a label, .comm, or .lcomm directive.  */

void
xtensa_declare_object (file, name, init_string, final_string, size)
     FILE *file;
     char *name;
     char *init_string;		/* "", "\t.comm\t", or "\t.lcomm\t" */
     char *final_string;	/* ":\n", ",%u\n", or ",%u\n" */
     int size;
{
  fputs (init_string, file);
  assemble_name (file, name);
  /* FINAL_STRING is a printf format that may consume SIZE.  */
  fprintf (file, final_string, size);
}
+
+
/* Output a literal-pool entry for constant X of mode MODE, labeled
   .LC<labelno>.  Floating-point constants are emitted as raw target
   words with a human-readable comment; integer constants are emitted
   via output_addr_const.  */

void
xtensa_output_literal (file, x, mode, labelno)
     FILE *file;
     rtx x;
     enum machine_mode mode;
     int labelno;
{
  long value_long[2];
  union real_extract u;		/* host-side view of the target real value */
  int size;

  fprintf (file, "\t.literal .LC%u, ", (unsigned) labelno);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_FLOAT:
      if (GET_CODE (x) != CONST_DOUBLE)
	abort ();

      /* Copy the CONST_DOUBLE payload into U so it can be accessed as
	 a real value (u.d) and converted to the target format.  */
      memcpy ((char *) &u, (char *) &CONST_DOUBLE_LOW (x), sizeof u);
      switch (mode)
	{
	case SFmode:
	  REAL_VALUE_TO_TARGET_SINGLE (u.d, value_long[0]);
	  fprintf (file, "0x%08lx\t\t# %.12g (float)\n", value_long[0], u.d);
	  break;

	case DFmode:
	  REAL_VALUE_TO_TARGET_DOUBLE (u.d, value_long);
	  fprintf (file, "0x%08lx, 0x%08lx # %.20g (double)\n",
		   value_long[0], value_long[1], u.d);
	  break;

	default:
	  abort ();
	}

      break;

    case MODE_INT:
    case MODE_PARTIAL_INT:
      size = GET_MODE_SIZE (mode);
      if (size == 4)
	{
	  output_addr_const (file, x);
	  fputs ("\n", file);
	}
      else if (size == 8)
	{
	  /* Emit the two words of a doubleword constant separately.  */
	  output_addr_const (file, operand_subword (x, 0, 0, DImode));
	  fputs (", ", file);
	  output_addr_const (file, operand_subword (x, 1, 0, DImode));
	  fputs ("\n", file);
	}
      else
	abort ();
      break;

    default:
      abort ();
    }
}
+
+
+/* Return the bytes needed to compute the frame pointer from the current
+ stack pointer. */
+
+#define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
+#define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
+
+long
+compute_frame_size (size)
+ int size; /* # of var. bytes allocated */
+{
+ /* add space for the incoming static chain value */
+ if (current_function_needs_context)
+ size += (1 * UNITS_PER_WORD);
+
+ xtensa_current_frame_size =
+ XTENSA_STACK_ALIGN (size
+ + current_function_outgoing_args_size
+ + (WINDOW_SIZE * UNITS_PER_WORD));
+ return xtensa_current_frame_size;
+}
+
+
+int
+xtensa_frame_pointer_required ()
+{
+ /* The code to expand builtin_frame_addr and builtin_return_addr
+ currently uses the hard_frame_pointer instead of frame_pointer.
+ This seems wrong but maybe it's necessary for other architectures.
+ This function is derived from the i386 code. */
+
+ if (cfun->machine->accesses_prev_frame)
+ return 1;
+
+ return 0;
+}
+
+
+void
+xtensa_reorg (first)
+ rtx first;
+{
+ rtx insn, set_frame_ptr_insn = 0;
+
+ unsigned long tsize = compute_frame_size (get_frame_size ());
+ if (tsize < (1 << (12+3)))
+ frame_size_const = 0;
+ else
+ {
+ frame_size_const = force_const_mem (SImode, GEN_INT (tsize - 16));;
+
+ /* make sure the constant is used so it doesn't get eliminated
+ from the constant pool */
+ emit_insn_before (gen_rtx_USE (SImode, frame_size_const), first);
+ }
+
+ if (!frame_pointer_needed)
+ return;
+
+ /* Search all instructions, looking for the insn that sets up the
+ frame pointer. This search will fail if the function does not
+ have an incoming argument in $a7, but in that case, we can just
+ set up the frame pointer at the very beginning of the
+ function. */
+
+ for (insn = first; insn; insn = NEXT_INSN (insn))
+ {
+ rtx pat;
+
+ if (!INSN_P (insn))
+ continue;
+
+ pat = PATTERN (insn);
+ if (GET_CODE (pat) == UNSPEC_VOLATILE
+ && (XINT (pat, 1) == UNSPECV_SET_FP))
+ {
+ set_frame_ptr_insn = insn;
+ break;
+ }
+ }
+
+ if (set_frame_ptr_insn)
+ {
+ /* for all instructions prior to set_frame_ptr_insn, replace
+ hard_frame_pointer references with stack_pointer */
+ for (insn = first; insn != set_frame_ptr_insn; insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn))
+ PATTERN (insn) = replace_rtx (copy_rtx (PATTERN (insn)),
+ hard_frame_pointer_rtx,
+ stack_pointer_rtx);
+ }
+ }
+ else
+ {
+ /* emit the frame pointer move immediately after the NOTE that starts
+ the function */
+ emit_insn_after (gen_movsi (hard_frame_pointer_rtx,
+ stack_pointer_rtx), first);
+ }
+}
+
+
+/* Set up the stack and frame (if desired) for the function. */
+
+void
+xtensa_function_prologue (file, size)
+ FILE *file;
+ int size ATTRIBUTE_UNUSED;
+{
+ unsigned long tsize = compute_frame_size (get_frame_size ());
+
+ if (frame_pointer_needed)
+ fprintf (file, "\t.frame\ta7, %ld\n", tsize);
+ else
+ fprintf (file, "\t.frame\tsp, %ld\n", tsize);
+
+
+ if (tsize < (1 << (12+3)))
+ {
+ fprintf (file, "\tentry\tsp, %ld\n", tsize);
+ }
+ else
+ {
+ fprintf (file, "\tentry\tsp, 16\n");
+
+ /* use a8 as a temporary since a0-a7 may be live */
+ fprintf (file, "\tl32r\ta8, ");
+ print_operand (file, frame_size_const, 0);
+ fprintf (file, "\n\tsub\ta8, sp, a8\n");
+ fprintf (file, "\tmovsp\tsp, a8\n");
+ }
+}
+
+
+/* Do any necessary cleanup after a function to restore
+ stack, frame, and regs. */
+
+void
+xtensa_function_epilogue (file, size)
+ FILE *file;
+ int size ATTRIBUTE_UNUSED;
+{
+ rtx insn = get_last_insn ();
+ /* If the last insn was a BARRIER, we don't have to write anything. */
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn == 0 || GET_CODE (insn) != BARRIER)
+ fprintf (file, TARGET_DENSITY ? "\tretw.n\n" : "\tretw\n");
+
+ xtensa_current_frame_size = 0;
+}
+
+
+/* Create the va_list data type.
+ This structure is set up by __builtin_saveregs. The __va_reg
+ field points to a stack-allocated region holding the contents of the
+ incoming argument registers. The __va_ndx field is an index initialized
+ to the position of the first unnamed (variable) argument. This same index
+ is also used to address the arguments passed in memory. Thus, the
+ __va_stk field is initialized to point to the position of the first
+ argument in memory offset to account for the arguments passed in
+ registers. E.G., if there are 6 argument registers, and each register is
+ 4 bytes, then __va_stk is set to $sp - (6 * 4); then __va_reg[N*4]
+ references argument word N for 0 <= N < 6, and __va_stk[N*4] references
+ argument word N for N >= 6. */
+
+tree
+xtensa_build_va_list (void)
+{
+ tree f_stk, f_reg, f_ndx, record;
+
+ record = make_node (RECORD_TYPE);
+
+ f_stk = build_decl (FIELD_DECL, get_identifier ("__va_stk"),
+ ptr_type_node);
+ f_reg = build_decl (FIELD_DECL, get_identifier ("__va_reg"),
+ ptr_type_node);
+ f_ndx = build_decl (FIELD_DECL, get_identifier ("__va_ndx"),
+ integer_type_node);
+
+ DECL_FIELD_CONTEXT (f_stk) = record;
+ DECL_FIELD_CONTEXT (f_reg) = record;
+ DECL_FIELD_CONTEXT (f_ndx) = record;
+
+ TYPE_FIELDS (record) = f_stk;
+ TREE_CHAIN (f_stk) = f_reg;
+ TREE_CHAIN (f_reg) = f_ndx;
+
+ layout_type (record);
+ return record;
+}
+
+
/* Save the incoming argument registers on the stack.  Returns the
   address of the saved registers.  */

rtx
xtensa_builtin_saveregs ()
{
  rtx gp_regs, dest;
  int arg_words = current_function_arg_words;	/* words taken by named args */
  int gp_left = MAX_ARGS_IN_REGISTERS - arg_words; /* registers left to save */
  int i;

  /* All argument registers already hold named arguments.  */
  if (gp_left == 0)
    return const0_rtx;

  /* allocate the general-purpose register space */
  gp_regs = assign_stack_local
    (BLKmode, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1);
  MEM_IN_STRUCT_P (gp_regs) = 1;
  RTX_UNCHANGING_P (gp_regs) = 1;
  RTX_UNCHANGING_P (XEXP (gp_regs, 0)) = 1;

  /* Now store the incoming registers.  The save area is indexed by
     register number, so skip the slots of the named arguments.  */
  dest = change_address (gp_regs, SImode,
			 plus_constant (XEXP (gp_regs, 0),
					arg_words * UNITS_PER_WORD));

  /* Note: Don't use move_block_from_reg() here because the incoming
     argument in a7 cannot be represented by hard_frame_pointer_rtx.
     Instead, call gen_raw_REG() directly so that we get a distinct
     instance of (REG:SI 7).  */
  for (i = 0; i < gp_left; i++)
    {
      emit_move_insn (operand_subword (dest, i, 1, BLKmode),
		      gen_raw_REG (SImode, GP_ARG_FIRST + arg_words + i));
    }

  return XEXP (gp_regs, 0);
}
+
+
/* Implement `va_start' for varargs and stdarg.  We look at the
   current function to fill in an initial va_list.  */

void
xtensa_va_start (stdarg_p, valist, nextarg)
     int stdarg_p ATTRIBUTE_UNUSED;
     tree valist;
     rtx nextarg ATTRIBUTE_UNUSED;
{
  tree f_stk, stk;
  tree f_reg, reg;
  tree f_ndx, ndx;
  tree t, u;
  int arg_words;

  /* Number of argument words consumed by the named parameters.  */
  arg_words = current_function_args_info.arg_words;

  /* Locate the three va_list fields (see xtensa_build_va_list).  */
  f_stk = TYPE_FIELDS (va_list_type_node);
  f_reg = TREE_CHAIN (f_stk);
  f_ndx = TREE_CHAIN (f_reg);

  stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
  reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
  ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);

  /* Call __builtin_saveregs; save the result in __va_reg */
  current_function_arg_words = arg_words;
  u = make_tree (ptr_type_node, expand_builtin_saveregs ());
  t = build (MODIFY_EXPR, ptr_type_node, reg, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* Set the __va_stk member to $arg_ptr - (size of __va_reg area),
     so that __va_ndx indexes both areas consistently.  */
  u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
  u = fold (build (PLUS_EXPR, ptr_type_node, u,
		   build_int_2 (-MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1)));
  t = build (MODIFY_EXPR, ptr_type_node, stk, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* Set the __va_ndx member: start at the first unnamed argument.  */
  u = build_int_2 (arg_words * UNITS_PER_WORD, 0);
  t = build (MODIFY_EXPR, integer_type_node, ndx, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
+
+
+/* Implement `va_arg'. */
+
+rtx
+xtensa_va_arg (valist, type)
+ tree valist, type;
+{
+ tree f_stk, stk;
+ tree f_reg, reg;
+ tree f_ndx, ndx;
+ tree tmp, addr_tree;
+ rtx array, orig_ndx, r, addr;
+ HOST_WIDE_INT size, va_size;
+ rtx lab_false, lab_over, lab_false2;
+
+ size = int_size_in_bytes (type);
+ va_size = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
+
+ f_stk = TYPE_FIELDS (va_list_type_node);
+ f_reg = TREE_CHAIN (f_stk);
+ f_ndx = TREE_CHAIN (f_reg);
+
+ stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
+ reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
+ ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);
+
+
+ /* First align __va_ndx to a double word boundary if necessary for this arg:
+
+ if (__alignof__ (TYPE) > 4)
+ (AP).__va_ndx = (((AP).__va_ndx + 7) & -8)
+ */
+
+ if (TYPE_ALIGN (type) > BITS_PER_WORD)
+ {
+ tmp = build (PLUS_EXPR, integer_type_node, ndx,
+ build_int_2 ((2 * UNITS_PER_WORD) - 1, 0));
+ tmp = build (BIT_AND_EXPR, integer_type_node, tmp,
+ build_int_2 (-2 * UNITS_PER_WORD, -1));
+ tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
+ TREE_SIDE_EFFECTS (tmp) = 1;
+ expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+
+ /* Increment __va_ndx to point past the argument:
+
+ orig_ndx = (AP).__va_ndx;
+ (AP).__va_ndx += __va_size (TYPE);
+ */
+
+ orig_ndx = gen_reg_rtx (SImode);
+ r = expand_expr (ndx, orig_ndx, SImode, EXPAND_NORMAL);
+ if (r != orig_ndx)
+ emit_move_insn (orig_ndx, r);
+
+ tmp = build (PLUS_EXPR, integer_type_node, ndx, build_int_2 (va_size, 0));
+ tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
+ TREE_SIDE_EFFECTS (tmp) = 1;
+ expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+
+ /* Check if the argument is in registers:
+
+ if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4)
+ __array = (AP).__va_reg;
+ */
+
+ lab_false = gen_label_rtx ();
+ lab_over = gen_label_rtx ();
+ array = gen_reg_rtx (Pmode);
+
+ emit_cmp_and_jump_insns (expand_expr (ndx, NULL_RTX, SImode, EXPAND_NORMAL),
+ GEN_INT (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD),
+ GT, const1_rtx, SImode, 0, lab_false);
+
+ r = expand_expr (reg, array, Pmode, EXPAND_NORMAL);
+ if (r != array)
+ emit_move_insn (array, r);
+
+ emit_jump_insn (gen_jump (lab_over));
+ emit_barrier ();
+ emit_label (lab_false);
+
+
+ /* ...otherwise, the argument is on the stack (never split between
+ registers and the stack -- change __va_ndx if necessary):
+
+ else
+ {
+ if (orig_ndx < __MAX_ARGS_IN_REGISTERS * 4)
+ (AP).__va_ndx = __MAX_ARGS_IN_REGISTERS * 4 + __va_size (TYPE);
+ __array = (AP).__va_stk;
+ }
+ */
+
+ lab_false2 = gen_label_rtx ();
+ emit_cmp_and_jump_insns (orig_ndx,
+ GEN_INT (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD),
+ GE, const1_rtx, SImode, 0, lab_false2);
+
+ tmp = build_int_2 ((MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD) + va_size, 0);
+ tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
+ TREE_SIDE_EFFECTS (tmp) = 1;
+ expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ emit_label (lab_false2);
+
+ r = expand_expr (stk, array, Pmode, EXPAND_NORMAL);
+ if (r != array)
+ emit_move_insn (array, r);
+
+
+ /* Given the base array pointer (__array) and index to the subsequent
+ argument (__va_ndx), find the address:
+
+ Big-endian:
+ __array + (AP).__va_ndx - sizeof (TYPE)
+
+ Little-endian:
+ __array + (AP).__va_ndx - __va_size (TYPE)
+
+ The results are endian-dependent because values smaller than one word
+ are aligned differently.
+ */
+
+ emit_label (lab_over);
+
+ addr_tree = build (PLUS_EXPR, ptr_type_node,
+ make_tree (ptr_type_node, array),
+ ndx);
+ addr_tree = build (PLUS_EXPR, ptr_type_node,
+ addr_tree,
+ build_int_2 (BYTES_BIG_ENDIAN
+ && size < (PARM_BOUNDARY / BITS_PER_UNIT)
+ ? -size
+ : -va_size, -1));
+ addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
+ addr = copy_to_reg (addr);
+ return addr;
+}
+
+
+enum reg_class
+xtensa_secondary_reload_class (class, mode, x, isoutput)
+ enum reg_class class;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ rtx x;
+ int isoutput;
+{
+ int regno;
+
+ if (GET_CODE (x) == SIGN_EXTEND)
+ x = XEXP (x, 0);
+ regno = xt_true_regnum (x);
+
+ if (!isoutput)
+ {
+ if (class == FP_REGS && constantpool_mem_p (x))
+ return GR_REGS;
+ }
+
+ if (ACC_REG_P (regno))
+ return (class == GR_REGS ? NO_REGS : GR_REGS);
+ if (class == ACC_REG)
+ return (GP_REG_P (regno) ? NO_REGS : GR_REGS);
+
+ return NO_REGS;
+}
+
+
+void
+order_regs_for_local_alloc ()
+{
+ if (!leaf_function_p ())
+ {
+ memcpy (reg_alloc_order, reg_nonleaf_alloc_order,
+ FIRST_PSEUDO_REGISTER * sizeof (int));
+ }
+ else
+ {
+ int i, num_arg_regs;
+ int nxt = 0;
+
+ /* use the AR registers in increasing order (skipping a0 and a1)
+ but save the incoming argument registers for a last resort */
+ num_arg_regs = current_function_args_info.arg_words;
+ if (num_arg_regs > MAX_ARGS_IN_REGISTERS)
+ num_arg_regs = MAX_ARGS_IN_REGISTERS;
+ for (i = GP_ARG_FIRST; i < 16 - num_arg_regs; i++)
+ reg_alloc_order[nxt++] = i + num_arg_regs;
+ for (i = 0; i < num_arg_regs; i++)
+ reg_alloc_order[nxt++] = GP_ARG_FIRST + i;
+
+ /* list the FP registers in order for now */
+ for (i = 0; i < 16; i++)
+ reg_alloc_order[nxt++] = FP_REG_FIRST + i;
+
+ /* GCC requires that we list *all* the registers.... */
+ reg_alloc_order[nxt++] = 0; /* a0 = return address */
+ reg_alloc_order[nxt++] = 1; /* a1 = stack pointer */
+ reg_alloc_order[nxt++] = 16; /* pseudo frame pointer */
+ reg_alloc_order[nxt++] = 17; /* pseudo arg pointer */
+
+ /* list the coprocessor registers in order */
+ for (i = 0; i < BR_REG_NUM; i++)
+ reg_alloc_order[nxt++] = BR_REG_FIRST + i;
+
+ reg_alloc_order[nxt++] = ACC_REG_FIRST; /* MAC16 accumulator */
+ }
+}
+
+
+/* A customized version of reg_overlap_mentioned_p that only looks for
+ references to a7 (as opposed to hard_frame_pointer_rtx). */
+
+int
+a7_overlap_mentioned_p (x)
+ rtx x;
+{
+ int i, j;
+ unsigned int x_regno;
+ const char *fmt;
+
+ if (GET_CODE (x) == REG)
+ {
+ x_regno = REGNO (x);
+ return (x != hard_frame_pointer_rtx
+ && x_regno < A7_REG + 1
+ && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
+ }
+
+ if (GET_CODE (x) == SUBREG
+ && GET_CODE (SUBREG_REG (x)) == REG
+ && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
+ {
+ x_regno = subreg_regno (x);
+ return (SUBREG_REG (x) != hard_frame_pointer_rtx
+ && x_regno < A7_REG + 1
+ && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
+ }
+
+ /* X does not match, so try its subexpressions. */
+ fmt = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'e')
+ {
+ if (a7_overlap_mentioned_p (XEXP (x, i)))
+ return 1;
+ }
+ else if (fmt[i] == 'E')
+ {
+ for (j = XVECLEN (x, i) - 1; j >=0; j--)
+ if (a7_overlap_mentioned_p (XVECEXP (x, i, j)))
+ return 1;
+ }
+ }
+
+ return 0;
+}
diff --git a/gcc/config/xtensa/xtensa.h b/gcc/config/xtensa/xtensa.h
new file mode 100644
index 00000000000..0cfbdb24d7e
--- /dev/null
+++ b/gcc/config/xtensa/xtensa.h
@@ -0,0 +1,1701 @@
+/* Definitions of Tensilica's Xtensa target machine for GNU compiler.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING. If not, write to the Free
+Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+02111-1307, USA. */
+
+/* Get Xtensa configuration settings */
+#include "xtensa/xtensa-config.h"
+
+/* Standard GCC variables that we reference. */
+extern int current_function_calls_alloca;
+extern int target_flags;
+extern int optimize;
+
+/* External variables defined in xtensa.c. */
+
+/* comparison type */
+enum cmp_type {
+ CMP_SI, /* four byte integers */
+ CMP_DI, /* eight byte integers */
+ CMP_SF, /* single precision floats */
+ CMP_DF, /* double precision floats */
+ CMP_MAX /* max comparison type */
+};
+
+extern struct rtx_def * branch_cmp[2]; /* operands for compare */
+extern enum cmp_type branch_type; /* what type of branch to use */
+extern unsigned xtensa_current_frame_size;
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+#define MASK_BIG_ENDIAN 0x00000001 /* big or little endian */
+#define MASK_DENSITY 0x00000002 /* code density option */
+#define MASK_MAC16 0x00000004 /* MAC16 option */
+#define MASK_MUL16 0x00000008 /* 16-bit integer multiply */
+#define MASK_MUL32 0x00000010 /* integer multiply/divide */
+#define MASK_DIV32 0x00000020 /* integer multiply/divide */
+#define MASK_NSA 0x00000040 /* nsa instruction option */
+#define MASK_MINMAX 0x00000080 /* min/max instructions */
+#define MASK_SEXT 0x00000100 /* sign extend insn option */
+#define MASK_BOOLEANS 0x00000200 /* boolean register option */
+#define MASK_HARD_FLOAT 0x00000400 /* floating-point option */
+#define MASK_HARD_FLOAT_DIV 0x00000800 /* floating-point divide */
+#define MASK_HARD_FLOAT_RECIP 0x00001000 /* floating-point reciprocal */
+#define MASK_HARD_FLOAT_SQRT 0x00002000 /* floating-point sqrt */
+#define MASK_HARD_FLOAT_RSQRT 0x00004000 /* floating-point recip sqrt */
+#define MASK_NO_FUSED_MADD 0x00008000 /* avoid f-p mul/add */
+#define MASK_SERIALIZE_VOLATILE 0x00010000 /* serialize volatile refs */
+
+/* Macros used in the machine description to test the flags. */
+
+#define TARGET_BIG_ENDIAN (target_flags & MASK_BIG_ENDIAN)
+#define TARGET_DENSITY (target_flags & MASK_DENSITY)
+#define TARGET_MAC16 (target_flags & MASK_MAC16)
+#define TARGET_MUL16 (target_flags & MASK_MUL16)
+#define TARGET_MUL32 (target_flags & MASK_MUL32)
+#define TARGET_DIV32 (target_flags & MASK_DIV32)
+#define TARGET_NSA (target_flags & MASK_NSA)
+#define TARGET_MINMAX (target_flags & MASK_MINMAX)
+#define TARGET_SEXT (target_flags & MASK_SEXT)
+#define TARGET_BOOLEANS (target_flags & MASK_BOOLEANS)
+#define TARGET_HARD_FLOAT (target_flags & MASK_HARD_FLOAT)
+#define TARGET_HARD_FLOAT_DIV (target_flags & MASK_HARD_FLOAT_DIV)
+#define TARGET_HARD_FLOAT_RECIP (target_flags & MASK_HARD_FLOAT_RECIP)
+#define TARGET_HARD_FLOAT_SQRT (target_flags & MASK_HARD_FLOAT_SQRT)
+#define TARGET_HARD_FLOAT_RSQRT (target_flags & MASK_HARD_FLOAT_RSQRT)
+#define TARGET_NO_FUSED_MADD (target_flags & MASK_NO_FUSED_MADD)
+#define TARGET_SERIALIZE_VOLATILE (target_flags & MASK_SERIALIZE_VOLATILE)
+
+/* Default target_flags if no switches are specified */
+
+#define TARGET_DEFAULT ( \
+ (XCHAL_HAVE_BE ? MASK_BIG_ENDIAN : 0) | \
+ (XCHAL_HAVE_DENSITY ? MASK_DENSITY : 0) | \
+ (XCHAL_HAVE_MAC16 ? MASK_MAC16 : 0) | \
+ (XCHAL_HAVE_MUL16 ? MASK_MUL16 : 0) | \
+ (XCHAL_HAVE_MUL32 ? MASK_MUL32 : 0) | \
+ (XCHAL_HAVE_DIV32 ? MASK_DIV32 : 0) | \
+ (XCHAL_HAVE_NSA ? MASK_NSA : 0) | \
+ (XCHAL_HAVE_MINMAX ? MASK_MINMAX : 0) | \
+ (XCHAL_HAVE_SEXT ? MASK_SEXT : 0) | \
+ (XCHAL_HAVE_BOOLEANS ? MASK_BOOLEANS : 0) | \
+ (XCHAL_HAVE_FP ? MASK_HARD_FLOAT : 0) | \
+ (XCHAL_HAVE_FP_DIV ? MASK_HARD_FLOAT_DIV : 0) | \
+ (XCHAL_HAVE_FP_RECIP ? MASK_HARD_FLOAT_RECIP : 0) | \
+ (XCHAL_HAVE_FP_SQRT ? MASK_HARD_FLOAT_SQRT : 0) | \
+ (XCHAL_HAVE_FP_RSQRT ? MASK_HARD_FLOAT_RSQRT : 0) | \
+ MASK_SERIALIZE_VOLATILE)
+
+/* Macro to define tables used to set the flags. */
+
+#define TARGET_SWITCHES \
+{ \
+ {"big-endian", MASK_BIG_ENDIAN, \
+ N_("Use big-endian byte order")}, \
+ {"little-endian", -MASK_BIG_ENDIAN, \
+ N_("Use little-endian byte order")}, \
+ {"density", MASK_DENSITY, \
+ N_("Use the Xtensa code density option")}, \
+ {"no-density", -MASK_DENSITY, \
+ N_("Do not use the Xtensa code density option")}, \
+ {"mac16", MASK_MAC16, \
+ N_("Use the Xtensa MAC16 option")}, \
+ {"no-mac16", -MASK_MAC16, \
+ N_("Do not use the Xtensa MAC16 option")}, \
+ {"mul16", MASK_MUL16, \
+ N_("Use the Xtensa MUL16 option")}, \
+ {"no-mul16", -MASK_MUL16, \
+ N_("Do not use the Xtensa MUL16 option")}, \
+ {"mul32", MASK_MUL32, \
+ N_("Use the Xtensa MUL32 option")}, \
+ {"no-mul32", -MASK_MUL32, \
+ N_("Do not use the Xtensa MUL32 option")}, \
+ {"div32", MASK_DIV32, \
+ 0 /* undocumented */}, \
+ {"no-div32", -MASK_DIV32, \
+ 0 /* undocumented */}, \
+ {"nsa", MASK_NSA, \
+ N_("Use the Xtensa NSA option")}, \
+ {"no-nsa", -MASK_NSA, \
+ N_("Do not use the Xtensa NSA option")}, \
+ {"minmax", MASK_MINMAX, \
+ N_("Use the Xtensa MIN/MAX option")}, \
+ {"no-minmax", -MASK_MINMAX, \
+ N_("Do not use the Xtensa MIN/MAX option")}, \
+ {"sext", MASK_SEXT, \
+ N_("Use the Xtensa SEXT option")}, \
+ {"no-sext", -MASK_SEXT, \
+ N_("Do not use the Xtensa SEXT option")}, \
+ {"booleans", MASK_BOOLEANS, \
+ N_("Use the Xtensa boolean register option")}, \
+ {"no-booleans", -MASK_BOOLEANS, \
+ N_("Do not use the Xtensa boolean register option")}, \
+ {"hard-float", MASK_HARD_FLOAT, \
+ N_("Use the Xtensa floating-point unit")}, \
+ {"soft-float", -MASK_HARD_FLOAT, \
+ N_("Do not use the Xtensa floating-point unit")}, \
+ {"hard-float-div", MASK_HARD_FLOAT_DIV, \
+ 0 /* undocumented */}, \
+ {"no-hard-float-div", -MASK_HARD_FLOAT_DIV, \
+ 0 /* undocumented */}, \
+ {"hard-float-recip", MASK_HARD_FLOAT_RECIP, \
+ 0 /* undocumented */}, \
+ {"no-hard-float-recip", -MASK_HARD_FLOAT_RECIP, \
+ 0 /* undocumented */}, \
+ {"hard-float-sqrt", MASK_HARD_FLOAT_SQRT, \
+ 0 /* undocumented */}, \
+ {"no-hard-float-sqrt", -MASK_HARD_FLOAT_SQRT, \
+ 0 /* undocumented */}, \
+ {"hard-float-rsqrt", MASK_HARD_FLOAT_RSQRT, \
+ 0 /* undocumented */}, \
+ {"no-hard-float-rsqrt", -MASK_HARD_FLOAT_RSQRT, \
+ 0 /* undocumented */}, \
+ {"no-fused-madd", MASK_NO_FUSED_MADD, \
+ N_("Disable fused multiply/add and multiply/subtract FP instructions")}, \
+ {"fused-madd", -MASK_NO_FUSED_MADD, \
+ N_("Enable fused multiply/add and multiply/subtract FP instructions")}, \
+ {"serialize-volatile", MASK_SERIALIZE_VOLATILE, \
+ N_("Serialize volatile memory references with MEMW instructions")}, \
+ {"no-serialize-volatile", -MASK_SERIALIZE_VOLATILE, \
+ N_("Do not serialize volatile memory references with MEMW instructions")},\
+ {"text-section-literals", 0, \
+ N_("Intersperse literal pools with code in the text section")}, \
+ {"no-text-section-literals", 0, \
+ N_("Put literal pools in a separate literal section")}, \
+ {"target-align", 0, \
+ N_("Automatically align branch targets to reduce branch penalties")}, \
+ {"no-target-align", 0, \
+ N_("Do not automatically align branch targets")}, \
+ {"longcalls", 0, \
+ N_("Use indirect CALLXn instructions for large programs")}, \
+ {"no-longcalls", 0, \
+ N_("Use direct CALLn instructions for fast calls")}, \
+ {"", TARGET_DEFAULT, 0} \
+}
+
+
+#define OVERRIDE_OPTIONS override_options ()
+
+#if XCHAL_HAVE_BE
+#define CPP_ENDIAN_SPEC "\
+ %{mlittle-endian:-D__XTENSA_EL__} \
+ %{!mlittle-endian:-D__XTENSA_EB__} "
+#else /* !XCHAL_HAVE_BE */
+#define CPP_ENDIAN_SPEC "\
+ %{mbig-endian:-D__XTENSA_EB__} \
+ %{!mbig-endian:-D__XTENSA_EL__} "
+#endif /* !XCHAL_HAVE_BE */
+
+#if XCHAL_HAVE_FP
+#define CPP_FLOAT_SPEC "%{msoft-float:-D__XTENSA_SOFT_FLOAT__}"
+#else
+#define CPP_FLOAT_SPEC "%{!mhard-float:-D__XTENSA_SOFT_FLOAT__}"
+#endif
+
+#undef CPP_SPEC
+#define CPP_SPEC CPP_ENDIAN_SPEC CPP_FLOAT_SPEC
+
+/* Define this to set the endianness to use in libgcc2.c, which can
+ not depend on target_flags. */
+#define LIBGCC2_WORDS_BIG_ENDIAN XCHAL_HAVE_BE
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+
+/* Target machine storage layout */
+
+/* Define in order to support both big and little endian float formats
+ in the same gcc binary. */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+
+/* Define this if most significant word of a multiword number is the lowest. */
+#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+
+/* Number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register. */
+#define BITS_PER_WORD 32
+#define MAX_BITS_PER_WORD 32
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 4
+#define MIN_UNITS_PER_WORD 4
+
+/* Width of a floating point register. */
+#define UNITS_PER_FPREG 4
+
+/* Size in bits of various types on the target machine. */
+#define INT_TYPE_SIZE 32
+#define MAX_INT_TYPE_SIZE 32
+#define SHORT_TYPE_SIZE 16
+#define LONG_TYPE_SIZE 32
+#define MAX_LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+#define POINTER_SIZE 32
+
+/* Tell the preprocessor the maximum size of wchar_t. */
+#ifndef MAX_WCHAR_TYPE_SIZE
+#ifndef WCHAR_TYPE_SIZE
+#define MAX_WCHAR_TYPE_SIZE MAX_INT_TYPE_SIZE
+#endif
+#endif
+
+/* Allocation boundary (in *bits*) for storing pointers in memory. */
+#define POINTER_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY 32
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* Alignment of field after 'int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* There is no point aligning anything to a rounder boundary than this. */
+#define BIGGEST_ALIGNMENT 128
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Promote integer modes smaller than a word to SImode. Set UNSIGNEDP
+ for QImode, because there is no 8-bit load from memory with sign
+ extension. Otherwise, leave UNSIGNEDP alone, since Xtensa has 16-bit
+ loads both with and without sign extension. */
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+ do { \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ { \
+ if ((MODE) == QImode) \
+ (UNSIGNEDP) = 1; \
+ (MODE) = SImode; \
+ } \
+ } while (0)
+
+/* The promotion described by `PROMOTE_MODE' should also be done for
+ outgoing function arguments. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* The promotion described by `PROMOTE_MODE' should also be done for
+ the return value of functions. Note: `FUNCTION_VALUE' must perform
+ the same promotions done by `PROMOTE_MODE'. */
+#define PROMOTE_FUNCTION_RETURN
+
+/* Imitate the way many other C compilers handle alignment of
+ bitfields and the structures that contain them. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Align string constants and constructors to at least a word boundary.
+ The typical use of this macro is to increase alignment for string
+ constants to be word aligned so that 'strcpy' calls that copy
+ constants can be done inline. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
+ && (ALIGN) < BITS_PER_WORD \
+ ? BITS_PER_WORD \
+ : (ALIGN))
+
+/* Align arrays, unions and records to at least a word boundary.
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. Another is to
+ cause character arrays to be word-aligned so that 'strcpy' calls
+ that copy constants to character arrays can be done inline. */
+#undef DATA_ALIGNMENT
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ((((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (TYPE) == ARRAY_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* An argument declared as 'char' or 'short' in a prototype should
+ actually be passed as an 'int'. */
+#define PROMOTE_PROTOTYPES 1
+
+/* Operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Xtensa loads are zero-extended by default. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ The fake frame pointer and argument pointer will never appear in
+ the generated code, since they will always be eliminated and replaced
+ by either the stack pointer or the hard frame pointer.
+
+ 0 - 15 AR[0] - AR[15]
+ 16 FRAME_POINTER (fake = initial sp)
+ 17 ARG_POINTER (fake = initial sp + framesize)
+ 18 LOOP_COUNT (loop count special register)
+ 18 BR[0] for floating-point CC
+ 19 - 34 FR[0] - FR[15]
+ 35 MAC16 accumulator */
+
+#define FIRST_PSEUDO_REGISTER 36
+
+/* Return the stabs register number to use for REGNO. */
+#define DBX_REGISTER_NUMBER(REGNO) xtensa_dbx_register_number (REGNO)
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator. */
+#define FIXED_REGISTERS \
+{ \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+#define CALL_USED_REGISTERS \
+{ \
+ 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, \
+}
+
+/* For non-leaf procedures on Xtensa processors, the allocation order
+ is as specified below by REG_ALLOC_ORDER. For leaf procedures, we
+ want to use the lowest numbered registers first to minimize
+ register window overflows. However, local-alloc is not smart
+ enough to consider conflicts with incoming arguments. If an
+ incoming argument in a2 is live throughout the function and
+ local-alloc decides to use a2, then the incoming argument must
+ either be spilled or copied to another register. To get around
+ this, we define ORDER_REGS_FOR_LOCAL_ALLOC to redefine
+ reg_alloc_order for leaf functions such that lowest numbered
+ registers are used first with the exception that the incoming
+ argument registers are not used until after other register choices
+ have been exhausted. */
+
+#define REG_ALLOC_ORDER \
+{ 8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 19, \
+ 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, \
+ 0, 1, 16, 17, \
+ 36, \
+}
+
+#define ORDER_REGS_FOR_LOCAL_ALLOC order_regs_for_local_alloc ()
+
+/* For Xtensa, the only point of this is to prevent GCC from otherwise
+ giving preference to call-used registers. To minimize window
+ overflows for the AR registers, we want to give preference to the
+ lower-numbered AR registers. For other register files, which are
+ not windowed, we still prefer call-used registers, if there are any. */
+extern const char xtensa_leaf_regs[FIRST_PSEUDO_REGISTER];
+#define LEAF_REGISTERS xtensa_leaf_regs
+
+/* For Xtensa, no remapping is necessary, but this macro must be
+ defined if LEAF_REGISTERS is defined. */
+#define LEAF_REG_REMAP(REGNO) (REGNO)
+
+/* this must be declared if LEAF_REGISTERS is set */
+extern int leaf_function;
+
+/* Internal macros to classify a register number. */
+
+/* 16 address registers + fake registers */
+#define GP_REG_FIRST 0
+#define GP_REG_LAST 17
+#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
+
+/* Special registers */
+#define SPEC_REG_FIRST 18
+#define SPEC_REG_LAST 18
+#define SPEC_REG_NUM (SPEC_REG_LAST - SPEC_REG_FIRST + 1)
+
+/* Coprocessor registers */
+#define BR_REG_FIRST 18
+#define BR_REG_LAST 18
+#define BR_REG_NUM (BR_REG_LAST - BR_REG_FIRST + 1)
+
+/* 16 floating-point registers */
+#define FP_REG_FIRST 19
+#define FP_REG_LAST 34
+#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
+
+/* MAC16 accumulator */
+#define ACC_REG_FIRST 35
+#define ACC_REG_LAST 35
+#define ACC_REG_NUM (ACC_REG_LAST - ACC_REG_FIRST + 1)
+
+#define GP_REG_P(REGNO) ((unsigned) ((REGNO) - GP_REG_FIRST) < GP_REG_NUM)
+#define BR_REG_P(REGNO) ((unsigned) ((REGNO) - BR_REG_FIRST) < BR_REG_NUM)
+#define FP_REG_P(REGNO) ((unsigned) ((REGNO) - FP_REG_FIRST) < FP_REG_NUM)
+#define ACC_REG_P(REGNO) ((unsigned) ((REGNO) - ACC_REG_FIRST) < ACC_REG_NUM)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (FP_REG_P (REGNO) ? \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG) : \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode
+ MODE. */
+extern char xtensa_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ xtensa_hard_regno_mode_ok[(int) (MODE)][(REGNO)]
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((GET_MODE_CLASS (MODE1) == MODE_FLOAT || \
+ GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT) \
+ == (GET_MODE_CLASS (MODE2) == MODE_FLOAT || \
+ GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT))
+
+/* Register to use for LCOUNT special register. */
+#define COUNT_REGISTER_REGNUM (SPEC_REG_FIRST + 0)
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM (GP_REG_FIRST + 1)
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM (GP_REG_FIRST + 7)
+
+/* The register number of the frame pointer register, which is used to
+ access automatic variables in the stack frame. For Xtensa, this
+ register never appears in the output. It is always eliminated to
+ either the stack pointer or the hard frame pointer. */
+#define FRAME_POINTER_REGNUM (GP_REG_FIRST + 16)
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in 'reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED xtensa_frame_pointer_required ()
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM (GP_REG_FIRST + 17)
+
+/* If the static chain is passed in memory, these macros provide rtx
+ giving 'mem' expressions that denote where they are stored.
+ 'STATIC_CHAIN' and 'STATIC_CHAIN_INCOMING' give the locations as
+ seen by the calling and called functions, respectively. */
+
+#define STATIC_CHAIN \
+ gen_rtx_MEM (Pmode, plus_constant (stack_pointer_rtx, -5 * UNITS_PER_WORD))
+
+#define STATIC_CHAIN_INCOMING \
+ gen_rtx_MEM (Pmode, plus_constant (arg_pointer_rtx, -5 * UNITS_PER_WORD))
+
+/* For now we don't try to use the full set of boolean registers. Without
+ software pipelining of FP operations, there's not much to gain and it's
+ a real pain to get them reloaded. */
+#define FPCC_REGNUM (BR_REG_FIRST + 0)
+
+/* Pass structure value address as an "invisible" first argument. */
+#define STRUCT_VALUE 0
+
+/* It is as good or better to call a constant function address than to
+ call an address kept in a register. */
+#define NO_FUNCTION_CSE 1
+
+/* It is as good or better for a function to call itself with an
+ explicit address than to call an address kept in a register. */
+#define NO_RECURSIVE_FUNCTION_CSE 1
+
+/* Xtensa processors have "register windows". GCC does not currently
+ take advantage of the possibility for variable-sized windows; instead,
+ we use a fixed window size of 8. */
+
+#define INCOMING_REGNO(OUT) \
+ ((GP_REG_P (OUT) && \
+ ((unsigned) ((OUT) - GP_REG_FIRST) >= WINDOW_SIZE)) ? \
+ (OUT) - WINDOW_SIZE : (OUT))
+
+#define OUTGOING_REGNO(IN) \
+ ((GP_REG_P (IN) && \
+ ((unsigned) ((IN) - GP_REG_FIRST) < WINDOW_SIZE)) ? \
+ (IN) + WINDOW_SIZE : (IN))
+
+
+/* Define the classes of registers for register constraints in the
+ machine description. */
+enum reg_class
+{
+ NO_REGS, /* no registers in set */
+ BR_REGS, /* coprocessor boolean registers */
+ FP_REGS, /* floating point registers */
+ ACC_REG, /* MAC16 accumulator */
+ SP_REG, /* sp register (aka a1) */
+ GR_REGS, /* integer registers except sp */
+ AR_REGS, /* all integer registers */
+ ALL_REGS, /* all registers */
+ LIM_REG_CLASSES /* max value + 1 */
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define GENERAL_REGS AR_REGS
+
+/* An initializer containing the names of the register classes as C
+ string constants. These names are used in writing some of the
+ debugging dumps. */
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "BR_REGS", \
+ "FP_REGS", \
+ "ACC_REG", \
+ "SP_REG", \
+ "GR_REGS", \
+ "AR_REGS", \
+ "ALL_REGS" \
+}
+
+/* Contents of the register classes. The Nth integer specifies the
+ contents of class N. The way the integer MASK is interpreted is
+ that register R is in the class if 'MASK & (1 << R)' is 1. */
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000 }, /* no registers */ \
+ { 0x00040000, 0x00000000 }, /* coprocessor boolean registers */ \
+ { 0xfff80000, 0x00000007 }, /* floating-point registers */ \
+ { 0x00000000, 0x00000008 }, /* MAC16 accumulator */ \
+ { 0x00000002, 0x00000000 }, /* stack pointer register */ \
+ { 0x0000fffd, 0x00000000 }, /* general-purpose registers */ \
+ { 0x0003ffff, 0x00000000 }, /* integer registers */ \
+ { 0xffffffff, 0x0000000f } /* all registers */ \
+}
+
+/* A C expression whose value is a register class containing hard
+ register REGNO. In general there is more that one such class;
+ choose a class which is "minimal", meaning that no smaller class
+ also contains the register. */
+extern const enum reg_class xtensa_regno_to_class[FIRST_PSEUDO_REGISTER];
+
+#define REGNO_REG_CLASS(REGNO) xtensa_regno_to_class[ (REGNO) ]
+
+/* Use the Xtensa AR register file for base registers.
+ No index registers. */
+#define BASE_REG_CLASS AR_REGS
+#define INDEX_REG_CLASS NO_REGS
+
+/* SMALL_REGISTER_CLASSES is required for Xtensa, because all of the
+ 16 AR registers may be explicitly used in the RTL, as either
+ incoming or outgoing arguments. */
+#define SMALL_REGISTER_CLASSES 1
+
+
+/* REGISTER AND CONSTANT CLASSES */
+
+/* Get reg_class from a letter such as appears in the machine
+ description.
+
+ Available letters: a-f,h,j-l,q,t-z,A-D,W,Y-Z
+
+ DEFINED REGISTER CLASSES:
+
+ 'a' general-purpose registers except sp
+ 'q' sp (aka a1)
+ 'D' general-purpose registers (only if density option enabled)
+ 'd' general-purpose registers, including sp (only if density enabled)
+ 'A' MAC16 accumulator (only if MAC16 option enabled)
+ 'B' general-purpose registers (only if sext instruction enabled)
+ 'C' general-purpose registers (only if mul16 option enabled)
+ 'b' coprocessor boolean registers
+ 'f' floating-point registers
+*/
+
+extern enum reg_class xtensa_char_to_class[256];
+
+#define REG_CLASS_FROM_LETTER(C) xtensa_char_to_class[ (int) (C) ]
+
+/* The letters I, J, K, L, M, N, O, and P in a register constraint
+ string can be used to stand for particular ranges of immediate
+ operands. This macro defines what the ranges are. C is the
+ letter, and VALUE is a constant value. Return 1 if VALUE is
+ in the range specified by C.
+
+ For Xtensa:
+
+ I = 12-bit signed immediate for movi
+ J = 8-bit signed immediate for addi
+ K = 4-bit value in (b4const U {0})
+ L = 4-bit value in b4constu
+ M = 7-bit value in simm7
+ N = 8-bit unsigned immediate shifted left by 8 bits for addmi
+ O = 4-bit value in ai4const
+ P = valid immediate mask value for extui */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? (xtensa_simm12b (VALUE)) \
+ : (C) == 'J' ? (xtensa_simm8 (VALUE)) \
+ : (C) == 'K' ? (((VALUE) == 0) || xtensa_b4const (VALUE)) \
+ : (C) == 'L' ? (xtensa_b4constu (VALUE)) \
+ : (C) == 'M' ? (xtensa_simm7 (VALUE)) \
+ : (C) == 'N' ? (xtensa_simm8x256 (VALUE)) \
+ : (C) == 'O' ? (xtensa_ai4const (VALUE)) \
+ : (C) == 'P' ? (xtensa_mask_immediate (VALUE)) \
+ : FALSE)
+
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself. */
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) (0)
+
+
+/* Other letters can be defined in a machine-dependent fashion to
+ stand for particular classes of registers or other arbitrary
+ operand types.
+
+ R = memory that can be accessed with a 4-bit unsigned offset
+ S = memory where the second word can be addressed with a 4-bit offset
+ T = memory in a constant pool (addressable with a pc-relative load)
+ U = memory *NOT* in a constant pool
+
+ The offset range should not be checked here (except to distinguish
+ denser versions of the instructions for which more general versions
+ are available). Doing so leads to problems in reloading: an
+ argptr-relative address may become invalid when the phony argptr is
+ eliminated in favor of the stack pointer (the offset becomes too
+ large to fit in the instruction's immediate field); a reload is
+ generated to fix this but the RTL is not immediately updated; in
+ the meantime, the constraints are checked and none match. The
+ solution seems to be to simply skip the offset check here. The
+ address will be checked anyway because of the code in
+ GO_IF_LEGITIMATE_ADDRESS. */
+
+#define EXTRA_CONSTRAINT(OP, CODE) \
+ ((GET_CODE (OP) != MEM) ? \
+ ((CODE) >= 'R' && (CODE) <= 'U' \
+ && reload_in_progress && GET_CODE (OP) == REG \
+ && REGNO (OP) >= FIRST_PSEUDO_REGISTER) \
+ : ((CODE) == 'R') ? smalloffset_mem_p (OP) \
+ : ((CODE) == 'S') ? smalloffset_double_mem_p (OP) \
+ : ((CODE) == 'T') ? constantpool_mem_p (OP) \
+ : ((CODE) == 'U') ? !constantpool_mem_p (OP) \
+ : FALSE)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use. */
+#define PREFERRED_RELOAD_CLASS(X, CLASS) \
+ (CONSTANT_P (X) \
+ ? (GET_CODE (X) == CONST_DOUBLE) ? NO_REGS : (CLASS) \
+ : (CLASS))
+
+/* No class change is needed for output reloads. */
+#define PREFERRED_OUTPUT_RELOAD_CLASS(X, CLASS) \
+ (CLASS)
+
+/* Some reloads may require an intermediate register;
+ xtensa_secondary_reload_class decides. The last argument
+ distinguishes input (0) from output (1) reloads. */
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ xtensa_secondary_reload_class (CLASS, MODE, X, 0)
+
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
+ xtensa_secondary_reload_class (CLASS, MODE, X, 1)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+#define CLASS_UNITS(mode, size) \
+ ((GET_MODE_SIZE (mode) + (size) - 1) / (size))
+
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (CLASS_UNITS (MODE, UNITS_PER_WORD))
+
+
+/* Stack layout; function entry, exit and calling. */
+
+#define STACK_GROWS_DOWNWARD
+
+/* Offset within stack frame to start allocating local variables at. */
+#define STARTING_FRAME_OFFSET \
+ current_function_outgoing_args_size
+
+/* The ARG_POINTER and FRAME_POINTER are not real Xtensa registers, so
+ they are eliminated to either the stack pointer or hard frame pointer. */
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}}
+
+/* All of the eliminations listed above are always valid. */
+#define CAN_ELIMINATE(FROM, TO) 1
+
+/* Specify the initial difference between the specified pair of registers. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ do { \
+ compute_frame_size (get_frame_size ()); \
+ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = 0; \
+ else if ((FROM) == ARG_POINTER_REGNUM) \
+ (OFFSET) = xtensa_current_frame_size; \
+ else \
+ abort (); \
+ } while (0)
+
+/* If defined, the maximum amount of space required for outgoing
+ arguments will be computed and placed into the variable
+ 'current_function_outgoing_args_size'. No space will be pushed
+ onto the stack for each call; instead, the function prologue
+ should increase the stack frame size by this amount. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Offset from the argument pointer register to the first argument's
+ address. On some machines it may depend on the data type of the
+ function. If 'ARGS_GROW_DOWNWARD', this is the offset to the
+ location above the first argument's address. */
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+/* Align stack frames on 128 bits for Xtensa. This is necessary for
+ 128-bit datatypes defined in TIE (e.g., for Vectra). */
+#define STACK_BOUNDARY 128
+
+/* Functions do not pop arguments off the stack. */
+#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) 0
+
+/* Use a fixed register window size of 8. */
+#define WINDOW_SIZE 8
+
+/* Symbolic macros for the registers used to return integer, floating
+ point, and values of coprocessor and user-defined modes. */
+#define GP_RETURN (GP_REG_FIRST + 2 + WINDOW_SIZE)
+#define GP_OUTGOING_RETURN (GP_REG_FIRST + 2)
+
+/* Symbolic macros for the first/last argument registers. Because of
+ register windowing, an outgoing register number is the corresponding
+ incoming number plus WINDOW_SIZE (the caller's a10 is the callee's
+ a2, and so on). */
+#define GP_ARG_FIRST (GP_REG_FIRST + 2)
+#define GP_ARG_LAST (GP_REG_FIRST + 7)
+#define GP_OUTGOING_ARG_FIRST (GP_REG_FIRST + 2 + WINDOW_SIZE)
+#define GP_OUTGOING_ARG_LAST (GP_REG_FIRST + 7 + WINDOW_SIZE)
+
+#define MAX_ARGS_IN_REGISTERS 6
+
+/* Don't worry about compatibility with PCC. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* For Xtensa, we would like to be able to return up to 6 words in
+ memory but GCC cannot support that. The return value must be given
+ one of the standard MODE_INT modes, and there is no 6 word mode.
+ Instead, if we try to return a 6 word structure, GCC selects the
+ next biggest mode (OImode, 8 words) and then the register allocator
+ fails because there is no 8-register group beginning with a10. So
+ we have to fall back on the next largest size which is 4 words... */
+#define RETURN_IN_MEMORY(TYPE) \
+ ((unsigned HOST_WIDE_INT) int_size_in_bytes (TYPE) > 4 * UNITS_PER_WORD)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. Because we have defined
+ PROMOTE_FUNCTION_RETURN, we have to perform the same promotions as
+ PROMOTE_MODE. */
+#define XTENSA_LIBCALL_VALUE(MODE, OUTGOINGP) \
+ gen_rtx_REG ((GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ ? SImode : (MODE), \
+ OUTGOINGP ? GP_OUTGOING_RETURN : GP_RETURN)
+
+#define LIBCALL_VALUE(MODE) \
+ XTENSA_LIBCALL_VALUE ((MODE), 0)
+
+#define LIBCALL_OUTGOING_VALUE(MODE) \
+ XTENSA_LIBCALL_VALUE ((MODE), 1)
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+#define XTENSA_FUNCTION_VALUE(VALTYPE, FUNC, OUTGOINGP) \
+ gen_rtx_REG ((INTEGRAL_TYPE_P (VALTYPE) \
+ && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
+ ? SImode: TYPE_MODE (VALTYPE), \
+ OUTGOINGP ? GP_OUTGOING_RETURN : GP_RETURN)
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ XTENSA_FUNCTION_VALUE (VALTYPE, FUNC, 0)
+
+#define FUNCTION_OUTGOING_VALUE(VALTYPE, FUNC) \
+ XTENSA_FUNCTION_VALUE (VALTYPE, FUNC, 1)
+
+/* A C expression that is nonzero if REGNO is the number of a hard
+ register in which the values of called function may come back. A
+ register whose use for returning values is limited to serving as
+ the second of a pair (for a value of type 'double', say) need not
+ be recognized by this macro. If the machine has register windows,
+ so that the caller and the called function use different registers
+ for the return value, this macro should recognize only the caller's
+ register numbers. */
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == GP_RETURN)
+
+/* A C expression that is nonzero if REGNO is the number of a hard
+ register in which function arguments are sometimes passed. This
+ does *not* include implicit arguments such as the static chain and
+ the structure-value address. On many machines, no registers can be
+ used for this purpose since all function arguments are pushed on
+ the stack. */
+#define FUNCTION_ARG_REGNO_P(N) \
+ ((N) >= GP_OUTGOING_ARG_FIRST && (N) <= GP_OUTGOING_ARG_LAST)
+
+/* Use IEEE floating-point format. */
+#define TARGET_FLOAT_FORMAT IEEE_FLOAT_FORMAT
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go. */
+typedef struct xtensa_args {
+ int arg_words; /* # total words the arguments take */
+} CUMULATIVE_ARGS;
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME)
+
+#define INIT_CUMULATIVE_INCOMING_ARGS(CUM, FNTYPE, LIBNAME) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ function_arg_advance (&CUM, MODE, TYPE)
+
+/* The last argument to function_arg distinguishes incoming (TRUE)
+ from outgoing (FALSE) arguments. */
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, FALSE)
+
+#define FUNCTION_INCOMING_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, TRUE)
+
+/* Arguments are never passed partly in memory and partly in registers. */
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) (0)
+
+/* Specify function argument alignment. */
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ ((TYPE) != 0 \
+ ? (TYPE_ALIGN (TYPE) <= PARM_BOUNDARY \
+ ? PARM_BOUNDARY \
+ : TYPE_ALIGN (TYPE)) \
+ : (GET_MODE_ALIGNMENT (MODE) <= PARM_BOUNDARY \
+ ? PARM_BOUNDARY \
+ : GET_MODE_ALIGNMENT (MODE)))
+
+
+/* Nonzero if we do not know how to pass TYPE solely in registers.
+ We cannot do so in the following cases:
+
+ - if the type has variable size
+ - if the type is marked as addressable (it is required to be constructed
+ into the stack)
+
+ This differs from the default in that it does not check if the padding
+ and mode of the type are such that a copy into a register would put it
+ into the wrong part of the register. */
+
+#define MUST_PASS_IN_STACK(MODE, TYPE) \
+ ((TYPE) != 0 \
+ && (TREE_CODE (TYPE_SIZE (TYPE)) != INTEGER_CST \
+ || TREE_ADDRESSABLE (TYPE)))
+
+/* Output assembler code to FILE to increment profiler label LABELNO
+ for profiling a function entry.
+
+ The mcount code in glibc doesn't seem to use this LABELNO stuff.
+ Some ports (e.g., MIPS) don't even bother to pass the label
+ address, and even those that do (e.g., i386) don't seem to use it.
+ The information needed by mcount() is the current PC and the
+ current return address, so that mcount can identify an arc in the
+ call graph. For Xtensa, we pass the current return address as
+ the first argument to mcount, and the current PC is available as
+ a0 in mcount's register window. Both of these values contain
+ window size information in the two most significant bits; we assume
+ that the mcount code will mask off those bits. The call to mcount
+ uses a window size of 8 to make sure that mcount doesn't clobber
+ any incoming argument values.
+
+ Note: no trailing semicolon after "while (0)" -- the caller's
+ semicolon completes the statement, so the macro stays usable in
+ if/else contexts. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ do { \
+ fprintf (FILE, "\taddi\t%s, %s, 0\t# save current return address\n", \
+ reg_names[GP_REG_FIRST+10], \
+ reg_names[GP_REG_FIRST+0]); \
+ fprintf (FILE, "\tcall8\t_mcount\n"); \
+ } while (0)
+
+/* Stack pointer value doesn't matter at exit. */
+#define EXIT_IGNORE_STACK 1
+
+/* A C statement to output, on the stream FILE, assembler code for a
+ block of data that contains the constant parts of a trampoline.
+ This code should not include a label--the label is taken care of
+ automatically.
+
+ For Xtensa, the trampoline must perform an entry instruction with a
+ minimal stack frame in order to get some free registers. Once the
+ actual call target is known, the proper stack frame size is extracted
+ from the entry instruction at the target and the current frame is
+ adjusted to match. The trampoline then transfers control to the
+ instruction following the entry at the target. Note: this assumes
+ that the target begins with an entry instruction. */
+
+/* minimum frame = reg save area (4 words) plus static chain (1 word)
+ and the total number of words must be a multiple of 128 bits */
+#define MIN_FRAME_SIZE (8 * UNITS_PER_WORD)
+
+#define TRAMPOLINE_TEMPLATE(STREAM) \
+ do { \
+ fprintf (STREAM, "\t.begin no-generics\n"); \
+ fprintf (STREAM, "\tentry\tsp, %d\n", MIN_FRAME_SIZE); \
+ \
+ /* GCC isn't prepared to deal with data at the beginning of the \
+ trampoline, and the Xtensa l32r instruction requires that the \
+ constant pool be located before the code. We put the constant \
+ pool in the middle of the trampoline and jump around it. */ \
+ \
+ fprintf (STREAM, "\tj\t.Lskipconsts\n"); \
+ fprintf (STREAM, "\t.align\t4\n"); \
+ fprintf (STREAM, ".Lfnaddr:%s0\n", integer_asm_op (4, TRUE)); \
+ fprintf (STREAM, ".Lchainval:%s0\n", integer_asm_op (4, TRUE)); \
+ fprintf (STREAM, ".Lskipconsts:\n"); \
+ \
+ /* store the static chain */ \
+ fprintf (STREAM, "\tl32r\ta8, .Lchainval\n"); \
+ fprintf (STREAM, "\ts32i\ta8, sp, %d\n", \
+ MIN_FRAME_SIZE - (5 * UNITS_PER_WORD)); \
+ \
+ /* set the proper stack pointer value */ \
+ fprintf (STREAM, "\tl32r\ta8, .Lfnaddr\n"); \
+ fprintf (STREAM, "\tl32i\ta9, a8, 0\n"); \
+ fprintf (STREAM, "\textui\ta9, a9, %d, 12\n", \
+ TARGET_BIG_ENDIAN ? 8 : 12); \
+ fprintf (STREAM, "\tslli\ta9, a9, 3\n"); \
+ fprintf (STREAM, "\taddi\ta9, a9, %d\n", -MIN_FRAME_SIZE); \
+ fprintf (STREAM, "\tsub\ta9, sp, a9\n"); \
+ fprintf (STREAM, "\tmovsp\tsp, a9\n"); \
+ \
+ /* jump to the instruction following the entry */ \
+ fprintf (STREAM, "\taddi\ta8, a8, 3\n"); \
+ fprintf (STREAM, "\tjx\ta8\n"); \
+ fprintf (STREAM, "\t.end no-generics\n"); \
+ } while (0)
+
+/* Size in bytes of the trampoline, as an integer. */
+#define TRAMPOLINE_SIZE 49
+
+/* Alignment required for trampolines, in bits. */
+#define TRAMPOLINE_ALIGNMENT (32)
+
+/* A C statement to initialize the variable parts of a trampoline.
+ The byte offsets 8 and 12 must match the positions of .Lfnaddr and
+ .Lchainval in TRAMPOLINE_TEMPLATE above. The call to
+ __xtensa_sync_caches presumably makes the just-written trampoline
+ code visible to instruction fetch (see lib2funcs.S) -- verify. */
+#define INITIALIZE_TRAMPOLINE(ADDR, FUNC, CHAIN) \
+ do { \
+ rtx addr = ADDR; \
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, 8)), FUNC); \
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (addr, 12)), CHAIN); \
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__xtensa_sync_caches"), \
+ 0, VOIDmode, 1, addr, Pmode); \
+ } while (0)
+
+/* Define the `__builtin_va_list' type for the ABI. */
+#define BUILD_VA_LIST_TYPE(VALIST) \
+ (VALIST) = xtensa_build_va_list ()
+
+/* If defined, is a C expression that produces the machine-specific
+ code for a call to '__builtin_saveregs'. This code will be moved
+ to the very beginning of the function, before any parameter access
+ are made. The return value of this function should be an RTX that
+ contains the value to use as the return of '__builtin_saveregs'. */
+#define EXPAND_BUILTIN_SAVEREGS \
+ xtensa_builtin_saveregs
+
+/* Implement `va_start' for varargs and stdarg. */
+#define EXPAND_BUILTIN_VA_START(stdarg, valist, nextarg) \
+ xtensa_va_start (stdarg, valist, nextarg)
+
+/* Implement `va_arg'. */
+#define EXPAND_BUILTIN_VA_ARG(valist, type) \
+ xtensa_va_arg (valist, type)
+
+/* If defined, a C expression that produces the machine-specific code
+ to setup the stack so that arbitrary frames can be accessed.
+
+ On Xtensa, a stack back-trace must always begin from the stack pointer,
+ so that the register overflow save area can be located. However, the
+ stack-walking code in GCC always begins from the hard_frame_pointer
+ register, not the stack pointer. The frame pointer is usually equal
+ to the stack pointer, but the __builtin_return_address and
+ __builtin_frame_address functions will not work if count > 0 and
+ they are called from a routine that uses alloca. These functions
+ are not guaranteed to work at all if count > 0 so maybe that is OK.
+
+ A nicer solution would be to allow the architecture-specific files to
+ specify whether to start from the stack pointer or frame pointer. That
+ would also allow us to skip the machine->accesses_prev_frame stuff that
+ we currently need to ensure that there is a frame pointer when these
+ builtin functions are used. */
+
+#define SETUP_FRAME_ADDRESSES() \
+ xtensa_setup_frame_addresses ()
+
+/* A C expression whose value is RTL representing the address in a
+ stack frame where the pointer to the caller's frame is stored.
+ Assume that FRAMEADDR is an RTL expression for the address of the
+ stack frame itself.
+
+ For Xtensa, there is no easy way to get the frame pointer if it is
+ not equivalent to the stack pointer. Moreover, the result of this
+ macro is used for continuing to walk back up the stack, so it must
+ return the stack pointer address. Thus, there is some inconsistency
+ here in that __builtin_frame_address will return the frame pointer
+ when count == 0 and the stack pointer when count > 0. */
+
+/* NOTE(review): the -3 word offset presumably locates the caller's
+ saved stack pointer within the register overflow save area below
+ the frame address -- verify against the windowed-ABI frame layout. */
+#define DYNAMIC_CHAIN_ADDRESS(frame) \
+ gen_rtx (PLUS, Pmode, frame, \
+ gen_rtx_CONST_INT (VOIDmode, -3 * UNITS_PER_WORD))
+
+/* Define this if the return address of a particular stack frame is
+ accessed from the frame pointer of the previous stack frame. */
+#define RETURN_ADDR_IN_PREVIOUS_FRAME
+
+/* A C expression whose value is RTL representing the value of the
+ return address for the frame COUNT steps up from the current
+ frame, after the prologue. FRAMEADDR is the frame pointer of the
+ COUNT frame, or the frame pointer of the COUNT - 1 frame if
+ 'RETURN_ADDR_IN_PREVIOUS_FRAME' is defined.
+
+ The 2 most-significant bits of the return address on Xtensa hold
+ the register window size. To get the real return address, these bits
+ must be masked off and replaced with the high bits from the current
+ PC. Since it is unclear how the __builtin_return_address function
+ is used, the current code does not do this masking and simply returns
+ the raw return address from the a0 register. */
+#define RETURN_ADDR_RTX(count, frame) \
+ ((count) == -1 \
+ ? gen_rtx_REG (Pmode, 0) \
+ : gen_rtx_MEM (Pmode, memory_address \
+ (Pmode, plus_constant (frame, -4 * UNITS_PER_WORD))))
+
+
+/* Addressing modes, and classification of registers for them. */
+
+/* C expressions which are nonzero if register number NUM is suitable
+ for use as a base or index register in operand addresses. It may
+ be either a suitable hard register or a pseudo register that has
+ been allocated such a hard register. The difference between an
+ index register and a base register is that the index register may
+ be scaled. */
+
+/* For a pseudo register, check the hard register (if any) assigned
+ to it by reload, via reg_renumber. */
+#define REGNO_OK_FOR_BASE_P(NUM) \
+ (GP_REG_P (NUM) || GP_REG_P ((unsigned) reg_renumber[NUM]))
+
+#define REGNO_OK_FOR_INDEX_P(NUM) 0
+
+/* C expressions that are nonzero if X (assumed to be a `reg' RTX) is
+ valid for use as a base or index register. For hard registers, it
+ should always accept those which the hardware permits and reject
+ the others. Whether the macro accepts or rejects pseudo registers
+ must be controlled by `REG_OK_STRICT'. This usually requires two
+ variant definitions, of which `REG_OK_STRICT' controls the one
+ actually used. The difference between an index register and a base
+ register is that the index register may be scaled. */
+
+#ifdef REG_OK_STRICT
+
+#define REG_OK_FOR_INDEX_P(X) 0
+#define REG_OK_FOR_BASE_P(X) \
+ REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#else /* !REG_OK_STRICT */
+
+#define REG_OK_FOR_INDEX_P(X) 0
+#define REG_OK_FOR_BASE_P(X) \
+ ((REGNO (X) >= FIRST_PSEUDO_REGISTER) || (GP_REG_P (REGNO (X))))
+
+#endif /* !REG_OK_STRICT */
+
+/* Maximum number of registers that can appear in a valid memory address. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* Identify valid Xtensa addresses. */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+ do { \
+ rtx xinsn = (X); \
+ \
+ /* allow constant pool addresses */ \
+ if ((MODE) != BLKmode && GET_MODE_SIZE (MODE) >= UNITS_PER_WORD \
+ && constantpool_address_p (xinsn)) \
+ goto ADDR; \
+ \
+ while (GET_CODE (xinsn) == SUBREG) \
+ xinsn = SUBREG_REG (xinsn); \
+ \
+ /* allow base registers */ \
+ if (GET_CODE (xinsn) == REG && REG_OK_FOR_BASE_P (xinsn)) \
+ goto ADDR; \
+ \
+ /* check for "register + offset" addressing */ \
+ if (GET_CODE (xinsn) == PLUS) \
+ { \
+ rtx xplus0 = XEXP (xinsn, 0); \
+ rtx xplus1 = XEXP (xinsn, 1); \
+ enum rtx_code code0; \
+ enum rtx_code code1; \
+ \
+ while (GET_CODE (xplus0) == SUBREG) \
+ xplus0 = SUBREG_REG (xplus0); \
+ code0 = GET_CODE (xplus0); \
+ \
+ while (GET_CODE (xplus1) == SUBREG) \
+ xplus1 = SUBREG_REG (xplus1); \
+ code1 = GET_CODE (xplus1); \
+ \
+ /* swap operands if necessary so the register is first */ \
+ if (code0 != REG && code1 == REG) \
+ { \
+ xplus0 = XEXP (xinsn, 1); \
+ xplus1 = XEXP (xinsn, 0); \
+ code0 = GET_CODE (xplus0); \
+ code1 = GET_CODE (xplus1); \
+ } \
+ \
+ if (code0 == REG && REG_OK_FOR_BASE_P (xplus0) \
+ && code1 == CONST_INT \
+ && xtensa_mem_offset (INTVAL (xplus1), (MODE))) \
+ { \
+ goto ADDR; \
+ } \
+ } \
+ } while (0)
+
+/* A C expression that is 1 if the RTX X is a constant which is a
+ valid address. This is defined to be the same as 'CONSTANT_P (X)',
+ but rejecting CONST_DOUBLE. */
+#define CONSTANT_ADDRESS_P(X) \
+ ((GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == HIGH \
+ || (GET_CODE (X) == CONST)))
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
+#define LEGITIMATE_CONSTANT_P(X) 1
+
+/* A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent
+ code. */
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ ((GET_CODE (X) != SYMBOL_REF || SYMBOL_REF_FLAG (X)) \
+ && GET_CODE (X) != LABEL_REF \
+ && GET_CODE (X) != CONST)
+
+/* Tell GCC how to use ADDMI to generate addresses. A "register +
+ offset" address whose offset fits neither the load/store offset
+ field nor an addi immediate is split into an addmi-compatible part
+ (a multiple of 256, added into a new register) plus the low byte of
+ the offset. */
+#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
+ do { \
+ rtx xinsn = (X); \
+ if (GET_CODE (xinsn) == PLUS) \
+ { \
+ rtx plus0 = XEXP (xinsn, 0); \
+ rtx plus1 = XEXP (xinsn, 1); \
+ \
+ if (GET_CODE (plus0) != REG && GET_CODE (plus1) == REG) \
+ { \
+ plus0 = XEXP (xinsn, 1); \
+ plus1 = XEXP (xinsn, 0); \
+ } \
+ \
+ if (GET_CODE (plus0) == REG \
+ && GET_CODE (plus1) == CONST_INT \
+ && !xtensa_mem_offset (INTVAL (plus1), MODE) \
+ && !xtensa_simm8 (INTVAL (plus1)) \
+ && xtensa_mem_offset (INTVAL (plus1) & 0xff, MODE) \
+ && xtensa_simm8x256 (INTVAL (plus1) & ~0xff)) \
+ { \
+ rtx temp = gen_reg_rtx (Pmode); \
+ emit_insn (gen_rtx (SET, Pmode, temp, \
+ gen_rtx (PLUS, Pmode, plus0, \
+ GEN_INT (INTVAL (plus1) & ~0xff)))); \
+ (X) = gen_rtx (PLUS, Pmode, temp, \
+ GEN_INT (INTVAL (plus1) & 0xff)); \
+ goto WIN; \
+ } \
+ } \
+ } while (0)
+
+
+/* Xtensa has no mode-dependent addresses. */
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) {}
+
+/* If we are referencing a function that is static, make the SYMBOL_REF
+ special so that we can generate direct calls to it even with -fpic. */
+#define ENCODE_SECTION_INFO(DECL) \
+ do { \
+ if (TREE_CODE (DECL) == FUNCTION_DECL && ! TREE_PUBLIC (DECL)) \
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1; \
+ } while (0)
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE (SImode)
+
+/* Define this if the tablejump instruction expects the table
+ to contain offsets from the address of the table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if 'char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 4
+/* The same bound holds for every Xtensa configuration. */
+#define MAX_MOVE_MAX 4
+
+/* Prefer word-sized loads. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Xtensa doesn't have any instructions that set integer values based on the
+ results of comparisons, but the simplification code in the combiner also
+ uses this macro. The value should be either 1 or -1 to enable some
+ optimizations in the combiner; I'm not sure which is better for us.
+ Since we've been using 1 for a while, it should probably stay that way for
+ compatibility. */
+#define STORE_FLAG_VALUE 1
+
+/* Shift instructions ignore all but the low-order few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode SImode
+
+/* A function address in a call instruction is a word address (for
+ indexing purposes) so give the MEM rtx a words's mode. */
+#define FUNCTION_MODE SImode
+
+/* A C expression that evaluates to true if it is ok to perform a
+ sibling call to DECL. */
+/* TODO: fix this up to allow at least some sibcalls */
+#define FUNCTION_OK_FOR_SIBCALL(DECL) 0
+
+/* Xtensa constant costs. */
+#define CONST_COSTS(X, CODE, OUTER_CODE) \
+ case CONST_INT: \
+ switch (OUTER_CODE) \
+ { \
+ case SET: \
+ if (xtensa_simm12b (INTVAL (X))) return 4; \
+ break; \
+ case PLUS: \
+ if (xtensa_simm8 (INTVAL (X))) return 0; \
+ if (xtensa_simm8x256 (INTVAL (X))) return 0; \
+ break; \
+ case AND: \
+ if (xtensa_mask_immediate (INTVAL (X))) return 0; \
+ break; \
+ case COMPARE: \
+ if ((INTVAL (X) == 0) || xtensa_b4const (INTVAL (X))) return 0; \
+ break; \
+ case ASHIFT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ROTATE: \
+ case ROTATERT: \
+ /* no way to tell if X is the 2nd operand so be conservative */ \
+ default: break; \
+ } \
+ if (xtensa_simm12b (INTVAL (X))) return 5; \
+ return 6; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 5; \
+ case CONST_DOUBLE: \
+ return 7;
+
+/* Costs of various Xtensa operations. */
+#define RTX_COSTS(X, CODE, OUTER_CODE) \
+ case MEM: \
+ { \
+ int num_words = \
+ (GET_MODE_SIZE (GET_MODE (X)) > UNITS_PER_WORD) ? 2 : 1; \
+ if (memory_address_p (GET_MODE (X), XEXP ((X), 0))) \
+ return COSTS_N_INSNS (num_words); \
+ \
+ return COSTS_N_INSNS (2*num_words); \
+ } \
+ \
+ case FFS: \
+ return COSTS_N_INSNS (TARGET_NSA ? 5 : 50); \
+ \
+ case NOT: \
+ return COSTS_N_INSNS ((GET_MODE (X) == DImode) ? 3 : 2); \
+ \
+ case AND: \
+ case IOR: \
+ case XOR: \
+ if (GET_MODE (X) == DImode) return COSTS_N_INSNS (2); \
+ return COSTS_N_INSNS (1); \
+ \
+ case ASHIFT: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ if (GET_MODE (X) == DImode) return COSTS_N_INSNS (50); \
+ return COSTS_N_INSNS (1); \
+ \
+ case ABS: \
+ { \
+ enum machine_mode xmode = GET_MODE (X); \
+ if (xmode == SFmode) \
+ return COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50); \
+ if (xmode == DFmode) \
+ return COSTS_N_INSNS (50); \
+ return COSTS_N_INSNS (4); \
+ } \
+ \
+ case PLUS: \
+ case MINUS: \
+ { \
+ enum machine_mode xmode = GET_MODE (X); \
+ if (xmode == SFmode) \
+ return COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50); \
+ if (xmode == DFmode || xmode == DImode) \
+ return COSTS_N_INSNS (50); \
+ return COSTS_N_INSNS (1); \
+ } \
+ \
+ case NEG: \
+ return COSTS_N_INSNS ((GET_MODE (X) == DImode) ? 4 : 2); \
+ \
+ case MULT: \
+ { \
+ enum machine_mode xmode = GET_MODE (X); \
+ if (xmode == SFmode) \
+ return COSTS_N_INSNS (TARGET_HARD_FLOAT ? 4 : 50); \
+ if (xmode == DFmode || xmode == DImode) \
+ return COSTS_N_INSNS (50); \
+ if (TARGET_MUL32) \
+ return COSTS_N_INSNS (4); \
+ if (TARGET_MAC16) \
+ return COSTS_N_INSNS (16); \
+ if (TARGET_MUL16) \
+ return COSTS_N_INSNS (12); \
+ return COSTS_N_INSNS (50); \
+ } \
+ \
+ case DIV: \
+ case MOD: \
+ { \
+ enum machine_mode xmode = GET_MODE (X); \
+ if (xmode == SFmode) \
+ return COSTS_N_INSNS (TARGET_HARD_FLOAT_DIV ? 8 : 50); \
+ if (xmode == DFmode) \
+ return COSTS_N_INSNS (50); \
+ } \
+ /* fall through */ \
+ \
+ case UDIV: \
+ case UMOD: \
+ { \
+ enum machine_mode xmode = GET_MODE (X); \
+ if (xmode == DImode) \
+ return COSTS_N_INSNS (50); \
+ if (TARGET_DIV32) \
+ return COSTS_N_INSNS (32); \
+ return COSTS_N_INSNS (50); \
+ } \
+ \
+ case SQRT: \
+ if (GET_MODE (X) == SFmode) \
+ return COSTS_N_INSNS (TARGET_HARD_FLOAT_SQRT ? 8 : 50); \
+ return COSTS_N_INSNS (50); \
+ \
+ case SMIN: \
+ case UMIN: \
+ case SMAX: \
+ case UMAX: \
+ return COSTS_N_INSNS (TARGET_MINMAX ? 1 : 50); \
+ \
+ case SIGN_EXTRACT: \
+ case SIGN_EXTEND: \
+ return COSTS_N_INSNS (TARGET_SEXT ? 1 : 2); \
+ \
+ case ZERO_EXTRACT: \
+ case ZERO_EXTEND: \
+ return COSTS_N_INSNS (1);
+
+
+/* An expression giving the cost of an addressing mode that
+ contains ADDRESS. */
+#define ADDRESS_COST(ADDR) 1
+
+/* A C expression for the cost of moving data from a register in
+ class FROM to one in class TO. The classes are expressed using
+ the enumeration values such as 'GENERAL_REGS'. A value of 2 is
+ the default; other values are interpreted relative to that. */
+#define REGISTER_MOVE_COST(MODE, FROM, TO) \
+ (((FROM) == (TO) && (FROM) != BR_REGS && (TO) != BR_REGS) \
+ ? 2 \
+ : (reg_class_subset_p ((FROM), AR_REGS) \
+ && reg_class_subset_p ((TO), AR_REGS) \
+ ? 2 \
+ : (reg_class_subset_p ((FROM), AR_REGS) \
+ && (TO) == ACC_REG \
+ ? 3 \
+ : ((FROM) == ACC_REG \
+ && reg_class_subset_p ((TO), AR_REGS) \
+ ? 3 \
+ : 10))))
+
+/* Memory moves cost more than moves among registers. */
+#define MEMORY_MOVE_COST(MODE, CLASS, IN) 4
+
+/* The cost of a branch, used when deciding between branchy and
+ branch-free code sequences. */
+#define BRANCH_COST 3
+
+/* Optionally define this if you have added predicates to
+ 'MACHINE.c'. This macro is called within an initializer of an
+ array of structures. The first field in the structure is the
+ name of a predicate and the second field is an array of rtl
+ codes. For each predicate, list all rtl codes that can be in
+ expressions matched by the predicate. The list should have a
+ trailing comma. */
+
+#define PREDICATE_CODES \
+ {"add_operand", { REG, CONST_INT, SUBREG }}, \
+ {"arith_operand", { REG, CONST_INT, SUBREG }}, \
+ {"nonimmed_operand", { REG, SUBREG, MEM }}, \
+ {"non_acc_reg_operand", { REG, SUBREG }}, \
+ {"mem_operand", { MEM }}, \
+ {"mask_operand", { REG, CONST_INT, SUBREG }}, \
+ {"extui_fldsz_operand", { CONST_INT }}, \
+ {"sext_fldsz_operand", { CONST_INT }}, \
+ {"lsbitnum_operand", { CONST_INT }}, \
+ {"fpmem_offset_operand", { CONST_INT }}, \
+ {"sext_operand", { REG, SUBREG, MEM }}, \
+ {"branch_operand", { REG, CONST_INT, SUBREG }}, \
+ {"ubranch_operand", { REG, CONST_INT, SUBREG }}, \
+ {"call_insn_operand", { CONST_INT, CONST, SYMBOL_REF, REG }}, \
+ {"move_operand", { REG, SUBREG, MEM, CONST_INT, CONST_DOUBLE, \
+ CONST, SYMBOL_REF, LABEL_REF }}, \
+ {"non_const_move_operand", { REG, SUBREG, MEM }}, \
+ {"const_float_1_operand", { CONST_DOUBLE }}, \
+ {"branch_operator", { EQ, NE, LT, GE }}, \
+ {"ubranch_operator", { LTU, GEU }}, \
+ {"boolean_operator", { EQ, NE }},
+
+/* Control the assembler format that we output. */
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+#define REGISTER_NAMES \
+{ \
+ "a0", "sp", "a2", "a3", "a4", "a5", "a6", "a7", \
+ "a8", "a9", "a10", "a11", "a12", "a13", "a14", "a15", \
+ "fp", "argp", "b0", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
+ "acc" \
+}
+
+/* If defined, a C initializer for an array of structures containing a
+ name and a register number. This macro defines additional names
+ for hard registers, thus allowing the 'asm' option in declarations
+ to refer to registers using alternate names. */
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ { "a1", 1 + GP_REG_FIRST } \
+}
+
+/* Operand and address printing is handled in xtensa.c. */
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
+
+/* Recognize machine-specific patterns that may appear within
+ constants. Used for PIC-specific UNSPECs. */
+#define OUTPUT_ADDR_CONST_EXTRA(STREAM, X, FAIL) \
+ do { \
+ if (flag_pic && GET_CODE (X) == UNSPEC && XVECLEN ((X), 0) == 1) \
+ { \
+ switch (XINT ((X), 1)) \
+ { \
+ case UNSPEC_PLT: \
+ output_addr_const ((STREAM), XVECEXP ((X), 0, 0)); \
+ fputs ("@PLT", (STREAM)); \
+ break; \
+ default: \
+ goto FAIL; \
+ } \
+ break; \
+ } \
+ else \
+ goto FAIL; \
+ } while (0)
+
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+#define ASM_OUTPUT_LABEL(STREAM, NAME) \
+ do { \
+ assemble_name (STREAM, NAME); \
+ fputs (":\n", STREAM); \
+ } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+#define ASM_GLOBALIZE_LABEL(STREAM, NAME) \
+ do { \
+ fputs ("\t.global\t", STREAM); \
+ assemble_name (STREAM, NAME); \
+ fputs ("\n", STREAM); \
+ } while (0)
+
+/* This says how to define a global common symbol. */
+#define ASM_OUTPUT_COMMON(STREAM, NAME, SIZE, ROUNDED) \
+ xtensa_declare_object (STREAM, NAME, "\n\t.comm\t", ",%u\n", (SIZE))
+
+/* This says how to define a local common symbol (ie, not visible to
+ linker). */
+#define ASM_OUTPUT_LOCAL(STREAM, NAME, SIZE, ROUNDED) \
+ xtensa_declare_object (STREAM, NAME, "\n\t.lcomm\t", ",%u\n", (SIZE))
+
+/* This is how to output an element of a case-vector that is absolute. */
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ fprintf (STREAM, "%s%sL%u\n", integer_asm_op (4, TRUE), \
+ LOCAL_LABEL_PREFIX, VALUE)
+
+/* This is how to output an element of a case-vector that is relative.
+ This is used for pc-relative code. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
+ do { \
+ fprintf (STREAM, "%s%sL%u-%sL%u\n", integer_asm_op (4, TRUE), \
+ LOCAL_LABEL_PREFIX, (VALUE), \
+ LOCAL_LABEL_PREFIX, (REL)); \
+ } while (0)
+
+/* This is how to output an assembler line that says to advance the
+ location counter to a multiple of 2**LOG bytes. */
+#define ASM_OUTPUT_ALIGN(STREAM, LOG) \
+ do { \
+ if ((LOG) != 0) \
+ fprintf (STREAM, "\t.align\t%d\n", 1 << (LOG)); \
+ } while (0)
+
+/* Indicate that jump tables go in the text section. This is
+ necessary when compiling PIC code. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+
+/* Define this macro for the rare case where the RTL needs some sort of
+ machine-dependent fixup immediately before register allocation is done.
+
+ If the stack frame size is too big to fit in the immediate field of
+ the ENTRY instruction, we need to store the frame size in the
+ constant pool. However, the code in xtensa_function_prologue runs too
+ late to be able to add anything to the constant pool. Since the
+ final frame size isn't known until reload is complete, this seems
+ like the best place to do it.
+
+ There may also be some fixup required if there is an incoming argument
+ in a7 and the function requires a frame pointer. */
+
+#define MACHINE_DEPENDENT_REORG(INSN) xtensa_reorg (INSN)
+
+
+/* Define the strings to put out for each section in the object file. */
+#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
+#define DATA_SECTION_ASM_OP "\t.data" /* large data */
+
+
+/* Define output to appear before the constant pool. If the function
+ has been assigned to a specific ELF section, or if it goes into a
+ unique section, set the name of that section to be the literal
+ prefix. */
+#define ASM_OUTPUT_POOL_PROLOGUE(FILE, FUNNAME, FUNDECL, SIZE) \
+ do { \
+ tree fnsection; \
+ resolve_unique_section ((FUNDECL), 0); \
+ fnsection = DECL_SECTION_NAME (FUNDECL); \
+ if (fnsection != NULL_TREE) \
+ { \
+ const char *fnsectname = TREE_STRING_POINTER (fnsection); \
+ fprintf (FILE, "\t.begin\tliteral_prefix %s\n", \
+ strcmp (fnsectname, ".text") ? fnsectname : ""); \
+ } \
+ } while (0)
+
+
+/* Define code to write out the ".end literal_prefix" directive for a
+ function in a special section. This is appended to the standard ELF
+ code for ASM_DECLARE_FUNCTION_SIZE. */
+#define XTENSA_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ if (DECL_SECTION_NAME (DECL) != NULL_TREE) \
+ fprintf (FILE, "\t.end\tliteral_prefix\n")
+
+/* A C statement (with or without semicolon) to output a constant in
+ the constant pool, if it needs special treatment. */
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE, X, MODE, ALIGN, LABELNO, JUMPTO) \
+ do { \
+ xtensa_output_literal (FILE, X, MODE, LABELNO); \
+ goto JUMPTO; \
+ } while (0)
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+ do { \
+ (OUTPUT) = (char *) alloca (strlen (NAME) + 10); \
+ sprintf ((OUTPUT), "%s.%u", (NAME), (LABELNO)); \
+ } while (0)
+
+/* How to start an assembler comment. */
+#define ASM_COMMENT_START "#"
+
+/* Exception handling TODO!! */
+#define DWARF_UNWIND_INFO 0
+
diff --git a/gcc/config/xtensa/xtensa.md b/gcc/config/xtensa/xtensa.md
new file mode 100644
index 00000000000..d1fd5edf4c7
--- /dev/null
+++ b/gcc/config/xtensa/xtensa.md
@@ -0,0 +1,2415 @@
+;; GCC machine description for Tensilica's Xtensa architecture.
+;; Copyright (C) 2001 Free Software Foundation, Inc.
+;; Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING. If not, write to the Free
+;; Software Foundation, 59 Temple Place - Suite 330, Boston, MA
+;; 02111-1307, USA.
+
+;;
+;; ....................
+;;
+;; CONSTANTS
+;;
+;; ....................
+;;
+
+(define_constants [
+ (A0_REG 0)
+ (A7_REG 7)
+
+ (UNSPEC_NSAU 1)
+ (UNSPEC_NOP 2)
+ (UNSPEC_PLT 3)
+ (UNSPECV_SET_FP 1)
+])
+
+;;
+;; ....................
+;;
+;; ATTRIBUTES
+;;
+;; ....................
+;;
+
+(define_attr "type"
+ "unknown,branch,jump,call,load,store,move,arith,multi,nop,misc,farith,fmadd,fdiv,fsqrt,fconv,fload,fstore,mul16,mul32,div32,mac16,rsr,wsr,udef_move,udef_loadi,udef_storei,udef_loadiu,udef_storeiu,udef_conv,udef_conv_loadiu,udef_conv_storeiu"
+ (const_string "unknown"))
+
+(define_attr "mode"
+ "unknown,none,QI,HI,SI,DI,SF,DF,BL"
+ (const_string "unknown"))
+
+(define_attr "length" "" (const_int 1))
+
+;; Describe a user's asm statement.
+(define_asm_attributes
+ [(set_attr "type" "multi")])
+
+
+;;
+;; ....................
+;;
+;; FUNCTIONAL UNITS
+;;
+;; ....................
+;;
+
+(define_function_unit "memory" 1 0 (eq_attr "type" "load,fload") 2 0)
+
+(define_function_unit "sreg" 1 1 (eq_attr "type" "rsr") 2 0)
+
+(define_function_unit "mul16" 1 0 (eq_attr "type" "mul16") 2 0)
+
+(define_function_unit "mul32" 1 0 (eq_attr "type" "mul32") 2 0)
+
+(define_function_unit "fpmadd" 1 0 (eq_attr "type" "fmadd") 4 0)
+
+(define_function_unit "fpconv" 1 0 (eq_attr "type" "fconv") 2 0)
+
+
+;;
+;; ....................
+;;
+;; ADDITION
+;;
+;; ....................
+;;
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=D,D,a,a,a")
+ (plus:SI (match_operand:SI 1 "register_operand" "%d,d,r,r,r")
+ (match_operand:SI 2 "add_operand" "d,O,r,J,N")))]
+ ""
+ "@
+ add.n\\t%0, %1, %2
+ addi.n\\t%0, %1, %d2
+ add\\t%0, %1, %2
+ addi\\t%0, %1, %d2
+ addmi\\t%0, %1, %x2"
+ [(set_attr "type" "arith,arith,arith,arith,arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "2,2,3,3,3")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 2))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "addx2\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 4))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "addx4\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 8))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "addx8\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (plus:SF (match_operand:SF 1 "register_operand" "%f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "add.s\\t%0, %1, %2"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; SUBTRACTION
+;;
+;; ....................
+;;
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "sub\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 2))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "subx2\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 4))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "subx4\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 8))
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "subx8\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "sub.s\\t%0, %1, %2"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; MULTIPLICATION
+;;
+;; ....................
+;;
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (mult:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_MUL32"
+ "mull\\t%0, %1, %2"
+ [(set_attr "type" "mul32")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "mulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=C,A")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "%r,r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "r,r"))))]
+ "TARGET_MUL16 || TARGET_MAC16"
+ "@
+ mul16s\\t%0, %1, %2
+ mul.aa.ll\\t%1, %2"
+ [(set_attr "type" "mul16,mac16")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+(define_insn "umulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=C,A")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "%r,r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "register_operand" "r,r"))))]
+ "TARGET_MUL16 || TARGET_MAC16"
+ "@
+ mul16u\\t%0, %1, %2
+ umul.aa.ll\\t%1, %2"
+ [(set_attr "type" "mul16,mac16")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+(define_insn "muladdhisi"
+ [(set (match_operand:SI 0 "register_operand" "=A")
+ (plus:SI (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "r")))
+ (match_operand:SI 3 "register_operand" "0")))]
+ "TARGET_MAC16"
+ "mula.aa.ll\\t%1, %2"
+ [(set_attr "type" "mac16")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "mulsubhisi"
+ [(set (match_operand:SI 0 "register_operand" "=A")
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 3 "register_operand" "r")))))]
+ "TARGET_MAC16"
+ "muls.aa.ll\\t%2, %3"
+ [(set_attr "type" "mac16")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "register_operand" "%f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "mul.s\\t%0, %1, %2"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn "muladdsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (plus:SF (mult:SF (match_operand:SF 1 "register_operand" "%f")
+ (match_operand:SF 2 "register_operand" "f"))
+ (match_operand:SF 3 "register_operand" "0")))]
+ "TARGET_HARD_FLOAT && !TARGET_NO_FUSED_MADD"
+ "madd.s\\t%0, %1, %2"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn "mulsubsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "register_operand" "0")
+ (mult:SF (match_operand:SF 2 "register_operand" "%f")
+ (match_operand:SF 3 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && !TARGET_NO_FUSED_MADD"
+ "msub.s\\t%0, %2, %3"
+ [(set_attr "type" "fmadd")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; DIVISION
+;;
+;; ....................
+;;
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_DIV32"
+ "quos\\t%0, %1, %2"
+ [(set_attr "type" "div32")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_DIV32"
+ "quou\\t%0, %1, %2"
+ [(set_attr "type" "div32")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT_DIV"
+ "div.s\\t%0, %1, %2"
+ [(set_attr "type" "fdiv")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "const_float_1_operand" "")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT_RECIP && flag_unsafe_math_optimizations"
+ "recip.s\\t%0, %2"
+ [(set_attr "type" "fdiv")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; REMAINDER
+;;
+;; ....................
+;;
+
+(define_insn "modsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (mod:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_DIV32"
+ "rems\\t%0, %1, %2"
+ [(set_attr "type" "div32")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "umodsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (umod:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_DIV32"
+ "remu\\t%0, %1, %2"
+ [(set_attr "type" "div32")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; SQUARE ROOT
+;;
+;; ....................
+;;
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT_SQRT"
+ "sqrt.s\\t%0, %1"
+ [(set_attr "type" "fsqrt")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "const_float_1_operand" "")
+ (sqrt:SF (match_operand:SF 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT_RSQRT && flag_unsafe_math_optimizations"
+ "rsqrt.s\\t%0, %2"
+ [(set_attr "type" "fsqrt")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; ABSOLUTE VALUE
+;;
+;; ....................
+;;
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (abs:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "abs\\t%0, %1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "abs.s\\t%0, %1"
+ [(set_attr "type" "farith")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; MIN AND MAX INSTRUCTIONS
+;;
+;; ....................
+;;
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (smin:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_MINMAX"
+ "min\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "uminsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (umin:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_MINMAX"
+ "minu\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (smax:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_MINMAX"
+ "max\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "umaxsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (umax:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ "TARGET_MINMAX"
+ "maxu\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; FIND FIRST BIT INSTRUCTION
+;;
+;; ....................
+;;
+
+(define_expand "ffssi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ffs:SI (match_operand:SI 1 "register_operand" "")))]
+ "TARGET_NSA"
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ emit_insn (gen_negsi2 (temp, operands[1]));
+ emit_insn (gen_andsi3 (temp, temp, operands[1]));
+ emit_insn (gen_nsau (temp, temp));
+ emit_insn (gen_negsi2 (temp, temp));
+ emit_insn (gen_addsi3 (operands[0], temp, GEN_INT (32)));
+ DONE;
+}")
+
+;; there is no RTL operator corresponding to NSAU
+(define_insn "nsau"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_NSAU))]
+ "TARGET_NSA"
+ "nsau\\t%0, %1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; NEGATION and ONE'S COMPLEMENT
+;;
+;; ....................
+;;
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (neg:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "neg\\t%0, %1"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_expand "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (not:SI (match_operand:SI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (temp, constm1_rtx));
+ emit_insn (gen_xorsi3 (operands[0], temp, operands[1]));
+ DONE;
+}")
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "neg.s\\t%0, %1"
+ [(set_attr "type" "farith")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; LOGICAL
+;;
+;; ....................
+;;
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (and:SI (match_operand:SI 1 "register_operand" "%r,r")
+ (match_operand:SI 2 "mask_operand" "P,r")))]
+ ""
+ "@
+ extui\\t%0, %1, 0, %K2
+ and\\t%0, %1, %2"
+ [(set_attr "type" "arith,arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (ior:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "or\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (xor:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "register_operand" "r")))]
+ ""
+ "xor\\t%0, %1, %2"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; ZERO EXTENSION
+;;
+;; ....................
+;;
+
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (zero_extend:SI (match_operand:HI 1 "nonimmed_operand" "r,U")))]
+ ""
+ "@
+ extui\\t%0, %1, 0, 16
+ l16ui\\t%0, %1"
+ [(set_attr "type" "arith,load")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (zero_extend:SI (match_operand:QI 1 "nonimmed_operand" "r,U")))]
+ ""
+ "@
+ extui\\t%0, %1, 0, 8
+ l8ui\\t%0, %1"
+ [(set_attr "type" "arith,load")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+
+;;
+;; ....................
+;;
+;; SIGN EXTENSION
+;;
+;; ....................
+;;
+
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ ""
+ "
+{
+ if (sext_operand (operands[1], HImode))
+ emit_insn (gen_extendhisi2_internal (operands[0], operands[1]));
+ else
+ xtensa_extend_reg (operands[0], operands[1]);
+ DONE;
+}")
+
+(define_insn "extendhisi2_internal"
+ [(set (match_operand:SI 0 "register_operand" "=B,a")
+ (sign_extend:SI (match_operand:HI 1 "sext_operand" "r,U")))]
+ ""
+ "@
+ sext\\t%0, %1, 15
+ l16si\\t%0, %1"
+ [(set_attr "type" "arith,load")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "
+{
+ if (TARGET_SEXT)
+ {
+ emit_insn (gen_extendqisi2_internal (operands[0], operands[1]));
+ DONE;
+ }
+ xtensa_extend_reg (operands[0], operands[1]);
+ DONE;
+}")
+
+(define_insn "extendqisi2_internal"
+ [(set (match_operand:SI 0 "register_operand" "=B")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_SEXT"
+ "sext\\t%0, %1, 7"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; FIELD EXTRACT
+;;
+;; ....................
+;;
+
+(define_expand "extv"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ "TARGET_SEXT"
+ "
+{
+ if (!sext_fldsz_operand (operands[2], SImode)) FAIL;
+ /* we could expand to a right shift followed by sext but that's
+ no better than the standard left and right shift sequence */
+ if (!lsbitnum_operand (operands[3], SImode)) FAIL;
+ emit_insn (gen_extv_internal (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+}")
+
+(define_insn "extv_internal"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "sext_fldsz_operand" "i")
+ (match_operand:SI 3 "lsbitnum_operand" "i")))]
+ "TARGET_SEXT"
+ "*
+{
+ int fldsz = INTVAL (operands[2]);
+ operands[2] = GEN_INT (fldsz - 1);
+ return \"sext\\t%0, %1, %2\";
+}"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+(define_expand "extzv"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ if (!extui_fldsz_operand (operands[2], SImode)) FAIL;
+ emit_insn (gen_extzv_internal (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+}")
+
+(define_insn "extzv_internal"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "extui_fldsz_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ ""
+ "*
+{
+ int shift;
+ if (BITS_BIG_ENDIAN)
+ shift = (32 - (INTVAL (operands[2]) + INTVAL (operands[3]))) & 0x1f;
+ else
+ shift = INTVAL (operands[3]) & 0x1f;
+ operands[3] = GEN_INT (shift);
+ return \"extui\\t%0, %1, %3, %2\";
+}"
+ [(set_attr "type" "arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; CONVERSIONS
+;;
+;; ....................
+;;
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "trunc.s\\t%0, %1, 0"
+ [(set_attr "type" "fconv")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn "fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a")
+ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "utrunc.s %0, %1, 0"
+ [(set_attr "type" "fconv")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:SI 1 "register_operand" "a")))]
+ "TARGET_HARD_FLOAT"
+ "float.s\\t%0, %1, 0"
+ [(set_attr "type" "fconv")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn "floatunssisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unsigned_float:SF (match_operand:SI 1 "register_operand" "a")))]
+ "TARGET_HARD_FLOAT"
+ "ufloat.s %0, %1, 0"
+ [(set_attr "type" "fconv")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; DATA MOVEMENT
+;;
+;; ....................
+;;
+
+;; 64-bit Integer moves
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "nonimmed_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (CONSTANT_P (operands[1]))
+ {
+ rtx src0, src1, dst0, dst1;
+ if ((dst0 = operand_subword (operands[0], 0, 1, DImode))
+ && (src0 = operand_subword (operands[1], 0, 1, DImode))
+ && (dst1 = operand_subword (operands[0], 1, 1, DImode))
+ && (src1 = operand_subword (operands[1], 1, 1, DImode)))
+ {
+ emit_insn (gen_movsi (dst0, src0));
+ emit_insn (gen_movsi (dst1, src1));
+ DONE;
+ }
+ else
+ /* any other constant will be loaded from memory */
+ operands[1] = force_const_mem (DImode, operands[1]);
+ }
+
+ if (!(reload_in_progress | reload_completed))
+ {
+ if (!register_operand (operands[0], DImode)
+ && !register_operand (operands[1], DImode))
+ operands[1] = force_reg (DImode, operands[1]);
+
+ if (a7_overlap_mentioned_p (operands[1]))
+ {
+ emit_insn (gen_movdi_internal (operands[0], operands[1]));
+ emit_insn (gen_set_frame_ptr ());
+ DONE;
+ }
+ }
+}")
+
+(define_insn "movdi_internal"
+ [(set (match_operand:DI 0 "nonimmed_operand" "=D,D,S,a,a,a,U")
+ (match_operand:DI 1 "non_const_move_operand" "d,S,d,r,T,U,r"))]
+ "register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"mov.n\\t%0, %1\;mov.n\\t%D0, %D1\";
+ case 2: return \"%v0s32i.n\\t%1, %0\;s32i.n\\t%D1, %N0\";
+ case 3: return \"mov\\t%0, %1\;mov\\t%D0, %D1\";
+ case 6: return \"%v0s32i\\t%1, %0\;s32i\\t%D1, %N0\";
+
+ case 1:
+ case 4:
+ case 5:
+ {
+ /* Check if the first half of the destination register is used
+ in the source address. If so, reverse the order of the loads
+ so that the source address doesn't get clobbered until it is
+ no longer needed. */
+
+ rtx dstreg = operands[0];
+ if (GET_CODE (dstreg) == SUBREG)
+ dstreg = SUBREG_REG (dstreg);
+ if (GET_CODE (dstreg) != REG)
+ abort();
+
+ if (reg_mentioned_p (dstreg, operands[1]))
+ {
+ switch (which_alternative)
+ {
+ case 1: return \"%v1l32i.n\\t%D0, %N1\;l32i.n\\t%0, %1\";
+ case 4: return \"%v1l32r\\t%D0, %N1\;l32r\\t%0, %1\";
+ case 5: return \"%v1l32i\\t%D0, %N1\;l32i\\t%0, %1\";
+ }
+ }
+ else
+ {
+ switch (which_alternative)
+ {
+ case 1: return \"%v1l32i.n\\t%0, %1\;l32i.n\\t%D0, %N1\";
+ case 4: return \"%v1l32r\\t%0, %1\;l32r\\t%D0, %N1\";
+ case 5: return \"%v1l32i\\t%0, %1\;l32i\\t%D0, %N1\";
+ }
+ }
+ }
+ }
+ abort ();
+ return \"\";
+}"
+ [(set_attr "type" "move,load,store,move,load,load,store")
+ (set_attr "mode" "DI")
+ (set_attr "length" "4,4,4,6,6,6,6")])
+
+
+;; 32-bit Integer moves
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "nonimmed_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (xtensa_emit_move_sequence (operands, SImode))
+ DONE;
+}")
+
+(define_insn "movsi_internal"
+ [(set (match_operand:SI 0 "nonimmed_operand" "=D,D,D,D,R,R,a,q,a,a,a,U,*a,*A")
+ (match_operand:SI 1 "move_operand" "M,D,d,R,D,d,r,r,I,T,U,r,*A,*r"))]
+ "non_acc_reg_operand (operands[0], SImode)
+ || non_acc_reg_operand (operands[1], SImode)"
+ "@
+ movi.n\\t%0, %x1
+ mov.n\\t%0, %1
+ mov.n\\t%0, %1
+ %v1l32i.n\\t%0, %1
+ %v0s32i.n\\t%1, %0
+ %v0s32i.n\\t%1, %0
+ mov\\t%0, %1
+ movsp\\t%0, %1
+ movi\\t%0, %x1
+ %v1l32r\\t%0, %1
+ %v1l32i\\t%0, %1
+ %v0s32i\\t%1, %0
+ rsr\\t%0, 16 # ACCLO
+ wsr\\t%1, 16 # ACCLO"
+ [(set_attr "type" "move,move,move,load,store,store,move,move,move,load,load,store,rsr,wsr")
+ (set_attr "mode" "SI")
+ (set_attr "length" "2,2,2,2,2,2,3,3,3,3,3,3,3,3")])
+
+;; 16-bit Integer moves
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "nonimmed_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (xtensa_emit_move_sequence (operands, HImode))
+ DONE;
+}")
+
+(define_insn "movhi_internal"
+ [(set (match_operand:HI 0 "nonimmed_operand" "=D,D,a,a,a,U,*a,*A")
+ (match_operand:HI 1 "move_operand" "M,d,r,I,U,r,*A,*r"))]
+ "non_acc_reg_operand (operands[0], HImode)
+ || non_acc_reg_operand (operands[1], HImode)"
+ "@
+ movi.n\\t%0, %x1
+ mov.n\\t%0, %1
+ mov\\t%0, %1
+ movi\\t%0, %x1
+ %v1l16ui\\t%0, %1
+ %v0s16i\\t%1, %0
+ rsr\\t%0, 16 # ACCLO
+ wsr\\t%1, 16 # ACCLO"
+ [(set_attr "type" "move,move,move,move,load,store,rsr,wsr")
+ (set_attr "mode" "HI")
+ (set_attr "length" "2,2,3,3,3,3,3,3")])
+
+;; 8-bit Integer moves
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "nonimmed_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (xtensa_emit_move_sequence (operands, QImode))
+ DONE;
+}")
+
+(define_insn "movqi_internal"
+ [(set (match_operand:QI 0 "nonimmed_operand" "=D,D,a,a,a,U,*a,*A")
+ (match_operand:QI 1 "move_operand" "M,d,r,I,U,r,*A,*r"))]
+ "non_acc_reg_operand (operands[0], QImode)
+ || non_acc_reg_operand (operands[1], QImode)"
+ "@
+ movi.n\\t%0, %x1
+ mov.n\\t%0, %1
+ mov\\t%0, %1
+ movi\\t%0, %x1
+ %v1l8ui\\t%0, %1
+ %v0s8i\\t%1, %0
+ rsr\\t%0, 16 # ACCLO
+ wsr\\t%1, 16 # ACCLO"
+ [(set_attr "type" "move,move,move,move,load,store,rsr,wsr")
+ (set_attr "mode" "QI")
+ (set_attr "length" "2,2,3,3,3,3,3,3")])
+
+;; 32-bit floating point moves
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmed_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ operands[1] = force_const_mem (SFmode, operands[1]);
+
+ if (!(reload_in_progress | reload_completed))
+ {
+ if (((!register_operand (operands[0], SFmode)
+ && !register_operand (operands[1], SFmode))
+ || (FP_REG_P (xt_true_regnum (operands[0]))
+ && constantpool_mem_p (operands[1]))))
+ operands[1] = force_reg (SFmode, operands[1]);
+
+ if (a7_overlap_mentioned_p (operands[1]))
+ {
+ emit_insn (gen_movsf_internal (operands[0], operands[1]));
+ emit_insn (gen_set_frame_ptr ());
+ DONE;
+ }
+ }
+}")
+
+(define_insn "movsf_internal"
+ [(set (match_operand:SF 0 "nonimmed_operand"
+ "=f,f,U,D,D,R,a,f,a,a,a,U")
+ (match_operand:SF 1 "non_const_move_operand"
+ "f,U,f,d,R,d,r,r,f,T,U,r"))]
+ "((register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))
+ && (!FP_REG_P (xt_true_regnum (operands[0]))
+ || !constantpool_mem_p (operands[1])))"
+ "@
+ mov.s\\t%0, %1
+ %v1lsi\\t%0, %1
+ %v0ssi\\t%1, %0
+ mov.n\\t%0, %1
+ %v1l32i.n\\t%0, %1
+ %v0s32i.n\\t%1, %0
+ mov\\t%0, %1
+ wfr\\t%0, %1
+ rfr\\t%0, %1
+ %v1l32r\\t%0, %1
+ %v1l32i\\t%0, %1
+ %v0s32i\\t%1, %0"
+ [(set_attr "type" "farith,fload,fstore,move,load,store,move,farith,farith,load,load,store")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3,3,3,2,2,2,3,3,3,3,3,3")])
+
+(define_insn ""
+ [(parallel
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mem:SF (plus:SI (match_operand:SI 1 "register_operand" "+a")
+ (match_operand:SI 2 "fpmem_offset_operand" "i"))))
+ (set (match_dup 1)
+ (plus:SI (match_dup 1) (match_dup 2)))])]
+ "TARGET_HARD_FLOAT"
+ "*
+{
+ if (TARGET_SERIALIZE_VOLATILE && volatile_refs_p (PATTERN (insn)))
+ output_asm_insn (\"memw\", operands);
+ return \"lsiu\\t%0, %1, %2\";
+}"
+ [(set_attr "type" "fload")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(parallel
+ [(set (mem:SF (plus:SI (match_operand:SI 0 "register_operand" "+a")
+ (match_operand:SI 1 "fpmem_offset_operand" "i")))
+ (match_operand:SF 2 "register_operand" "f"))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0) (match_dup 1)))])]
+ "TARGET_HARD_FLOAT"
+ "*
+{
+ if (TARGET_SERIALIZE_VOLATILE && volatile_refs_p (PATTERN (insn)))
+ output_asm_insn (\"memw\", operands);
+ return \"ssiu\\t%2, %0, %1\";
+}"
+ [(set_attr "type" "fstore")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3")])
+
+;; 64-bit floating point moves
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmed_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ operands[1] = force_const_mem (DFmode, operands[1]);
+
+ if (!(reload_in_progress | reload_completed))
+ {
+ if (!register_operand (operands[0], DFmode)
+ && !register_operand (operands[1], DFmode))
+ operands[1] = force_reg (DFmode, operands[1]);
+
+ if (a7_overlap_mentioned_p (operands[1]))
+ {
+ emit_insn (gen_movdf_internal (operands[0], operands[1]));
+ emit_insn (gen_set_frame_ptr ());
+ DONE;
+ }
+ }
+}")
+
+;; Move a 64-bit value as two 32-bit pieces (mode DF, length 4 or 6).
+;; For the load alternatives (cases 1, 4, 5) the two loads may be
+;; emitted in reverse order so that a source address register that
+;; overlaps the low destination half is not clobbered before the
+;; second load uses it -- see the comment in the case body below.
+(define_insn "movdf_internal"
+  [(set (match_operand:DF 0 "nonimmed_operand" "=D,D,S,a,a,a,U")
+        (match_operand:DF 1 "non_const_move_operand" "d,S,d,r,T,U,r"))]
+  "register_operand (operands[0], DFmode)
+   || register_operand (operands[1], DFmode)"
+  "*
+{
+  switch (which_alternative)
+    {
+    case 0: return \"mov.n\\t%0, %1\;mov.n\\t%D0, %D1\";
+    case 2: return \"%v0s32i.n\\t%1, %0\;s32i.n\\t%D1, %N0\";
+    case 3: return \"mov\\t%0, %1\;mov\\t%D0, %D1\";
+    case 6: return \"%v0s32i\\t%1, %0\;s32i\\t%D1, %N0\";
+
+    case 1:
+    case 4:
+    case 5:
+      {
+	/* Check if the first half of the destination register is used
+	   in the source address.  If so, reverse the order of the loads
+	   so that the source address doesn't get clobbered until it is
+	   no longer needed.  */
+
+	rtx dstreg = operands[0];
+	if (GET_CODE (dstreg) == SUBREG)
+	  dstreg = SUBREG_REG (dstreg);
+	if (GET_CODE (dstreg) != REG)
+	  abort ();
+
+	if (reg_mentioned_p (dstreg, operands[1]))
+	  {
+	    switch (which_alternative)
+	      {
+	      case 1: return \"%v1l32i.n\\t%D0, %N1\;l32i.n\\t%0, %1\";
+	      case 4: return \"%v1l32r\\t%D0, %N1\;l32r\\t%0, %1\";
+	      case 5: return \"%v1l32i\\t%D0, %N1\;l32i\\t%0, %1\";
+	      }
+	  }
+	else
+	  {
+	    switch (which_alternative)
+	      {
+	      case 1: return \"%v1l32i.n\\t%0, %1\;l32i.n\\t%D0, %N1\";
+	      case 4: return \"%v1l32r\\t%0, %1\;l32r\\t%D0, %N1\";
+	      case 5: return \"%v1l32i\\t%0, %1\;l32i\\t%D0, %N1\";
+	      }
+	  }
+      }
+    }
+  abort ();
+  return \"\";
+}"
+  [(set_attr "type" "move,load,store,move,load,load,store")
+   (set_attr "mode" "DF")
+   (set_attr "length" "4,4,4,6,6,6,6")])
+
+;; Block moves
+
+(define_expand "movstrsi"
+ [(parallel [(set (match_operand:BLK 0 "" "")
+ (match_operand:BLK 1 "" ""))
+ (use (match_operand:SI 2 "arith_operand" ""))
+ (use (match_operand:SI 3 "const_int_operand" ""))])]
+ ""
+ "
+{
+ if (!xtensa_expand_block_move (operands)) FAIL;
+ DONE;
+}")
+
+;; Block-move insn: the actual instruction sequence is produced at
+;; output time by xtensa_emit_block_move, using the two scratch
+;; registers (operands 4 and 5) as temporaries.
+;; NOTE(review): "length" 300 looks like a conservative upper bound on
+;; the size of the expanded sequence -- confirm against
+;; xtensa_emit_block_move.
+(define_insn "movstrsi_internal"
+  [(parallel [(set (match_operand:BLK 0 "memory_operand" "=U")
+		   (match_operand:BLK 1 "memory_operand" "U"))
+	      (use (match_operand:SI 2 "arith_operand" ""))
+	      (use (match_operand:SI 3 "const_int_operand" ""))
+	      (clobber (match_scratch:SI 4 "=&r"))
+	      (clobber (match_scratch:SI 5 "=&r"))])]
+  ""
+  "*
+{
+  rtx tmpregs[2];
+  tmpregs[0] = operands[4];
+  tmpregs[1] = operands[5];
+  xtensa_emit_block_move (operands, tmpregs, 1);
+  return \"\";
+}"
+  [(set_attr "type" "multi")
+   (set_attr "mode" "none")
+   (set_attr "length" "300")])
+
+
+;;
+;; ....................
+;;
+;; SHIFTS
+;;
+;; ....................
+;;
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "J,r")))]
+ ""
+ "@
+ slli\\t%0, %1, %R2
+ ssl\\t%2\;sll\\t%0, %1"
+ [(set_attr "type" "arith,arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,6")])
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "J,r")))]
+ ""
+ "@
+ srai\\t%0, %1, %R2
+ ssr\\t%2\;sra\\t%0, %1"
+ [(set_attr "type" "arith,arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,6")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "J,r")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ {
+ if ((INTVAL (operands[2]) & 0x1f) < 16)
+ return \"srli\\t%0, %1, %R2\";
+ else
+ return \"extui\\t%0, %1, %R2, %L2\";
+ }
+ return \"ssr\\t%2\;srl\\t%0, %1\";
+}"
+ [(set_attr "type" "arith,arith")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,6")])
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (rotate:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "J,r")))]
+ ""
+ "@
+ ssai\\t%L2\;src\\t%0, %1, %1
+ ssl\\t%2\;src\\t%0, %1, %1"
+ [(set_attr "type" "multi,multi")
+ (set_attr "mode" "SI")
+ (set_attr "length" "6,6")])
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "arith_operand" "J,r")))]
+ ""
+ "@
+ ssai\\t%R2\;src\\t%0, %1, %1
+ ssr\\t%2\;src\\t%0, %1, %1"
+ [(set_attr "type" "multi,multi")
+ (set_attr "mode" "SI")
+ (set_attr "length" "6,6")])
+
+;;
+;; ....................
+;;
+;; COMPARISONS
+;;
+;; ....................
+;;
+
+;; Like the md files for MIPS and SPARC, we handle comparisons by stashing
+;; away the operands and then using that information in the subsequent
+;; conditional branch.
+
+(define_expand "cmpsi"
+ [(set (cc0)
+ (compare:CC (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "nonmemory_operand" "")))]
+ ""
+ "
+{
+ branch_cmp[0] = operands[0];
+ branch_cmp[1] = operands[1];
+ branch_type = CMP_SI;
+ DONE;
+}")
+
+(define_expand "tstsi"
+ [(set (cc0)
+ (match_operand:SI 0 "register_operand" ""))]
+ ""
+ "
+{
+ branch_cmp[0] = operands[0];
+ branch_cmp[1] = const0_rtx;
+ branch_type = CMP_SI;
+ DONE;
+}")
+
+(define_expand "cmpsf"
+ [(set (cc0)
+ (compare:CC (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ branch_cmp[0] = operands[0];
+ branch_cmp[1] = operands[1];
+ branch_type = CMP_SF;
+ DONE;
+}")
+
+
+;;
+;; ....................
+;;
+;; CONDITIONAL BRANCHES
+;;
+;; ....................
+;;
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, EQ);
+ DONE;
+}")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, NE);
+ DONE;
+}")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, GT);
+ DONE;
+}")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, GE);
+ DONE;
+}")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, LT);
+ DONE;
+}")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, LE);
+ DONE;
+}")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, GTU);
+ DONE;
+}")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, GEU);
+ DONE;
+}")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, LTU);
+ DONE;
+}")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (cc0) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ xtensa_expand_conditional_branch (operands, LEU);
+ DONE;
+}")
+
+;; Branch patterns for standard integer comparisons
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 3 "branch_operator"
+ [(match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "branch_operand" "K,r")])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (which_alternative == 1)
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"beq\\t%0, %1, %2\";
+ case NE: return \"bne\\t%0, %1, %2\";
+ case LT: return \"blt\\t%0, %1, %2\";
+ case GE: return \"bge\\t%0, %1, %2\";
+ default: break;
+ }
+ }
+ else if (INTVAL (operands[1]) == 0)
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return (TARGET_DENSITY
+ ? \"beqz.n\\t%0, %2\"
+ : \"beqz\\t%0, %2\");
+ case NE: return (TARGET_DENSITY
+ ? \"bnez.n\\t%0, %2\"
+ : \"bnez\\t%0, %2\");
+ case LT: return \"bltz\\t%0, %2\";
+ case GE: return \"bgez\\t%0, %2\";
+ default: break;
+ }
+ }
+ else
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"beqi\\t%0, %d1, %2\";
+ case NE: return \"bnei\\t%0, %d1, %2\";
+ case LT: return \"blti\\t%0, %d1, %2\";
+ case GE: return \"bgei\\t%0, %d1, %2\";
+ default: break;
+ }
+ }
+ fatal_insn (\"unexpected branch operator\", operands[3]);
+ return \"\";
+}"
+ [(set_attr "type" "jump,jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3,3")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 3 "branch_operator"
+ [(match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "branch_operand" "K,r")])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ if (which_alternative == 1)
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"bne\\t%0, %1, %2\";
+ case NE: return \"beq\\t%0, %1, %2\";
+ case LT: return \"bge\\t%0, %1, %2\";
+ case GE: return \"blt\\t%0, %1, %2\";
+ default: break;
+ }
+ }
+ else if (INTVAL (operands[1]) == 0)
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return (TARGET_DENSITY
+ ? \"bnez.n\\t%0, %2\"
+ : \"bnez\\t%0, %2\");
+ case NE: return (TARGET_DENSITY
+ ? \"beqz.n\\t%0, %2\"
+ : \"beqz\\t%0, %2\");
+ case LT: return \"bgez\\t%0, %2\";
+ case GE: return \"bltz\\t%0, %2\";
+ default: break;
+ }
+ }
+ else
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"bnei\\t%0, %d1, %2\";
+ case NE: return \"beqi\\t%0, %d1, %2\";
+ case LT: return \"bgei\\t%0, %d1, %2\";
+ case GE: return \"blti\\t%0, %d1, %2\";
+ default: break;
+ }
+ }
+ fatal_insn (\"unexpected branch operator\", operands[3]);
+ return \"\";
+}"
+ [(set_attr "type" "jump,jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3,3")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 3 "ubranch_operator"
+ [(match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "ubranch_operand" "L,r")])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ if (which_alternative == 1)
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case LTU: return \"bltu\\t%0, %1, %2\";
+ case GEU: return \"bgeu\\t%0, %1, %2\";
+ default: break;
+ }
+ }
+ else
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case LTU: return \"bltui\\t%0, %d1, %2\";
+ case GEU: return \"bgeui\\t%0, %d1, %2\";
+ default: break;
+ }
+ }
+ fatal_insn (\"unexpected branch operator\", operands[3]);
+ return \"\";
+}"
+ [(set_attr "type" "jump,jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3,3")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 3 "ubranch_operator"
+ [(match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "ubranch_operand" "L,r")])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ if (which_alternative == 1)
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case LTU: return \"bgeu\\t%0, %1, %2\";
+ case GEU: return \"bltu\\t%0, %1, %2\";
+ default: break;
+ }
+ }
+ else
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case LTU: return \"bgeui\\t%0, %d1, %2\";
+ case GEU: return \"bltui\\t%0, %d1, %2\";
+ default: break;
+ }
+ }
+ fatal_insn (\"unexpected branch operator\", operands[3]);
+ return \"\";
+}"
+ [(set_attr "type" "jump,jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3,3")])
+
+;; Branch patterns for bit testing
+
+;; Branch if a single bit of operand 0 is clear (EQ) or set (NE).
+;; Alternative 0 takes an immediate bit number (bbci/bbsi);
+;; alternative 1 takes the bit number in a register (bbc/bbs).
+(define_insn ""
+  [(set (pc)
+	(if_then_else (match_operator 3 "boolean_operator"
+			[(zero_extract:SI
+			   (match_operand:SI 0 "register_operand" "r,r")
+			   (const_int 1)
+			   (match_operand:SI 1 "arith_operand" "J,r"))
+			 (const_int 0)])
+		      (label_ref (match_operand 2 "" ""))
+		      (pc)))]
+  ""
+  "*
+{
+  if (which_alternative == 0)
+    {
+      /* Mask the bit number to 0..31.  Use GNU-style spacing for the
+	 INTVAL/GEN_INT calls, matching the sibling pattern below.  */
+      unsigned bitnum = INTVAL (operands[1]) & 0x1f;
+      operands[1] = GEN_INT (bitnum);
+      switch (GET_CODE (operands[3]))
+	{
+	case EQ: return \"bbci\\t%0, %d1, %2\";
+	case NE: return \"bbsi\\t%0, %d1, %2\";
+	default: break;
+	}
+    }
+  else
+    {
+      switch (GET_CODE (operands[3]))
+	{
+	case EQ: return \"bbc\\t%0, %1, %2\";
+	case NE: return \"bbs\\t%0, %1, %2\";
+	default: break;
+	}
+    }
+  fatal_insn (\"unexpected branch operator\", operands[3]);
+  return \"\";
+}"
+  [(set_attr "type" "jump")
+   (set_attr "mode" "none")
+   (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 3 "boolean_operator"
+ [(zero_extract:SI
+ (match_operand:SI 0 "register_operand" "r,r")
+ (const_int 1)
+ (match_operand:SI 1 "arith_operand" "J,r"))
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ {
+ unsigned bitnum = INTVAL (operands[1]) & 0x1f;
+ operands[1] = GEN_INT (bitnum);
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"bbsi\\t%0, %d1, %2\";
+ case NE: return \"bbci\\t%0, %d1, %2\";
+ default: break;
+ }
+ }
+ else
+ {
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"bbs\\t%0, %1, %2\";
+ case NE: return \"bbc\\t%0, %1, %2\";
+ default: break;
+ }
+ }
+ fatal_insn (\"unexpected branch operator\", operands[3]);
+ return \"\";
+}"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 3 "boolean_operator"
+ [(and:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"bnone\\t%0, %1, %2\";
+ case NE: return \"bany\\t%0, %1, %2\";
+ default: break;
+ }
+ fatal_insn (\"unexpected branch operator\", operands[3]);
+ return \"\";
+}"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 3 "boolean_operator"
+ [(and:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ switch (GET_CODE (operands[3]))
+ {
+ case EQ: return \"bany\\t%0, %1, %2\";
+ case NE: return \"bnone\\t%0, %1, %2\";
+ default: break;
+ }
+ fatal_insn (\"unexpected branch operator\", operands[3]);
+ return \"\";
+}"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+
+;; Define the loop insns used by bct optimization to represent the
+;; start and end of a zero-overhead loop (in loop.c).  The start template
+;; generates the loop insn; the end template doesn't generate any
+;; instructions, since the loop end is handled in hardware.
+
+;; Operand 0 holds the trip count: loopnez branches to operand 1 when
+;; the count is zero, and otherwise reg 19 is primed with count - 1.
+;; NOTE(review): (reg:SI 19) is presumably the hardware loop-count
+;; register (LCOUNT) -- confirm against the register numbering in
+;; xtensa.h.
+(define_insn "zero_cost_loop_start"
+  [(parallel [(set (pc) (if_then_else (eq (match_operand:SI 0 "register_operand" "a")
+					  (const_int 0))
+				      (label_ref (match_operand 1 "" ""))
+				      (pc)))
+	      (set (reg:SI 19)
+		   (plus:SI (match_dup 0)
+			    (const_int -1)))])]
+  ""
+  "loopnez %0, %l1"
+  [(set_attr "type" "jump")
+   (set_attr "mode" "none")
+   (set_attr "length" "3")])
+
+(define_insn "zero_cost_loop_end"
+ [(parallel [(set (pc) (if_then_else (ne (reg:SI 19)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (reg:SI 19)
+ (plus:SI (reg:SI 19)
+ (const_int -1)))])]
+ ""
+ "*
+ xtensa_emit_loop_end (insn, operands);
+ return \"\";
+ "
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "0")])
+
+
+;;
+;; ....................
+;;
+;; SETTING A REGISTER FROM A COMPARISON
+;;
+;; ....................
+;;
+
+(define_expand "seq"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ operands[1] = gen_rtx (EQ, SImode, branch_cmp[0], branch_cmp[1]);
+ if (!xtensa_expand_scc (operands)) FAIL;
+ DONE;
+}")
+
+(define_expand "sne"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ operands[1] = gen_rtx (NE, SImode, branch_cmp[0], branch_cmp[1]);
+ if (!xtensa_expand_scc (operands)) FAIL;
+ DONE;
+}")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ operands[1] = gen_rtx (GT, SImode, branch_cmp[0], branch_cmp[1]);
+ if (!xtensa_expand_scc (operands)) FAIL;
+ DONE;
+}")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ operands[1] = gen_rtx (GE, SImode, branch_cmp[0], branch_cmp[1]);
+ if (!xtensa_expand_scc (operands)) FAIL;
+ DONE;
+}")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ operands[1] = gen_rtx (LT, SImode, branch_cmp[0], branch_cmp[1]);
+ if (!xtensa_expand_scc (operands)) FAIL;
+ DONE;
+}")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_dup 1))]
+ ""
+ "
+{
+ operands[1] = gen_rtx (LE, SImode, branch_cmp[0], branch_cmp[1]);
+ if (!xtensa_expand_scc (operands)) FAIL;
+ DONE;
+}")
+
+
+;;
+;; ....................
+;;
+;; CONDITIONAL MOVES
+;;
+;; ....................
+;;
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" "")))]
+ ""
+ "
+{
+ if (!xtensa_expand_conditional_move (operands, 0)) FAIL;
+ DONE;
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")))]
+ ""
+ "
+{
+ if (!xtensa_expand_conditional_move (operands, 1)) FAIL;
+ DONE;
+}")
+
+(define_insn "movsicc_internal0"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (if_then_else:SI (match_operator 4 "branch_operator"
+ [(match_operand:SI 1 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 2 "register_operand" "r,0")
+ (match_operand:SI 3 "register_operand" "0,r")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ {
+ switch (GET_CODE (operands[4]))
+ {
+ case EQ: return \"moveqz\\t%0, %2, %1\";
+ case NE: return \"movnez\\t%0, %2, %1\";
+ case LT: return \"movltz\\t%0, %2, %1\";
+ case GE: return \"movgez\\t%0, %2, %1\";
+ default: break;
+ }
+ }
+ else
+ {
+ switch (GET_CODE (operands[4]))
+ {
+ case EQ: return \"movnez\\t%0, %3, %1\";
+ case NE: return \"moveqz\\t%0, %3, %1\";
+ case LT: return \"movgez\\t%0, %3, %1\";
+ case GE: return \"movltz\\t%0, %3, %1\";
+ default: break;
+ }
+ }
+ fatal_insn (\"unexpected cmov operator\", operands[4]);
+ return \"\";
+}"
+ [(set_attr "type" "move,move")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+(define_insn "movsicc_internal1"
+ [(set (match_operand:SI 0 "register_operand" "=a,a")
+ (if_then_else:SI (match_operator 4 "boolean_operator"
+ [(match_operand:CC 1 "register_operand" "b,b")
+ (const_int 0)])
+ (match_operand:SI 2 "register_operand" "r,0")
+ (match_operand:SI 3 "register_operand" "0,r")))]
+ "TARGET_BOOLEANS"
+ "*
+{
+ int isEq = (GET_CODE (operands[4]) == EQ);
+ switch (which_alternative)
+ {
+ case 0:
+ if (isEq) return \"movf\\t%0, %2, %1\";
+ return \"movt\\t%0, %2, %1\";
+ case 1:
+ if (isEq) return \"movt\\t%0, %3, %1\";
+ return \"movf\\t%0, %3, %1\";
+ }
+ abort ();
+ return \"\";
+}"
+ [(set_attr "type" "move,move")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3,3")])
+
+;; SFmode conditional move on an integer comparison against zero.
+;; Alternatives 0-1 use the integer conditional-move instructions on
+;; address registers; alternatives 2-3 use the ".s" forms on FP
+;; registers.  Alternatives 1 and 3 tie the "else" operand to the
+;; destination, so the sense of the condition is inverted.
+;; NOTE(review): the ".s" templates below separate the mnemonic from
+;; the operands with a space, while every other template in this file
+;; uses \\t -- harmless to the assembler, but worth normalizing.
+(define_insn "movsfcc_internal0"
+  [(set (match_operand:SF 0 "register_operand" "=a,a,f,f")
+	(if_then_else:SF (match_operator 4 "branch_operator"
+			   [(match_operand:SI 1 "register_operand" "r,r,r,r")
+			    (const_int 0)])
+			 (match_operand:SF 2 "register_operand" "r,0,f,0")
+			 (match_operand:SF 3 "register_operand" "0,r,0,f")))]
+  ""
+  "*
+{
+  if (which_alternative == 0)
+    {
+      switch (GET_CODE (operands[4]))
+	{
+	case EQ: return \"moveqz\\t%0, %2, %1\";
+	case NE: return \"movnez\\t%0, %2, %1\";
+	case LT: return \"movltz\\t%0, %2, %1\";
+	case GE: return \"movgez\\t%0, %2, %1\";
+	default: break;
+	}
+    }
+  else if (which_alternative == 1)
+    {
+      switch (GET_CODE (operands[4]))
+	{
+	case EQ: return \"movnez\\t%0, %3, %1\";
+	case NE: return \"moveqz\\t%0, %3, %1\";
+	case LT: return \"movgez\\t%0, %3, %1\";
+	case GE: return \"movltz\\t%0, %3, %1\";
+	default: break;
+	}
+    }
+  else if (which_alternative == 2)
+    {
+      switch (GET_CODE (operands[4]))
+	{
+	case EQ: return \"moveqz.s %0, %2, %1\";
+	case NE: return \"movnez.s %0, %2, %1\";
+	case LT: return \"movltz.s %0, %2, %1\";
+	case GE: return \"movgez.s %0, %2, %1\";
+	default: break;
+	}
+    }
+  else if (which_alternative == 3)
+    {
+      switch (GET_CODE (operands[4]))
+	{
+	case EQ: return \"movnez.s %0, %3, %1\";
+	case NE: return \"moveqz.s %0, %3, %1\";
+	case LT: return \"movgez.s %0, %3, %1\";
+	case GE: return \"movltz.s %0, %3, %1\";
+	default: break;
+	}
+    }
+  fatal_insn (\"unexpected cmov operator\", operands[4]);
+  return \"\";
+}"
+  [(set_attr "type" "move,move,move,move")
+   (set_attr "mode" "SF")
+   (set_attr "length" "3,3,3,3")])
+
+(define_insn "movsfcc_internal1"
+ [(set (match_operand:SF 0 "register_operand" "=a,a,f,f")
+ (if_then_else:SF (match_operator 4 "boolean_operator"
+ [(match_operand:CC 1 "register_operand" "b,b,b,b")
+ (const_int 0)])
+ (match_operand:SF 2 "register_operand" "r,0,f,0")
+ (match_operand:SF 3 "register_operand" "0,r,0,f")))]
+ "TARGET_BOOLEANS"
+ "*
+{
+ int isEq = (GET_CODE (operands[4]) == EQ);
+ switch (which_alternative)
+ {
+ case 0:
+ if (isEq) return \"movf\\t%0, %2, %1\";
+ return \"movt\\t%0, %2, %1\";
+ case 1:
+ if (isEq) return \"movt\\t%0, %3, %1\";
+ return \"movf\\t%0, %3, %1\";
+ case 2:
+ if (isEq) return \"movf.s\\t%0, %2, %1\";
+ return \"movt.s\\t%0, %2, %1\";
+ case 3:
+ if (isEq) return \"movt.s\\t%0, %3, %1\";
+ return \"movf.s\\t%0, %3, %1\";
+ }
+ abort ();
+ return \"\";
+}"
+ [(set_attr "type" "move,move,move,move")
+ (set_attr "mode" "SF")
+ (set_attr "length" "3,3,3,3")])
+
+
+;;
+;; ....................
+;;
+;; FLOATING POINT COMPARISONS
+;;
+;; ....................
+;;
+
+(define_insn "seq_sf"
+ [(set (match_operand:CC 0 "register_operand" "=b")
+ (eq:CC (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "oeq.s\\t%0, %1, %2"
+ [(set_attr "type" "farith")
+ (set_attr "mode" "BL")
+ (set_attr "length" "3")])
+
+(define_insn "slt_sf"
+ [(set (match_operand:CC 0 "register_operand" "=b")
+ (lt:CC (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "olt.s\\t%0, %1, %2"
+ [(set_attr "type" "farith")
+ (set_attr "mode" "BL")
+ (set_attr "length" "3")])
+
+(define_insn "sle_sf"
+ [(set (match_operand:CC 0 "register_operand" "=b")
+ (le:CC (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "ole.s\\t%0, %1, %2"
+ [(set_attr "type" "farith")
+ (set_attr "mode" "BL")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; UNCONDITIONAL BRANCHES
+;;
+;; ....................
+;;
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "j\\t%l0"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+(define_expand "indirect_jump"
+  [(set (pc) (match_operand 0 "register_operand" ""))]
+  ""
+  "
+{
+  rtx dest = operands[0];
+
+  /* Force the target address into a Pmode register so that
+     indirect_jump_internal's register constraint can be satisfied.
+     Bug fix: the copy must be emitted through DEST itself -- the
+     previous code copied into operands[0] but then emitted the jump
+     with the stale, uncopied DEST, so the copy was never used.  */
+  if (GET_CODE (dest) != REG || GET_MODE (dest) != Pmode)
+    dest = copy_to_mode_reg (Pmode, dest);
+
+  emit_jump_insn (gen_indirect_jump_internal (dest));
+  DONE;
+}")
+
+(define_insn "indirect_jump_internal"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "jx\\t%0"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+
+(define_expand "tablejump"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "
+{
+ rtx target = operands[0];
+ if (flag_pic)
+ {
+ /* For PIC, the table entry is relative to the start of the table. */
+ rtx label = gen_reg_rtx (SImode);
+ target = gen_reg_rtx (SImode);
+ emit_move_insn (label, gen_rtx_LABEL_REF (SImode, operands[1]));
+ emit_insn (gen_addsi3 (target, operands[0], label));
+ }
+ emit_jump_insn (gen_tablejump_internal (target, operands[1]));
+ DONE;
+}")
+
+(define_insn "tablejump_internal"
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "jx\\t%0"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+
+;;
+;; ....................
+;;
+;; FUNCTION CALLS
+;;
+;; ....................
+;;
+
+(define_expand "sym_PLT"
+ [(const (unspec [(match_operand:SI 0 "" "")] UNSPEC_PLT))]
+ ""
+ "")
+
+(define_expand "call"
+ [(call (match_operand 0 "memory_operand" "")
+ (match_operand 1 "" ""))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[0], 0);
+ if (flag_pic && GET_CODE (addr) == SYMBOL_REF && !SYMBOL_REF_FLAG (addr))
+ addr = gen_sym_PLT (addr);
+ if (!call_insn_operand (addr, VOIDmode))
+ XEXP (operands[0], 0) = copy_to_mode_reg (Pmode, addr);
+}")
+
+(define_insn "call_internal"
+ [(call (mem (match_operand:SI 0 "call_insn_operand" "n,i,r"))
+ (match_operand 1 "" "i,i,i"))]
+ ""
+ "*
+ return xtensa_emit_call (0, operands);
+ "
+ [(set_attr "type" "call")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+(define_expand "call_value"
+ [(set (match_operand 0 "register_operand" "")
+ (call (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+ "
+{
+ rtx addr = XEXP (operands[1], 0);
+ if (flag_pic && GET_CODE (addr) == SYMBOL_REF && !SYMBOL_REF_FLAG (addr))
+ addr = gen_sym_PLT (addr);
+ if (!call_insn_operand (addr, VOIDmode))
+ XEXP (operands[1], 0) = copy_to_mode_reg (Pmode, addr);
+}")
+
+;; cannot combine constraints for operand 0 into "afvb"
+;; reload.c:find_reloads seems to assume that grouped constraints somehow
+;; specify related register classes, and when they don't the constraints
+;; fail to match. By not grouping the constraints, we get the correct
+;; behavior.
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "register_operand" "=af,af,af,v,v,v,b,b,b")
+ (call (mem (match_operand:SI 1 "call_insn_operand"
+ "n,i,r,n,i,r,n,i,r"))
+ (match_operand 2 "" "i,i,i,i,i,i,i,i,i")))]
+ ""
+ "*
+ return xtensa_emit_call (1, operands);
+ "
+ [(set_attr "type" "call")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+(define_insn "return"
+ [(return)
+ (use (reg:SI A0_REG))]
+ "reload_completed"
+ "*
+{
+ return (TARGET_DENSITY ? \"retw.n\" : \"retw\");
+}"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "2")])
+
+
+;;
+;; ....................
+;;
+;; MISC.
+;;
+;; ....................
+;;
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "*
+{
+ return (TARGET_DENSITY ? \"nop.n\" : \"nop\");
+}"
+ [(set_attr "type" "nop")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+(define_expand "nonlocal_goto"
+ [(match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")
+ (match_operand:SI 3 "" "")]
+ ""
+ "
+{
+ xtensa_expand_nonlocal_goto (operands);
+ DONE;
+}")
+
+;; Setting up a frame pointer is tricky for Xtensa because GCC doesn't
+;; know if a frame pointer is required until the reload pass, and
+;; because there may be an incoming argument value in the hard frame
+;; pointer register (a7). If there is an incoming argument in that
+;; register, the "set_frame_ptr" insn gets inserted immediately after
+;; the insn that copies the incoming argument to a pseudo or to the
+;; stack. This serves several purposes here: (1) it keeps the
+;; optimizer from copy-propagating or scheduling the use of a7 as an
+;; incoming argument away from the beginning of the function; (2) we
+;; can use a post-reload splitter to expand away the insn if a frame
+;; pointer is not required, so that the post-reload scheduler can do
+;; the right thing; and (3) it makes it easy for xtensa_reorg() to
+;; search for this insn to determine whether it should add a new insn
+;; to set up the frame pointer.
+
+(define_insn "set_frame_ptr"
+ [(unspec_volatile [(const_int 0)] UNSPECV_SET_FP)]
+ ""
+ "*
+{
+ if (frame_pointer_needed)
+ return \"mov\\ta7, sp\";
+ return \"\";
+}"
+ [(set_attr "type" "move")
+ (set_attr "mode" "SI")
+ (set_attr "length" "3")])
+
+;; Post-reload splitter to remove fp assignment when it's not needed.
+(define_split
+ [(unspec_volatile [(const_int 0)] UNSPECV_SET_FP)]
+ "reload_completed && !frame_pointer_needed"
+ [(unspec [(const_int 0)] UNSPEC_NOP)]
+ "")
+
+;; The preceding splitter needs something to split the insn into;
+;; things start breaking if the result is just a "use" so instead we
+;; generate the following insn.
+(define_insn ""
+ [(unspec [(const_int 0)] UNSPEC_NOP)]
+ ""
+ ""
+ [(set_attr "type" "nop")
+ (set_attr "mode" "none")
+ (set_attr "length" "0")])
+
+;;
+;; ....................
+;;
+;; BOOLEANS
+;;
+;; ....................
+;;
+
+;; branch patterns
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 2 "boolean_operator"
+ [(match_operand:CC 0 "register_operand" "b")
+ (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ "TARGET_BOOLEANS"
+ "*
+{
+ if (GET_CODE (operands[2]) == EQ)
+ return \"bf\\t%0, %1\";
+ else
+ return \"bt\\t%0, %1\";
+}"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 2 "boolean_operator"
+ [(match_operand:CC 0 "register_operand" "b")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ "TARGET_BOOLEANS"
+ "*
+{
+ if (GET_CODE (operands[2]) == EQ)
+ return \"bt\\t%0, %1\";
+ else
+ return \"bf\\t%0, %1\";
+}"
+ [(set_attr "type" "jump")
+ (set_attr "mode" "none")
+ (set_attr "length" "3")])
diff --git a/gcc/cse.c b/gcc/cse.c
index 6cecb1c40d0..ac9e8860d38 100644
--- a/gcc/cse.c
+++ b/gcc/cse.c
@@ -2272,10 +2272,10 @@ canon_hash (x, mode)
|| CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno))
|| (SMALL_REGISTER_CLASSES
&& ! fixed_regs[regno]
- && regno != FRAME_POINTER_REGNUM
- && regno != HARD_FRAME_POINTER_REGNUM
- && regno != ARG_POINTER_REGNUM
- && regno != STACK_POINTER_REGNUM
+ && x != frame_pointer_rtx
+ && x != hard_frame_pointer_rtx
+ && x != arg_pointer_rtx
+ && x != stack_pointer_rtx
&& GET_MODE_CLASS (GET_MODE (x)) != MODE_CC)))
{
do_not_record = 1;
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 945819e7049..fd8a5824ed2 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -636,6 +636,24 @@ in the following sections.
@gccoptlist{
-msim}
+@emph{Xtensa Options}
+@gccoptlist{
+-mbig-endian -mlittle-endian @gol
+-mdensity -mno-density @gol
+-mmac16 -mno-mac16 @gol
+-mmul16 -mno-mul16 @gol
+-mmul32 -mno-mul32 @gol
+-mnsa -mno-nsa @gol
+-mminmax -mno-minmax @gol
+-msext -mno-sext @gol
+-mbooleans -mno-booleans @gol
+-mhard-float -msoft-float @gol
+-mfused-madd -mno-fused-madd @gol
+-mserialize-volatile -mno-serialize-volatile @gol
+-mtext-section-literals -mno-text-section-literals @gol
+-mtarget-align -mno-target-align @gol
+-mlongcalls -mno-longcalls}
+
@item Code Generation Options
@xref{Code Gen Options,,Options for Code Generation Conventions}.
@gccoptlist{
@@ -5116,6 +5134,7 @@ that macro, which enables you to change the defaults.
* MMIX Options::
* PDP-11 Options::
* Xstormy16 Options::
+* Xtensa Options::
@end menu
@node M680x0 Options
@@ -9604,6 +9623,179 @@ These options are defined for Xstormy16:
Choose startup files and linker script suitable for the simulator.
@end table
+@node Xtensa Options
+@subsection Xtensa Options
+@cindex Xtensa Options
+
+The Xtensa architecture is designed to support many different
+configurations. The compiler's default options can be set to match a
+particular Xtensa configuration by copying a configuration file into the
+GCC sources when building GCC@. The options below may be used to
+override the default options.
+
+@table @gcctabopt
+@item -mbig-endian
+@itemx -mlittle-endian
+@opindex mbig-endian
+@opindex mlittle-endian
+Specify big-endian or little-endian byte ordering for the target Xtensa
+processor.
+
+@item -mdensity
+@itemx -mno-density
+@opindex mdensity
+@opindex mno-density
+Enable or disable use of the optional Xtensa code density instructions.
+
+@item -mmac16
+@itemx -mno-mac16
+@opindex mmac16
+@opindex mno-mac16
+Enable or disable use of the Xtensa MAC16 option. When enabled, GCC
+will generate MAC16 instructions from standard C code, with the
+limitation that it will use neither the MR register file nor any
+instruction that operates on the MR registers. When this option is
+disabled, GCC will translate 16-bit multiply/accumulate operations to a
+combination of core instructions and library calls, depending on whether
+any other multiplier options are enabled.
+
+@item -mmul16
+@itemx -mno-mul16
+@opindex mmul16
+@opindex mno-mul16
+Enable or disable use of the 16-bit integer multiplier option. When
+enabled, the compiler will generate 16-bit multiply instructions for
+multiplications of 16 bits or smaller in standard C code. When this
+option is disabled, the compiler will either use 32-bit multiply or
+MAC16 instructions if they are available or generate library calls to
+perform the multiply operations using shifts and adds.
+
+@item -mmul32
+@itemx -mno-mul32
+@opindex mmul32
+@opindex mno-mul32
+Enable or disable use of the 32-bit integer multiplier option. When
+enabled, the compiler will generate 32-bit multiply instructions for
+multiplications of 32 bits or smaller in standard C code. When this
+option is disabled, the compiler will generate library calls to perform
+the multiply operations using either shifts and adds or 16-bit multiply
+instructions if they are available.
+
+@item -mnsa
+@itemx -mno-nsa
+@opindex mnsa
+@opindex mno-nsa
+Enable or disable use of the optional normalization shift amount
+(@code{NSA}) instructions to implement the built-in @code{ffs} function.
+
+@item -mminmax
+@itemx -mno-minmax
+@opindex mminmax
+@opindex mno-minmax
+Enable or disable use of the optional minimum and maximum value
+instructions.
+
+@item -msext
+@itemx -mno-sext
+@opindex msext
+@opindex mno-sext
+Enable or disable use of the optional sign extend (@code{SEXT})
+instruction.
+
+@item -mbooleans
+@itemx -mno-booleans
+@opindex mbooleans
+@opindex mno-booleans
+Enable or disable support for the boolean register file used by Xtensa
+coprocessors. This is not typically useful by itself but may be
+required for other options that make use of the boolean registers (e.g.,
+the floating-point option).
+
+@item -mhard-float
+@itemx -msoft-float
+@opindex mhard-float
+@opindex msoft-float
+Enable or disable use of the floating-point option. When enabled, GCC
+generates floating-point instructions for 32-bit @code{float}
+operations. When this option is disabled, GCC generates library calls
+to emulate 32-bit floating-point operations using integer instructions.
+Regardless of this option, 64-bit @code{double} operations are always
+emulated with calls to library functions.
+
+@item -mfused-madd
+@itemx -mno-fused-madd
+@opindex mfused-madd
+@opindex mno-fused-madd
+Enable or disable use of fused multiply/add and multiply/subtract
+instructions in the floating-point option. This has no effect if the
+floating-point option is not also enabled. Disabling fused multiply/add
+and multiply/subtract instructions forces the compiler to use separate
+instructions for the multiply and add/subtract operations. This may be
+desirable in some cases where strict IEEE 754-compliant results are
+required: the fused multiply add/subtract instructions do not round the
+intermediate result, thereby producing results with @emph{more} bits of
+precision than specified by the IEEE standard. Disabling fused multiply
+add/subtract instructions also ensures that the program output is not
+sensitive to the compiler's ability to combine multiply and add/subtract
+operations.
+
+@item -mserialize-volatile
+@itemx -mno-serialize-volatile
+@opindex mserialize-volatile
+@opindex mno-serialize-volatile
+When this option is enabled, GCC inserts @code{MEMW} instructions before
+@code{volatile} memory references to guarantee sequential consistency.
+The default is @option{-mserialize-volatile}. Use
+@option{-mno-serialize-volatile} to omit the @code{MEMW} instructions.
+
+@item -mtext-section-literals
+@itemx -mno-text-section-literals
+@opindex mtext-section-literals
+@opindex mno-text-section-literals
+Control the treatment of literal pools. The default is
+@option{-mno-text-section-literals}, which places literals in a separate
+section in the output file. This allows the literal pool to be placed
+in a data RAM/ROM, and it also allows the linker to combine literal
+pools from separate object files to remove redundant literals and
+improve code size. With @option{-mtext-section-literals}, the literals
+are interspersed in the text section in order to keep them as close as
+possible to their references. This may be necessary for large assembly
+files.
+
+@item -mtarget-align
+@itemx -mno-target-align
+@opindex mtarget-align
+@opindex mno-target-align
+When this option is enabled, GCC instructs the assembler to
+automatically align instructions to reduce branch penalties at the
+expense of some code density. The assembler attempts to widen density
+instructions to align branch targets and the instructions following call
+instructions. If there are not enough preceding safe density
+instructions to align a target, no widening will be performed. The
+default is @option{-mtarget-align}. These options do not affect the
+treatment of auto-aligned instructions like @code{LOOP}, which the
+assembler will always align, either by widening density instructions or
+by inserting no-op instructions.
+
+@item -mlongcalls
+@itemx -mno-longcalls
+@opindex mlongcalls
+@opindex mno-longcalls
+When this option is enabled, GCC instructs the assembler to translate
+direct calls to indirect calls unless it can determine that the target
+of a direct call is in the range allowed by the call instruction. This
+translation typically occurs for calls to functions in other source
+files. Specifically, the assembler translates a direct @code{CALL}
+instruction into an @code{L32R} followed by a @code{CALLX} instruction.
+The default is @option{-mno-longcalls}. This option should be used in
+programs where the call target can potentially be out of range. This
+option is implemented in the assembler, not the compiler, so the
+assembly code generated by GCC will still show direct call
+instructions---look at the disassembled object code to see the actual
+instructions. Note that the assembler will use an indirect call for
+every cross-file call, not just those that really will be out of range.
+@end table
+
@node Code Gen Options
@section Options for Code Generation Conventions
@cindex code generation conventions
diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
index 50acfbce529..ca59a6c69e0 100644
--- a/gcc/doc/md.texi
+++ b/gcc/doc/md.texi
@@ -2068,6 +2068,31 @@ A constant that is not between 2 and 15 inclusive.
@end table
+@item Xtensa---@file{xtensa.h}
+@table @code
+@item a
+General-purpose 32-bit register
+
+@item b
+One-bit boolean register
+
+@item A
+MAC16 40-bit accumulator register
+
+@item I
+Signed 12-bit integer constant, for use in MOVI instructions
+
+@item J
+Signed 8-bit integer constant, for use in ADDI instructions
+
+@item K
+Integer constant valid for BccI instructions
+
+@item L
+Unsigned constant valid for BccUI instructions
+
+@end table
+
@end table
@ifset INTERNALS
diff --git a/gcc/integrate.c b/gcc/integrate.c
index b1fecc4788d..10d98f2f8a8 100644
--- a/gcc/integrate.c
+++ b/gcc/integrate.c
@@ -1318,6 +1318,7 @@ copy_insn_list (insns, map, static_chain_value)
#ifdef HAVE_cc0
rtx cc0_insn = 0;
#endif
+ rtx static_chain_mem = 0;
/* Copy the insns one by one. Do this in two passes, first the insns and
then their REG_NOTES. */
@@ -1381,25 +1382,62 @@ copy_insn_list (insns, map, static_chain_value)
&& REG_FUNCTION_VALUE_P (XEXP (pattern, 0)))
break;
- /* If this is setting the static chain rtx, omit it. */
+ /* Look for the address of the static chain slot. The
+ rtx_equal_p comparisons against the
+ static_chain_incoming_rtx below may fail if the static
+ chain is in memory and the address specified is not
+ "legitimate". This happens on Xtensa where the static
+ chain is at a negative offset from argp and where only
+ positive offsets are legitimate. When the RTL is
+ generated, the address is "legitimized" by copying it
+ into a register, causing the rtx_equal_p comparisons to
+ fail. This workaround looks for code that sets a
+ register to the address of the static chain. Subsequent
+ memory references via that register can then be
+ identified as static chain references. We assume that
+ the register is only assigned once, and that the static
+ chain address is only live in one register at a time. */
+
else if (static_chain_value != 0
&& set != 0
+ && GET_CODE (static_chain_incoming_rtx) == MEM
&& GET_CODE (SET_DEST (set)) == REG
- && rtx_equal_p (SET_DEST (set),
- static_chain_incoming_rtx))
+ && rtx_equal_p (SET_SRC (set),
+ XEXP (static_chain_incoming_rtx, 0)))
+ {
+ static_chain_mem =
+ gen_rtx_MEM (GET_MODE (static_chain_incoming_rtx),
+ SET_DEST (set));
+
+	      /* Emit the instruction in case it is used for something
+		 other than setting the static chain; if it's not used,
+		 it can always be removed as dead code.  */
+ copy = emit_insn (copy_rtx_and_substitute (pattern, map, 0));
+ }
+
+ /* If this is setting the static chain rtx, omit it. */
+ else if (static_chain_value != 0
+ && set != 0
+ && (rtx_equal_p (SET_DEST (set),
+ static_chain_incoming_rtx)
+ || (static_chain_mem
+ && rtx_equal_p (SET_DEST (set), static_chain_mem))))
break;
/* If this is setting the static chain pseudo, set it from
the value we want to give it instead. */
else if (static_chain_value != 0
&& set != 0
- && rtx_equal_p (SET_SRC (set),
- static_chain_incoming_rtx))
+ && (rtx_equal_p (SET_SRC (set),
+ static_chain_incoming_rtx)
+ || (static_chain_mem
+ && rtx_equal_p (SET_SRC (set), static_chain_mem))))
{
rtx newdest = copy_rtx_and_substitute (SET_DEST (set), map, 1);
copy = emit_move_insn (newdest, static_chain_value);
- static_chain_value = 0;
+ if (GET_CODE (static_chain_incoming_rtx) != MEM)
+ static_chain_value = 0;
}
/* If this is setting the virtual stack vars register, this must
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 7a338d12c38..eb918c3eaec 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,7 @@
+2002-01-23 Bob Wilson <bob.wilson@acm.org>
+
+ * gcc.c-torture/compile/20001226-1.x: xfail for Xtensa.
+
2002-01-23 Janis Johnson <janis187@us.ibm.com>
* gcc.dg/20020122-3.c: New.
diff --git a/gcc/testsuite/gcc.c-torture/compile/20001226-1.x b/gcc/testsuite/gcc.c-torture/compile/20001226-1.x
index a8db223e1f9..f1f000f8889 100644
--- a/gcc/testsuite/gcc.c-torture/compile/20001226-1.x
+++ b/gcc/testsuite/gcc.c-torture/compile/20001226-1.x
@@ -1,8 +1,12 @@
# This does not assemble on m68hc11 because the function is larger
# than 64K.
+# It doesn't work on Xtensa with -O0 because the function is larger
+# than the range of a jump instruction (+/- 128K) and the assembler
+# does not yet relax jumps to indirect jumps.
+
global target_triplet
-if { [istarget "m6811-*-*"] || [istarget "m6812-*-*"] } {
+if { [istarget "m6811-*-*"] || [istarget "m6812-*-*"] || [istarget "xtensa-*-*"]} {
set torture_compile_xfail "$target_triplet"
return 1
}