author     rus <rus@138bc75d-0d04-0410-961f-82ee72b054a4>   2009-11-09 20:58:24 +0000
committer  rus <rus@138bc75d-0d04-0410-961f-82ee72b054a4>   2009-11-09 20:58:24 +0000
commit     7f4db7c80779ecbc57d1146654daf0acfe18de66 (patch)
tree       3af522a3b5e149c3fd498ecb1255994daae2129a /gcc/config/rx
parent     611349f0ec42a37591db2cd02974a11a48d10edb (diff)
download   gcc-profile-stdlib.tar.gz

merge from trunk (profile-stdlib)

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/branches/profile-stdlib@154052 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/rx')
-rw-r--r--  gcc/config/rx/constraints.md |   81
-rw-r--r--  gcc/config/rx/predicates.md  |  288
-rw-r--r--  gcc/config/rx/rx-protos.h    |   52
-rw-r--r--  gcc/config/rx/rx.c           | 2517
-rw-r--r--  gcc/config/rx/rx.h           |  659
-rw-r--r--  gcc/config/rx/rx.md          | 1766
-rw-r--r--  gcc/config/rx/rx.opt         |   98
-rw-r--r--  gcc/config/rx/t-rx           |   32
8 files changed, 5493 insertions, 0 deletions
diff --git a/gcc/config/rx/constraints.md b/gcc/config/rx/constraints.md
new file mode 100644
index 00000000000..52bf7df3621
--- /dev/null
+++ b/gcc/config/rx/constraints.md
@@ -0,0 +1,81 @@
+;; Constraint definitions for Renesas RX.
+;; Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+(define_constraint "Symbol"
+ "@internal Constraint on the type of rtx allowed in call insns"
+ (match_test "GET_CODE (op) == SYMBOL_REF")
+)
+
+
+(define_constraint "Int08"
+ "@internal A signed or unsigned 8-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 8), (1 << 8) - 1)")
+ )
+)
+
+(define_constraint "Sint08"
+ "@internal A signed 8-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 7), (1 << 7) - 1)")
+ )
+)
+
+(define_constraint "Sint16"
+ "@internal A signed 16-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 15), (1 << 15) - 1)")
+ )
+)
+
+(define_constraint "Sint24"
+ "@internal A signed 24-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, (-1 << 23), (1 << 23) - 1)")
+ )
+)
+
+;; This constraint is used by the SUBSI3 pattern because the
+;; RX SUB instruction can only take a 4-bit unsigned integer
+;; value. Also used by the MVTIPL instruction.
+(define_constraint "Uint04"
+ "@internal An unsigned 4-bit immediate value"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, 0, 15)")
+ )
+)
+
+;; This is used in arithmetic and logic instructions for
+;; a source operand that lies in memory and which satisfies
+;; rx_is_restricted_memory_address().
+
+(define_memory_constraint "Q"
+ "A MEM which only uses REG or REG+INT addressing."
+ (and (match_code "mem")
+ (ior (match_code "reg" "0")
+ (and (match_code "plus" "0")
+ (and (match_code "reg,subreg" "00")
+ (match_code "const_int" "01")
+ )
+ )
+ )
+ )
+)
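The shifted bounds in the IN_RANGE tests above are easier to read once evaluated. A minimal C sketch of the value ranges each constraint accepts, not taken from the patch (the IN_RANGE macro below is a local stand-in for GCC's own):

    #include <stdbool.h>

    #define IN_RANGE(v, lo, hi) ((v) >= (lo) && (v) <= (hi))

    /* Ranges implied by the match_test expressions above.  */
    static bool int08_ok  (long v) { return IN_RANGE (v, -256, 255); }         /* Int08  */
    static bool sint08_ok (long v) { return IN_RANGE (v, -128, 127); }         /* Sint08 */
    static bool sint16_ok (long v) { return IN_RANGE (v, -32768, 32767); }     /* Sint16 */
    static bool sint24_ok (long v) { return IN_RANGE (v, -8388608, 8388607); } /* Sint24 */
    static bool uint04_ok (long v) { return IN_RANGE (v, 0, 15); }             /* Uint04 */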
diff --git a/gcc/config/rx/predicates.md b/gcc/config/rx/predicates.md
new file mode 100644
index 00000000000..d7a363ebb88
--- /dev/null
+++ b/gcc/config/rx/predicates.md
@@ -0,0 +1,288 @@
+;; Predicate definitions for Renesas RX.
+;; Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+
+;; Check that the operand is suitable for a call insn.
+;; Only registers and symbol refs are allowed.
+
+(define_predicate "rx_call_operand"
+ (match_code "symbol_ref,reg")
+)
+
+;; For sibcall operations we can only use a symbolic address.
+
+(define_predicate "rx_symbolic_call_operand"
+ (match_code "symbol_ref")
+)
+
+;; Check that the operand is suitable for a shift insn
+;; Only small integers or a value in a register are permitted.
+
+(define_predicate "rx_shift_operand"
+ (match_code "const_int,reg")
+ {
+ if (CONST_INT_P (op))
+ return IN_RANGE (INTVAL (op), 0, 31);
+ return true;
+ }
+)
+
+;; Check that the operand is suitable as the source operand
+;; for a logic or arithmetic instruction. Registers, integers
+;; and a restricted subset of memory addresses are allowed.
+
+(define_predicate "rx_source_operand"
+ (match_code "const_int,reg,mem")
+ {
+ if (CONST_INT_P (op))
+ return rx_is_legitimate_constant (op);
+
+ if (! MEM_P (op))
+ return true;
+
+ /* Do not allow size conversions whilst accessing memory. */
+ if (GET_MODE (op) != mode)
+ return false;
+
+ return rx_is_restricted_memory_address (XEXP (op, 0), mode);
+ }
+)
+
+;; Check that the operand is suitable as the source operand
+;; for a comparison instruction. This is the same as
+;; rx_source_operand except that SUBREGs are allowed but
+;; CONST_INTs are not.
+
+(define_predicate "rx_compare_operand"
+ (match_code "subreg,reg,mem")
+ {
+ if (GET_CODE (op) == SUBREG)
+ return REG_P (XEXP (op, 0));
+
+ if (! MEM_P (op))
+ return true;
+
+ return rx_is_restricted_memory_address (XEXP (op, 0), mode);
+ }
+)
+
+;; Return true if OP is a store multiple operation. This looks like:
+;;
+;; [(set (SP) (MINUS (SP) (INT)))
+;; (set (MEM (SP)) (REG))
+;; (set (MEM (MINUS (SP) (INT))) (REG)) {optionally repeated}
+;; ]
+
+(define_special_predicate "rx_store_multiple_vector"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int src_regno;
+ rtx element;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 2)
+ return false;
+
+ /* Check that the first element of the vector is the stack adjust. */
+ element = XVECEXP (op, 0, 0);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || REGNO (SET_DEST (element)) != SP_REG
+ || GET_CODE (SET_SRC (element)) != MINUS
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (SET_SRC (element), 1)))
+ return false;
+
+ /* Check that the next element is the first push. */
+ element = XVECEXP (op, 0, 1);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || ! MEM_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || GET_CODE (XEXP (SET_DEST (element), 0)) != MINUS
+ || ! REG_P (XEXP (XEXP (SET_DEST (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_DEST (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_DEST (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_DEST (element), 0), 1))
+ != GET_MODE_SIZE (SImode))
+ return false;
+
+ src_regno = REGNO (SET_SRC (element));
+
+ /* Check that the remaining elements use SP-<disp>
+ addressing and decreasing register numbers. */
+ for (i = 2; i < count; i++)
+ {
+ element = XVECEXP (op, 0, i);
+
+ if ( ! SET_P (element)
+ || ! REG_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || REGNO (SET_SRC (element)) != src_regno - (i - 1)
+ || ! MEM_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || GET_CODE (XEXP (SET_DEST (element), 0)) != MINUS
+ || ! REG_P (XEXP (XEXP (SET_DEST (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_DEST (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_DEST (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_DEST (element), 0), 1))
+ != i * GET_MODE_SIZE (SImode))
+ return false;
+ }
+ return true;
+})
+
+;; Return true if OP is a load multiple operation.
+;; This looks like:
+;; [(set (SP) (PLUS (SP) (INT)))
+;; (set (REG) (MEM (SP)))
+;; (set (REG) (MEM (PLUS (SP) (INT)))) {optionally repeated}
+;; ]
+
+(define_special_predicate "rx_load_multiple_vector"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx element;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 2)
+ return false;
+
+ /* Check that the first element of the vector is the stack adjust. */
+ element = XVECEXP (op, 0, 0);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || REGNO (SET_DEST (element)) != SP_REG
+ || GET_CODE (SET_SRC (element)) != PLUS
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (SET_SRC (element), 1)))
+ return false;
+
+ /* Check that the next element is the first pop. */
+ element = XVECEXP (op, 0, 1);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || ! MEM_P (SET_SRC (element))
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG)
+ return false;
+
+ dest_regno = REGNO (SET_DEST (element));
+
+ /* Check that the remaining elements use SP+<disp>
+ addressing and incremental register numbers. */
+ for (i = 2; i < count; i++)
+ {
+ element = XVECEXP (op, 0, i);
+
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || REGNO (SET_DEST (element)) != dest_regno + (i - 1)
+ || ! MEM_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || GET_CODE (XEXP (SET_SRC (element), 0)) != PLUS
+ || ! REG_P (XEXP (XEXP (SET_SRC (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_SRC (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_SRC (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_SRC (element), 0), 1))
+ != (i - 1) * GET_MODE_SIZE (SImode))
+ return false;
+ }
+ return true;
+})
+
+;; Return true if OP is a pop-and-return load multiple operation.
+;; This looks like:
+;; [(set (SP) (PLUS (SP) (INT)))
+;; (set (REG) (MEM (SP)))
+;; (set (REG) (MEM (PLUS (SP) (INT)))) {optional and possibly repeated}
+;; (return)
+;; ]
+
+(define_special_predicate "rx_rtsd_vector"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx element;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 2)
+ return false;
+
+ /* Check that the first element of the vector is the stack adjust. */
+ element = XVECEXP (op, 0, 0);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || REGNO (SET_DEST (element)) != SP_REG
+ || GET_CODE (SET_SRC (element)) != PLUS
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (SET_SRC (element), 1)))
+ return false;
+
+ /* Check that the next element is the first pop. */
+ element = XVECEXP (op, 0, 1);
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || ! MEM_P (SET_SRC (element))
+ || ! REG_P (XEXP (SET_SRC (element), 0))
+ || REGNO (XEXP (SET_SRC (element), 0)) != SP_REG)
+ return false;
+
+ dest_regno = REGNO (SET_DEST (element));
+
+ /* Check that the remaining elements, if any, and except
+ for the last one, use SP+<disp> addressing and incremental
+ register numbers. */
+ for (i = 2; i < count - 1; i++)
+ {
+ element = XVECEXP (op, 0, i);
+
+ if ( ! SET_P (element)
+ || ! REG_P (SET_DEST (element))
+ || GET_MODE (SET_DEST (element)) != SImode
+ || REGNO (SET_DEST (element)) != dest_regno + (i - 1)
+ || ! MEM_P (SET_SRC (element))
+ || GET_MODE (SET_SRC (element)) != SImode
+ || GET_CODE (XEXP (SET_SRC (element), 0)) != PLUS
+ || ! REG_P (XEXP (XEXP (SET_SRC (element), 0), 0))
+ || REGNO (XEXP (XEXP (SET_SRC (element), 0), 0)) != SP_REG
+ || ! CONST_INT_P (XEXP (XEXP (SET_SRC (element), 0), 1))
+ || INTVAL (XEXP (XEXP (SET_SRC (element), 0), 1))
+ != (i - 1) * GET_MODE_SIZE (SImode))
+ return false;
+ }
+
+ /* The last element must be a RETURN. */
+ element = XVECEXP (op, 0, count - 1);
+ return GET_CODE (element) == RETURN;
+})
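To make the store-multiple shape concrete, here is an informal sketch (not compiler output) of a PARALLEL that rx_store_multiple_vector would accept, pushing r13-r15; register names are written symbolically for readability:

    (parallel [
       (set (reg:SI sp) (minus:SI (reg:SI sp) (const_int 12)))
       (set (mem:SI (minus:SI (reg:SI sp) (const_int 4)))  (reg:SI r15))
       (set (mem:SI (minus:SI (reg:SI sp) (const_int 8)))  (reg:SI r14))
       (set (mem:SI (minus:SI (reg:SI sp) (const_int 12))) (reg:SI r13))
    ])

The first element adjusts SP by the total size, the second stores the highest-numbered register at SP-4, and each following element stores the next lower register at the next lower slot, which is exactly what the checks above require.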
diff --git a/gcc/config/rx/rx-protos.h b/gcc/config/rx/rx-protos.h
new file mode 100644
index 00000000000..5c37fe0a83c
--- /dev/null
+++ b/gcc/config/rx/rx-protos.h
@@ -0,0 +1,52 @@
+/* Exported function prototypes from the Renesas RX backend.
+ Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RX_PROTOS_H
+#define GCC_RX_PROTOS_H
+
+/* A few abbreviations to make the prototypes shorter. */
+#define Mmode enum machine_mode
+#define Fargs CUMULATIVE_ARGS
+
+extern void rx_conditional_register_usage (void);
+extern void rx_expand_prologue (void);
+extern int rx_initial_elimination_offset (int, int);
+
+#ifdef RTX_CODE
+extern void rx_emit_stack_popm (rtx *, bool);
+extern void rx_emit_stack_pushm (rtx *);
+extern void rx_expand_epilogue (bool);
+extern bool rx_expand_insv (rtx *);
+extern const char * rx_gen_cond_branch_template (rtx, bool);
+extern char * rx_gen_move_template (rtx *, bool);
+extern bool rx_is_legitimate_constant (rtx);
+extern bool rx_is_mode_dependent_addr (rtx);
+extern bool rx_is_restricted_memory_address (rtx, Mmode);
+extern void rx_notice_update_cc (rtx body, rtx insn);
+extern void rx_print_operand (FILE *, rtx, int);
+extern void rx_print_operand_address (FILE *, rtx);
+#endif
+
+#ifdef TREE_CODE
+extern unsigned int rx_function_arg_size (Mmode, const_tree);
+extern struct rtx_def * rx_function_arg (Fargs *, Mmode, const_tree, bool);
+#endif
+
+#endif /* GCC_RX_PROTOS_H */
diff --git a/gcc/config/rx/rx.c b/gcc/config/rx/rx.c
new file mode 100644
index 00000000000..885f52581de
--- /dev/null
+++ b/gcc/config/rx/rx.c
@@ -0,0 +1,2517 @@
+/* Subroutines used for code generation on Renesas RX processors.
+ Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* To Do:
+
+ * Re-enable memory-to-memory copies and fix up reload. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "function.h"
+#include "expr.h"
+#include "optabs.h"
+#include "libfuncs.h"
+#include "recog.h"
+#include "toplev.h"
+#include "reload.h"
+#include "df.h"
+#include "ggc.h"
+#include "tm_p.h"
+#include "debug.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+
+enum rx_cpu_types rx_cpu_type = RX600;
+
+/* Return true if OP is a reference to an object in a small data area. */
+
+static bool
+rx_small_data_operand (rtx op)
+{
+ if (rx_small_data_limit == 0)
+ return false;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ return SYMBOL_REF_SMALL_P (op);
+
+ return false;
+}
+
+static bool
+rx_is_legitimate_address (Mmode mode, rtx x, bool strict ATTRIBUTE_UNUSED)
+{
+ if (RTX_OK_FOR_BASE (x, strict))
+ /* Register Indirect. */
+ return true;
+
+ if (GET_MODE_SIZE (mode) == 4
+ && (GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC))
+ /* Pre-decrement Register Indirect or
+ Post-increment Register Indirect. */
+ return RTX_OK_FOR_BASE (XEXP (x, 0), strict);
+
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx arg1 = XEXP (x, 0);
+ rtx arg2 = XEXP (x, 1);
+ rtx index = NULL_RTX;
+
+ if (REG_P (arg1) && RTX_OK_FOR_BASE (arg1, strict))
+ index = arg2;
+ else if (REG_P (arg2) && RTX_OK_FOR_BASE (arg2, strict))
+ index = arg1;
+ else
+ return false;
+
+ switch (GET_CODE (index))
+ {
+ case CONST_INT:
+ {
+ /* Register Relative: REG + INT.
+ Only positive, mode-aligned, mode-sized
+ displacements are allowed. */
+ HOST_WIDE_INT val = INTVAL (index);
+ int factor;
+
+ if (val < 0)
+ return false;
+
+ switch (GET_MODE_SIZE (mode))
+ {
+ default:
+ case 4: factor = 4; break;
+ case 2: factor = 2; break;
+ case 1: factor = 1; break;
+ }
+
+ if (val > (65535 * factor))
+ return false;
+ return (val % factor) == 0;
+ }
+
+ case REG:
+ /* Unscaled Indexed Register Indirect: REG + REG
+ Size has to be "QI", REG has to be valid. */
+ return GET_MODE_SIZE (mode) == 1 && RTX_OK_FOR_BASE (index, strict);
+
+ case MULT:
+ {
+ /* Scaled Indexed Register Indirect: REG + (REG * FACTOR)
+ Factor has to equal the mode size, REG has to be valid. */
+ rtx factor;
+
+ factor = XEXP (index, 1);
+ index = XEXP (index, 0);
+
+ return REG_P (index)
+ && RTX_OK_FOR_BASE (index, strict)
+ && CONST_INT_P (factor)
+ && GET_MODE_SIZE (mode) == INTVAL (factor);
+ }
+
+ default:
+ return false;
+ }
+ }
+
+ /* Small data area accesses turn into register relative offsets. */
+ return rx_small_data_operand (x);
+}
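The REG + INT case above boils down to three conditions on the displacement: it must be non-negative, a multiple of the access size, and no larger than 65535 times that size. A stand-alone sketch of just that test, not part of the patch:

    /* Displacement test used for REG + INT addresses; SIZE is the access
       size in bytes.  Sizes other than 1 and 2 use a factor of 4, matching
       the switch in rx_is_legitimate_address.  */
    static int
    rx_disp_ok (long val, int size)
    {
      int factor = (size == 1 || size == 2) ? size : 4;

      return val >= 0 && val <= 65535L * factor && (val % factor) == 0;
    }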
+
+/* Returns TRUE for simple memory addresses, i.e. ones
+ that do not involve register indexed addressing
+ or pre/post increment/decrement. */
+
+bool
+rx_is_restricted_memory_address (rtx mem, enum machine_mode mode)
+{
+ rtx base, index;
+
+ if (! rx_is_legitimate_address
+ (mode, mem, reload_in_progress || reload_completed))
+ return false;
+
+ switch (GET_CODE (mem))
+ {
+ case REG:
+ /* Simple memory addresses are OK. */
+ return true;
+
+ case PRE_DEC:
+ case POST_INC:
+ return false;
+
+ case PLUS:
+ /* Only allow REG+INT addressing. */
+ base = XEXP (mem, 0);
+ index = XEXP (mem, 1);
+
+ return RX_REG_P (base) && CONST_INT_P (index);
+
+ case SYMBOL_REF:
+ /* Can happen when small data is being supported.
+ Assume that it will be resolved into GP+INT. */
+ return true;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+bool
+rx_is_mode_dependent_addr (rtx addr)
+{
+ if (GET_CODE (addr) == CONST)
+ addr = XEXP (addr, 0);
+
+ switch (GET_CODE (addr))
+ {
+ /* --REG and REG++ only work in SImode. */
+ case PRE_DEC:
+ case POST_INC:
+ return true;
+
+ case MINUS:
+ case PLUS:
+ if (! REG_P (XEXP (addr, 0)))
+ return true;
+
+ addr = XEXP (addr, 1);
+
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ /* REG+REG only works in SImode. */
+ return true;
+
+ case CONST_INT:
+ /* REG+INT is only mode independent if INT is a
+ multiple of 4, positive and will fit into 8-bits. */
+ if (((INTVAL (addr) & 3) == 0)
+ && IN_RANGE (INTVAL (addr), 4, 252))
+ return false;
+ return true;
+
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return true;
+
+ case MULT:
+ gcc_assert (REG_P (XEXP (addr, 0)));
+ gcc_assert (CONST_INT_P (XEXP (addr, 1)));
+ /* REG+REG*SCALE is always mode dependent. */
+ return true;
+
+ default:
+ /* Not recognized, so treat as mode dependent. */
+ return true;
+ }
+
+ case CONST_INT:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case REG:
+ /* These are all mode independent. */
+ return false;
+
+ default:
+ /* Everything else is unrecognized,
+ so treat as mode dependent. */
+ return true;
+ }
+}
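A similarly stand-alone sketch (not from the patch) of the REG + INT rule above: an offset keeps the address mode independent only when it is a positive multiple of 4 that fits in the short displacement encoding.

    /* Mode-independence test for a REG + INT offset, mirroring the
       CONST_INT case above.  Note that an offset of 0 is excluded.  */
    static int
    rx_offset_mode_independent (long val)
    {
      return (val & 3) == 0 && val >= 4 && val <= 252;
    }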
+
+/* A C compound statement to output to stdio stream FILE the
+ assembler syntax for an instruction operand that is a memory
+ reference whose address is ADDR. */
+
+void
+rx_print_operand_address (FILE * file, rtx addr)
+{
+ switch (GET_CODE (addr))
+ {
+ case REG:
+ fprintf (file, "[");
+ rx_print_operand (file, addr, 0);
+ fprintf (file, "]");
+ break;
+
+ case PRE_DEC:
+ fprintf (file, "[-");
+ rx_print_operand (file, XEXP (addr, 0), 0);
+ fprintf (file, "]");
+ break;
+
+ case POST_INC:
+ fprintf (file, "[");
+ rx_print_operand (file, XEXP (addr, 0), 0);
+ fprintf (file, "+]");
+ break;
+
+ case PLUS:
+ {
+ rtx arg1 = XEXP (addr, 0);
+ rtx arg2 = XEXP (addr, 1);
+ rtx base, index;
+
+ if (REG_P (arg1) && RTX_OK_FOR_BASE (arg1, true))
+ base = arg1, index = arg2;
+ else if (REG_P (arg2) && RTX_OK_FOR_BASE (arg2, true))
+ base = arg2, index = arg1;
+ else
+ {
+ rx_print_operand (file, arg1, 0);
+ fprintf (file, " + ");
+ rx_print_operand (file, arg2, 0);
+ break;
+ }
+
+ if (REG_P (index) || GET_CODE (index) == MULT)
+ {
+ fprintf (file, "[");
+ rx_print_operand (file, index, 'A');
+ fprintf (file, ",");
+ }
+ else /* GET_CODE (index) == CONST_INT */
+ {
+ rx_print_operand (file, index, 'A');
+ fprintf (file, "[");
+ }
+ rx_print_operand (file, base, 0);
+ fprintf (file, "]");
+ break;
+ }
+
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST:
+ fprintf (file, "#");
+ default:
+ output_addr_const (file, addr);
+ break;
+ }
+}
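For reference, the assembler syntax produced by the cases above, written out informally (derived by reading the code, not from compiler output; register and symbol names are placeholders):

    (reg r1)                       ->  [r1]
    (pre_dec (reg r1))             ->  [-r1]
    (post_inc (reg r1))            ->  [r1+]
    (plus (reg r1) (const_int 8))  ->  8[r1]
    (plus (reg r1) (reg r2))       ->  [r2,r1]     (r2 is the index)
    (symbol_ref "foo")             ->  #foo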
+
+static void
+rx_print_integer (FILE * file, HOST_WIDE_INT val)
+{
+ if (IN_RANGE (val, -64, 64))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, val);
+ else
+ fprintf (file,
+ TARGET_AS100_SYNTAX
+ ? "0%" HOST_WIDE_INT_PRINT "xH" : HOST_WIDE_INT_PRINT_HEX,
+ val);
+}
+
+static bool
+rx_assemble_integer (rtx x, unsigned int size, int is_aligned)
+{
+ const char * op = integer_asm_op (size, is_aligned);
+
+ if (! CONST_INT_P (x))
+ return default_assemble_integer (x, size, is_aligned);
+
+ if (op == NULL)
+ return false;
+ fputs (op, asm_out_file);
+
+ rx_print_integer (asm_out_file, INTVAL (x));
+ fputc ('\n', asm_out_file);
+ return true;
+}
+
+
+int rx_float_compare_mode;
+
+/* Handles the insertion of a single operand into the assembler output.
+ The %<letter> directives supported are:
+
+ %A Print an operand without a leading # character.
+ %B Print an integer comparison name.
+ %C Print a control register name.
+ %F Print a condition code flag name.
+ %H Print high part of a DImode register, integer or address.
+ %L Print low part of a DImode register, integer or address.
+ %Q If the operand is a MEM, then correctly generate
+ register indirect or register relative addressing. */
+
+void
+rx_print_operand (FILE * file, rtx op, int letter)
+{
+ switch (letter)
+ {
+ case 'A':
+ /* Print an operand without a leading #. */
+ if (MEM_P (op))
+ op = XEXP (op, 0);
+
+ switch (GET_CODE (op))
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ output_addr_const (file, op);
+ break;
+ case CONST_INT:
+ fprintf (file, "%ld", (long) INTVAL (op));
+ break;
+ default:
+ rx_print_operand (file, op, 0);
+ break;
+ }
+ break;
+
+ case 'B':
+ switch (GET_CODE (op))
+ {
+ case LT: fprintf (file, "lt"); break;
+ case GE: fprintf (file, "ge"); break;
+ case GT: fprintf (file, "gt"); break;
+ case LE: fprintf (file, "le"); break;
+ case GEU: fprintf (file, "geu"); break;
+ case LTU: fprintf (file, "ltu"); break;
+ case GTU: fprintf (file, "gtu"); break;
+ case LEU: fprintf (file, "leu"); break;
+ case EQ: fprintf (file, "eq"); break;
+ case NE: fprintf (file, "ne"); break;
+ default: debug_rtx (op); gcc_unreachable ();
+ }
+ break;
+
+ case 'C':
+ gcc_assert (CONST_INT_P (op));
+ switch (INTVAL (op))
+ {
+ case 0: fprintf (file, "psw"); break;
+ case 2: fprintf (file, "usp"); break;
+ case 3: fprintf (file, "fpsw"); break;
+ case 4: fprintf (file, "cpen"); break;
+ case 8: fprintf (file, "bpsw"); break;
+ case 9: fprintf (file, "bpc"); break;
+ case 0xa: fprintf (file, "isp"); break;
+ case 0xb: fprintf (file, "fintv"); break;
+ case 0xc: fprintf (file, "intb"); break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 'F':
+ gcc_assert (CONST_INT_P (op));
+ switch (INTVAL (op))
+ {
+ case 0: case 'c': case 'C': fprintf (file, "C"); break;
+ case 1: case 'z': case 'Z': fprintf (file, "Z"); break;
+ case 2: case 's': case 'S': fprintf (file, "S"); break;
+ case 3: case 'o': case 'O': fprintf (file, "O"); break;
+ case 8: case 'i': case 'I': fprintf (file, "I"); break;
+ case 9: case 'u': case 'U': fprintf (file, "U"); break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 'H':
+ if (REG_P (op))
+ fprintf (file, "%s", reg_names [REGNO (op) + (WORDS_BIG_ENDIAN ? 0 : 1)]);
+ else if (CONST_INT_P (op))
+ {
+ HOST_WIDE_INT v = INTVAL (op);
+
+ fprintf (file, "#");
+ /* Trickery to avoid problems with shifting 32 bits at a time. */
+ v = v >> 16;
+ v = v >> 16;
+ rx_print_integer (file, v);
+ }
+ else
+ {
+ gcc_assert (MEM_P (op));
+
+ if (! WORDS_BIG_ENDIAN)
+ op = adjust_address (op, SImode, 4);
+ output_address (XEXP (op, 0));
+ }
+ break;
+
+ case 'L':
+ if (REG_P (op))
+ fprintf (file, "%s", reg_names [REGNO (op) + (WORDS_BIG_ENDIAN ? 1 : 0)]);
+ else if (CONST_INT_P (op))
+ {
+ fprintf (file, "#");
+ rx_print_integer (file, INTVAL (op) & 0xffffffff);
+ }
+ else
+ {
+ gcc_assert (MEM_P (op));
+
+ if (WORDS_BIG_ENDIAN)
+ op = adjust_address (op, SImode, 4);
+ output_address (XEXP (op, 0));
+ }
+ break;
+
+ case 'Q':
+ if (MEM_P (op))
+ {
+ HOST_WIDE_INT offset;
+
+ op = XEXP (op, 0);
+
+ if (REG_P (op))
+ offset = 0;
+ else if (GET_CODE (op) == PLUS)
+ {
+ rtx displacement;
+
+ if (REG_P (XEXP (op, 0)))
+ {
+ displacement = XEXP (op, 1);
+ op = XEXP (op, 0);
+ }
+ else
+ {
+ displacement = XEXP (op, 0);
+ op = XEXP (op, 1);
+ gcc_assert (REG_P (op));
+ }
+
+ gcc_assert (CONST_INT_P (displacement));
+ offset = INTVAL (displacement);
+ gcc_assert (offset >= 0);
+
+ fprintf (file, "%ld", offset);
+ }
+ else
+ gcc_unreachable ();
+
+ fprintf (file, "[");
+ rx_print_operand (file, op, 0);
+ fprintf (file, "].");
+
+ switch (GET_MODE_SIZE (GET_MODE (op)))
+ {
+ case 1:
+ gcc_assert (offset < 65535 * 1);
+ fprintf (file, "B");
+ break;
+ case 2:
+ gcc_assert (offset % 2 == 0);
+ gcc_assert (offset < 65535 * 2);
+ fprintf (file, "W");
+ break;
+ default:
+ gcc_assert (offset % 4 == 0);
+ gcc_assert (offset < 65535 * 4);
+ fprintf (file, "L");
+ break;
+ }
+ break;
+ }
+
+ /* Fall through. */
+
+ default:
+ switch (GET_CODE (op))
+ {
+ case MULT:
+ /* Should be the scaled part of an
+ indexed register indirect address. */
+ {
+ rtx base = XEXP (op, 0);
+ rtx index = XEXP (op, 1);
+
+ /* Check for a swapped index register and scaling factor.
+ Not sure if this can happen, but be prepared to handle it. */
+ if (CONST_INT_P (base) && REG_P (index))
+ {
+ rtx tmp = base;
+ base = index;
+ index = tmp;
+ }
+
+ gcc_assert (REG_P (base));
+ gcc_assert (REGNO (base) < FIRST_PSEUDO_REGISTER);
+ gcc_assert (CONST_INT_P (index));
+ /* Do not try to verify the value of the scalar as it is based
+ on the mode of the MEM not the mode of the MULT. (Which
+ will always be SImode). */
+ fprintf (file, "%s", reg_names [REGNO (base)]);
+ break;
+ }
+
+ case MEM:
+ output_address (XEXP (op, 0));
+ break;
+
+ case PLUS:
+ output_address (op);
+ break;
+
+ case REG:
+ gcc_assert (REGNO (op) < FIRST_PSEUDO_REGISTER);
+ fprintf (file, "%s", reg_names [REGNO (op)]);
+ break;
+
+ case SUBREG:
+ gcc_assert (subreg_regno (op) < FIRST_PSEUDO_REGISTER);
+ fprintf (file, "%s", reg_names [subreg_regno (op)]);
+ break;
+
+ /* This will only be single precision.... */
+ case CONST_DOUBLE:
+ {
+ unsigned long val;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+ fprintf (file, TARGET_AS100_SYNTAX ? "#0%lxH" : "#0x%lx", val);
+ break;
+ }
+
+ case CONST_INT:
+ fprintf (file, "#");
+ rx_print_integer (file, INTVAL (op));
+ break;
+
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ case CODE_LABEL:
+ case UNSPEC:
+ rx_print_operand_address (file, op);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ }
+}
+
+/* Returns an assembler template for a move instruction. */
+
+char *
+rx_gen_move_template (rtx * operands, bool is_movu)
+{
+ static char template [64];
+ const char * extension = TARGET_AS100_SYNTAX ? ".L" : "";
+ const char * src_template;
+ const char * dst_template;
+ rtx dest = operands[0];
+ rtx src = operands[1];
+
+ /* Decide which extension, if any, should be given to the move instruction. */
+ switch (CONST_INT_P (src) ? GET_MODE (dest) : GET_MODE (src))
+ {
+ case QImode:
+ /* The .B extension is not valid when
+ loading an immediate into a register. */
+ if (! REG_P (dest) || ! CONST_INT_P (src))
+ extension = ".B";
+ break;
+ case HImode:
+ if (! REG_P (dest) || ! CONST_INT_P (src))
+ /* The .W extension is not valid when
+ loading an immediate into a register. */
+ extension = ".W";
+ break;
+ case SFmode:
+ case SImode:
+ extension = ".L";
+ break;
+ case VOIDmode:
+ /* This mode is used by constants. */
+ break;
+ default:
+ debug_rtx (src);
+ gcc_unreachable ();
+ }
+
+ if (MEM_P (src) && rx_small_data_operand (XEXP (src, 0)))
+ src_template = "%%gp(%A1)[r13]";
+ else
+ src_template = "%1";
+
+ if (MEM_P (dest) && rx_small_data_operand (XEXP (dest, 0)))
+ dst_template = "%%gp(%A0)[r13]";
+ else
+ dst_template = "%0";
+
+ sprintf (template, "%s%s\t%s, %s", is_movu ? "movu" : "mov",
+ extension, src_template, dst_template);
+ return template;
+}
+
+/* Returns an assembler template for a conditional branch instruction. */
+
+const char *
+rx_gen_cond_branch_template (rtx condition, bool reversed)
+{
+ enum rtx_code code = GET_CODE (condition);
+
+
+ if ((cc_status.flags & CC_NO_OVERFLOW) && ! rx_float_compare_mode)
+ gcc_assert (code != GT && code != GE && code != LE && code != LT);
+
+ if ((cc_status.flags & CC_NO_CARRY) || rx_float_compare_mode)
+ gcc_assert (code != GEU && code != GTU && code != LEU && code != LTU);
+
+ if (reversed)
+ {
+ if (rx_float_compare_mode)
+ code = reverse_condition_maybe_unordered (code);
+ else
+ code = reverse_condition (code);
+ }
+
+ /* We do not worry about encoding the branch length here as GAS knows
+ how to choose the smallest version, and how to expand a branch that
+ is to a destination that is out of range. */
+
+ switch (code)
+ {
+ case UNEQ: return "bo\t1f\n\tbeq\t%0\n1:";
+ case LTGT: return "bo\t1f\n\tbne\t%0\n1:";
+ case UNLT: return "bo\t1f\n\tbn\t%0\n1:";
+ case UNGE: return "bo\t1f\n\tbpz\t%0\n1:";
+ case UNLE: return "bo\t1f\n\tbgt\t1f\n\tbra\t%0\n1:";
+ case UNGT: return "bo\t1f\n\tble\t1f\n\tbra\t%0\n1:";
+ case UNORDERED: return "bo\t%0";
+ case ORDERED: return "bno\t%0";
+
+ case LT: return rx_float_compare_mode ? "bn\t%0" : "blt\t%0";
+ case GE: return rx_float_compare_mode ? "bpz\t%0" : "bge\t%0";
+ case GT: return "bgt\t%0";
+ case LE: return "ble\t%0";
+ case GEU: return "bgeu\t%0";
+ case LTU: return "bltu\t%0";
+ case GTU: return "bgtu\t%0";
+ case LEU: return "bleu\t%0";
+ case EQ: return "beq\t%0";
+ case NE: return "bne\t%0";
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return VALUE rounded up to the next ALIGNMENT boundary. */
+
+static inline unsigned int
+rx_round_up (unsigned int value, unsigned int alignment)
+{
+ alignment -= 1;
+ return (value + alignment) & (~ alignment);
+}
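A few worked values for rx_round_up, assuming (as the code does) that ALIGNMENT is a power of two:

    /* rx_round_up (1, 4)  == 4    ((1 + 3) & ~3)
       rx_round_up (10, 4) == 12   ((10 + 3) & ~3)
       rx_round_up (12, 4) == 12   (already aligned)  */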
+
+/* Return the number of bytes in the argument registers
+ occupied by an argument of type TYPE and mode MODE. */
+
+unsigned int
+rx_function_arg_size (Mmode mode, const_tree type)
+{
+ unsigned int num_bytes;
+
+ num_bytes = (mode == BLKmode)
+ ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ return rx_round_up (num_bytes, UNITS_PER_WORD);
+}
+
+#define NUM_ARG_REGS 4
+#define MAX_NUM_ARG_BYTES (NUM_ARG_REGS * UNITS_PER_WORD)
+
+/* Return an RTL expression describing the register holding a function
+ parameter of mode MODE and type TYPE or NULL_RTX if the parameter should
+ be passed on the stack. CUM describes the previous parameters to the
+ function and NAMED is false if the parameter is part of a variable
+ parameter list, or the last named parameter before the start of a
+ variable parameter list. */
+
+rtx
+rx_function_arg (Fargs * cum, Mmode mode, const_tree type, bool named)
+{
+ unsigned int next_reg;
+ unsigned int bytes_so_far = *cum;
+ unsigned int size;
+ unsigned int rounded_size;
+
+ /* An exploded version of rx_function_arg_size. */
+ size = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+
+ rounded_size = rx_round_up (size, UNITS_PER_WORD);
+
+ /* Don't pass this arg via registers if there
+ are insufficient registers to hold all of it. */
+ if (rounded_size + bytes_so_far > MAX_NUM_ARG_BYTES)
+ return NULL_RTX;
+
+ /* Unnamed arguments and the last named argument in a
+ variadic function are always passed on the stack. */
+ if (!named)
+ return NULL_RTX;
+
+ /* Structures must occupy an exact number of registers,
+ otherwise they are passed on the stack. */
+ if ((type == NULL || AGGREGATE_TYPE_P (type))
+ && (size % UNITS_PER_WORD) != 0)
+ return NULL_RTX;
+
+ next_reg = (bytes_so_far / UNITS_PER_WORD) + 1;
+
+ return gen_rtx_REG (mode, next_reg);
+}
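A worked example of the assignment above, assuming UNITS_PER_WORD is 4 and that hard registers 1 to 4 correspond to r1-r4 (an assumption made here for illustration, not spelled out in this file):

    /* int f (int a, int b, int c, int d, int e);

       a..d: bytes_so_far = 0, 4, 8, 12  ->  next_reg = 1, 2, 3, 4 (r1-r4).
       e:    bytes_so_far = 16, so rounded_size + bytes_so_far = 20 exceeds
             MAX_NUM_ARG_BYTES (16) and e is passed on the stack (NULL_RTX).  */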
+
+/* Return an RTL describing where a function return value of type RET_TYPE
+ is held. */
+
+static rtx
+rx_function_value (const_tree ret_type,
+ const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (TYPE_MODE (ret_type), FUNC_RETURN_REGNUM);
+}
+
+static bool
+rx_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ HOST_WIDE_INT size;
+
+ if (TYPE_MODE (type) != BLKmode
+ && ! AGGREGATE_TYPE_P (type))
+ return false;
+
+ size = int_size_in_bytes (type);
+ /* Large structs and those whose size is not an
+ exact multiple of 4 are returned in memory. */
+ return size < 1
+ || size > 16
+ || (size % UNITS_PER_WORD) != 0;
+}
+
+static rtx
+rx_struct_value_rtx (tree fndecl ATTRIBUTE_UNUSED,
+ int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, STRUCT_VAL_REGNUM);
+}
+
+static bool
+rx_return_in_msb (const_tree valtype)
+{
+ return TARGET_BIG_ENDIAN_DATA
+ && (AGGREGATE_TYPE_P (valtype) || TREE_CODE (valtype) == COMPLEX_TYPE);
+}
+
+/* Returns true if the provided function has the specified attribute. */
+
+static inline bool
+has_func_attr (const_tree decl, const char * func_attr)
+{
+ if (decl == NULL_TREE)
+ decl = current_function_decl;
+
+ return lookup_attribute (func_attr, DECL_ATTRIBUTES (decl)) != NULL_TREE;
+}
+
+/* Returns true if the provided function has the "fast_interrupt" attribute. */
+
+static inline bool
+is_fast_interrupt_func (const_tree decl)
+{
+ return has_func_attr (decl, "fast_interrupt");
+}
+
+/* Returns true if the provided function has the "interrupt" attribute. */
+
+static inline bool
+is_interrupt_func (const_tree decl)
+{
+ return has_func_attr (decl, "interrupt");
+}
+
+/* Returns true if the provided function has the "naked" attribute. */
+
+static inline bool
+is_naked_func (const_tree decl)
+{
+ return has_func_attr (decl, "naked");
+}
+
+static bool use_fixed_regs = false;
+
+void
+rx_conditional_register_usage (void)
+{
+ static bool using_fixed_regs = false;
+
+ if (rx_small_data_limit > 0)
+ fixed_regs[GP_BASE_REGNUM] = call_used_regs [GP_BASE_REGNUM] = 1;
+
+ if (use_fixed_regs != using_fixed_regs)
+ {
+ static char saved_fixed_regs[FIRST_PSEUDO_REGISTER];
+ static char saved_call_used_regs[FIRST_PSEUDO_REGISTER];
+
+ if (use_fixed_regs)
+ {
+ unsigned int switched = 0;
+ unsigned int r;
+
+ /* This is for fast interrupt handlers. Any register in
+ the range r10 to r13 (inclusive) that is currently
+ marked as fixed is now a viable, call-saved register.
+ All other registers are fixed. */
+ memcpy (saved_fixed_regs, fixed_regs, sizeof fixed_regs);
+ memcpy (saved_call_used_regs, call_used_regs, sizeof call_used_regs);
+
+ for (r = 1; r < 10; r++)
+ fixed_regs[r] = call_used_regs[r] = 1;
+
+ for (r = 10; r <= 13; r++)
+ if (fixed_regs[r])
+ {
+ fixed_regs[r] = 0;
+ call_used_regs[r] = 1;
+ ++ switched;
+ }
+ else
+ {
+ fixed_regs[r] = 1;
+ call_used_regs[r] = 1;
+ }
+
+ fixed_regs[14] = call_used_regs[14] = 1;
+ fixed_regs[15] = call_used_regs[15] = 1;
+
+ if (switched == 0)
+ {
+ static bool warned = false;
+
+ if (! warned)
+ {
+ warning (0, "no fixed registers available "
+ "for use by fast interrupt handler");
+ warned = true;
+ }
+ }
+ }
+ else
+ {
+ /* Restore the normal register masks. */
+ memcpy (fixed_regs, saved_fixed_regs, sizeof fixed_regs);
+ memcpy (call_used_regs, saved_call_used_regs, sizeof call_used_regs);
+ }
+
+ using_fixed_regs = use_fixed_regs;
+ }
+}
+
+/* Perform any actions necessary before starting to compile FNDECL.
+ For the RX we use this to make sure that we have the correct
+ set of register masks selected. If FNDECL is NULL then we are
+ compiling top level things. */
+
+static void
+rx_set_current_function (tree fndecl)
+{
+ /* Remember the last target of rx_set_current_function. */
+ static tree rx_previous_fndecl;
+ bool prev_was_fast_interrupt;
+ bool current_is_fast_interrupt;
+
+ /* Only change the context if the function changes. This hook is called
+ several times in the course of compiling a function, and we don't want
+ to slow things down too much or call target_reinit when it isn't safe. */
+ if (fndecl == rx_previous_fndecl)
+ return;
+
+ prev_was_fast_interrupt
+ = rx_previous_fndecl
+ ? is_fast_interrupt_func (rx_previous_fndecl) : false;
+
+ current_is_fast_interrupt
+ = fndecl ? is_fast_interrupt_func (fndecl) : false;
+
+ if (prev_was_fast_interrupt != current_is_fast_interrupt)
+ {
+ use_fixed_regs = current_is_fast_interrupt;
+ target_reinit ();
+ }
+
+ rx_previous_fndecl = fndecl;
+}
+
+/* Typical stack layout should look like this after the function's prologue:
+
+ | |
+ -- ^
+ | | \ |
+ | | arguments saved | Increasing
+ | | on the stack | addresses
+ PARENT arg pointer -> | | /
+ -------------------------- ---- -------------------
+ CHILD |ret | return address
+ --
+ | | \
+ | | call saved
+ | | registers
+ | | /
+ --
+ | | \
+ | | local
+ | | variables
+ frame pointer -> | | /
+ --
+ | | \
+ | | outgoing | Decreasing
+ | | arguments | addresses
+ current stack pointer -> | | / |
+ -------------------------- ---- ------------------ V
+ | | */
+
+static unsigned int
+bit_count (unsigned int x)
+{
+ const unsigned int m1 = 0x55555555;
+ const unsigned int m2 = 0x33333333;
+ const unsigned int m4 = 0x0f0f0f0f;
+
+ x -= (x >> 1) & m1;
+ x = (x & m2) + ((x >> 2) & m2);
+ x = (x + (x >> 4)) & m4;
+ x += x >> 8;
+
+ return (x + (x >> 16)) & 0x3f;
+}
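bit_count is a standard SWAR population count; a few worked values:

    /* bit_count (0x00000000) == 0
       bit_count (0x00000f0f) == 8
       bit_count (0xffffffff) == 32  */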
+
+/* Returns either the lowest numbered and highest numbered registers that
+ occupy the call-saved area of the stack frame, if the registers are
+ stored as a contiguous block, or else a bitmask of the individual
+ registers if they are stored piecemeal.
+
+ Also computes the size of the frame and the size of the outgoing
+ arguments block (in bytes). */
+
+static void
+rx_get_stack_layout (unsigned int * lowest,
+ unsigned int * highest,
+ unsigned int * register_mask,
+ unsigned int * frame_size,
+ unsigned int * stack_size)
+{
+ unsigned int reg;
+ unsigned int low;
+ unsigned int high;
+ unsigned int fixed_reg = 0;
+ unsigned int save_mask;
+ unsigned int pushed_mask;
+ unsigned int unneeded_pushes;
+
+ if (is_naked_func (NULL_TREE)
+ || is_fast_interrupt_func (NULL_TREE))
+ {
+ /* Naked functions do not create their own stack frame.
+ Instead the programmer must do that for us.
+
+ Fast interrupt handlers use fixed registers that have
+ been specially released to the function, so they do
+ not need or want a stack frame. */
+ * lowest = 0;
+ * highest = 0;
+ * register_mask = 0;
+ * frame_size = 0;
+ * stack_size = 0;
+ return;
+ }
+
+ for (save_mask = high = low = 0, reg = 1; reg < FIRST_PSEUDO_REGISTER; reg++)
+ {
+ if (df_regs_ever_live_p (reg)
+ && (! call_used_regs[reg]
+ /* Even call clobbered registers must
+ be pushed inside interrupt handlers. */
+ || is_interrupt_func (NULL_TREE)))
+ {
+ if (low == 0)
+ low = reg;
+ high = reg;
+
+ save_mask |= 1 << reg;
+ }
+
+ /* Remember if we see a fixed register
+ after having found the low register. */
+ if (low != 0 && fixed_reg == 0 && fixed_regs [reg])
+ fixed_reg = reg;
+ }
+
+ /* Decide if it would be faster to fill in the call-saved area of the stack
+ frame using multiple PUSH instructions instead of a single PUSHM
+ instruction.
+
+ SAVE_MASK is a bitmask of the registers that must be stored in the
+ call-save area. PUSHED_MASK is a bitmask of the registers that would
+ be pushed into the area if we used a PUSHM instruction. UNNEEDED_PUSHES
+ is a bitmask of those registers in pushed_mask that are not in
+ save_mask.
+
+ We use a simple heuristic that says that it is better to use
+ multiple PUSH instructions if the number of unnecessary pushes is
+ greater than the number of necessary pushes.
+
+ We also use multiple PUSH instructions if there are any fixed registers
+ between LOW and HIGH. The only way that this can happen is if the user
+ has specified --fixed-<reg-name> on the command line and in such
+ circumstances we do not want to touch the fixed registers at all.
+
+ FIXME: Is it worth improving this heuristic ? */
+ pushed_mask = (-1 << low) & ~(-1 << (high + 1));
+ unneeded_pushes = (pushed_mask & (~ save_mask)) & pushed_mask;
+
+ if ((fixed_reg && fixed_reg <= high)
+ || (optimize_function_for_speed_p (cfun)
+ && bit_count (save_mask) < bit_count (unneeded_pushes)))
+ {
+ /* Use multiple pushes. */
+ * lowest = 0;
+ * highest = 0;
+ * register_mask = save_mask;
+ }
+ else
+ {
+ /* Use one push multiple instruction. */
+ * lowest = low;
+ * highest = high;
+ * register_mask = 0;
+ }
+
+ * frame_size = rx_round_up
+ (get_frame_size (), STACK_BOUNDARY / BITS_PER_UNIT);
+
+ if (crtl->args.size > 0)
+ * frame_size += rx_round_up
+ (crtl->args.size, STACK_BOUNDARY / BITS_PER_UNIT);
+
+ * stack_size = rx_round_up
+ (crtl->outgoing_args_size, STACK_BOUNDARY / BITS_PER_UNIT);
+}
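To make the PUSHM heuristic concrete, a worked example (not from the patch):

    /* Suppose r4, r6 and r7 are the only call-saved registers that need
       saving.  Then low = 4, high = 7 and save_mask = 0xd0.  pushed_mask,
       the set a single "pushm r4-r7" would store, is
       (-1 << 4) & ~(-1 << 8) = 0xf0, so unneeded_pushes = 0x20 (just r5).
       bit_count (save_mask) = 3 is not less than
       bit_count (unneeded_pushes) = 1, so the single PUSHM is used.  */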
+
+/* Generate a PUSHM instruction that matches the given operands. */
+
+void
+rx_emit_stack_pushm (rtx * operands)
+{
+ HOST_WIDE_INT last_reg;
+ rtx first_push;
+
+ gcc_assert (CONST_INT_P (operands[0]));
+ last_reg = (INTVAL (operands[0]) / UNITS_PER_WORD) - 1;
+
+ gcc_assert (GET_CODE (operands[1]) == PARALLEL);
+ first_push = XVECEXP (operands[1], 0, 1);
+ gcc_assert (SET_P (first_push));
+ first_push = SET_SRC (first_push);
+ gcc_assert (REG_P (first_push));
+
+ asm_fprintf (asm_out_file, "\tpushm\t%s-%s\n",
+ reg_names [REGNO (first_push) - last_reg],
+ reg_names [REGNO (first_push)]);
+}
+
+/* Generate a PARALLEL that will pass the rx_store_multiple_vector predicate. */
+
+static rtx
+gen_rx_store_vector (unsigned int low, unsigned int high)
+{
+ unsigned int i;
+ unsigned int count = (high - low) + 2;
+ rtx vector;
+
+ vector = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
+ XVECEXP (vector, 0, 0) =
+ gen_rtx_SET (SImode, stack_pointer_rtx,
+ gen_rtx_MINUS (SImode, stack_pointer_rtx,
+ GEN_INT ((count - 1) * UNITS_PER_WORD)));
+
+ for (i = 0; i < count - 1; i++)
+ XVECEXP (vector, 0, i + 1) =
+ gen_rtx_SET (SImode,
+ gen_rtx_MEM (SImode,
+ gen_rtx_MINUS (SImode, stack_pointer_rtx,
+ GEN_INT ((i + 1) * UNITS_PER_WORD))),
+ gen_rtx_REG (SImode, high - i));
+ return vector;
+}
+
+/* Mark INSN as being frame related. If it is a PARALLEL
+ then mark each element as being frame related as well. */
+
+static void
+mark_frame_related (rtx insn)
+{
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = PATTERN (insn);
+
+ if (GET_CODE (insn) == PARALLEL)
+ {
+ unsigned int i;
+
+ for (i = 0; i < XVECLEN (insn, 0); i++)
+ RTX_FRAME_RELATED_P (XVECEXP (insn, 0, i)) = 1;
+ }
+}
+
+void
+rx_expand_prologue (void)
+{
+ unsigned int stack_size;
+ unsigned int frame_size;
+ unsigned int mask;
+ unsigned int low;
+ unsigned int high;
+ unsigned int reg;
+ rtx insn;
+
+ /* Naked functions use their own, programmer provided prologues. */
+ if (is_naked_func (NULL_TREE)
+ /* Fast interrupt functions never use the stack. */
+ || is_fast_interrupt_func (NULL_TREE))
+ return;
+
+ rx_get_stack_layout (& low, & high, & mask, & frame_size, & stack_size);
+
+ /* If we use any of the callee-saved registers, save them now. */
+ if (mask)
+ {
+ /* Push registers in reverse order. */
+ for (reg = FIRST_PSEUDO_REGISTER; reg --;)
+ if (mask & (1 << reg))
+ {
+ insn = emit_insn (gen_stack_push (gen_rtx_REG (SImode, reg)));
+ mark_frame_related (insn);
+ }
+ }
+ else if (low)
+ {
+ if (high == low)
+ insn = emit_insn (gen_stack_push (gen_rtx_REG (SImode, low)));
+ else
+ insn = emit_insn (gen_stack_pushm (GEN_INT (((high - low) + 1)
+ * UNITS_PER_WORD),
+ gen_rx_store_vector (low, high)));
+ mark_frame_related (insn);
+ }
+
+ if (is_interrupt_func (NULL_TREE) && TARGET_SAVE_ACC_REGISTER)
+ {
+ unsigned int acc_high, acc_low;
+
+ /* Interrupt handlers have to preserve the accumulator
+ register if so requested by the user. Use the first
+ two pushed registers as intermediaries. */
+ if (mask)
+ {
+ acc_low = acc_high = 0;
+
+ for (reg = 1; reg < FIRST_PSEUDO_REGISTER; reg ++)
+ if (mask & (1 << reg))
+ {
+ if (acc_low == 0)
+ acc_low = reg;
+ else
+ {
+ acc_high = reg;
+ break;
+ }
+ }
+
+ /* We have assumed that there are at least two registers pushed... */
+ gcc_assert (acc_high != 0);
+
+ /* Note - the bottom 16 bits of the accumulator are inaccessible.
+ We just assume that they are zero. */
+ emit_insn (gen_mvfacmi (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_mvfachi (gen_rtx_REG (SImode, acc_high)));
+ emit_insn (gen_stack_push (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_stack_push (gen_rtx_REG (SImode, acc_high)));
+ }
+ else
+ {
+ acc_low = low;
+ acc_high = low + 1;
+
+ /* We have assumed that there are at least two registers pushed... */
+ gcc_assert (acc_high <= high);
+
+ emit_insn (gen_mvfacmi (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_mvfachi (gen_rtx_REG (SImode, acc_high)));
+ emit_insn (gen_stack_pushm (GEN_INT (2 * UNITS_PER_WORD),
+ gen_rx_store_vector (acc_low, acc_high)));
+ }
+
+ frame_size += 2 * UNITS_PER_WORD;
+ }
+
+ /* If needed, set up the frame pointer. */
+ if (frame_pointer_needed)
+ {
+ if (frame_size)
+ insn = emit_insn (gen_addsi3 (frame_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) frame_size)));
+ else
+ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ insn = NULL_RTX;
+
+ /* Allocate space for the outgoing args.
+ If the stack frame has not already been set up then handle this as well. */
+ if (stack_size)
+ {
+ if (frame_size)
+ {
+ if (frame_pointer_needed)
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, frame_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT)
+ stack_size)));
+ else
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT)
+ (frame_size + stack_size))));
+ }
+ else
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) stack_size)));
+ }
+ else if (frame_size)
+ {
+ if (! frame_pointer_needed)
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (- (HOST_WIDE_INT) frame_size)));
+ else
+ insn = emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
+ }
+
+ if (insn != NULL_RTX)
+ RTX_FRAME_RELATED_P (insn) = 1;
+}
+
+static void
+rx_output_function_prologue (FILE * file,
+ HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
+{
+ if (is_fast_interrupt_func (NULL_TREE))
+ asm_fprintf (file, "\t; Note: Fast Interrupt Handler\n");
+
+ if (is_interrupt_func (NULL_TREE))
+ asm_fprintf (file, "\t; Note: Interrupt Handler\n");
+
+ if (is_naked_func (NULL_TREE))
+ asm_fprintf (file, "\t; Note: Naked Function\n");
+
+ if (cfun->static_chain_decl != NULL)
+ asm_fprintf (file, "\t; Note: Nested function declared "
+ "inside another function.\n");
+
+ if (crtl->calls_eh_return)
+ asm_fprintf (file, "\t; Note: Calls __builtin_eh_return.\n");
+}
+
+/* Generate a POPM or RTSD instruction that matches the given operands. */
+
+void
+rx_emit_stack_popm (rtx * operands, bool is_popm)
+{
+ HOST_WIDE_INT stack_adjust;
+ HOST_WIDE_INT last_reg;
+ rtx first_push;
+
+ gcc_assert (CONST_INT_P (operands[0]));
+ stack_adjust = INTVAL (operands[0]);
+
+ gcc_assert (GET_CODE (operands[1]) == PARALLEL);
+ last_reg = XVECLEN (operands[1], 0) - (is_popm ? 2 : 3);
+
+ first_push = XVECEXP (operands[1], 0, 1);
+ gcc_assert (SET_P (first_push));
+ first_push = SET_DEST (first_push);
+ gcc_assert (REG_P (first_push));
+
+ if (is_popm)
+ asm_fprintf (asm_out_file, "\tpopm\t%s-%s\n",
+ reg_names [REGNO (first_push)],
+ reg_names [REGNO (first_push) + last_reg]);
+ else
+ asm_fprintf (asm_out_file, "\trtsd\t#%d, %s-%s\n",
+ (int) stack_adjust,
+ reg_names [REGNO (first_push)],
+ reg_names [REGNO (first_push) + last_reg]);
+}
+
+/* Generate a PARALLEL which will satisfy the rx_rtsd_vector predicate. */
+
+static rtx
+gen_rx_rtsd_vector (unsigned int adjust, unsigned int low, unsigned int high)
+{
+ unsigned int i;
+ unsigned int bias = 3;
+ unsigned int count = (high - low) + bias;
+ rtx vector;
+
+ vector = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
+ XVECEXP (vector, 0, 0) =
+ gen_rtx_SET (SImode, stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx, adjust));
+
+ for (i = 0; i < count - 2; i++)
+ XVECEXP (vector, 0, i + 1) =
+ gen_rtx_SET (SImode,
+ gen_rtx_REG (SImode, low + i),
+ gen_rtx_MEM (SImode,
+ i == 0 ? stack_pointer_rtx
+ : plus_constant (stack_pointer_rtx,
+ i * UNITS_PER_WORD)));
+
+ XVECEXP (vector, 0, count - 1) = gen_rtx_RETURN (VOIDmode);
+
+ return vector;
+}
+
+/* Generate a PARALLEL which will satisfy the rx_load_multiple_vector predicate. */
+
+static rtx
+gen_rx_popm_vector (unsigned int low, unsigned int high)
+{
+ unsigned int i;
+ unsigned int count = (high - low) + 2;
+ rtx vector;
+
+ vector = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+
+ XVECEXP (vector, 0, 0) =
+ gen_rtx_SET (SImode, stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx,
+ (count - 1) * UNITS_PER_WORD));
+
+ for (i = 0; i < count - 1; i++)
+ XVECEXP (vector, 0, i + 1) =
+ gen_rtx_SET (SImode,
+ gen_rtx_REG (SImode, low + i),
+ gen_rtx_MEM (SImode,
+ i == 0 ? stack_pointer_rtx
+ : plus_constant (stack_pointer_rtx,
+ i * UNITS_PER_WORD)));
+
+ return vector;
+}
+
+void
+rx_expand_epilogue (bool is_sibcall)
+{
+ unsigned int low;
+ unsigned int high;
+ unsigned int frame_size;
+ unsigned int stack_size;
+ unsigned int register_mask;
+ unsigned int regs_size;
+ unsigned int reg;
+ unsigned HOST_WIDE_INT total_size;
+
+ if (is_naked_func (NULL_TREE))
+ {
+ /* Naked functions use their own, programmer provided epilogues.
+ But, in order to keep gcc happy we have to generate some kind of
+ epilogue RTL. */
+ emit_jump_insn (gen_naked_return ());
+ return;
+ }
+
+ rx_get_stack_layout (& low, & high, & register_mask,
+ & frame_size, & stack_size);
+
+ total_size = frame_size + stack_size;
+ regs_size = ((high - low) + 1) * UNITS_PER_WORD;
+
+ /* See if we are unable to use the special stack frame deconstruct and
+ return instructions. In most cases we can use them, but the exceptions
+ are:
+
+ - Sibling calling functions deconstruct the frame but do not return to
+ their caller. Instead they branch to their sibling and allow their
+ return instruction to return to this function's parent.
+
+ - Fast and normal interrupt handling functions have to use special
+ return instructions.
+
+ - Functions where we have pushed a fragmented set of registers into the
+ call-save area must have the same set of registers popped. */
+ if (is_sibcall
+ || is_fast_interrupt_func (NULL_TREE)
+ || is_interrupt_func (NULL_TREE)
+ || register_mask)
+ {
+ /* Cannot use the special instructions - deconstruct by hand. */
+ if (total_size)
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (total_size)));
+
+ if (is_interrupt_func (NULL_TREE) && TARGET_SAVE_ACC_REGISTER)
+ {
+ unsigned int acc_low, acc_high;
+
+ /* Reverse the saving of the accumulator register onto the stack.
+ Note we must adjust the saved "low" accumulator value as it
+ is really the middle 32-bits of the accumulator. */
+ if (register_mask)
+ {
+ acc_low = acc_high = 0;
+ for (reg = 1; reg < FIRST_PSEUDO_REGISTER; reg ++)
+ if (register_mask & (1 << reg))
+ {
+ if (acc_low == 0)
+ acc_low = reg;
+ else
+ {
+ acc_high = reg;
+ break;
+ }
+ }
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, acc_high)));
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, acc_low)));
+ }
+ else
+ {
+ acc_low = low;
+ acc_high = low + 1;
+ emit_insn (gen_stack_popm (GEN_INT (2 * UNITS_PER_WORD),
+ gen_rx_popm_vector (acc_low, acc_high)));
+ }
+
+ emit_insn (gen_ashlsi3 (gen_rtx_REG (SImode, acc_low),
+ gen_rtx_REG (SImode, acc_low),
+ GEN_INT (16)));
+ emit_insn (gen_mvtaclo (gen_rtx_REG (SImode, acc_low)));
+ emit_insn (gen_mvtachi (gen_rtx_REG (SImode, acc_high)));
+ }
+
+ if (register_mask)
+ {
+ for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg ++)
+ if (register_mask & (1 << reg))
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, reg)));
+ }
+ else if (low)
+ {
+ if (high == low)
+ emit_insn (gen_stack_pop (gen_rtx_REG (SImode, low)));
+ else
+ emit_insn (gen_stack_popm (GEN_INT (regs_size),
+ gen_rx_popm_vector (low, high)));
+ }
+
+ if (is_fast_interrupt_func (NULL_TREE))
+ emit_jump_insn (gen_fast_interrupt_return ());
+ else if (is_interrupt_func (NULL_TREE))
+ emit_jump_insn (gen_exception_return ());
+ else if (! is_sibcall)
+ emit_jump_insn (gen_simple_return ());
+
+ return;
+ }
+
+ /* If we allocated space on the stack, free it now. */
+ if (total_size)
+ {
+ unsigned HOST_WIDE_INT rtsd_size;
+
+ /* See if we can use the RTSD instruction. */
+ rtsd_size = total_size + regs_size;
+ if (rtsd_size < 1024 && (rtsd_size % 4) == 0)
+ {
+ if (low)
+ emit_jump_insn (gen_pop_and_return
+ (GEN_INT (rtsd_size),
+ gen_rx_rtsd_vector (rtsd_size, low, high)));
+ else
+ emit_jump_insn (gen_deallocate_and_return (GEN_INT (total_size)));
+
+ return;
+ }
+
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ GEN_INT (total_size)));
+ }
+
+ if (low)
+ emit_jump_insn (gen_pop_and_return (GEN_INT (regs_size),
+ gen_rx_rtsd_vector (regs_size,
+ low, high)));
+ else
+ emit_jump_insn (gen_simple_return ());
+}
+
+
+/* Compute the offset (in words) between FROM (arg pointer
+ or frame pointer) and TO (frame pointer or stack pointer).
+ See ASCII art comment at the start of rx_expand_prologue
+ for more information. */
+
+int
+rx_initial_elimination_offset (int from, int to)
+{
+ unsigned int low;
+ unsigned int high;
+ unsigned int frame_size;
+ unsigned int stack_size;
+ unsigned int mask;
+
+ rx_get_stack_layout (& low, & high, & mask, & frame_size, & stack_size);
+
+ if (from == ARG_POINTER_REGNUM)
+ {
+ /* Extend the computed size of the stack frame to
+ include the registers pushed in the prologue. */
+ if (low)
+ frame_size += ((high - low) + 1) * UNITS_PER_WORD;
+ else
+ frame_size += bit_count (mask) * UNITS_PER_WORD;
+
+ /* Remember to include the return address. */
+ frame_size += 1 * UNITS_PER_WORD;
+
+ if (to == FRAME_POINTER_REGNUM)
+ return frame_size;
+
+ gcc_assert (to == STACK_POINTER_REGNUM);
+ return frame_size + stack_size;
+ }
+
+ gcc_assert (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM);
+ return stack_size;
+}
+
+/* Update the status of the condition
+ codes (cc0) based on the given INSN. */
+
+void
+rx_notice_update_cc (rtx body, rtx insn)
+{
+ switch (get_attr_cc (insn))
+ {
+ case CC_NONE:
+ /* Insn does not affect cc0 at all. */
+ break;
+ case CC_CLOBBER:
+ /* Insn doesn't leave cc0 in a usable state. */
+ CC_STATUS_INIT;
+ break;
+ case CC_SET_ZSOC:
+ /* The insn sets all the condition code bits. */
+ CC_STATUS_INIT;
+ cc_status.value1 = SET_SRC (body);
+ break;
+ case CC_SET_ZSO:
+ /* Insn sets the Z,S and O flags, but not the C flag. */
+ CC_STATUS_INIT;
+ cc_status.flags |= CC_NO_CARRY;
+ /* Do not set the value1 field in this case. The final_scan_insn()
+ function naively believes that if cc_status.value1 is set then
+ it can eliminate *any* comparison against that value, even if
+ the type of comparison cannot be satisfied by the range of flag
+ bits being set here. See gcc.c-torture/execute/20041210-1.c
+ for an example of this in action. */
+ break;
+ case CC_SET_ZS:
+ /* Insn sets the Z and S flags, but not the O or C flags. */
+ CC_STATUS_INIT;
+ cc_status.flags |= (CC_NO_CARRY | CC_NO_OVERFLOW);
+ /* See comment above regarding cc_status.value1. */
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Decide if a variable should go into one of the small data sections. */
+
+static bool
+rx_in_small_data (const_tree decl)
+{
+ int size;
+ const_tree section;
+
+ if (rx_small_data_limit == 0)
+ return false;
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ return false;
+
+ /* We do not put read-only variables into a small data area because
+ they would be placed with the other read-only sections, far away
+ from the read-write data sections, and we only have one small
+ data area pointer.
+     Similarly, commons are placed in the .bss section, which might be
+     far away from (and out of alignment with respect to) the .data section.  */
+ if (TREE_READONLY (decl) || DECL_COMMON (decl))
+ return false;
+
+ section = DECL_SECTION_NAME (decl);
+ if (section)
+ {
+ const char * const name = TREE_STRING_POINTER (section);
+
+ return (strcmp (name, "D_2") == 0) || (strcmp (name, "B_2") == 0);
+ }
+
+ size = int_size_in_bytes (TREE_TYPE (decl));
+
+ return (size > 0) && (size <= rx_small_data_limit);
+}
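+
+/* Illustrative example (hypothetical variable names and option value):
+   when compiling with -msmall-data-limit=8, the test above classifies
+   file-scope variables as follows:
+
+     int counter = 1;        4 writable bytes: placed in D_2 and
+                             addressed relative to r13.
+     int big[64] = { 1 };    256 bytes: over the limit, ordinary data.
+     const int mask = 0xff;  read-only: rejected by the test above.  */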
+
+/* Return a section for X.
+ The only special thing we do here is to honor small data. */
+
+static section *
+rx_select_rtx_section (enum machine_mode mode,
+ rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ if (rx_small_data_limit > 0
+ && GET_MODE_SIZE (mode) <= rx_small_data_limit
+ && align <= (unsigned HOST_WIDE_INT) rx_small_data_limit * BITS_PER_UNIT)
+ return sdata_section;
+
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+static section *
+rx_select_section (tree decl,
+ int reloc,
+ unsigned HOST_WIDE_INT align)
+{
+ if (rx_small_data_limit > 0)
+ {
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_SDATA: return sdata_section;
+ case SECCAT_SBSS: return sbss_section;
+ case SECCAT_SRODATA:
+          /* Fall through.  We do not put small, read-only data into
+             the C_2 section because that section is located with the
+             other read-only data sections, far away from the
+             read-write data sections, and we only have one small data
+             pointer (r13).  */
+ default:
+ break;
+ }
+ }
+
+ /* If we are supporting the Renesas assembler
+ we cannot use mergeable sections. */
+ if (TARGET_AS100_SYNTAX)
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_RODATA_MERGE_CONST:
+ case SECCAT_RODATA_MERGE_STR_INIT:
+ case SECCAT_RODATA_MERGE_STR:
+ return readonly_data_section;
+
+ default:
+ break;
+ }
+
+ return default_elf_select_section (decl, reloc, align);
+}
+
+enum rx_builtin
+{
+ RX_BUILTIN_BRK,
+ RX_BUILTIN_CLRPSW,
+ RX_BUILTIN_INT,
+ RX_BUILTIN_MACHI,
+ RX_BUILTIN_MACLO,
+ RX_BUILTIN_MULHI,
+ RX_BUILTIN_MULLO,
+ RX_BUILTIN_MVFACHI,
+ RX_BUILTIN_MVFACMI,
+ RX_BUILTIN_MVFC,
+ RX_BUILTIN_MVTACHI,
+ RX_BUILTIN_MVTACLO,
+ RX_BUILTIN_MVTC,
+ RX_BUILTIN_MVTIPL,
+ RX_BUILTIN_RACW,
+ RX_BUILTIN_REVW,
+ RX_BUILTIN_RMPA,
+ RX_BUILTIN_ROUND,
+ RX_BUILTIN_SAT,
+ RX_BUILTIN_SETPSW,
+ RX_BUILTIN_WAIT,
+ RX_BUILTIN_max
+};
+
+static void
+rx_init_builtins (void)
+{
+#define ADD_RX_BUILTIN1(UC_NAME, LC_NAME, RET_TYPE, ARG_TYPE) \
+ add_builtin_function ("__builtin_rx_" LC_NAME, \
+ build_function_type_list (RET_TYPE##_type_node, \
+ ARG_TYPE##_type_node, \
+ NULL_TREE), \
+ RX_BUILTIN_##UC_NAME, \
+ BUILT_IN_MD, NULL, NULL_TREE)
+
+#define ADD_RX_BUILTIN2(UC_NAME, LC_NAME, RET_TYPE, ARG_TYPE1, ARG_TYPE2) \
+ add_builtin_function ("__builtin_rx_" LC_NAME, \
+ build_function_type_list (RET_TYPE##_type_node, \
+ ARG_TYPE1##_type_node,\
+ ARG_TYPE2##_type_node,\
+ NULL_TREE), \
+ RX_BUILTIN_##UC_NAME, \
+ BUILT_IN_MD, NULL, NULL_TREE)
+
+#define ADD_RX_BUILTIN3(UC_NAME,LC_NAME,RET_TYPE,ARG_TYPE1,ARG_TYPE2,ARG_TYPE3) \
+ add_builtin_function ("__builtin_rx_" LC_NAME, \
+ build_function_type_list (RET_TYPE##_type_node, \
+ ARG_TYPE1##_type_node,\
+ ARG_TYPE2##_type_node,\
+ ARG_TYPE3##_type_node,\
+ NULL_TREE), \
+ RX_BUILTIN_##UC_NAME, \
+ BUILT_IN_MD, NULL, NULL_TREE)
+
+ ADD_RX_BUILTIN1 (BRK, "brk", void, void);
+ ADD_RX_BUILTIN1 (CLRPSW, "clrpsw", void, integer);
+ ADD_RX_BUILTIN1 (SETPSW, "setpsw", void, integer);
+ ADD_RX_BUILTIN1 (INT, "int", void, integer);
+ ADD_RX_BUILTIN2 (MACHI, "machi", void, intSI, intSI);
+ ADD_RX_BUILTIN2 (MACLO, "maclo", void, intSI, intSI);
+ ADD_RX_BUILTIN2 (MULHI, "mulhi", void, intSI, intSI);
+ ADD_RX_BUILTIN2 (MULLO, "mullo", void, intSI, intSI);
+ ADD_RX_BUILTIN1 (MVFACHI, "mvfachi", intSI, void);
+ ADD_RX_BUILTIN1 (MVFACMI, "mvfacmi", intSI, void);
+ ADD_RX_BUILTIN1 (MVTACHI, "mvtachi", void, intSI);
+ ADD_RX_BUILTIN1 (MVTACLO, "mvtaclo", void, intSI);
+ ADD_RX_BUILTIN1 (RMPA, "rmpa", void, void);
+ ADD_RX_BUILTIN1 (MVFC, "mvfc", intSI, integer);
+ ADD_RX_BUILTIN2 (MVTC, "mvtc", void, integer, integer);
+ ADD_RX_BUILTIN1 (MVTIPL, "mvtipl", void, integer);
+ ADD_RX_BUILTIN1 (RACW, "racw", void, integer);
+ ADD_RX_BUILTIN1 (ROUND, "round", intSI, float);
+ ADD_RX_BUILTIN1 (REVW, "revw", intSI, intSI);
+ ADD_RX_BUILTIN1 (SAT, "sat", intSI, intSI);
+ ADD_RX_BUILTIN1 (WAIT, "wait", void, void);
+}
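+
+/* Illustrative sketch of using the accumulator builtins registered
+   above from C code; the wrapper function itself is hypothetical:
+
+     int
+     mac_high (int a, int b, int acc_hi)
+     {
+       __builtin_rx_mvtachi (acc_hi);    load the accumulator high word
+       __builtin_rx_machi (a, b);        multiply-accumulate step
+       return __builtin_rx_mvfachi ();   read back the accumulator high word
+     }  */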
+
+static rtx
+rx_expand_void_builtin_1_arg (rtx arg, rtx (* gen_func)(rtx), bool reg)
+{
+ if (reg && ! REG_P (arg))
+ arg = force_reg (SImode, arg);
+
+ emit_insn (gen_func (arg));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_builtin_mvtc (tree exp)
+{
+ rtx arg1 = expand_normal (CALL_EXPR_ARG (exp, 0));
+ rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
+
+ if (! CONST_INT_P (arg1))
+ return NULL_RTX;
+
+ if (! REG_P (arg2))
+ arg2 = force_reg (SImode, arg2);
+
+ emit_insn (gen_mvtc (arg1, arg2));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_builtin_mvfc (tree t_arg, rtx target)
+{
+ rtx arg = expand_normal (t_arg);
+
+ if (! CONST_INT_P (arg))
+ return NULL_RTX;
+
+ if (! REG_P (target))
+ target = force_reg (SImode, target);
+
+ emit_insn (gen_mvfc (target, arg));
+
+ return target;
+}
+
+static rtx
+rx_expand_builtin_mvtipl (rtx arg)
+{
+ /* The RX610 does not support the MVTIPL instruction. */
+ if (rx_cpu_type == RX610)
+ return NULL_RTX;
+
+  if (! CONST_INT_P (arg) || ! IN_RANGE (INTVAL (arg), 0, (1 << 4) - 1))
+ return NULL_RTX;
+
+ emit_insn (gen_mvtipl (arg));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_builtin_mac (tree exp, rtx (* gen_func)(rtx, rtx))
+{
+ rtx arg1 = expand_normal (CALL_EXPR_ARG (exp, 0));
+ rtx arg2 = expand_normal (CALL_EXPR_ARG (exp, 1));
+
+ if (! REG_P (arg1))
+ arg1 = force_reg (SImode, arg1);
+
+ if (! REG_P (arg2))
+ arg2 = force_reg (SImode, arg2);
+
+ emit_insn (gen_func (arg1, arg2));
+
+ return NULL_RTX;
+}
+
+static rtx
+rx_expand_int_builtin_1_arg (rtx arg,
+ rtx target,
+ rtx (* gen_func)(rtx, rtx),
+ bool mem_ok)
+{
+ if (! REG_P (arg))
+ if (!mem_ok || ! MEM_P (arg))
+ arg = force_reg (SImode, arg);
+
+ if (target == NULL_RTX || ! REG_P (target))
+ target = gen_reg_rtx (SImode);
+
+ emit_insn (gen_func (target, arg));
+
+ return target;
+}
+
+static rtx
+rx_expand_int_builtin_0_arg (rtx target, rtx (* gen_func)(rtx))
+{
+ if (target == NULL_RTX || ! REG_P (target))
+ target = gen_reg_rtx (SImode);
+
+ emit_insn (gen_func (target));
+
+ return target;
+}
+
+static rtx
+rx_expand_builtin_round (rtx arg, rtx target)
+{
+ if ((! REG_P (arg) && ! MEM_P (arg))
+ || GET_MODE (arg) != SFmode)
+ arg = force_reg (SFmode, arg);
+
+ if (target == NULL_RTX || ! REG_P (target))
+ target = gen_reg_rtx (SImode);
+
+ emit_insn (gen_lrintsf2 (target, arg));
+
+ return target;
+}
+
+static rtx
+rx_expand_builtin (tree exp,
+ rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ tree arg = CALL_EXPR_ARGS (exp) ? CALL_EXPR_ARG (exp, 0) : NULL_TREE;
+ rtx op = arg ? expand_normal (arg) : NULL_RTX;
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ switch (fcode)
+ {
+ case RX_BUILTIN_BRK: emit_insn (gen_brk ()); return NULL_RTX;
+ case RX_BUILTIN_CLRPSW: return rx_expand_void_builtin_1_arg
+ (op, gen_clrpsw, false);
+ case RX_BUILTIN_SETPSW: return rx_expand_void_builtin_1_arg
+ (op, gen_setpsw, false);
+ case RX_BUILTIN_INT: return rx_expand_void_builtin_1_arg
+ (op, gen_int, false);
+ case RX_BUILTIN_MACHI: return rx_expand_builtin_mac (exp, gen_machi);
+ case RX_BUILTIN_MACLO: return rx_expand_builtin_mac (exp, gen_maclo);
+ case RX_BUILTIN_MULHI: return rx_expand_builtin_mac (exp, gen_mulhi);
+ case RX_BUILTIN_MULLO: return rx_expand_builtin_mac (exp, gen_mullo);
+ case RX_BUILTIN_MVFACHI: return rx_expand_int_builtin_0_arg
+ (target, gen_mvfachi);
+ case RX_BUILTIN_MVFACMI: return rx_expand_int_builtin_0_arg
+ (target, gen_mvfacmi);
+ case RX_BUILTIN_MVTACHI: return rx_expand_void_builtin_1_arg
+ (op, gen_mvtachi, true);
+ case RX_BUILTIN_MVTACLO: return rx_expand_void_builtin_1_arg
+ (op, gen_mvtaclo, true);
+ case RX_BUILTIN_RMPA: emit_insn (gen_rmpa ()); return NULL_RTX;
+ case RX_BUILTIN_MVFC: return rx_expand_builtin_mvfc (arg, target);
+ case RX_BUILTIN_MVTC: return rx_expand_builtin_mvtc (exp);
+ case RX_BUILTIN_MVTIPL: return rx_expand_builtin_mvtipl (op);
+ case RX_BUILTIN_RACW: return rx_expand_void_builtin_1_arg
+ (op, gen_racw, false);
+ case RX_BUILTIN_ROUND: return rx_expand_builtin_round (op, target);
+ case RX_BUILTIN_REVW: return rx_expand_int_builtin_1_arg
+ (op, target, gen_revw, false);
+ case RX_BUILTIN_SAT: return rx_expand_int_builtin_1_arg
+ (op, target, gen_sat, false);
+ case RX_BUILTIN_WAIT: emit_insn (gen_wait ()); return NULL_RTX;
+
+ default:
+ internal_error ("bad builtin code");
+ break;
+ }
+
+ return NULL_RTX;
+}
+
+/* Place an element into a constructor or destructor section.
+ Like default_ctor_section_asm_out_constructor in varasm.c
+ except that it uses .init_array (or .fini_array) and it
+ handles constructor priorities. */
+
+static void
+rx_elf_asm_cdtor (rtx symbol, int priority, bool is_ctor)
+{
+ section * s;
+
+ if (priority != DEFAULT_INIT_PRIORITY)
+ {
+ char buf[18];
+
+ sprintf (buf, "%s.%.5u",
+ is_ctor ? ".init_array" : ".fini_array",
+ priority);
+ s = get_section (buf, SECTION_WRITE, NULL_TREE);
+ }
+ else if (is_ctor)
+ s = ctors_section;
+ else
+ s = dtors_section;
+
+ switch_to_section (s);
+ assemble_align (POINTER_SIZE);
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
+
+static void
+rx_elf_asm_constructor (rtx symbol, int priority)
+{
+ rx_elf_asm_cdtor (symbol, priority, /* is_ctor= */true);
+}
+
+static void
+rx_elf_asm_destructor (rtx symbol, int priority)
+{
+ rx_elf_asm_cdtor (symbol, priority, /* is_ctor= */false);
+}
+
+/* Check "fast_interrupt", "interrupt" and "naked" attributes. */
+
+static tree
+rx_handle_func_attribute (tree * node,
+ tree name,
+ tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool * no_add_attrs)
+{
+ gcc_assert (DECL_P (* node));
+ gcc_assert (args == NULL_TREE);
+
+ if (TREE_CODE (* node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ * no_add_attrs = true;
+ }
+
+ /* FIXME: We ought to check for conflicting attributes. */
+
+ /* FIXME: We ought to check that the interrupt and exception
+ handler attributes have been applied to void functions. */
+ return NULL_TREE;
+}
+
+/* Table of RX specific attributes. */
+const struct attribute_spec rx_attribute_table[] =
+{
+ /* Name, min_len, max_len, decl_req, type_req, fn_type_req, handler. */
+ { "fast_interrupt", 0, 0, true, false, false, rx_handle_func_attribute },
+ { "interrupt", 0, 0, true, false, false, rx_handle_func_attribute },
+ { "naked", 0, 0, true, false, false, rx_handle_func_attribute },
+ { NULL, 0, 0, false, false, false, NULL }
+};
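+
+/* Illustrative declarations using the attributes registered above
+   (the function names are hypothetical):
+
+     void timer_isr (void) __attribute__ ((interrupt));
+     void nmi_handler (void) __attribute__ ((fast_interrupt));
+     void boot_stub (void) __attribute__ ((naked));  */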
+
+static bool
+rx_allocate_stack_slots_for_args (void)
+{
+ /* Naked functions should not allocate stack slots for arguments. */
+ return ! is_naked_func (NULL_TREE);
+}
+
+static bool
+rx_func_attr_inlinable (const_tree decl)
+{
+ return ! is_fast_interrupt_func (decl)
+ && ! is_interrupt_func (decl)
+ && ! is_naked_func (decl);
+}
+
+static void
+rx_file_start (void)
+{
+ if (! TARGET_AS100_SYNTAX)
+ default_file_start ();
+}
+
+static bool
+rx_is_ms_bitfield_layout (const_tree record_type ATTRIBUTE_UNUSED)
+{
+ return TRUE;
+}
+
+/* Try to generate code for the "insv" pattern which inserts bits
+ into a word.
+ operands[0] => Location to be altered.
+ operands[1] => Number of bits to change.
+ operands[2] => Starting bit.
+ operands[3] => Value to insert.
+ Returns TRUE if successful, FALSE otherwise. */
+
+bool
+rx_expand_insv (rtx * operands)
+{
+ if (INTVAL (operands[1]) != 1
+ || ! CONST_INT_P (operands[3]))
+ return false;
+
+ if (MEM_P (operands[0])
+ && INTVAL (operands[2]) > 7)
+ return false;
+
+ switch (INTVAL (operands[3]))
+ {
+ case 0:
+ if (MEM_P (operands[0]))
+ emit_insn (gen_bitclr_in_memory (operands[0], operands[0],
+ operands[2]));
+ else
+ emit_insn (gen_bitclr (operands[0], operands[0], operands[2]));
+ break;
+ case 1:
+ case -1:
+ if (MEM_P (operands[0]))
+ emit_insn (gen_bitset_in_memory (operands[0], operands[0],
+ operands[2]));
+ else
+ emit_insn (gen_bitset (operands[0], operands[0], operands[2]));
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
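+
+/* Illustrative source (hypothetical types and functions) that can be
+   expanded through the path above - storing a constant into a
+   single-bit field maps onto the bitset/bitclr patterns:
+
+     struct status { unsigned int ready : 1; unsigned int busy : 1; };
+
+     void mark_ready (struct status * s) { s->ready = 1; }   bit set
+     void clear_busy (struct status * s) { s->busy = 0; }    bit clear  */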
+
+/* Returns true if X is a legitimate constant for an immediate
+ operand on the RX. X is already known to satisfy CONSTANT_P. */
+
+bool
+rx_is_legitimate_constant (rtx x)
+{
+ HOST_WIDE_INT val;
+
+ switch (GET_CODE (x))
+ {
+ case CONST:
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) == PLUS)
+ {
+ if (! CONST_INT_P (XEXP (x, 1)))
+ return false;
+
+ /* GCC would not pass us CONST_INT + CONST_INT so we
+ know that we have {SYMBOL|LABEL} + CONST_INT. */
+ x = XEXP (x, 0);
+ gcc_assert (! CONST_INT_P (x));
+ }
+
+ switch (GET_CODE (x))
+ {
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return true;
+
+ /* One day we may have to handle UNSPEC constants here. */
+ default:
+ /* FIXME: Can this ever happen ? */
+ abort ();
+ return false;
+ }
+ break;
+
+ case LABEL_REF:
+ case SYMBOL_REF:
+ return true;
+ case CONST_DOUBLE:
+ return rx_max_constant_size == 0;
+ case CONST_VECTOR:
+ return false;
+ default:
+ gcc_assert (CONST_INT_P (x));
+ break;
+ }
+
+ if (rx_max_constant_size == 0)
+ /* If there is no constraint on the size of constants
+ used as operands, then any value is legitimate. */
+ return true;
+
+ val = INTVAL (x);
+
+ /* rx_max_constant_size specifies the maximum number
+ of bytes that can be used to hold a signed value. */
+ return IN_RANGE (val, (-1 << (rx_max_constant_size * 8)),
+ ( 1 << (rx_max_constant_size * 8)));
+}
+
+/* This is a tri-state variable. The default value of 0 means that the user
+ has specified neither -mfpu nor -mnofpu on the command line. In this case
+ the selection of RX FPU instructions is entirely based upon the size of
+ the floating point object and whether unsafe math optimizations were
+ enabled. If 32-bit doubles have been enabled then both floats and doubles
+ can make use of FPU instructions, otherwise only floats may do so.
+
+ If the value is 1 then the user has specified -mfpu and the FPU
+ instructions should be used. Unsafe math optimizations will automatically
+   be enabled and doubles set to 32 bits.  If the value is -1 then -mnofpu
+ has been specified and FPU instructions will not be used, even if unsafe
+ math optimizations have been enabled. */
+int rx_enable_fpu = 0;
+
+/* Extra processing for target specific command line options. */
+
+static bool
+rx_handle_option (size_t code, const char * arg ATTRIBUTE_UNUSED, int value)
+{
+ switch (code)
+ {
+ /* -mfpu enables the use of RX FPU instructions. This implies the use
+       of 32-bit doubles and also the enabling of fast math optimizations
+       (since the RX FPU instructions are not IEEE compliant).  The -mnofpu
+       option disables the use of RX FPU instructions, but does not place
+       any constraints on the size of doubles or the use of fast math
+       optimizations.
+
+ The selection of 32-bit vs 64-bit doubles is handled by the setting
+ of the 32BIT_DOUBLES mask in the rx.opt file. Enabling fast math
+ optimizations is performed in OVERRIDE_OPTIONS since if it was done
+ here it could be overridden by a -fno-fast-math option specified
+ *earlier* on the command line. (Target specific options are
+ processed before generic ones). */
+ case OPT_fpu:
+ rx_enable_fpu = 1;
+ break;
+
+ case OPT_nofpu:
+ rx_enable_fpu = -1;
+ break;
+
+ case OPT_mint_register_:
+ switch (value)
+ {
+ case 4:
+ fixed_regs[10] = call_used_regs [10] = 1;
+ /* Fall through. */
+ case 3:
+ fixed_regs[11] = call_used_regs [11] = 1;
+ /* Fall through. */
+ case 2:
+ fixed_regs[12] = call_used_regs [12] = 1;
+ /* Fall through. */
+ case 1:
+ fixed_regs[13] = call_used_regs [13] = 1;
+ /* Fall through. */
+ case 0:
+ return true;
+ default:
+ return false;
+ }
+ break;
+
+ case OPT_mmax_constant_size_:
+      /* Make sure that the -mmax-constant-size option is in range.  */
+ return IN_RANGE (value, 0, 4);
+
+ case OPT_mcpu_:
+ case OPT_patch_:
+ if (strcasecmp (arg, "RX610") == 0)
+ rx_cpu_type = RX610;
+ /* FIXME: Should we check for non-RX cpu names here ? */
+ break;
+
+ default:
+ break;
+ }
+
+ return true;
+}
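+
+/* To make the fall-through logic above concrete (the values are only
+   examples): -mint-register=2 marks r12 and r13 as fixed and call-used,
+   -mint-register=4 reserves r10 through r13, and -mint-register=0
+   reserves nothing.  */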
+
+static int
+rx_address_cost (rtx addr, bool speed)
+{
+ rtx a, b;
+
+ if (GET_CODE (addr) != PLUS)
+ return COSTS_N_INSNS (1);
+
+ a = XEXP (addr, 0);
+ b = XEXP (addr, 1);
+
+ if (REG_P (a) && REG_P (b))
+ /* Try to discourage REG+REG addressing as it keeps two registers live. */
+ return COSTS_N_INSNS (4);
+
+ if (speed)
+ /* [REG+OFF] is just as fast as [REG]. */
+ return COSTS_N_INSNS (1);
+
+ if (CONST_INT_P (b)
+ && ((INTVAL (b) > 128) || INTVAL (b) < -127))
+ /* Try to discourage REG + <large OFF> when optimizing for size. */
+ return COSTS_N_INSNS (2);
+
+ return COSTS_N_INSNS (1);
+}
+
+static bool
+rx_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+ /* We can always eliminate to the frame pointer.
+ We can eliminate to the stack pointer unless a frame
+ pointer is needed. */
+
+ return to == FRAME_POINTER_REGNUM
+ || ( to == STACK_POINTER_REGNUM && ! frame_pointer_needed);
+}
+
+
+static void
+rx_trampoline_template (FILE * file)
+{
+ /* Output assembler code for a block containing the constant
+ part of a trampoline, leaving space for the variable parts.
+
+ On the RX, (where r8 is the static chain regnum) the trampoline
+ looks like:
+
+ mov #<static chain value>, r8
+ mov #<function's address>, r9
+ jmp r9
+
+     In big-endian-data mode, however, instructions are read into the CPU
+     4 bytes at a time.  These bytes are then swapped around before being
+     passed to the decoder.  So we must partition our trampoline into
+     4-byte packets and swap these packets around so that the instruction
+     reader will reverse the process.  But, in order to avoid splitting
+     the 32-bit constants across these packet boundaries (which would make
+     inserting them into the constructed trampoline very difficult), we
+     have to pad the instruction sequence with NOP insns, i.e.:
+
+ nop
+ nop
+ mov.l #<...>, r8
+ nop
+ nop
+ mov.l #<...>, r9
+ jmp r9
+ nop
+ nop */
+
+ if (! TARGET_BIG_ENDIAN_DATA)
+ {
+ asm_fprintf (file, "\tmov.L\t#0deadbeefH, r%d\n", STATIC_CHAIN_REGNUM);
+ asm_fprintf (file, "\tmov.L\t#0deadbeefH, r%d\n", TRAMPOLINE_TEMP_REGNUM);
+ asm_fprintf (file, "\tjmp\tr%d\n", TRAMPOLINE_TEMP_REGNUM);
+ }
+ else
+ {
+ char r8 = '0' + STATIC_CHAIN_REGNUM;
+ char r9 = '0' + TRAMPOLINE_TEMP_REGNUM;
+
+ if (TARGET_AS100_SYNTAX)
+ {
+ asm_fprintf (file, "\t.BYTE 0%c2H, 0fbH, 003H, 003H\n", r8);
+ asm_fprintf (file, "\t.BYTE 0deH, 0adH, 0beH, 0efH\n");
+ asm_fprintf (file, "\t.BYTE 0%c2H, 0fbH, 003H, 003H\n", r9);
+ asm_fprintf (file, "\t.BYTE 0deH, 0adH, 0beH, 0efH\n");
+ asm_fprintf (file, "\t.BYTE 003H, 003H, 00%cH, 07fH\n", r9);
+ }
+ else
+ {
+ asm_fprintf (file, "\t.byte 0x%c2, 0xfb, 0x03, 0x03\n", r8);
+ asm_fprintf (file, "\t.byte 0xde, 0xad, 0xbe, 0xef\n");
+ asm_fprintf (file, "\t.byte 0x%c2, 0xfb, 0x03, 0x03\n", r9);
+ asm_fprintf (file, "\t.byte 0xde, 0xad, 0xbe, 0xef\n");
+ asm_fprintf (file, "\t.byte 0x03, 0x03, 0x0%c, 0x7f\n", r9);
+ }
+ }
+}
+
+static void
+rx_trampoline_init (rtx tramp, tree fndecl, rtx chain)
+{
+ rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+
+ emit_block_move (tramp, assemble_trampoline_template (),
+ GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
+ if (TARGET_BIG_ENDIAN_DATA)
+ {
+ emit_move_insn (adjust_address (tramp, SImode, 4), chain);
+ emit_move_insn (adjust_address (tramp, SImode, 12), fnaddr);
+ }
+ else
+ {
+ emit_move_insn (adjust_address (tramp, SImode, 2), chain);
+ emit_move_insn (adjust_address (tramp, SImode, 6 + 2), fnaddr);
+ }
+}
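+
+/* A worked restatement of the offsets used above: in little-endian mode
+   the template is 14 bytes long (two 6-byte "mov.l #imm32, rN" insns
+   followed by a 2-byte "jmp"), so the static chain immediate lives at
+   offset 2 and the function address at offset 6 + 2 = 8.  In
+   big-endian-data mode the NOP padding grows the template to 20 bytes
+   and the two 32-bit constants occupy the word-aligned slots at offsets
+   4 and 12.  This matches TRAMPOLINE_SIZE in rx.h.  */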
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE rx_function_value
+
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB rx_return_in_msb
+
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P rx_in_small_data
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY rx_return_in_memory
+
+#undef TARGET_HAVE_SRODATA_SECTION
+#define TARGET_HAVE_SRODATA_SECTION true
+
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION rx_select_rtx_section
+
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION rx_select_section
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS rx_init_builtins
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN rx_expand_builtin
+
+#undef TARGET_ASM_CONSTRUCTOR
+#define TARGET_ASM_CONSTRUCTOR rx_elf_asm_constructor
+
+#undef TARGET_ASM_DESTRUCTOR
+#define TARGET_ASM_DESTRUCTOR rx_elf_asm_destructor
+
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX rx_struct_value_rtx
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE rx_attribute_table
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START rx_file_start
+
+#undef TARGET_MS_BITFIELD_LAYOUT_P
+#define TARGET_MS_BITFIELD_LAYOUT_P rx_is_ms_bitfield_layout
+
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P rx_is_legitimate_address
+
+#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS rx_allocate_stack_slots_for_args
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE rx_output_function_prologue
+
+#undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
+#define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P rx_func_attr_inlinable
+
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION rx_set_current_function
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION rx_handle_option
+
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER rx_assemble_integer
+
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
+
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 32
+
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST rx_address_cost
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE rx_can_eliminate
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE rx_trampoline_template
+
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT rx_trampoline_init
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* #include "gt-rx.h" */
diff --git a/gcc/config/rx/rx.h b/gcc/config/rx/rx.h
new file mode 100644
index 00000000000..bb7cf7f1e3e
--- /dev/null
+++ b/gcc/config/rx/rx.h
@@ -0,0 +1,659 @@
+/* GCC backend definitions for the Renesas RX processor.
+ Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Red Hat.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__RX__"); \
+ builtin_assert ("cpu=RX"); \
+ if (rx_cpu_type == RX610) \
+ builtin_assert ("machine=RX610"); \
+ else \
+ builtin_assert ("machine=RX600"); \
+ \
+ if (TARGET_BIG_ENDIAN_DATA) \
+ builtin_define ("__RX_BIG_ENDIAN__"); \
+ else \
+ builtin_define ("__RX_LITTLE_ENDIAN__");\
+ \
+ if (TARGET_32BIT_DOUBLES) \
+ builtin_define ("__RX_32BIT_DOUBLES__");\
+ else \
+ builtin_define ("__RX_64BIT_DOUBLES__");\
+ \
+ if (ALLOW_RX_FPU_INSNS) \
+ builtin_define ("__RX_FPU_INSNS__"); \
+ \
+ if (TARGET_AS100_SYNTAX) \
+ builtin_define ("__RX_AS100_SYNTAX__"); \
+ else \
+ builtin_define ("__RX_GAS_SYNTAX__"); \
+ } \
+ while (0)
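+
+/* Illustrative use of the predefines above in user code (the typedef
+   name is hypothetical):
+
+     #ifdef __RX_32BIT_DOUBLES__
+     typedef float  real_t;      doubles are only 32 bits wide anyway
+     #else
+     typedef double real_t;      full 64-bit doubles are available
+     #endif  */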
+
+enum rx_cpu_types
+{
+ RX600,
+ RX610
+};
+
+extern enum rx_cpu_types rx_cpu_type;
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{mas100-syntax:%{gdwarf*:%e-mas100-syntax is incompatible with -gdwarf}}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{pg:gcrt0.o%s}%{!pg:crt0.o%s} crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{mbig-endian-data:-mbig-endian-data} \
+%{m32bit-doubles:-m32bit-doubles} \
+%{!m32bit-doubles:-m64bit-doubles} \
+%{msmall-data-limit*:-msmall-data-limit} \
+%{mrelax:-relax} \
+"
+
+#undef LIB_SPEC
+#define LIB_SPEC " \
+--start-group \
+-lc \
+%{msim*:-lsim}%{!msim*:-lnosys} \
+%{fprofile-arcs|fprofile-generate|coverage:-lgcov} \
+--end-group \
+%{!T*: %{msim*:%Trx-sim.ld}%{!msim*:%Trx.ld}} \
+"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{mbig-endian-data:--oformat elf32-rx-be} %{mrelax:-relax}"
+
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN TARGET_BIG_ENDIAN_DATA
+#define WORDS_BIG_ENDIAN TARGET_BIG_ENDIAN_DATA
+
+#ifdef __RX_BIG_ENDIAN__
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+#define UNITS_PER_WORD 4
+
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE (TARGET_32BIT_DOUBLES ? 32 : 64)
+#define LONG_DOUBLE_TYPE_SIZE DOUBLE_TYPE_SIZE
+
+#ifdef __RX_32BIT_DOUBLES__
+#define LIBGCC2_HAS_DF_MODE 0
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 32
+#define LIBGCC2_DOUBLE_TYPE_SIZE 32
+#else
+#define LIBGCC2_HAS_DF_MODE 1
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#define LIBGCC2_DOUBLE_TYPE_SIZE 64
+#endif
+
+#define DEFAULT_SIGNED_CHAR 0
+
+#define STRICT_ALIGNMENT 1
+#define FUNCTION_BOUNDARY 8
+#define BIGGEST_ALIGNMENT 32
+#define STACK_BOUNDARY 32
+#define PARM_BOUNDARY 8
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) 32
+
+#define STACK_GROWS_DOWNWARD 1
+#define FRAME_GROWS_DOWNWARD 0
+#define FIRST_PARM_OFFSET(FNDECL) 0
+
+#define MAX_REGS_PER_ADDRESS 2
+
+#define Pmode SImode
+#define POINTER_SIZE 32
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+#define POINTERS_EXTEND_UNSIGNED 1
+#define FUNCTION_MODE QImode
+#define CASE_VECTOR_MODE Pmode
+#define WORD_REGISTER_OPERATIONS 1
+#define HAS_LONG_COND_BRANCH 0
+#define HAS_LONG_UNCOND_BRANCH 0
+
+#define MOVE_MAX 4
+#define STARTING_FRAME_OFFSET 0
+
+#define RETURN_POPS_ARGS(FUNDECL, FUNTYPE, SIZE) 0
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define LEGITIMATE_CONSTANT_P(X) rx_is_legitimate_constant (X)
+
+#define HANDLE_PRAGMA_PACK_PUSH_POP 1
+
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_POST_INCREMENT 1
+
+#define MOVE_RATIO(SPEED) ((SPEED) ? 4 : 2)
+#define SLOW_BYTE_ACCESS 1
+
+#define STORE_FLAG_VALUE 1
+#define LOAD_EXTEND_OP(MODE) SIGN_EXTEND
+#define SHORT_IMMEDIATES_SIGN_EXTEND 1
+
+enum reg_class
+{
+ NO_REGS, /* No registers in set. */
+ GR_REGS, /* Integer registers. */
+ ALL_REGS, /* All registers. */
+ LIM_REG_CLASSES /* Max value + 1. */
+};
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "GR_REGS", \
+ "ALL_REGS" \
+}
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000 }, /* No registers, */ \
+ { 0x0000ffff }, /* Integer registers. */ \
+ { 0x0000ffff } /* All registers. */ \
+}
+
+#define IRA_COVER_CLASSES \
+ { \
+ GR_REGS, LIM_REG_CLASSES \
+ }
+
+#define SMALL_REGISTER_CLASSES 0
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+#define CLASS_MAX_NREGS(CLASS, MODE) ((GET_MODE_SIZE (MODE) \
+ + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+#define GENERAL_REGS GR_REGS
+#define BASE_REG_CLASS GR_REGS
+#define INDEX_REG_CLASS GR_REGS
+
+#define FIRST_PSEUDO_REGISTER 16
+
+#define REGNO_REG_CLASS(REGNO) ((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? GR_REGS : NO_REGS)
+
+#define STACK_POINTER_REGNUM 0
+#define FUNC_RETURN_REGNUM 1
+#define FRAME_POINTER_REGNUM 6
+#define ARG_POINTER_REGNUM 7
+#define STATIC_CHAIN_REGNUM 8
+#define TRAMPOLINE_TEMP_REGNUM 9
+#define STRUCT_VAL_REGNUM 15
+
+/* This is the register which is used to hold the address of the start
+ of the small data area, if that feature is being used. Note - this
+ register must not be call_used because otherwise library functions
+ that are compiled without small data support might clobber it.
+
+ FIXME: The function gcc/config/rx/rx.c:rx_gen_move_template() has a
+ built in copy of this register's name, rather than constructing the
+ name from this #define. */
+#define GP_BASE_REGNUM 13
+
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM }, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM }, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM }}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ (OFFSET) = rx_initial_elimination_offset ((FROM), (TO))
+
+
+#define FUNCTION_ARG_REGNO_P(N) (((N) >= 1) && ((N) <= 4))
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == FUNC_RETURN_REGNUM)
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define FIXED_REGISTERS \
+{ \
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 \
+}
+
+#define CALL_USED_REGISTERS \
+{ \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 \
+}
+
+#define CONDITIONAL_REGISTER_USAGE \
+ rx_conditional_register_usage ()
+
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG (((GET_MODE_CLASS (MODE) != MODE_INT \
+ || GET_MODE_SIZE (MODE) >= 4) \
+ ? (MODE) \
+ : SImode), \
+ FUNC_RETURN_REGNUM)
+
+/* Order of allocation of registers. */
+
+#define REG_ALLOC_ORDER \
+{ 7, 10, 11, 12, 13, 14, 4, 3, 2, 1, 9, 8, 6, 5, 15 \
+}
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) CLASS
+
+#define REGNO_IN_RANGE(REGNO, MIN, MAX) \
+ (IN_RANGE ((REGNO), (MIN), (MAX)) \
+ || (reg_renumber != NULL \
+ && reg_renumber[(REGNO)] >= (MIN) \
+ && reg_renumber[(REGNO)] <= (MAX)))
+
+#ifdef REG_OK_STRICT
+#define REGNO_OK_FOR_BASE_P(regno) REGNO_IN_RANGE (regno, 0, 15)
+#else
+#define REGNO_OK_FOR_BASE_P(regno) 1
+#endif
+
+#define REGNO_OK_FOR_INDEX_P(regno) REGNO_OK_FOR_BASE_P (regno)
+
+#define RTX_OK_FOR_BASE(X, STRICT) \
+ ((STRICT) ? \
+ ( (REG_P (X) \
+ && REGNO_IN_RANGE (REGNO (X), 0, 15)) \
+ || (GET_CODE (X) == SUBREG \
+ && REG_P (SUBREG_REG (X)) \
+ && REGNO_IN_RANGE (REGNO (SUBREG_REG (X)), 0, 15))) \
+ : \
+ ( (REG_P (X) \
+ || (GET_CODE (X) == SUBREG \
+ && REG_P (SUBREG_REG (X))))))
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR, LABEL) \
+ do \
+ { \
+ if (rx_is_mode_dependent_addr (ADDR)) \
+ goto LABEL; \
+ } \
+ while (0)
+
+
+#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \
+ ((COUNT) == 0 \
+ ? gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, arg_pointer_rtx, GEN_INT (-4))) \
+ : NULL_RTX)
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_MEM (Pmode, stack_pointer_rtx)
+
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+typedef unsigned int CUMULATIVE_ARGS;
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ (CUM) = 0
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ rx_function_arg (& CUM, MODE, TYPE, NAMED)
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ (CUM) += rx_function_arg_size (MODE, TYPE)
+
+#define TRAMPOLINE_SIZE (! TARGET_BIG_ENDIAN_DATA ? 14 : 20)
+#define TRAMPOLINE_ALIGNMENT 32
+
+#define NO_PROFILE_COUNTERS 1
+#define PROFILE_BEFORE_PROLOGUE 1
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tbsr\t__mcount\n");
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) CLASS_MAX_NREGS (0, MODE)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ REGNO_REG_CLASS (REGNO) == GR_REGS
+
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ( ( GET_MODE_CLASS (MODE1) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE1) == MODE_COMPLEX_FLOAT) \
+ == ( GET_MODE_CLASS (MODE2) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE2) == MODE_COMPLEX_FLOAT))
+
+
+#define REGISTER_NAMES \
+ { \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" \
+ };
+
+#define ADDITIONAL_REGISTER_NAMES \
+{ \
+ { "sp", STACK_POINTER_REGNUM } \
+ , { "fp", FRAME_POINTER_REGNUM } \
+ , { "arg", ARG_POINTER_REGNUM } \
+ , { "chain", STATIC_CHAIN_REGNUM } \
+}
+
+#define DATA_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION D,DATA" \
+ : "\t.section D,\"aw\",@progbits\n\t.p2align 2")
+
+#define SDATA_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION D_2,DATA,ALIGN=2" \
+ : "\t.section D_2,\"aw\",@progbits\n\t.p2align 1")
+
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION C,ROMDATA,ALIGN=4" \
+ : "\t.section C,\"a\",@progbits\n\t.p2align 2")
+
+#define BSS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION B,DATA,ALIGN=4" \
+ : "\t.section B,\"w\",@nobits\n\t.p2align 2")
+
+#define SBSS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION B_2,DATA,ALIGN=2" \
+ : "\t.section B_2,\"w\",@nobits\n\t.p2align 1")
+
+/* The following definitions are conditional upon whether the compiler
+   itself is being built or whether crtstuff.c is being compiled by the
+   newly built compiler.  */
+#if defined CRT_BEGIN || defined CRT_END
+# ifdef __RX_AS100_SYNTAX
+# define TEXT_SECTION_ASM_OP "\t.SECTION P,CODE"
+# define CTORS_SECTION_ASM_OP "\t.SECTION init_array,CODE"
+# define DTORS_SECTION_ASM_OP "\t.SECTION fini_array,CODE"
+# define INIT_ARRAY_SECTION_ASM_OP "\t.SECTION init_array,CODE"
+# define FINI_ARRAY_SECTION_ASM_OP "\t.SECTION fini_array,CODE"
+# else
+# define TEXT_SECTION_ASM_OP "\t.section P,\"ax\""
+# define CTORS_SECTION_ASM_OP \
+ "\t.section\t.init_array,\"aw\",@init_array"
+# define DTORS_SECTION_ASM_OP \
+ "\t.section\t.fini_array,\"aw\",@fini_array"
+# define INIT_ARRAY_SECTION_ASM_OP \
+ "\t.section\t.init_array,\"aw\",@init_array"
+# define FINI_ARRAY_SECTION_ASM_OP \
+ "\t.section\t.fini_array,\"aw\",@fini_array"
+# endif
+#else
+# define TEXT_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION P,CODE" : "\t.section P,\"ax\"")
+
+# define CTORS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION init_array,CODE" \
+ : "\t.section\t.init_array,\"aw\",@init_array")
+
+# define DTORS_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION fini_array,CODE" \
+ : "\t.section\t.fini_array,\"aw\",@fini_array")
+
+# define INIT_ARRAY_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION init_array,CODE" \
+ : "\t.section\t.init_array,\"aw\",@init_array")
+
+# define FINI_ARRAY_SECTION_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.SECTION fini_array,CODE" \
+ : "\t.section\t.fini_array,\"aw\",@fini_array")
+#endif
+
+#define GLOBAL_ASM_OP \
+ (TARGET_AS100_SYNTAX ? "\t.GLB\t" : "\t.global\t")
+#define ASM_COMMENT_START " ;"
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+#define LOCAL_LABEL_PREFIX "L"
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
+#define ASM_OUTPUT_ALIGN(STREAM, LOG) \
+ do \
+ { \
+ if ((LOG) == 0) \
+ break; \
+ if (TARGET_AS100_SYNTAX) \
+ { \
+ if ((LOG) >= 2) \
+ fprintf (STREAM, "\t.ALIGN 4\t; %d alignment actually requested\n", 1 << (LOG)); \
+ else \
+ fprintf (STREAM, "\t.ALIGN 2\n"); \
+ } \
+ else \
+ fprintf (STREAM, "\t.balign %d\n", 1 << (LOG)); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, TARGET_AS100_SYNTAX ? "\t.LWORD L%d\n" : "\t.long .L%d\n", \
+ VALUE)
+
+/* This is how to output an element of a case-vector that is relative.
+   Note: The local label referenced by the "1b" below is emitted by
+ the tablejump insn. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, TARGET_AS100_SYNTAX \
+ ? "\t.LWORD L%d - ?-\n" : "\t.long .L%d - 1b\n", VALUE)
+
+#define ASM_OUTPUT_SIZE_DIRECTIVE(STREAM, NAME, SIZE) \
+ do \
+ { \
+ HOST_WIDE_INT size_ = (SIZE); \
+ \
+ /* The as100 assembler does not have an equivalent of the SVR4 \
+ .size pseudo-op. */ \
+ if (TARGET_AS100_SYNTAX) \
+ break; \
+ \
+ fputs (SIZE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fprintf (STREAM, ", " HOST_WIDE_INT_PRINT_DEC "\n", size_); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_MEASURED_SIZE(STREAM, NAME) \
+ do \
+ { \
+ /* The as100 assembler does not have an equivalent of the SVR4 \
+ .size pseudo-op. */ \
+ if (TARGET_AS100_SYNTAX) \
+ break; \
+ fputs (SIZE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fputs (", .-", STREAM); \
+ assemble_name (STREAM, NAME); \
+ putc ('\n', STREAM); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_TYPE_DIRECTIVE(STREAM, NAME, TYPE) \
+ do \
+ { \
+ /* The as100 assembler does not have an equivalent of the SVR4 \
+	 .type pseudo-op.  */						\
+ if (TARGET_AS100_SYNTAX) \
+ break; \
+ fputs (TYPE_ASM_OP, STREAM); \
+ assemble_name (STREAM, NAME); \
+ fputs (", ", STREAM); \
+ fprintf (STREAM, TYPE_OPERAND_FMT, TYPE); \
+ putc ('\n', STREAM); \
+ } \
+ while (0)
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+ do \
+ { \
+ sprintf (LABEL, TARGET_AS100_SYNTAX ? "*%s%u" : "*.%s%u", \
+ PREFIX, (unsigned) (NUM)); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+ do \
+ { \
+ if (TARGET_AS100_SYNTAX) \
+ targetm.asm_out.globalize_label (FILE, NAME); \
+ default_elf_asm_output_external (FILE, DECL, NAME); \
+ } \
+ while (0)
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (TARGET_AS100_SYNTAX) \
+ { \
+ fprintf ((FILE), "\t.GLB\t"); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ assemble_name ((FILE), (NAME)); \
+ switch ((ALIGN) / BITS_PER_UNIT) \
+ { \
+ case 4: \
+ fprintf ((FILE), ":\t.BLKL\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ (SIZE) / 4); \
+ break; \
+ case 2: \
+ fprintf ((FILE), ":\t.BLKW\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ (SIZE) / 2); \
+ break; \
+ default: \
+ fprintf ((FILE), ":\t.BLKB\t"HOST_WIDE_INT_PRINT_UNSIGNED"\n",\
+ (SIZE)); \
+ break; \
+ } \
+ } \
+ else \
+ { \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+ } \
+ while (0)
+
+#undef SKIP_ASM_OP
+#define SKIP_ASM_OP (TARGET_AS100_SYNTAX ? "\t.BLKB\t" : "\t.zero\t")
+
+#undef ASM_OUTPUT_LIMITED_STRING
+#define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \
+ do \
+ { \
+ const unsigned char *_limited_str = \
+ (const unsigned char *) (STR); \
+ unsigned ch; \
+ \
+ fprintf ((FILE), TARGET_AS100_SYNTAX \
+ ? "\t.BYTE\t\"" : "\t.string\t\""); \
+ \
+ for (; (ch = *_limited_str); _limited_str++) \
+ { \
+ int escape; \
+ \
+ switch (escape = ESCAPES[ch]) \
+ { \
+ case 0: \
+ putc (ch, (FILE)); \
+ break; \
+ case 1: \
+ fprintf ((FILE), "\\%03o", ch); \
+ break; \
+ default: \
+ putc ('\\', (FILE)); \
+ putc (escape, (FILE)); \
+ break; \
+ } \
+ } \
+ \
+ fprintf ((FILE), TARGET_AS100_SYNTAX ? "\"\n\t.BYTE\t0\n" : "\"\n");\
+ } \
+ while (0)
+
+#undef IDENT_ASM_OP
+#define IDENT_ASM_OP (TARGET_AS100_SYNTAX \
+ ? "\t.END\t; Built by: ": "\t.ident\t")
+
+/* For PIC put jump tables into the text section so that the offsets that
+ they contain are always computed between two same-section symbols. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+#define PRINT_OPERAND(FILE, X, CODE) \
+ rx_print_operand (FILE, X, CODE)
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+ rx_print_operand_address (FILE, ADDR)
+
+#define CC_NO_CARRY 0400
+#define NOTICE_UPDATE_CC(EXP, INSN) rx_notice_update_cc (EXP, INSN)
+
+extern int rx_float_compare_mode;
+
+/* This is a version of REG_P that also returns TRUE for SUBREGs. */
+#define RX_REG_P(rtl) (REG_P (rtl) || GET_CODE (rtl) == SUBREG)
+
+/* Like REG_P except that this macro is true for SET expressions. */
+#define SET_P(rtl) (GET_CODE (rtl) == SET)
+
+#define CAN_DEBUG_WITHOUT_FP 1
+
+/* The AS100 assembler does not support .leb128 and .uleb128, but
+ the compiler-build-time configure tests will have enabled their
+ use because GAS supports them. So default to generating STABS
+ debug information instead of DWARF2 when generating AS100
+ compatible output. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE (TARGET_AS100_SYNTAX \
+ ? DBX_DEBUG : DWARF2_DEBUG)
+
+#define INCOMING_FRAME_SP_OFFSET 4
+#define ARG_POINTER_CFA_OFFSET(FNDECL) 4
+#define FRAME_POINTER_CFA_OFFSET(FNDECL) 4
+
+extern int rx_enable_fpu;
+
+/* For some unknown reason LTO compression is not working, at
+ least on my local system. So set the default compression
+ level to none, for now.
+
+ For an explanation of rx_flag_no_fpu see rx_handle_option(). */
+#define OVERRIDE_OPTIONS \
+ do \
+ { \
+ if (flag_lto_compression_level == -1) \
+ flag_lto_compression_level = 0; \
+ \
+ if (rx_enable_fpu == 1) \
+ set_fast_math_flags (true); \
+ } \
+ while (0)
+
+/* This macro is used to decide when RX FPU instructions can be used. */
+#define ALLOW_RX_FPU_INSNS ((rx_enable_fpu != -1) \
+ && flag_unsafe_math_optimizations)
diff --git a/gcc/config/rx/rx.md b/gcc/config/rx/rx.md
new file mode 100644
index 00000000000..360f6235558
--- /dev/null
+++ b/gcc/config/rx/rx.md
@@ -0,0 +1,1766 @@
+;; Machine Description for Renesas RX processors
+;; Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+;; Contributed by Red Hat.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; This code iterator allows all branch instructions to
+;; be generated from a single define_expand template.
+(define_code_iterator most_cond [eq ne gt ge lt le gtu geu ltu leu
+ unordered ordered ])
+
+;; This mode iterator is used for sign- and zero-extensions.
+(define_mode_iterator small_int_modes [(HI "") (QI "")])
+
+;; We do not handle DFmode here because it is either
+;; the same as SFmode, or if -m64bit-doubles is active
+;; then all operations on doubles have to be handled by
+;; library functions.
+(define_mode_iterator register_modes
+ [(SF "ALLOW_RX_FPU_INSNS") (SI "") (HI "") (QI "")])
+
+
+;; Used to map GCC condition names to RX
+;; condition names for builtin instructions.
+(define_code_iterator gcc_conds [eq ne gt ge lt le gtu geu ltu leu
+ unge unlt uneq ltgt])
+(define_code_attr rx_conds [(eq "eq") (ne "ne") (gt "gt") (ge "ge") (lt "lt")
+ (le "le") (gtu "gtu") (geu "geu") (ltu "ltu")
+ (leu "leu") (unge "pz") (unlt "n") (uneq "o")
+ (ltgt "no")])
+
+(define_constants
+ [
+ (SP_REG 0)
+
+ (UNSPEC_LOW_REG 0)
+ (UNSPEC_HIGH_REG 1)
+
+ (UNSPEC_RTE 10)
+ (UNSPEC_RTFI 11)
+ (UNSPEC_NAKED 12)
+
+ (UNSPEC_MOVSTR 20)
+ (UNSPEC_MOVMEM 21)
+ (UNSPEC_SETMEM 22)
+ (UNSPEC_STRLEN 23)
+ (UNSPEC_CMPSTRN 24)
+
+ (UNSPEC_BUILTIN_BRK 30)
+ (UNSPEC_BUILTIN_CLRPSW 31)
+ (UNSPEC_BUILTIN_INT 32)
+ (UNSPEC_BUILTIN_MACHI 33)
+ (UNSPEC_BUILTIN_MACLO 34)
+ (UNSPEC_BUILTIN_MULHI 35)
+ (UNSPEC_BUILTIN_MULLO 36)
+ (UNSPEC_BUILTIN_MVFACHI 37)
+ (UNSPEC_BUILTIN_MVFACMI 38)
+ (UNSPEC_BUILTIN_MVFC 39)
+ (UNSPEC_BUILTIN_MVFCP 40)
+ (UNSPEC_BUILTIN_MVTACHI 41)
+ (UNSPEC_BUILTIN_MVTACLO 42)
+ (UNSPEC_BUILTIN_MVTC 43)
+ (UNSPEC_BUILTIN_MVTIPL 44)
+ (UNSPEC_BUILTIN_RACW 45)
+ (UNSPEC_BUILTIN_REVW 46)
+ (UNSPEC_BUILTIN_RMPA 47)
+ (UNSPEC_BUILTIN_ROUND 48)
+ (UNSPEC_BUILTIN_SAT 49)
+ (UNSPEC_BUILTIN_SETPSW 50)
+ (UNSPEC_BUILTIN_WAIT 51)
+ ]
+)
+
+;; Condition code settings:
+;; none - insn does not affect the condition code bits
+;; set_zs - insn sets z,s to usable values;
+;; set_zso - insn sets z,s,o to usable values;
+;; set_zsoc - insn sets z,s,o,c to usable values;
+;; clobber - value of cc0 is unknown
+(define_attr "cc" "none,set_zs,set_zso,set_zsoc,clobber" (const_string "none"))
+
+(define_attr "length" "" (const_int 8))
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Pipeline description.
+
+;; The RX only has a single pipeline. It has five stages (fetch,
+;; decode, execute, memory access, writeback) each of which normally
+;; takes a single CPU clock cycle.
+
+;; The timings attribute consists of two numbers: the first is the
+;; throughput, which is the number of cycles the instruction takes
+;; to execute and generate a result.  The second is the latency,
+;; which is the effective number of cycles the instruction takes to
+;; execute if its result is used by the following instruction.  The
+;; latency is always greater than or equal to the throughput.
+;; These values were taken from tables 2.13 and 2.14 in section 2.8
+;; of the RX610 Group Hardware Manual v0.11.
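+;;
+;; For example, a timings value of 34 encodes a throughput of 3 cycles
+;; and a latency of 4 cycles, and is matched by the
+;; throughput__3_latency__4 reservation defined below.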
+
+;; Note - it would be nice to use strings rather than integers for
+;; the possible values of this attribute, so that we can have the
+;; gcc build mechanism check for values that are not supported by
+;; the reservations below. But this will not work because the code
+;; in rx_adjust_sched_cost() needs integers not strings.
+
+(define_attr "timings" "" (const_int 11))
+
+(define_automaton "pipelining")
+(define_cpu_unit "throughput" "pipelining")
+
+(define_insn_reservation "throughput__1_latency__1" 1
+ (eq_attr "timings" "11") "throughput")
+(define_insn_reservation "throughput__1_latency__2" 2
+ (eq_attr "timings" "12") "throughput,nothing")
+(define_insn_reservation "throughput__2_latency__2" 1
+ (eq_attr "timings" "22") "throughput*2")
+(define_insn_reservation "throughput__3_latency__3" 1
+ (eq_attr "timings" "33") "throughput*3")
+(define_insn_reservation "throughput__3_latency__4" 2
+ (eq_attr "timings" "34") "throughput*3,nothing")
+(define_insn_reservation "throughput__4_latency__4" 1
+ (eq_attr "timings" "44") "throughput*4")
+(define_insn_reservation "throughput__4_latency__5" 2
+ (eq_attr "timings" "45") "throughput*4,nothing")
+(define_insn_reservation "throughput__5_latency__5" 1
+ (eq_attr "timings" "55") "throughput*5")
+(define_insn_reservation "throughput__5_latency__6" 2
+ (eq_attr "timings" "56") "throughput*5,nothing")
+(define_insn_reservation "throughput__6_latency__6" 1
+ (eq_attr "timings" "66") "throughput*6")
+(define_insn_reservation "throughput_10_latency_10" 1
+ (eq_attr "timings" "1010") "throughput*10")
+(define_insn_reservation "throughput_11_latency_11" 1
+ (eq_attr "timings" "1111") "throughput*11")
+(define_insn_reservation "throughput_16_latency_16" 1
+ (eq_attr "timings" "1616") "throughput*16")
+(define_insn_reservation "throughput_18_latency_18" 1
+ (eq_attr "timings" "1818") "throughput*18")
+
+;; Comparisons
+
+(define_expand "cbranchsi4"
+ [(set (cc0) (compare:CC (match_operand:SI 1 "register_operand")
+ (match_operand:SI 2 "rx_source_operand")))
+ (set (pc)
+ (if_then_else (match_operator:SI 0 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+ ""
+)
+
+(define_expand "cbranchsf4"
+ [(set (cc0) (compare:CC (match_operand:SF 1 "register_operand")
+ (match_operand:SF 2 "rx_source_operand")))
+ (set (pc)
+ (if_then_else (match_operator:SI 0 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ "ALLOW_RX_FPU_INSNS && ! flag_non_call_exceptions"
+ ""
+)
+
+;; The TST instruction is not used as it does not set the Carry flag,
+;; so for example, the LessThan comparison cannot be tested.
+;;
+;; (define_insn "tstsi"
+;; [(set (cc0)
+;; (match_operand:SI 0 "rx_source_operand" "r,i,Q")))]
+;; ""
+;; {
+;; rx_float_compare_mode = false;
+;; return "tst\t%Q0";
+;; }
+;; [(set_attr "cc" "set_zs")
+;; (set_attr "timings" "11,11,33")
+;; (set_attr "length" "3,7,6")]
+;; )
+
+(define_insn "cmpsi"
+ [(set (cc0) (compare:CC
+ (match_operand:SI 0 "register_operand" "r,r,r,r,r,r,r")
+ (match_operand:SI 1 "rx_source_operand"
+ "r,Uint04,Int08,Sint16,Sint24,i,Q")))]
+ ""
+ {
+ rx_float_compare_mode = false;
+ return "cmp\t%Q1, %Q0";
+ }
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "timings" "11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,3,4,5,6,5")]
+)
+
+;; This pattern is disabled when -fnon-call-exceptions is active because
+;; it could generate a floating point exception, which would introduce an
+;; edge into the flow graph between this insn and the conditional branch
+;; insn to follow, thus breaking the cc0 relationship. Run the g++ test
+;; g++.dg/eh/080514-1.C to see this happen.
+(define_insn "cmpsf"
+ [(set (cc0)
+ (compare:CC (match_operand:SF 0 "register_operand" "r,r,r")
+ (match_operand:SF 1 "rx_source_operand" "r,i,Q")))]
+ "ALLOW_RX_FPU_INSNS && ! flag_non_call_exceptions"
+ {
+ rx_float_compare_mode = true;
+ return "fcmp\t%1, %0";
+ }
+ [(set_attr "cc" "set_zso")
+ (set_attr "timings" "11,11,33")
+ (set_attr "length" "3,7,5")]
+)
+
+;; Flow Control Instructions:
+
+(define_expand "b<code>"
+ [(set (pc)
+ (if_then_else (most_cond (cc0) (const_int 0))
+ (label_ref (match_operand 0))
+ (pc)))]
+ ""
+ ""
+)
+
+(define_insn "*conditional_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ {
+ return rx_gen_cond_branch_template (operands[1], false);
+ }
+ [(set_attr "length" "8") ;; This length is wrong, but it is
+ ;; too hard to compute statically.
+ (set_attr "timings" "33") ;; The timing assumes that the branch is taken.
+ (set_attr "cc" "clobber")] ;; FIXME: This clobber is wrong.
+)
+
+(define_insn "*reveresed_conditional_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ {
+ return rx_gen_cond_branch_template (operands[1], true);
+ }
+ [(set_attr "length" "8") ;; This length is wrong, but it is
+ ;; too hard to compute statically.
+ (set_attr "timings" "33") ;; The timing assumes that the branch is taken.
+ (set_attr "cc" "clobber")] ;; FIXME: This clobber is wrong.
+)
+
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "bra\t%0"
+ [(set_attr "length" "4")
+ (set_attr "timings" "33")
+ (set_attr "cc" "clobber")] ;; FIXME: This clobber is wrong.
+)
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "jmp\t%0"
+ [(set_attr "length" "2")
+ (set_attr "timings" "33")
+ (set_attr "cc" "clobber")] ;; FIXME: This clobber is wrong.
+)
+
+(define_insn "tablejump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ { return flag_pic ? (TARGET_AS100_SYNTAX ? "\n?:\tbra\t%0"
+ : "\n1:\tbra\t%0")
+ : "jmp\t%0";
+ }
+ [(set_attr "cc" "clobber") ;; FIXME: This clobber is wrong.
+ (set_attr "timings" "33")
+ (set_attr "length" "2")]
+)
+
+(define_insn "simple_return"
+ [(return)]
+ ""
+ "rts"
+ [(set_attr "length" "1")
+ (set_attr "timings" "55")]
+)
+
+(define_insn "deallocate_and_return"
+ [(set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 0 "immediate_operand" "i")))
+ (return)]
+ ""
+ "rtsd\t%0"
+ [(set_attr "length" "2")
+ (set_attr "timings" "55")]
+)
+
+(define_insn "pop_and_return"
+ [(match_parallel 1 "rx_rtsd_vector"
+ [(set:SI (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI
+ 0 "const_int_operand" "n")))])]
+ "reload_completed"
+ {
+ rx_emit_stack_popm (operands, false);
+ return "";
+ }
+ [(set_attr "length" "3")
+ (set_attr "timings" "56")]
+)
+
+(define_insn "fast_interrupt_return"
+ [(unspec_volatile [(return)] UNSPEC_RTFI) ]
+ ""
+ "rtfi"
+ [(set_attr "length" "2")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "exception_return"
+ [(unspec_volatile [(return)] UNSPEC_RTE) ]
+ ""
+ "rte"
+ [(set_attr "length" "2")
+ (set_attr "timings" "66")]
+)
+
+(define_insn "naked_return"
+ [(unspec_volatile [(return)] UNSPEC_NAKED) ]
+ ""
+ "; Naked function: epilogue provided by programmer."
+)
+
+
+;; Note - the following set of patterns does not use the "memory_operand"
+;; predicate or an "m" constraint because we do not allow symbol_refs
+;; or label_refs as legitimate memory addresses.  This matches the
+;; behaviour of most of the RX instructions.  Only the call/branch
+;; instructions are allowed to refer to symbols/labels directly.
+;; The call operands are in QImode because that is the value of
+;; FUNCTION_MODE.
+
+(define_expand "call"
+ [(call (match_operand:QI 0 "general_operand")
+ (match_operand:SI 1 "general_operand"))]
+ ""
+ {
+ rtx dest = XEXP (operands[0], 0);
+
+ if (! rx_call_operand (dest, Pmode))
+ dest = force_reg (Pmode, dest);
+ emit_call_insn (gen_call_internal (dest, operands[1]));
+ DONE;
+ }
+)
+
+(define_insn "call_internal"
+ [(call (mem:QI (match_operand:SI 0 "rx_call_operand" "r,Symbol"))
+ (match_operand:SI 1 "general_operand" "g,g"))]
+ ""
+ "@
+ jsr\t%A0
+ bsr\t%A0"
+ [(set_attr "length" "2,4")
+ (set_attr "timings" "33")]
+)
+
+(define_expand "call_value"
+ [(set (match_operand 0 "register_operand")
+ (call (match_operand:QI 1 "general_operand")
+ (match_operand:SI 2 "general_operand")))]
+ ""
+ {
+ rtx dest = XEXP (operands[1], 0);
+
+ if (! rx_call_operand (dest, Pmode))
+ dest = force_reg (Pmode, dest);
+ emit_call_insn (gen_call_value_internal (operands[0], dest, operands[2]));
+ DONE;
+ }
+)
+
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "register_operand" "=r,r")
+ (call (mem:QI (match_operand:SI 1 "rx_call_operand" "r,Symbol"))
+ (match_operand:SI 2 "general_operand" "g,g")))]
+ ""
+ "@
+ jsr\t%A1
+ bsr\t%A1"
+ [(set_attr "length" "2,4")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "sibcall"
+ [(call (mem:QI (match_operand:SI 0 "rx_symbolic_call_operand" "Symbol"))
+ (match_operand:SI 1 "general_operand" "g"))
+ (return)
+ (use (match_operand 2 "" ""))]
+ ""
+ "bra\t%A0"
+ [(set_attr "length" "4")
+ (set_attr "timings" "33")]
+)
+
+(define_insn "sibcall_value"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:QI (match_operand:SI 1 "rx_symbolic_call_operand" "Symbol"))
+ (match_operand:SI 2 "general_operand" "g")))
+ (return)
+ (use (match_operand 3 "" ""))]
+ ""
+ "bra\t%A1"
+ [(set_attr "length" "4")
+ (set_attr "timings" "33")]
+)
+
+;; Function Prologue/Epilogue Instructions
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ "rx_expand_prologue (); DONE;"
+)
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+ "rx_expand_epilogue (false); DONE;"
+)
+
+(define_expand "sibcall_epilogue"
+ [(return)]
+ ""
+ "rx_expand_epilogue (true); DONE;"
+)
+
+;; Move Instructions
+
+;; Note - we do not allow memory to memory moves, even though the ISA
+;; supports them. The reason is that the conditions on such moves are
+;; too restrictive: specifically, the source addressing mode is limited
+;; by the destination addressing mode and vice versa.  (For example, it
+;; is not possible to use indexed register indirect addressing for one
+;; of the operands if the other operand is anything other than a register,
+;; but it is possible to use register relative addressing when the other
+;; operand also uses register relative or register indirect addressing).
+;;
+;; GCC does not support computing legitimate addresses based on the
+;; nature of other operands involved in the instruction, and reload is
+;; not smart enough to cope with a whole variety of different memory
+;; addressing constraints, so it is simpler and safer to just refuse
+;; to support memory to memory moves.
+
+(define_expand "mov<register_modes:mode>"
+ [(set (match_operand:register_modes 0 "general_operand")
+ (match_operand:register_modes 1 "general_operand"))]
+ ""
+ {
+ if (MEM_P (operand0) && MEM_P (operand1))
+ operands[1] = copy_to_mode_reg (<register_modes:MODE>mode, operand1);
+ }
+)
+
+(define_insn "*mov<register_modes:mode>_internal"
+ [(set (match_operand:register_modes
+ 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,Q,Q,Q,Q")
+ (match_operand:register_modes
+ 1 "general_operand" "Int08,Sint16,Sint24,i,r,m,r,Int08,Sint16,Sint24,i"))]
+ ""
+ { return rx_gen_move_template (operands, false); }
+ [(set_attr "length" "3,4,5,6,2,4,6,5,6,7,8")
+ (set_attr "timings" "11,11,11,11,11,12,11,11,11,11,11")]
+)
+
+(define_insn "extend<small_int_modes:mode>si2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:small_int_modes
+ 1 "nonimmediate_operand" "r,m")))]
+ ""
+ { return rx_gen_move_template (operands, false); }
+ [(set_attr "length" "2,6")
+ (set_attr "timings" "11,12")]
+)
+
+(define_insn "zero_extend<small_int_modes:mode>si2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:small_int_modes
+ 1 "nonimmediate_operand" "r,m")))]
+ ""
+ { return rx_gen_move_template (operands, true); }
+ [(set_attr "length" "2,4")
+ (set_attr "timings" "11,12")]
+)
+
+(define_insn "stack_push"
+ [(set:SI (reg:SI SP_REG)
+ (minus:SI (reg:SI SP_REG)
+ (const_int 4)))
+ (set:SI (mem:SI (reg:SI SP_REG))
+ (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "push.l\t%0"
+ [(set_attr "length" "2")]
+)
+
+(define_insn "stack_pushm"
+ [(match_parallel 1 "rx_store_multiple_vector"
+ [(set:SI (reg:SI SP_REG)
+ (minus:SI (reg:SI SP_REG)
+ (match_operand:SI
+ 0 "const_int_operand" "n")))])]
+ "reload_completed"
+ {
+ rx_emit_stack_pushm (operands);
+ return "";
+ }
+ [(set_attr "length" "2")
+   (set_attr "timings" "44")] ;; The timing is a guesstimate of the average.
+)
+
+(define_insn "stack_pop"
+ [(set:SI (match_operand:SI 0 "register_operand" "=r")
+ (mem:SI (reg:SI SP_REG)))
+ (set:SI (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (const_int 4)))]
+ ""
+ "pop\t%0"
+ [(set_attr "length" "2")
+ (set_attr "timings" "12")]
+)
+
+(define_insn "stack_popm"
+ [(match_parallel 1 "rx_load_multiple_vector"
+ [(set:SI (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI
+ 0 "const_int_operand" "n")))])]
+ "reload_completed"
+ {
+ rx_emit_stack_popm (operands, true);
+ return "";
+ }
+ [(set_attr "length" "2")
+   (set_attr "timings" "45")] ;; The timing is a guesstimate of the average.
+)
+
+(define_insn "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (match_operator:SI
+ 1 "comparison_operator"
+ [(match_operand:SI
+ 2 "register_operand" "r,r,r,r,r,r,r")
+ (match_operand:SI
+ 3 "rx_source_operand" "r,Uint04,Int08,Sint16,Sint24,i,Q")]))]
+ ""
+ {
+ rx_float_compare_mode = false;
+ return "cmp\t%Q3, %Q2\n\tsc%B1.L\t%0";
+ }
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "timings" "22,22,22,22,22,22,44")
+ (set_attr "length" "5,5,6,7,8,9,8")]
+)
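+
+;; A small C sketch of the kind of code that should reach the cstoresi4
+;; pattern above (a comparison whose 0/1 result is used as a value):
+;;
+;;   int not_greater (int a, int b) { return a <= b; }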
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand")
+ (if_then_else:SI (match_operand:SI 1 "comparison_operator")
+ (match_operand:SI 2 "nonmemory_operand")
+ (match_operand:SI 3 "immediate_operand")))]
+ ""
+ {
+ if (GET_CODE (operands[1]) != EQ && GET_CODE (operands[1]) != NE)
+ FAIL;
+ if (! CONST_INT_P (operands[3]))
+ FAIL;
+ }
+)
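+
+;; The FAIL checks above restrict this expander to EQ/NE tests with a
+;; constant value for the "false" arm.  A C sketch of source code that
+;; may be if-converted into this form (whether it is depends on the
+;; optimizers):
+;;
+;;   int select (int a, int b, int c) { return (a == b) ? c : 4; }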
+
+(define_insn "*movsieq"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (if_then_else:SI (eq (match_operand:SI
+ 3 "register_operand" "r,r,r")
+ (match_operand:SI
+ 4 "rx_source_operand" "riQ,riQ,riQ"))
+ (match_operand:SI
+ 1 "nonmemory_operand" "0,i,r")
+ (match_operand:SI
+ 2 "immediate_operand" "i,i,i")))]
+ ""
+ "@
+ cmp\t%Q4, %Q3\n\tstnz\t%2, %0
+ cmp\t%Q4, %Q3\n\tmov.l\t%2, %0\n\tstz\t%1, %0
+ cmp\t%Q4, %Q3\n\tmov.l\t%1, %0\n\tstnz\t%2, %0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "length" "13,19,15")
+ (set_attr "timings" "22,33,33")]
+)
+
+(define_insn "*movsine"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (if_then_else:SI (ne (match_operand:SI 3 "register_operand" "r,r,r")
+ (match_operand:SI 4 "rx_source_operand" "riQ,riQ,riQ"))
+ (match_operand:SI 1 "nonmemory_operand" "0,i,r")
+ (match_operand:SI 2 "immediate_operand" "i,i,i")))]
+ ""
+ "@
+ cmp\t%Q4, %Q3\n\tstz\t%2, %0
+ cmp\t%Q4, %Q3\n\tmov.l\t%2, %0\n\tstnz\t%1, %0
+ cmp\t%Q4, %Q3\n\tmov.l\t%1, %0\n\tstz\t%2, %0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "length" "13,19,15")
+ (set_attr "timings" "22,33,33")]
+)
+
+;; Arithmetic Instructions
+
+(define_insn "abssi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (abs:SI (match_operand:SI 1 "register_operand" "0,r")))]
+ ""
+ "@
+ abs\t%0
+ abs\t%1, %0"
+ [(set_attr "cc" "set_zso")
+ (set_attr "length" "2,3")]
+)
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand"
+ "=r,r,r,r,r,r,r,r,r,r,r,r")
+ (plus:SI (match_operand:SI
+ 1 "register_operand"
+ "%0,0,0,0,0,0,r,r,r,r,r,0")
+ (match_operand:SI
+ 2 "rx_source_operand"
+ "r,Uint04,Sint08,Sint16,Sint24,i,r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "@
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%2, %1, %0
+ add\t%Q2, %0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "timings" "11,11,11,11,11,11,11,11,11,11,11,33")
+ (set_attr "length" "2,2,3,4,5,6,3,3,4,5,6,5")]
+)
+
+(define_insn "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r")
+ (plus:DI (match_operand:DI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:DI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "add\t%L2, %L0\n\tadc\t%H2, %H0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "timings" "22,22,22,22,22,44")
+ (set_attr "length" "5,7,9,11,13,11")]
+)
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,r,0,Q")
+ (match_operand:SI
+ 2 "rx_source_operand"
+ "r,Uint04,Sint08,Sint16,Sint24,i,r,Q,0")))]
+ ""
+ "@
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %0
+ and\t%2, %1, %0
+ and\t%Q2, %0
+ and\t%Q1, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "11,11,11,11,11,11,11,33,33")
+ (set_attr "length" "2,2,3,4,5,6,3,5,5")]
+)
+
+;; Byte swap (single 32-bit value).
+(define_insn "bswapsi2"
+ [(set (match_operand:SI 0 "register_operand" "+r")
+ (bswap:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "revl\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+;; Byte swap (single 16-bit value).  Note - we ignore the swapping of the high 16 bits.
+(define_insn "bswaphi2"
+ [(set (match_operand:HI 0 "register_operand" "+r")
+ (bswap:HI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "revw\t%1, %0"
+ [(set_attr "length" "3")]
+)
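+
+;; Assuming the generic GCC byte-swap builtin, a C sketch that should be
+;; matched by the bswapsi2 pattern above:
+;;
+;;   unsigned int swap32 (unsigned int x) { return __builtin_bswap32 (x); }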
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "0,0,0,0,0,0")
+ (match_operand:SI
+ 2 "rx_source_operand" "r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "div\t%Q2, %0"
+ [(set_attr "cc" "clobber")
+ (set_attr "timings" "1111") ;; Strictly speaking the timing should be
+				;; 2222, but that is a worst case scenario.
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "0,0,0,0,0,0")
+ (match_operand:SI
+ 2 "rx_source_operand" "r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "divu\t%Q2, %0"
+ [(set_attr "cc" "clobber")
+ (set_attr "timings" "1010") ;; Strictly speaking the timing should be
+				;; 2020, but that is a worst case scenario.
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+;; Note - these patterns are suppressed in big-endian mode because they
+;; generate a little-endian result, i.e. the most significant word of the
+;; result is placed in the higher numbered register of the destination
+;; register pair.
+
+(define_insn "mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r")
+ (mult:DI (sign_extend:DI (match_operand:SI
+ 1 "register_operand" "%0,0,0,0,0,0"))
+ (sign_extend:DI (match_operand:SI
+ 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q"))))]
+ "! TARGET_BIG_ENDIAN_DATA"
+ "@
+ emul\t%Q2, %0
+ emul\t%Q2, %0
+ emul\t%Q2, %0
+ emul\t%Q2, %0
+ emul\t%Q2, %0
+ emul\t%Q2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "22,22,22,22,22,44")]
+)
+
+;; See comment for mulsidi3.
+;; Note - the zero_extends are to distinguish this pattern from the
+;; mulsidi3 pattern. Immediate mode addressing is not supported
+;; because gcc cannot handle the expression: (zero_extend (const_int)).
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand"
+ "=r,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand"
+ "%0,0"))
+ (zero_extend:DI (match_operand:SI 2 "rx_compare_operand"
+ "r,Q"))))]
+ "! TARGET_BIG_ENDIAN_DATA"
+ "@
+ emulu\t%Q2, %0
+ emulu\t%Q2, %0"
+ [(set_attr "length" "3,6")
+ (set_attr "timings" "22,44")]
+)
+
+(define_insn "smaxsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (smax:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "max\t%Q2, %0"
+ [(set_attr "length" "3,4,5,6,7,6")
+ (set_attr "timings" "11,11,11,11,11,33")]
+)
+
+(define_insn "sminsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r")
+ (smin:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,r")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q,r")))]
+ ""
+ "@
+ min\t%Q2, %0
+ min\t%Q2, %0
+ min\t%Q2, %0
+ min\t%Q2, %0
+ min\t%Q2, %0
+ min\t%Q2, %0
+ mov.l\t%1,%0\n\tmin\t%Q2, %0"
+ [(set_attr "length" "3,4,5,6,7,6,5")
+ (set_attr "timings" "11,11,11,11,11,33,22")]
+)
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (mult:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,0,Q,r")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Uint04,Sint08,Sint16,Sint24,i,Q,0,r")))]
+ ""
+ "@
+ mul\t%Q2, %0
+ mul\t%Q2, %0
+ mul\t%Q2, %0
+ mul\t%Q2, %0
+ mul\t%Q2, %0
+ mul\t%Q2, %0
+ mul\t%Q2, %0
+ mul\t%Q1, %0
+ mul\t%Q2, %1, %0"
+ [(set_attr "length" "2,2,3,4,5,6,5,5,3")
+ (set_attr "timings" "11,11,11,11,11,11,33,33,11")]
+)
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (neg:SI (match_operand:SI 1 "register_operand" "0,r")))]
+ ;; The NEG instruction does not comply with -fwrapv semantics.
+ ;; See gcc.c-torture/execute/pr22493-1.c for an example of this.
+ "! flag_wrapv"
+ "@
+ neg\t%0
+ neg\t%1, %0"
+ [(set_attr "length" "2,3")]
+)
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (not:SI (match_operand:SI 1 "register_operand" "0,r")))]
+ ""
+ "@
+ not\t%0
+ not\t%1, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "length" "2,3")]
+)
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r,r,r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0,r,0,Q")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Uint04,Sint08,Sint16,Sint24,i,r,Q,0")))]
+ ""
+ "@
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %0
+ or\t%2, %1, %0
+ or\t%Q2, %0
+ or\t%Q1, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "11,11,11,11,11,11,11,33,33")
+ (set_attr "length" "2,2,3,4,5,6,3,5,5")]
+)
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "rx_shift_operand" "rn")))]
+ ""
+ "rotl\t%2, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "length" "3")]
+)
+
+(define_insn "rotrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotatert:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "rx_shift_operand" "rn")))]
+ ""
+ "rotr\t%2, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "length" "3")]
+)
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))]
+ ""
+ "@
+ shar\t%2, %0
+ shar\t%2, %0
+ shar\t%2, %1, %0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "length" "3,2,3")]
+)
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))]
+ ""
+ "@
+ shlr\t%2, %0
+ shlr\t%2, %0
+ shlr\t%2, %1, %0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "length" "3,2,3")]
+)
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "0,0,r")
+ (match_operand:SI 2 "rx_shift_operand" "r,n,n")))]
+ ""
+ "@
+ shll\t%2, %0
+ shll\t%2, %0
+ shll\t%2, %1, %0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "length" "3,2,3")]
+)
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0,0,0,r,0")
+ (match_operand:SI 2 "rx_source_operand" "r,Uint04,n,r,Q")))]
+ ""
+ "@
+ sub\t%2, %0
+ sub\t%2, %0
+ add\t%N2, %0
+ sub\t%2, %1, %0
+ sub\t%Q2, %0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "timings" "11,11,11,11,33")
+ (set_attr "length" "2,2,6,3,5")]
+)
+
+(define_insn "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (minus:DI (match_operand:DI 1 "register_operand" "0,0")
+ (match_operand:DI 2 "rx_source_operand" "r,Q")))]
+ ""
+ "sub\t%L2, %L0\n\tsbb\t%H2, %H0"
+ [(set_attr "cc" "set_zsoc")
+ (set_attr "timings" "22,44")
+ (set_attr "length" "5,11")]
+)
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0,0,0,0,0")
+ (match_operand:SI 2 "rx_source_operand"
+ "r,Sint08,Sint16,Sint24,i,Q")))]
+ ""
+ "@
+ xor\t%Q2, %0
+ xor\t%Q2, %0
+ xor\t%Q2, %0
+ xor\t%Q2, %0
+ xor\t%Q2, %0
+ xor\t%Q2, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "11,11,11,11,11,33")
+ (set_attr "length" "3,4,5,6,7,6")]
+)
+
+;; Floating Point Instructions
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (plus:SF (match_operand:SF 1 "register_operand" "%0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))]
+ "ALLOW_RX_FPU_INSNS"
+ "@
+ fadd\t%2, %0
+ fadd\t%2, %0
+ fadd\t%2, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "44,44,66")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (div:SF (match_operand:SF 1 "register_operand" "0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))]
+ "ALLOW_RX_FPU_INSNS"
+ "fdiv\t%2, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "1616,1616,1818")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (mult:SF (match_operand:SF 1 "register_operand" "%0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))]
+ "ALLOW_RX_FPU_INSNS"
+ "@
+ fmul\t%2, %0
+ fmul\t%2, %0
+ fmul\t%2, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "33,33,55")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r,r,r")
+ (minus:SF (match_operand:SF 1 "register_operand" "0,0,0")
+ (match_operand:SF 2 "rx_source_operand" "r,F,Q")))]
+ "ALLOW_RX_FPU_INSNS"
+ "fsub\t%2, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "44,44,66")
+ (set_attr "length" "3,7,5")]
+)
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (fix:SI (match_operand:SF 1 "rx_compare_operand" "r,Q")))]
+ "ALLOW_RX_FPU_INSNS"
+ "ftoi\t%1, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "22,44")
+ (set_attr "length" "3,5")]
+)
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=r,r")
+ (float:SF (match_operand:SI 1 "rx_compare_operand" "r,Q")))]
+ "ALLOW_RX_FPU_INSNS"
+ "itof\t%1, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "22,44")
+ (set_attr "length" "3,6")]
+)
+
+;; Bit manipulation instructions.
+;; Note - there are two versions of each pattern because the memory
+;; accessing versions use QImode whilst the register accessing
+;; versions use SImode.
+;; The peepholes are here because the combiner only looks at a maximum
+;; of three instructions at a time.
+
+(define_insn "bitset"
+ [(set:SI (match_operand:SI 0 "register_operand" "+r")
+ (ior:SI (match_operand:SI 1 "register_operand" "0")
+ (ashift:SI (const_int 1)
+ (match_operand:SI 2 "nonmemory_operand" "ri"))))]
+ ""
+ "bset\t%2, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "bitset_in_memory"
+ [(set:QI (match_operand:QI 0 "memory_operand" "+m")
+ (ior:QI (match_operand:QI 1 "memory_operand" "0")
+ (ashift:QI (const_int 1)
+ (match_operand:QI 2 "nonmemory_operand" "ri"))))]
+ ""
+ "bset\t%2, %0.B"
+ [(set_attr "length" "3")
+ (set_attr "timings" "34")]
+)
+
+;; (set (reg A) (const_int 1))
+;; (set (reg A) (ashift (reg A) (reg B)))
+;; (set (reg C) (ior (reg A) (reg C)))
+(define_peephole2
+ [(set:SI (match_operand:SI 0 "register_operand" "")
+ (const_int 1))
+ (set:SI (match_dup 0)
+ (ashift:SI (match_dup 0)
+ (match_operand:SI 1 "register_operand" "")))
+ (set:SI (match_operand:SI 2 "register_operand" "")
+ (ior:SI (match_dup 0)
+ (match_dup 2)))]
+ "dead_or_set_p (insn, operands[0])"
+ [(set:SI (match_dup 2)
+ (ior:SI (match_dup 2)
+ (ashift:SI (const_int 1)
+ (match_dup 1))))]
+)
+
+;; (set (reg A) (const_int 1))
+;; (set (reg A) (ashift (reg A) (reg B)))
+;; (set (reg A) (ior (reg A) (reg C)))
+;; (set (reg C) (reg A))
+(define_peephole2
+ [(set:SI (match_operand:SI 0 "register_operand" "")
+ (const_int 1))
+ (set:SI (match_dup 0)
+ (ashift:SI (match_dup 0)
+ (match_operand:SI 1 "register_operand" "")))
+ (set:SI (match_dup 0)
+ (ior:SI (match_dup 0)
+ (match_operand:SI 2 "register_operand" "")))
+ (set:SI (match_dup 2) (match_dup 0))]
+ "dead_or_set_p (insn, operands[0])"
+ [(set:SI (match_dup 2)
+ (ior:SI (match_dup 2)
+ (ashift:SI (const_int 1)
+ (match_dup 1))))]
+)
+
+(define_insn "bitinvert"
+ [(set:SI (match_operand:SI 0 "register_operand" "+r")
+ (xor:SI (match_operand:SI 1 "register_operand" "0")
+ (ashift:SI (const_int 1)
+ (match_operand:SI 2 "nonmemory_operand" "ri"))))]
+ ""
+ "bnot\t%2, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "bitinvert_in_memory"
+ [(set:QI (match_operand:QI 0 "memory_operand" "+m")
+ (xor:QI (match_operand:QI 1 "register_operand" "0")
+ (ashift:QI (const_int 1)
+ (match_operand:QI 2 "nonmemory_operand" "ri"))))]
+ ""
+ "bnot\t%2, %0.B"
+ [(set_attr "length" "5")
+ (set_attr "timings" "33")]
+)
+
+;; (set (reg A) (const_int 1))
+;; (set (reg A) (ashift (reg A) (reg B)))
+;; (set (reg C) (xor (reg A) (reg C)))
+(define_peephole2
+ [(set:SI (match_operand:SI 0 "register_operand" "")
+ (const_int 1))
+ (set:SI (match_dup 0)
+ (ashift:SI (match_dup 0)
+ (match_operand:SI 1 "register_operand" "")))
+ (set:SI (match_operand:SI 2 "register_operand" "")
+ (xor:SI (match_dup 0)
+ (match_dup 2)))]
+ "dead_or_set_p (insn, operands[0])"
+ [(set:SI (match_dup 2)
+ (xor:SI (match_dup 2)
+ (ashift:SI (const_int 1)
+ (match_dup 1))))]
+ ""
+)
+
+;; (set (reg A) (const_int 1))
+;; (set (reg A) (ashift (reg A) (reg B)))
+;; (set (reg A) (xor (reg A) (reg C)))
+;; (set (reg C) (reg A))
+(define_peephole2
+ [(set:SI (match_operand:SI 0 "register_operand" "")
+ (const_int 1))
+ (set:SI (match_dup 0)
+ (ashift:SI (match_dup 0)
+ (match_operand:SI 1 "register_operand" "")))
+ (set:SI (match_dup 0)
+ (xor:SI (match_dup 0)
+ (match_operand:SI 2 "register_operand" "")))
+ (set:SI (match_dup 2) (match_dup 0))]
+ "dead_or_set_p (insn, operands[0])"
+ [(set:SI (match_dup 2)
+ (xor:SI (match_dup 2)
+ (ashift:SI (const_int 1)
+ (match_dup 1))))]
+ ""
+)
+
+(define_insn "bitclr"
+ [(set:SI (match_operand:SI 0 "register_operand" "+r")
+ (and:SI (match_operand:SI 1 "register_operand" "0")
+ (not:SI (ashift:SI (const_int 1)
+ (match_operand:SI 2 "nonmemory_operand" "ri")))))]
+ ""
+ "bclr\t%2, %0"
+ [(set_attr "length" "3")]
+)
+
+(define_insn "bitclr_in_memory"
+ [(set:QI (match_operand:QI 0 "memory_operand" "+m")
+ (and:QI (match_operand:QI 1 "memory_operand" "0")
+ (not:QI (ashift:QI (const_int 1)
+ (match_operand:QI 2 "nonmemory_operand" "ri")))))]
+ ""
+ "bclr\t%2, %0.B"
+ [(set_attr "length" "3")
+ (set_attr "timings" "34")]
+)
+
+;; (set (reg A) (const_int -2))
+;; (set (reg A) (rotate (reg A) (reg B)))
+;; (set (reg C) (and (reg A) (reg C)))
+(define_peephole2
+ [(set:SI (match_operand:SI 0 "register_operand" "")
+ (const_int -2))
+ (set:SI (match_dup 0)
+ (rotate:SI (match_dup 0)
+ (match_operand:SI 1 "register_operand" "")))
+ (set:SI (match_operand:SI 2 "register_operand" "")
+ (and:SI (match_dup 0)
+ (match_dup 2)))]
+ "dead_or_set_p (insn, operands[0])"
+ [(set:SI (match_dup 2)
+ (and:SI (match_dup 2)
+ (not:SI (ashift:SI (const_int 1)
+ (match_dup 1)))))]
+)
+
+;; (set (reg A) (const_int -2))
+;; (set (reg A) (rotate (reg A) (reg B)))
+;; (set (reg A) (and (reg A) (reg C)))
+;; (set (reg C) (reg A))
+(define_peephole2
+ [(set:SI (match_operand:SI 0 "register_operand" "")
+ (const_int -2))
+ (set:SI (match_dup 0)
+ (rotate:SI (match_dup 0)
+ (match_operand:SI 1 "register_operand" "")))
+ (set:SI (match_dup 0)
+ (and:SI (match_dup 0)
+ (match_operand:SI 2 "register_operand" "")))
+ (set:SI (match_dup 2) (match_dup 0))]
+ "dead_or_set_p (insn, operands[0])"
+ [(set:SI (match_dup 2)
+ (and:SI (match_dup 2)
+ (not:SI (ashift:SI (const_int 1)
+ (match_dup 1)))))]
+)
+
+(define_expand "insv"
+ [(set:SI (zero_extract:SI (match_operand:SI
+ 0 "nonimmediate_operand") ;; Destination
+ (match_operand
+ 1 "immediate_operand") ;; # of bits to set
+ (match_operand
+ 2 "immediate_operand")) ;; Starting bit
+ (match_operand
+ 3 "immediate_operand"))] ;; Bits to insert
+ ""
+ {
+ if (rx_expand_insv (operands))
+ DONE;
+ FAIL;
+ }
+)
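+
+;; A C sketch of a construct that can reach this expander (inserting a
+;; constant into a bit-field; whether rx_expand_insv accepts it is
+;; decided in rx.c):
+;;
+;;   struct flags { unsigned int mode : 3; unsigned int ready : 1; } f;
+;;   void set_ready (void) { f.ready = 1; }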
+
+;; Atomic exchange operation.
+
+(define_insn "sync_lock_test_and_setsi"
+ [(set:SI (match_operand:SI 0 "register_operand" "=r,r")
+ (match_operand:SI 1 "rx_compare_operand" "=r,Q"))
+ (set:SI (match_dup 1)
+ (match_operand:SI 2 "register_operand" "0,0"))]
+ ""
+ "xchg\t%1, %0"
+ [(set_attr "length" "3,6")
+ (set_attr "timings" "22")]
+)
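+
+;; This implements the __sync_lock_test_and_set builtin for SImode; a
+;; minimal C sketch (the lock variable here is just an example):
+;;
+;;   int lock;
+;;   int acquire (void) { return __sync_lock_test_and_set (&lock, 1); }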
+
+;; Block move functions.
+
+(define_expand "movstr"
+ [(set:SI (match_operand:BLK 1 "memory_operand") ;; Dest
+ (match_operand:BLK 2 "memory_operand")) ;; Source
+ (use (match_operand:SI 0 "register_operand")) ;; Updated Dest
+ ]
+ ""
+ {
+ rtx addr1 = gen_rtx_REG (SImode, 1);
+ rtx addr2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+ rtx dest_copy = gen_reg_rtx (SImode);
+
+ emit_move_insn (len, GEN_INT (-1));
+ emit_move_insn (addr1, force_operand (XEXP (operands[1], 0), NULL_RTX));
+ emit_move_insn (addr2, force_operand (XEXP (operands[2], 0), NULL_RTX));
+ operands[1] = replace_equiv_address_nv (operands[1], addr1);
+ operands[2] = replace_equiv_address_nv (operands[2], addr2);
+ emit_move_insn (dest_copy, addr1);
+ emit_insn (gen_rx_movstr ());
+ emit_move_insn (len, GEN_INT (-1));
+ emit_insn (gen_rx_strend (operands[0], dest_copy));
+ DONE;
+ }
+)
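+
+;; A C sketch of a call that should be routed through the movstr
+;; expander above, assuming GCC expands __builtin_strcpy inline for
+;; this target:
+;;
+;;   char buf[32];
+;;   void copy_str (const char *s) { __builtin_strcpy (buf, s); }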
+
+(define_insn "rx_movstr"
+ [(set:SI (mem:BLK (reg:SI 1))
+ (mem:BLK (reg:SI 2)))
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_MOVSTR)
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))
+ ]
+ ""
+ "smovu"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
+
+(define_insn "rx_strend"
+ [(set:SI (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
+ (reg:SI 3)] UNSPEC_STRLEN))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))
+ ]
+ ""
+ "mov\t%1, r1\n\tmov\t#0, r2\n\tsuntil.b\n\tmov\tr1, %0\n\tsub\t#1, %0"
+ [(set_attr "length" "10")
+ (set_attr "cc" "clobber")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
+
+(define_expand "movmemsi"
+ [(parallel
+ [(set (match_operand:BLK 0 "memory_operand") ;; Dest
+ (match_operand:BLK 1 "memory_operand")) ;; Source
+ (use (match_operand:SI 2 "register_operand")) ;; Length in bytes
+ (match_operand 3 "immediate_operand") ;; Align
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_MOVMEM)]
+ )]
+ ""
+ {
+ rtx addr1 = gen_rtx_REG (SImode, 1);
+ rtx addr2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+ if (REG_P (operands[0]) && (REGNO (operands[0]) == 2
+ || REGNO (operands[0]) == 3))
+ FAIL;
+ if (REG_P (operands[1]) && (REGNO (operands[1]) == 1
+ || REGNO (operands[1]) == 3))
+ FAIL;
+ if (REG_P (operands[2]) && (REGNO (operands[2]) == 1
+ || REGNO (operands[2]) == 2))
+ FAIL;
+ emit_move_insn (addr1, force_operand (XEXP (operands[0], 0), NULL_RTX));
+ emit_move_insn (addr2, force_operand (XEXP (operands[1], 0), NULL_RTX));
+ emit_move_insn (len, force_operand (operands[2], NULL_RTX));
+ operands[0] = replace_equiv_address_nv (operands[0], addr1);
+ operands[1] = replace_equiv_address_nv (operands[1], addr2);
+ emit_insn (gen_rx_movmem ());
+ DONE;
+ }
+)
+
+(define_insn "rx_movmem"
+ [(set (mem:BLK (reg:SI 1))
+ (mem:BLK (reg:SI 2)))
+ (use (reg:SI 3))
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_MOVMEM)
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))]
+ ""
+ "smovf"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
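+
+;; A C sketch of a call that should be expanded through movmemsi and
+;; the smovf instruction above (the length is not a compile-time
+;; constant, so a copy-by-pieces expansion is not possible):
+;;
+;;   void copy_block (void *d, const void *s, unsigned int n)
+;;   {
+;;     __builtin_memcpy (d, s, n);
+;;   }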
+
+(define_expand "setmemsi"
+ [(set (match_operand:BLK 0 "memory_operand") ;; Dest
+ (match_operand:QI 2 "nonmemory_operand")) ;; Value
+ (use (match_operand:SI 1 "nonmemory_operand")) ;; Length
+ (match_operand 3 "immediate_operand") ;; Align
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_SETMEM)]
+ ""
+ {
+ rtx addr = gen_rtx_REG (SImode, 1);
+ rtx val = gen_rtx_REG (QImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+ emit_move_insn (addr, force_operand (XEXP (operands[0], 0), NULL_RTX));
+ emit_move_insn (len, force_operand (operands[1], NULL_RTX));
+ emit_move_insn (val, operands[2]);
+ emit_insn (gen_rx_setmem ());
+ DONE;
+ }
+)
+
+(define_insn "rx_setmem"
+ [(set:BLK (mem:BLK (reg:SI 1)) (reg 2))
+ (unspec_volatile:BLK [(reg:SI 1) (reg:SI 2) (reg:SI 3)] UNSPEC_SETMEM)
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 3))]
+ ""
+ "sstr.b"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
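+
+;; Similarly, a C sketch for the setmemsi expander and the sstr.b
+;; instruction above:
+;;
+;;   void clear_block (void *d, unsigned int n) { __builtin_memset (d, 0, n); }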
+
+(define_expand "cmpstrnsi"
+ [(set (match_operand:SI
+ 0 "register_operand") ;; Result
+ (unspec_volatile:SI [(match_operand:BLK
+ 1 "memory_operand") ;; String1
+ (match_operand:BLK
+ 2 "memory_operand")] ;; String2
+ UNSPEC_CMPSTRN))
+ (use (match_operand:SI
+ 3 "register_operand")) ;; Max Length
+ (match_operand:SI
+ 4 "immediate_operand")] ;; Known Align
+ ""
+ {
+ rtx str1 = gen_rtx_REG (SImode, 1);
+ rtx str2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+ emit_move_insn (str1, force_operand (XEXP (operands[1], 0), NULL_RTX));
+ emit_move_insn (str2, force_operand (XEXP (operands[2], 0), NULL_RTX));
+ emit_move_insn (len, force_operand (operands[3], NULL_RTX));
+
+ emit_insn (gen_rx_cmpstrn (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+)
+
+(define_expand "cmpstrsi"
+ [(set (match_operand:SI
+ 0 "register_operand") ;; Result
+ (unspec_volatile:SI [(match_operand:BLK
+ 1 "memory_operand") ;; String1
+ (match_operand:BLK
+ 2 "memory_operand")] ;; String2
+ UNSPEC_CMPSTRN))
+ (match_operand:SI
+ 3 "immediate_operand")] ;; Known Align
+ ""
+ {
+ rtx str1 = gen_rtx_REG (SImode, 1);
+ rtx str2 = gen_rtx_REG (SImode, 2);
+ rtx len = gen_rtx_REG (SImode, 3);
+
+ emit_move_insn (str1, force_reg (SImode, XEXP (operands[1], 0)));
+ emit_move_insn (str2, force_reg (SImode, XEXP (operands[2], 0)));
+ emit_move_insn (len, GEN_INT (-1));
+
+ emit_insn (gen_rx_cmpstrn (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+)
+
+(define_insn "rx_cmpstrn"
+ [(set:SI (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(reg:SI 1) (reg:SI 2) (reg:SI 3)]
+ UNSPEC_CMPSTRN))
+ (use (match_operand:BLK 1 "memory_operand" "m"))
+ (use (match_operand:BLK 2 "memory_operand" "m"))
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))]
+ ""
+ "scmpu ; Perform the string comparison
+ mov #-1, %0 ; Set up -1 result (which cannot be created
+ ; by the SC insn)
+ bnc ?+ ; If Carry is not set skip over
+ scne.L %0 ; Set result based on Z flag
+?:
+"
+ [(set_attr "length" "9")
+ (set_attr "timings" "1111")] ;; The timing is a guesstimate.
+)
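+
+;; A C sketch of a call that should reach the cmpstrsi / cmpstrnsi
+;; expanders above, assuming GCC chooses to expand it inline:
+;;
+;;   int same (const char *a, const char *b)
+;;   {
+;;     return __builtin_strcmp (a, b) == 0;
+;;   }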
+
+;; Builtin Functions
+;;
+;; GCC does not have the ability to generate the following instructions
+;; on its own so they are provided as builtins instead.  To use them from
+;; a program, invoke them as __builtin_rx_<insn_name>.  For example:
+;;
+;; int short_byte_swap (int arg) { return __builtin_rx_revw (arg); }
+
+;;---------- Accumulator Support ------------------------
+
+;; Multiply & Accumulate (high)
+(define_insn "machi"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MACHI)]
+ ""
+ "machi\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Multiply & Accumulate (low)
+(define_insn "maclo"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MACLO)]
+ ""
+ "maclo\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Multiply (high)
+(define_insn "mulhi"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MULHI)]
+ ""
+ "mulhi\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Multiply (low)
+(define_insn "mullo"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_MULLO)]
+ ""
+ "mullo\t%0, %1"
+ [(set_attr "length" "3")]
+)
+
+;; Move from Accumulator (high)
+(define_insn "mvfachi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)]
+ UNSPEC_BUILTIN_MVFACHI))]
+ ""
+ "mvfachi\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Move from Accumulator (middle)
+(define_insn "mvfacmi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(const_int 0)]
+ UNSPEC_BUILTIN_MVFACMI))]
+ ""
+ "mvfacmi\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Move to Accumulator (high)
+(define_insn "mvtachi"
+ [(unspec_volatile:SI [(match_operand:SI 0 "register_operand" "r")]
+ UNSPEC_BUILTIN_MVTACHI)]
+ ""
+ "mvtachi\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Move to Accumulator (low)
+(define_insn "mvtaclo"
+ [(unspec_volatile:SI [(match_operand:SI 0 "register_operand" "r")]
+ UNSPEC_BUILTIN_MVTACLO)]
+ ""
+ "mvtaclo\t%0"
+ [(set_attr "length" "3")]
+)
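+
+;; A sketch of how the accumulator builtins above might be combined
+;; into a 16-bit multiply-accumulate loop.  The choice of mvfacmi to
+;; read the result back is an assumption about the accumulator layout;
+;; consult the RX hardware manual for the exact scaling.
+;;
+;;   int dot16 (const short *a, const short *b, int n)
+;;   {
+;;     int i;
+;;     __builtin_rx_mvtachi (0);   /* Clear the accumulator.  */
+;;     __builtin_rx_mvtaclo (0);
+;;     for (i = 0; i < n; i++)
+;;       __builtin_rx_maclo (a[i], b[i]);
+;;     return __builtin_rx_mvfacmi ();
+;;   }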
+
+;; Round Accumulator
+(define_insn "racw"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_RACW)]
+ ""
+ "racw\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Repeat multiply and accumulate
+(define_insn "rmpa"
+ [(unspec:SI [(const_int 0) (reg:SI 1) (reg:SI 2) (reg:SI 3)
+ (reg:SI 4) (reg:SI 5) (reg:SI 6)]
+ UNSPEC_BUILTIN_RMPA)
+ (clobber (reg:SI 1))
+ (clobber (reg:SI 2))
+ (clobber (reg:SI 3))]
+ ""
+ "rmpa"
+ [(set_attr "length" "2")
+ (set_attr "timings" "1010")]
+)
+
+;;---------- Arithmetic ------------------------
+
+;; Byte swap (two 16-bit values).
+(define_insn "revw"
+ [(set (match_operand:SI 0 "register_operand" "+r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r")]
+ UNSPEC_BUILTIN_REVW))]
+ ""
+ "revw\t%1, %0"
+ [(set_attr "length" "3")]
+)
+
+;; Round to integer.
+(define_insn "lrintsf2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI [(match_operand:SF 1 "rx_compare_operand" "r,Q")]
+ UNSPEC_BUILTIN_ROUND))]
+ ""
+ "round\t%1, %0"
+ [(set_attr "cc" "set_zs")
+ (set_attr "timings" "22,44")
+ (set_attr "length" "3,5")]
+)
+
+;; Saturate to 32-bits
+(define_insn "sat"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0")]
+ UNSPEC_BUILTIN_SAT))]
+ ""
+ "sat\t%0"
+ [(set_attr "length" "2")]
+)
+
+;;---------- Control Registers ------------------------
+
+;; Clear Processor Status Word
+(define_insn "clrpsw"
+ [(unspec:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_CLRPSW)
+ (clobber (cc0))]
+ ""
+ "clrpsw\t%F0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")]
+)
+
+;; Set Processor Status Word
+(define_insn "setpsw"
+ [(unspec:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_SETPSW)
+ (clobber (cc0))]
+ ""
+ "setpsw\t%F0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")]
+)
+
+;; Move from control register
+(define_insn "mvfc"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_MVFC))]
+ ""
+ "mvfc\t%C1, %0"
+ [(set_attr "length" "3")]
+)
+
+;; Move to control register
+(define_insn "mvtc"
+ [(unspec:SI [(match_operand:SI 0 "immediate_operand" "i,i")
+ (match_operand:SI 1 "nonmemory_operand" "r,i")]
+ UNSPEC_BUILTIN_MVTC)]
+ ""
+ "mvtc\t%1, %C0"
+ [(set_attr "length" "3,7")]
+ ;; Ignore possible clobbering of the comparison flags in the
+ ;; PSW register. This is a cc0 target so any cc0 setting
+ ;; instruction will always be paired with a cc0 user, without
+ ;; the possibility of this instruction being placed in between
+ ;; them.
+)
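+
+;; A sketch of using the control register builtins; the numeric
+;; argument selects a control register and the 0 used here is only a
+;; placeholder - the real numbering comes from the RX ISA and rx.c:
+;;
+;;   unsigned int read_creg0 (void) { return __builtin_rx_mvfc (0); }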
+
+;; Move to interrupt priority level
+(define_insn "mvtipl"
+ [(unspec:SI [(match_operand:SI 0 "immediate_operand" "Uint04")]
+ UNSPEC_BUILTIN_MVTIPL)]
+ ""
+ "mvtipl\t%0"
+ [(set_attr "length" "3")]
+)
+
+;;---------- Interrupts ------------------------
+
+;; Break
+(define_insn "brk"
+ [(unspec_volatile [(const_int 0)]
+ UNSPEC_BUILTIN_BRK)]
+ ""
+ "brk"
+ [(set_attr "length" "1")
+ (set_attr "timings" "66")]
+)
+
+;; Interrupt
+(define_insn "int"
+ [(unspec_volatile:SI [(match_operand:SI 0 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_INT)]
+ ""
+ "int\t%0"
+ [(set_attr "length" "3")]
+)
+
+;; Wait
+(define_insn "wait"
+ [(unspec_volatile [(const_int 0)]
+ UNSPEC_BUILTIN_WAIT)]
+ ""
+ "wait"
+ [(set_attr "length" "2")]
+)
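+
+;; A minimal C sketch for the interrupt-related builtins above (the
+;; names follow the __builtin_rx_<insn_name> convention):
+;;
+;;   void idle (void) { __builtin_rx_wait (); }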
+
+;;---------- CoProcessor Support ------------------------
+
+;; FIXME: The instructions are currently commented out because
+;; the bit patterns have not been finalized, so the assembler
+;; does not support them. Once they are decided and the assembler
+;; supports them, enable the instructions here.
+
+;; Move from co-processor register
+(define_insn "mvfcp"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "immediate_operand" "i")
+ (match_operand:SI 2 "immediate_operand" "i")]
+ UNSPEC_BUILTIN_MVFCP))]
+ ""
+ "; mvfcp\t%1, %0, %2"
+ [(set_attr "length" "5")]
+)
+
+;;---------- Misc ------------------------
+
+;; Required by cfglayout.c...
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "length" "1")]
+)
diff --git a/gcc/config/rx/rx.opt b/gcc/config/rx/rx.opt
new file mode 100644
index 00000000000..768d565b478
--- /dev/null
+++ b/gcc/config/rx/rx.opt
@@ -0,0 +1,98 @@
+; Command line options for the Renesas RX port of GCC.
+; Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+; Contributed by Red Hat.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+;---------------------------------------------------
+
+m32bit-doubles
+Target RejectNegative Mask(32BIT_DOUBLES)
+Store doubles in 32 bits.
+
+m64bit-doubles
+Target RejectNegative InverseMask(32BIT_DOUBLES)
+Store doubles in 64 bits. This is the default.
+
+fpu
+Target RejectNegative Mask(32BIT_DOUBLES) MaskExists
+Enable the use of RX FPU instructions.
+
+nofpu
+Target RejectNegative InverseMask(32BIT_DOUBLES) MaskExists
+Disable the use of RX FPU instructions.
+
+;---------------------------------------------------
+
+mcpu=
+Target RejectNegative Joined Var(rx_cpu_name)
+Specify the target RX cpu type.
+
+patch=
+Target RejectNegative Joined Var(rx_cpu_name)
+Alias for -mcpu.
+
+;---------------------------------------------------
+
+mbig-endian-data
+Target RejectNegative Mask(BIG_ENDIAN_DATA)
+Data is stored in big-endian format.
+
+mlittle-endian-data
+Target RejectNegative InverseMask(BIG_ENDIAN_DATA)
+Data is stored in little-endian format. (Default).
+
+;---------------------------------------------------
+
+msmall-data-limit=
+Target RejectNegative Joined UInteger Var(rx_small_data_limit) Init(0)
+Maximum size of global and static variables which can be placed into the small data area.
+
+;---------------------------------------------------
+
+msim
+Target
+Use the simulator runtime.
+
+;---------------------------------------------------
+
+mas100-syntax
+Target Mask(AS100_SYNTAX)
+Generate assembler output that is compatible with the Renesas AS100 assembler.  This may restrict some of the compiler's capabilities.  The default is to generate GAS compatible syntax.
+
+;---------------------------------------------------
+
+mrelax
+Target
+Enable linker relaxation.
+
+;---------------------------------------------------
+
+mmax-constant-size=
+Target RejectNegative Joined UInteger Var(rx_max_constant_size) Init(0)
+Maximum size in bytes of constant values allowed as operands.
+
+;---------------------------------------------------
+
+mint-register=
+Target RejectNegative Joined UInteger Var(rx_interrupt_registers) Init(0)
+Specifies the number of registers to reserve for interrupt handlers.
+
+;---------------------------------------------------
+
+msave-acc-in-interrupts
+Target Mask(SAVE_ACC_REGISTER)
+Specifies whether interrupt functions should save and restore the accumulator register.
diff --git a/gcc/config/rx/t-rx b/gcc/config/rx/t-rx
new file mode 100644
index 00000000000..eb1ca48d3a3
--- /dev/null
+++ b/gcc/config/rx/t-rx
@@ -0,0 +1,32 @@
+# Makefile fragment for building GCC for the Renesas RX target.
+# Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+# Contributed by Red Hat.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published
+# by the Free Software Foundation; either version 3, or (at your
+# option) any later version.
+#
+# GCC is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+# the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public
+# License along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+# Enable multilibs:
+
+MULTILIB_OPTIONS = m32bit-doubles mbig-endian-data
+MULTILIB_DIRNAMES = 32fp big-endian-data
+MULTILIB_MATCHES = m32bit-doubles=fpu
+MULTILIB_EXCEPTIONS =
+MULTILIB_EXTRA_OPTS =
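+
+# With the two independent option sets above, four library variants are
+# built (a sketch of the expected directory layout):
+#
+#   .                      64-bit doubles, little-endian data (default)
+#   32fp/                  -m32bit-doubles
+#   big-endian-data/       -mbig-endian-data
+#   32fp/big-endian-data/  -m32bit-doubles -mbig-endian-data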
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o