author    rth <rth@138bc75d-0d04-0410-961f-82ee72b054a4>    2007-04-27 14:47:57 +0000
committer rth <rth@138bc75d-0d04-0410-961f-82ee72b054a4>    2007-04-27 14:47:57 +0000
commit    0d96cd2ba4ca5bf0e1681d683bd566b940fe1cea (patch)
tree      13f1f56c69e4fb85cba6ff3f0491b57313c3d470
parent    83a20bafd7318b3a7cdd89402c3e74475c547506 (diff)
download  gcc-0d96cd2ba4ca5bf0e1681d683bd566b940fe1cea.tar.gz
* config/alpha/predicates.md (aligned_memory_operand): Mark
as define_special_predicate.
(unaligned_memory_operand, normal_memory_operand): Likewise.
(reg_or_unaligned_mem_operand): Remove.
(any_memory_operand): Match the documentation and check for
non-renumbered pseudos during reload.
* config/alpha/alpha.c (alpha_secondary_reload): Rename from
alpha_secondary_reload_class, update to new interface, make static.
Handle CQImode like HImode.  Remove FP subreg check.
(alpha_expand_mov): Use replace_equiv_address.
(alpha_expand_mov_nobwx): Use any_memory_operand.
(TARGET_SECONDARY_RELOAD): New.
* config/alpha/alpha.h (SECONDARY_INPUT_RELOAD_CLASS): Remove.
(SECONDARY_OUTPUT_RELOAD_CLASS): Remove.
* config/alpha/sync.md (I12MODE, I48MODE, modesuffix): Move ...
* config/alpha/alpha.md: ... here.
(RELOAD12, reloadmode): New.
(movcqi): New.
(reload_in<RELOAD12>): Macro-ize from reload_inqi, reload_inhi.
Don't handle the aligned case here.
(reload_out<RELOAD12>): Macro-ize from reload_outqi, reload_outhi.
(reload_in<I12MODE>_aligned): Macro-ize from reload_inqi_help,
reload_inhi_help.  Don't expect a scratch register.
(reload_out<I12MODE>_aligned): Macro-ize from reload_outqi_help,
reload_outhi_help.
* config/alpha/alpha-protos.h (alpha_secondary_reload_class): Remove.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@124220 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--   gcc/ChangeLog                      29
-rw-r--r--   gcc/config/alpha/alpha-protos.h     3
-rw-r--r--   gcc/config/alpha/alpha.c          127
-rw-r--r--   gcc/config/alpha/alpha.h           13
-rw-r--r--   gcc/config/alpha/alpha.md         245
-rw-r--r--   gcc/config/alpha/predicates.md     32
-rw-r--r--   gcc/config/alpha/sync.md            4
7 files changed, 209 insertions, 244 deletions
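
The headline change in this patch is the move from the target macros
SECONDARY_INPUT_RELOAD_CLASS / SECONDARY_OUTPUT_RELOAD_CLASS to the
TARGET_SECONDARY_RELOAD hook.  Before reading the full alpha.c hunk below,
the following condensed C sketch shows the control flow of the new hook as
this patch defines it.  It is paraphrased and abbreviated, not the verbatim
patch text; the final "return NO_REGS;" is assumed from the unchanged tail
of the function, which the diff does not show.

    static enum reg_class
    alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
                            enum machine_mode mode, secondary_reload_info *sri)
    {
      /* Without BWX insns, QImode/HImode (and CQImode) memory accesses need
         help.  Unaligned loads and all stores are routed to the
         reload_in<mode> / reload_out<mode> expanders via sri->icode; aligned
         loads need no scratch register at all, so NO_REGS is returned either
         way.  */
      if (!TARGET_BWX
          && (mode == QImode || mode == HImode || mode == CQImode)
          && any_memory_operand (x, mode))
        {
          if (in_p)
            {
              if (!aligned_memory_operand (x, mode))
                sri->icode = reload_in_optab[mode];
            }
          else
            sri->icode = reload_out_optab[mode];
          return NO_REGS;
        }

      /* FP registers cannot take unaligned (AND-address) references, nor
         integral values that are not a MEM, REG, or CONST_INT.  */
      if (class == FLOAT_REGS)
        {
          if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
            return GENERAL_REGS;
          if (in_p && INTEGRAL_MODE_P (mode)
              && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
            return GENERAL_REGS;
        }

      return NO_REGS;
    }

The hook is then registered with "#define TARGET_SECONDARY_RELOAD
alpha_secondary_reload", as seen near the end of the alpha.c diff, replacing
the two removed alpha.h macros.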
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 75cb9da3b16..1b3f0efdaa9 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,32 @@
+2007-04-27 Richard Henderson <rth@redhat.com>
+
+ * config/alpha/predicates.md (aligned_memory_operand): Mark
+ as define_special_predicate.
+ (unaligned_memory_operand, normal_memory_operand): Likewise.
+ (reg_or_unaligned_mem_operand): Remove.
+ (any_memory_operand): Match the documentation and check for
+ non-renumbered pseudos during reload.
+ * config/alpha/alpha.c (alpha_secondary_reload): Rename from
+ alpha_secondary_reload_class, update to new interface, make static.
+ Handle CQImode like HImode. Remove FP subreg check.
+ (alpha_expand_mov): Use replace_equiv_address.
+ (alpha_expand_mov_nobwx): Use any_memory_operand.
+ (TARGET_SECONDARY_RELOAD): New.
+ * config/alpha/alpha.h (SECONDARY_INPUT_RELOAD_CLASS): Remove.
+ (SECONDARY_OUTPUT_RELOAD_CLASS): Remove.
+ * config/alpha/sync.md (I12MODE, I48MODE, modesuffix): Move ...
+ * config/alpha/alpha.md: ... here.
+ (RELOAD12, reloadmode): New.
+ (movcqi): New.
+ (reload_in<RELOAD12>): Macro-ize from reload_inqi, reload_inhi.
+ Don't handle the aligned case here.
+ (reload_out<RELOAD12>): Macro-ize from reload_outqi, reload_outhi.
+ (reload_in<I12MODE>_aligned): Macro-ize from reload_inqi_help,
+ reload_inhi_help. Don't expect a scratch register.
+ (reload_out<I12MODE>_aligned): Macro-ize from reload_outqi_help,
+ reload_outhi_help.
+ * config/alpha/alpha-protos.h (alpha_secondary_reload_class): Remove.
+
2007-04-27 Richard Guenther <rguenther@suse.de>
* tree-ssa-forwprop.c (get_prop_dest_stmt): Fix comment typo.
diff --git a/gcc/config/alpha/alpha-protos.h b/gcc/config/alpha/alpha-protos.h
index 2ba5dfb3a09..e7e2d2d08cd 100644
--- a/gcc/config/alpha/alpha-protos.h
+++ b/gcc/config/alpha/alpha-protos.h
@@ -50,9 +50,6 @@ extern void get_aligned_mem (rtx, rtx *, rtx *);
extern rtx get_unaligned_address (rtx);
extern rtx get_unaligned_offset (rtx, HOST_WIDE_INT);
extern enum reg_class alpha_preferred_reload_class (rtx, enum reg_class);
-extern enum reg_class alpha_secondary_reload_class (enum reg_class,
- enum machine_mode, rtx,
- int);
extern void alpha_set_memflags (rtx, rtx);
extern bool alpha_split_const_mov (enum machine_mode, rtx *);
diff --git a/gcc/config/alpha/alpha.c b/gcc/config/alpha/alpha.c
index be6067eb903..32bb92c89d0 100644
--- a/gcc/config/alpha/alpha.c
+++ b/gcc/config/alpha/alpha.c
@@ -1533,47 +1533,39 @@ alpha_preferred_reload_class(rtx x, enum reg_class class)
return class;
}
-/* Loading and storing HImode or QImode values to and from memory
- usually requires a scratch register. The exceptions are loading
- QImode and HImode from an aligned address to a general register
- unless byte instructions are permitted.
+/* Inform reload about cases where moving X with a mode MODE to a register in
+ CLASS requires an extra scratch or immediate register. Return the class
+ needed for the immediate register. */
- We also cannot load an unaligned address or a paradoxical SUBREG
- into an FP register.
-
- We also cannot do integral arithmetic into FP regs, as might result
- from register elimination into a DImode fp register. */
-
-enum reg_class
-alpha_secondary_reload_class (enum reg_class class, enum machine_mode mode,
- rtx x, int in)
+static enum reg_class
+alpha_secondary_reload (bool in_p, rtx x, enum reg_class class,
+ enum machine_mode mode, secondary_reload_info *sri)
{
- if ((mode == QImode || mode == HImode) && ! TARGET_BWX)
+ /* Loading and storing HImode or QImode values to and from memory
+ usually requires a scratch register. */
+ if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode))
{
- if (GET_CODE (x) == MEM
- || (GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER)
- || (GET_CODE (x) == SUBREG
- && (GET_CODE (SUBREG_REG (x)) == MEM
- || (GET_CODE (SUBREG_REG (x)) == REG
- && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER))))
+ if (any_memory_operand (x, mode))
{
- if (!in || !aligned_memory_operand(x, mode))
- return GENERAL_REGS;
+ if (in_p)
+ {
+ if (!aligned_memory_operand (x, mode))
+ sri->icode = reload_in_optab[mode];
+ }
+ else
+ sri->icode = reload_out_optab[mode];
+ return NO_REGS;
}
}
+ /* We also cannot do integral arithmetic into FP regs, as might result
+ from register elimination into a DImode fp register. */
if (class == FLOAT_REGS)
{
- if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == AND)
- return GENERAL_REGS;
-
- if (GET_CODE (x) == SUBREG
- && (GET_MODE_SIZE (GET_MODE (x))
- > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)))))
+ if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND)
return GENERAL_REGS;
-
- if (in && INTEGRAL_MODE_P (mode)
- && ! (memory_operand (x, mode) || x == const0_rtx))
+ if (in_p && INTEGRAL_MODE_P (mode)
+ && !MEM_P (x) && !REG_P (x) && !CONST_INT_P (x))
return GENERAL_REGS;
}
@@ -2160,8 +2152,7 @@ alpha_expand_mov (enum machine_mode mode, rtx *operands)
if (reload_in_progress)
{
emit_move_insn (operands[0], XEXP (operands[1], 0));
- operands[1] = copy_rtx (operands[1]);
- XEXP (operands[1], 0) = operands[0];
+ operands[1] = replace_equiv_address (operands[1], operands[0]);
}
else
operands[1] = validize_mem (operands[1]);
@@ -2174,32 +2165,27 @@ alpha_expand_mov (enum machine_mode mode, rtx *operands)
bool
alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
{
+ rtx seq;
+
/* If the output is not a register, the input must be. */
- if (GET_CODE (operands[0]) == MEM)
+ if (MEM_P (operands[0]))
operands[1] = force_reg (mode, operands[1]);
/* Handle four memory cases, unaligned and aligned for either the input
or the output. The only case where we can be called during reload is
for aligned loads; all other cases require temporaries. */
- if (GET_CODE (operands[1]) == MEM
- || (GET_CODE (operands[1]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[1])) == MEM)
- || (reload_in_progress && GET_CODE (operands[1]) == REG
- && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER)
- || (reload_in_progress && GET_CODE (operands[1]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[1])) == REG
- && REGNO (SUBREG_REG (operands[1])) >= FIRST_PSEUDO_REGISTER))
+ if (any_memory_operand (operands[1], mode))
{
if (aligned_memory_operand (operands[1], mode))
{
if (reload_in_progress)
{
- emit_insn ((mode == QImode
- ? gen_reload_inqi_help
- : gen_reload_inhi_help)
- (operands[0], operands[1],
- gen_rtx_REG (SImode, REGNO (operands[0]))));
+ if (mode == QImode)
+ seq = gen_reload_inqi_aligned (operands[0], operands[1]);
+ else
+ seq = gen_reload_inhi_aligned (operands[0], operands[1]);
+ emit_insn (seq);
}
else
{
@@ -2216,10 +2202,13 @@ alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
else
subtarget = gen_reg_rtx (DImode), copyout = true;
- emit_insn ((mode == QImode
- ? gen_aligned_loadqi
- : gen_aligned_loadhi)
- (subtarget, aligned_mem, bitnum, scratch));
+ if (mode == QImode)
+ seq = gen_aligned_loadqi (subtarget, aligned_mem,
+ bitnum, scratch);
+ else
+ seq = gen_aligned_loadhi (subtarget, aligned_mem,
+ bitnum, scratch);
+ emit_insn (seq);
if (copyout)
emit_move_insn (operands[0], gen_lowpart (mode, subtarget));
@@ -2231,7 +2220,7 @@ alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
code depend on parameter evaluation order which will cause
bootstrap failures. */
- rtx temp1, temp2, seq, subtarget;
+ rtx temp1, temp2, subtarget, ua;
bool copyout;
temp1 = gen_reg_rtx (DImode);
@@ -2243,11 +2232,12 @@ alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
else
subtarget = gen_reg_rtx (DImode), copyout = true;
- seq = ((mode == QImode
- ? gen_unaligned_loadqi
- : gen_unaligned_loadhi)
- (subtarget, get_unaligned_address (operands[1]),
- temp1, temp2));
+ ua = get_unaligned_address (operands[1]);
+ if (mode == QImode)
+ seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2);
+ else
+ seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2);
+
alpha_set_memflags (seq, operands[1]);
emit_insn (seq);
@@ -2257,14 +2247,7 @@ alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
return true;
}
- if (GET_CODE (operands[0]) == MEM
- || (GET_CODE (operands[0]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[0])) == MEM)
- || (reload_in_progress && GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)
- || (reload_in_progress && GET_CODE (operands[0]) == SUBREG
- && GET_CODE (SUBREG_REG (operands[0])) == REG
- && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER))
+ if (any_memory_operand (operands[0], mode))
{
if (aligned_memory_operand (operands[0], mode))
{
@@ -2282,11 +2265,12 @@ alpha_expand_mov_nobwx (enum machine_mode mode, rtx *operands)
rtx temp1 = gen_reg_rtx (DImode);
rtx temp2 = gen_reg_rtx (DImode);
rtx temp3 = gen_reg_rtx (DImode);
- rtx seq = ((mode == QImode
- ? gen_unaligned_storeqi
- : gen_unaligned_storehi)
- (get_unaligned_address (operands[0]),
- operands[1], temp1, temp2, temp3));
+ rtx ua = get_unaligned_address (operands[0]);
+
+ if (mode == QImode)
+ seq = gen_unaligned_storeqi (ua, operands[1], temp1, temp2, temp3);
+ else
+ seq = gen_unaligned_storehi (ua, operands[1], temp1, temp2, temp3);
alpha_set_memflags (seq, operands[0]);
emit_insn (seq);
@@ -10703,6 +10687,9 @@ alpha_init_libfuncs (void)
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES alpha_arg_partial_bytes
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD alpha_secondary_reload
+
#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P alpha_scalar_mode_supported_p
#undef TARGET_VECTOR_MODE_SUPPORTED_P
diff --git a/gcc/config/alpha/alpha.h b/gcc/config/alpha/alpha.h
index 6e30d7b3658..958d0eac7b6 100644
--- a/gcc/config/alpha/alpha.h
+++ b/gcc/config/alpha/alpha.h
@@ -582,19 +582,6 @@ enum reg_class {
#define PREFERRED_RELOAD_CLASS alpha_preferred_reload_class
-/* Loading and storing HImode or QImode values to and from memory
- usually requires a scratch register. The exceptions are loading
- QImode and HImode from an aligned address to a general register
- unless byte instructions are permitted.
- We also cannot load an unaligned address or a paradoxical SUBREG into an
- FP register. */
-
-#define SECONDARY_INPUT_RELOAD_CLASS(CLASS,MODE,IN) \
- alpha_secondary_reload_class((CLASS), (MODE), (IN), 1)
-
-#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS,MODE,OUT) \
- alpha_secondary_reload_class((CLASS), (MODE), (OUT), 0)
-
/* If we are copying between general and FP registers, we need a memory
location unless the FIX extension is available. */
diff --git a/gcc/config/alpha/alpha.md b/gcc/config/alpha/alpha.md
index b86a4777621..785082a7f82 100644
--- a/gcc/config/alpha/alpha.md
+++ b/gcc/config/alpha/alpha.md
@@ -87,6 +87,16 @@
(UNSPECV_SC 16) ; store-conditional
])
+;; On non-BWX targets, CQImode must be handled similarly to HImode
+;; when generating reloads.
+(define_mode_macro RELOAD12 [QI HI CQI])
+(define_mode_attr reloadmode [(QI "qi") (HI "hi") (CQI "hi")])
+
+;; Other mode macros
+(define_mode_macro I12MODE [QI HI])
+(define_mode_macro I48MODE [SI DI])
+(define_mode_attr modesuffix [(SI "l") (DI "q")])
+
;; Where necessary, the suffixes _le and _be are used to distinguish between
;; little-endian and big-endian patterns.
;;
@@ -6085,136 +6095,120 @@
DONE;
})
-;; Here are the versions for reload. Note that in the unaligned cases
-;; we know that the operand must not be a pseudo-register because stack
-;; slots are always aligned references.
-
-(define_expand "reload_inqi"
- [(parallel [(match_operand:QI 0 "register_operand" "=r")
- (match_operand:QI 1 "any_memory_operand" "m")
- (match_operand:TI 2 "register_operand" "=&r")])]
- "! TARGET_BWX"
+;; We need to hook into the extra support that we have for HImode
+;; reloads when BWX insns are not available.
+(define_expand "movcqi"
+ [(set (match_operand:CQI 0 "nonimmediate_operand" "")
+ (match_operand:CQI 1 "general_operand" ""))]
+ "!TARGET_BWX"
{
- rtx scratch, seq;
-
- if (aligned_memory_operand (operands[1], QImode))
+ if (GET_CODE (operands[0]) == CONCAT || GET_CODE (operands[1]) == CONCAT)
+ ;
+ else if (!any_memory_operand (operands[0], CQImode))
{
- seq = gen_reload_inqi_help (operands[0], operands[1],
- gen_rtx_REG (SImode, REGNO (operands[2])));
+ if (!any_memory_operand (operands[1], CQImode))
+ {
+ emit_move_insn (gen_lowpart (HImode, operands[0]),
+ gen_lowpart (HImode, operands[1]));
+ DONE;
+ }
+ if (aligned_memory_operand (operands[1], CQImode))
+ {
+ bool done;
+ do_aligned1:
+ operands[1] = gen_lowpart (HImode, operands[1]);
+ do_aligned2:
+ operands[0] = gen_lowpart (HImode, operands[0]);
+ done = alpha_expand_mov_nobwx (HImode, operands);
+ gcc_assert (done);
+ DONE;
+ }
}
- else
+ else if (aligned_memory_operand (operands[0], CQImode))
{
- rtx addr;
-
- /* It is possible that one of the registers we got for operands[2]
- might coincide with that of operands[0] (which is why we made
- it TImode). Pick the other one to use as our scratch. */
- if (REGNO (operands[0]) == REGNO (operands[2]))
- scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
- else
- scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
-
- addr = get_unaligned_address (operands[1]);
- operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
- seq = gen_unaligned_loadqi (operands[0], addr, scratch, operands[0]);
- alpha_set_memflags (seq, operands[1]);
+ if (MEM_P (operands[1]))
+ {
+ rtx x = gen_reg_rtx (HImode);
+ emit_move_insn (gen_lowpart (CQImode, x), operands[1]);
+ operands[1] = x;
+ goto do_aligned2;
+ }
+ goto do_aligned1;
}
- emit_insn (seq);
+
+ gcc_assert (!reload_in_progress);
+ emit_move_complex_parts (operands[0], operands[1]);
DONE;
})
-(define_expand "reload_inhi"
- [(parallel [(match_operand:HI 0 "register_operand" "=r")
- (match_operand:HI 1 "any_memory_operand" "m")
+;; Here are the versions for reload.
+;;
+;; The aligned input case is recognized early in alpha_secondary_reload
+;; in order to avoid allocating an unnecessary scratch register.
+;;
+;; Note that in the unaligned cases we know that the operand must not be
+;; a pseudo-register because stack slots are always aligned references.
+
+(define_expand "reload_in<mode>"
+ [(parallel [(match_operand:RELOAD12 0 "register_operand" "=r")
+ (match_operand:RELOAD12 1 "any_memory_operand" "m")
(match_operand:TI 2 "register_operand" "=&r")])]
- "! TARGET_BWX"
+ "!TARGET_BWX"
{
- rtx scratch, seq;
+ rtx scratch, seq, addr;
+ unsigned regno = REGNO (operands[2]);
- if (aligned_memory_operand (operands[1], HImode))
- {
- seq = gen_reload_inhi_help (operands[0], operands[1],
- gen_rtx_REG (SImode, REGNO (operands[2])));
- }
- else
- {
- rtx addr;
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ if (regno == REGNO (operands[0]))
+ regno++;
+ scratch = gen_rtx_REG (DImode, regno);
- /* It is possible that one of the registers we got for operands[2]
- might coincide with that of operands[0] (which is why we made
- it TImode). Pick the other one to use as our scratch. */
- if (REGNO (operands[0]) == REGNO (operands[2]))
- scratch = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
- else
- scratch = gen_rtx_REG (DImode, REGNO (operands[2]));
+ addr = get_unaligned_address (operands[1]);
+ operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
+ seq = gen_unaligned_load<reloadmode> (operands[0], addr,
+ scratch, operands[0]);
+ alpha_set_memflags (seq, operands[1]);
- addr = get_unaligned_address (operands[1]);
- operands[0] = gen_rtx_REG (DImode, REGNO (operands[0]));
- seq = gen_unaligned_loadhi (operands[0], addr, scratch, operands[0]);
- alpha_set_memflags (seq, operands[1]);
- }
emit_insn (seq);
DONE;
})
-(define_expand "reload_outqi"
- [(parallel [(match_operand:QI 0 "any_memory_operand" "=m")
- (match_operand:QI 1 "register_operand" "r")
+(define_expand "reload_out<mode>"
+ [(parallel [(match_operand:RELOAD12 0 "any_memory_operand" "=m")
+ (match_operand:RELOAD12 1 "register_operand" "r")
(match_operand:TI 2 "register_operand" "=&r")])]
"! TARGET_BWX"
{
- if (aligned_memory_operand (operands[0], QImode))
- {
- emit_insn (gen_reload_outqi_help
- (operands[0], operands[1],
- gen_rtx_REG (SImode, REGNO (operands[2])),
- gen_rtx_REG (SImode, REGNO (operands[2]) + 1)));
- }
- else
- {
- rtx addr = get_unaligned_address (operands[0]);
- rtx scratch1 = gen_rtx_REG (DImode, REGNO (operands[2]));
- rtx scratch2 = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
- rtx scratch3 = scratch1;
- rtx seq;
-
- if (GET_CODE (addr) == REG)
- scratch1 = addr;
+ unsigned regno = REGNO (operands[2]);
- seq = gen_unaligned_storeqi (addr, operands[1], scratch1,
- scratch2, scratch3);
- alpha_set_memflags (seq, operands[0]);
- emit_insn (seq);
+ if (<MODE>mode == CQImode)
+ {
+ operands[0] = gen_lowpart (HImode, operands[0]);
+ operands[1] = gen_lowpart (HImode, operands[1]);
}
- DONE;
-})
-(define_expand "reload_outhi"
- [(parallel [(match_operand:HI 0 "any_memory_operand" "=m")
- (match_operand:HI 1 "register_operand" "r")
- (match_operand:TI 2 "register_operand" "=&r")])]
- "! TARGET_BWX"
-{
- if (aligned_memory_operand (operands[0], HImode))
+ if (aligned_memory_operand (operands[0], <MODE>mode))
{
- emit_insn (gen_reload_outhi_help
+ emit_insn (gen_reload_out<reloadmode>_aligned
(operands[0], operands[1],
- gen_rtx_REG (SImode, REGNO (operands[2])),
- gen_rtx_REG (SImode, REGNO (operands[2]) + 1)));
+ gen_rtx_REG (SImode, regno),
+ gen_rtx_REG (SImode, regno + 1)));
}
else
{
rtx addr = get_unaligned_address (operands[0]);
- rtx scratch1 = gen_rtx_REG (DImode, REGNO (operands[2]));
- rtx scratch2 = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ rtx scratch1 = gen_rtx_REG (DImode, regno);
+ rtx scratch2 = gen_rtx_REG (DImode, regno + 1);
rtx scratch3 = scratch1;
rtx seq;
if (GET_CODE (addr) == REG)
scratch1 = addr;
- seq = gen_unaligned_storehi (addr, operands[1], scratch1,
- scratch2, scratch3);
+ seq = gen_unaligned_store<reloadmode> (addr, operands[1], scratch1,
+ scratch2, scratch3);
alpha_set_memflags (seq, operands[0]);
emit_insn (seq);
}
@@ -6225,65 +6219,30 @@
;; always get a proper address for a stack slot during reload_foo
;; expansion, so we must delay our address manipulations until after.
-(define_insn_and_split "reload_inqi_help"
- [(set (match_operand:QI 0 "register_operand" "=r")
- (match_operand:QI 1 "memory_operand" "m"))
- (clobber (match_operand:SI 2 "register_operand" "=r"))]
- "! TARGET_BWX && (reload_in_progress || reload_completed)"
+(define_insn_and_split "reload_in<mode>_aligned"
+ [(set (match_operand:I12MODE 0 "register_operand" "=r")
+ (match_operand:I12MODE 1 "memory_operand" "m"))]
+ "!TARGET_BWX && (reload_in_progress || reload_completed)"
"#"
- "! TARGET_BWX && reload_completed"
+ "!TARGET_BWX && reload_completed"
[(const_int 0)]
{
rtx aligned_mem, bitnum;
get_aligned_mem (operands[1], &aligned_mem, &bitnum);
- operands[0] = gen_lowpart (DImode, operands[0]);
- emit_insn (gen_aligned_loadqi (operands[0], aligned_mem, bitnum,
- operands[2]));
- DONE;
-})
-
-(define_insn_and_split "reload_inhi_help"
- [(set (match_operand:HI 0 "register_operand" "=r")
- (match_operand:HI 1 "memory_operand" "m"))
- (clobber (match_operand:SI 2 "register_operand" "=r"))]
- "! TARGET_BWX && (reload_in_progress || reload_completed)"
- "#"
- "! TARGET_BWX && reload_completed"
- [(const_int 0)]
-{
- rtx aligned_mem, bitnum;
- get_aligned_mem (operands[1], &aligned_mem, &bitnum);
- operands[0] = gen_lowpart (DImode, operands[0]);
- emit_insn (gen_aligned_loadhi (operands[0], aligned_mem, bitnum,
- operands[2]));
- DONE;
-})
-
-(define_insn_and_split "reload_outqi_help"
- [(set (match_operand:QI 0 "memory_operand" "=m")
- (match_operand:QI 1 "register_operand" "r"))
- (clobber (match_operand:SI 2 "register_operand" "=r"))
- (clobber (match_operand:SI 3 "register_operand" "=r"))]
- "! TARGET_BWX && (reload_in_progress || reload_completed)"
- "#"
- "! TARGET_BWX && reload_completed"
- [(const_int 0)]
-{
- rtx aligned_mem, bitnum;
- get_aligned_mem (operands[0], &aligned_mem, &bitnum);
- emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
- operands[2], operands[3]));
+ emit_insn (gen_aligned_load<reloadmode>
+ (gen_lowpart (DImode, operands[0]), aligned_mem, bitnum,
+ gen_rtx_REG (SImode, REGNO (operands[0]))));
DONE;
})
-(define_insn_and_split "reload_outhi_help"
- [(set (match_operand:HI 0 "memory_operand" "=m")
- (match_operand:HI 1 "register_operand" "r"))
+(define_insn_and_split "reload_out<mode>_aligned"
+ [(set (match_operand:I12MODE 0 "memory_operand" "=m")
+ (match_operand:I12MODE 1 "register_operand" "r"))
(clobber (match_operand:SI 2 "register_operand" "=r"))
(clobber (match_operand:SI 3 "register_operand" "=r"))]
- "! TARGET_BWX && (reload_in_progress || reload_completed)"
+ "!TARGET_BWX && (reload_in_progress || reload_completed)"
"#"
- "! TARGET_BWX && reload_completed"
+ "!TARGET_BWX && reload_completed"
[(const_int 0)]
{
rtx aligned_mem, bitnum;
diff --git a/gcc/config/alpha/predicates.md b/gcc/config/alpha/predicates.md
index 0b2c22f16dd..a4e9c9e5abc 100644
--- a/gcc/config/alpha/predicates.md
+++ b/gcc/config/alpha/predicates.md
@@ -434,7 +434,7 @@
;; use recog during reload, so pretending these codes are accepted
;; pessimizes things a tad.
-(define_predicate "aligned_memory_operand"
+(define_special_predicate "aligned_memory_operand"
(ior (match_test "op = resolve_reload_operand (op), 0")
(match_code "mem"))
{
@@ -462,7 +462,7 @@
;; Similar, but return 1 if OP is a MEM which is not alignable.
-(define_predicate "unaligned_memory_operand"
+(define_special_predicate "unaligned_memory_operand"
(ior (match_test "op = resolve_reload_operand (op), 0")
(match_code "mem"))
{
@@ -489,20 +489,30 @@
})
;; Return 1 if OP is any memory location. During reload a pseudo matches.
-(define_predicate "any_memory_operand"
- (ior (match_code "mem,reg")
- (and (match_code "subreg")
- (match_test "GET_CODE (SUBREG_REG (op)) == REG"))))
+(define_special_predicate "any_memory_operand"
+ (match_code "mem,reg,subreg")
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
-;; Return 1 if OP is either a register or an unaligned memory location.
-(define_predicate "reg_or_unaligned_mem_operand"
- (ior (match_operand 0 "register_operand")
- (match_operand 0 "unaligned_memory_operand")))
+ if (MEM_P (op))
+ return true;
+ if (reload_in_progress && REG_P (op))
+ {
+ unsigned regno = REGNO (op);
+ if (HARD_REGISTER_NUM_P (regno))
+ return false;
+ else
+ return reg_renumber[regno] < 0;
+ }
+
+ return false;
+})
;; Return 1 if OP is a memory location that is not a reference
;; (using an AND) to an unaligned location. Take into account
;; what reload will do.
-(define_predicate "normal_memory_operand"
+(define_special_predicate "normal_memory_operand"
(ior (match_test "op = resolve_reload_operand (op), 0")
(and (match_code "mem")
(match_test "GET_CODE (XEXP (op, 0)) != AND"))))
diff --git a/gcc/config/alpha/sync.md b/gcc/config/alpha/sync.md
index 1c34ce54b1c..9a90245b5a5 100644
--- a/gcc/config/alpha/sync.md
+++ b/gcc/config/alpha/sync.md
@@ -19,10 +19,6 @@
;; the Free Software Foundation, 51 Franklin Street, Fifth Floor,
;; Boston, MA 02110-1301, USA.
-(define_mode_macro I12MODE [QI HI])
-(define_mode_macro I48MODE [SI DI])
-(define_mode_attr modesuffix [(SI "l") (DI "q")])
-
(define_code_macro FETCHOP [plus minus ior xor and])
(define_code_attr fetchop_name
[(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])