diff options
author | ghazi <ghazi@138bc75d-0d04-0410-961f-82ee72b054a4> | 2008-08-06 16:12:51 +0000 |
---|---|---|
committer | ghazi <ghazi@138bc75d-0d04-0410-961f-82ee72b054a4> | 2008-08-06 16:12:51 +0000 |
commit | 8deb3959b001122f1d9f0f8320adc8bc77844046 (patch) | |
tree | cb2367793dcc0fbbf5f9b739495dda670be306d8 /gcc/config/sh/sh.c | |
parent | fd4a16cd41d4a3ac842e975bf90f8af446abaf79 (diff) | |
download | gcc-8deb3959b001122f1d9f0f8320adc8bc77844046.tar.gz |
* config/alpha/alpha.c (alpha_preferred_reload_class,
alpha_secondary_reload, alpha_emit_set_const_1, function_value,
alpha_output_mi_thunk_osf): Avoid C++ keywords.
* config/arm/arm.c (output_move_vfp, output_move_neon): Likewise.
* config/arm/arm.md: Likewise.
* config/avr/avr-protos.h (preferred_reload_class,
test_hard_reg_class, avr_simplify_comparison_p,
out_shift_with_cnt, class_max_nregs): Likewise.
* config/avr/avr.c (class_max_nregs, avr_simplify_comparison_p,
output_movqi, output_movhi, output_movsisf, out_shift_with_cnt,
preferred_reload_class, test_hard_reg_class): Likewise.
* config/bfin/bfin.c (legitimize_pic_address, hard_regno_mode_ok,
bfin_memory_move_cost, bfin_secondary_reload,
bfin_output_mi_thunk): Likewise.
* config/crx/crx.c (crx_secondary_reload_class,
crx_memory_move_cost): Likewise.
* config/frv/frv-protos.h (frv_secondary_reload_class,
frv_class_likely_spilled_p, frv_class_max_nregs): Likewise.
* config/frv/frv.c (frv_override_options, frv_alloc_temp_reg,
frv_secondary_reload_class, frv_class_likely_spilled_p,
frv_class_max_nregs): Likewise.
* config/h8300/h8300.c (h8300_classify_operand,
h8300_unary_length, h8300_bitfield_length, h8300_asm_insn_count):
Likewise.
* config/i386/winnt.c (i386_pe_declare_function_type): Likewise.
* config/ia64/ia64.c (ia64_preferred_reload_class,
ia64_secondary_reload_class, ia64_output_mi_thunk): Likewise.
* config/iq2000/iq2000.c (gen_int_relational): Likewise.
* config/m32c/m32c.c (class_can_hold_mode, m32c_output_compare):
Likewise.
* config/m68hc11/m68hc11.c (preferred_reload_class,
m68hc11_memory_move_cost): Likewise.
* config/mcore/mcore.c (mcore_secondary_reload_class,
mcore_reload_class): Likewise.
* config/mips/mips.c (mips_hard_regno_mode_ok_p,
mips_class_max_nregs, mips_cannot_change_mode_class,
mips_preferred_reload_class, mips_secondary_reload_class,
mips_output_mi_thunk): Likewise.
* config/mmix/mmix.c (mmix_preferred_reload_class,
mmix_preferred_output_reload_class, mmix_secondary_reload_class):
Likewise.
* config/mn10300/mn10300.c (mn10300_secondary_reload_class):
Likewise.
* config/pa/pa.c (pa_secondary_reload, pa_combine_instructions,
pa_can_combine_p, pa_cannot_change_mode_class): Likewise.
* config/pa/pa.h (LEGITIMIZE_RELOAD_ADDRESS): Likewise.
* config/rs6000/rs6000.c (paired_expand_vector_init,
rs6000_secondary_reload_class, rs6000_output_mi_thunk,
compare_section_name, rs6000_memory_move_cost): Likewise.
* config/s390/s390.c (s390_emit_compare_and_swap,
s390_preferred_reload_class, s390_secondary_reload,
legitimize_pic_address, legitimize_tls_address,
legitimize_reload_address, s390_expand_cs_hqi, s390_expand_atomic,
s390_class_max_nregs): Likewise.
* config/s390/s390.h (LEGITIMIZE_RELOAD_ADDRESS): Likewise.
* config/s390/s390.md: Likewise.
* config/score/score-protos.h (score_secondary_reload_class,
score_preferred_reload_class): Likewise.
* config/score/score.c (score_preferred_reload_class,
score_secondary_reload_class): Likewise.
* config/score/score3.c (score3_output_mi_thunk,
score3_preferred_reload_class, score3_secondary_reload_class,
score3_hard_regno_mode_ok): Likewise.
* config/score/score3.h (score3_preferred_reload_class,
score3_secondary_reload_class): Likewise.
* config/score/score7.c (score7_output_mi_thunk,
score7_preferred_reload_class, score7_secondary_reload_class,
score7_hard_regno_mode_ok): Likewise.
* config/score/score7.h (score7_preferred_reload_class,
score7_secondary_reload_class): Likewise.
* config/sh/sh.c (prepare_move_operands, output_far_jump,
output_branchy_insn, add_constant, gen_block_redirect,
sh_insn_length_adjustment, sh_cannot_change_mode_class,
sh_output_mi_thunk, replace_n_hard_rtx, sh_secondary_reload):
Likewise.
* config/sparc/sparc.c (sparc_output_mi_thunk): Likewise.
* config/stormy16/stormy16.c (xstormy16_output_cbranch_hi,
xstormy16_output_cbranch_si, xstormy16_secondary_reload_class,
xstormy16_preferred_reload_class): Likewise.
* config/xtensa/xtensa.c (xtensa_expand_compare_and_swap,
xtensa_expand_atomic, override_options,
xtensa_preferred_reload_class, xtensa_secondary_reload_class):
Likewise.
* reorg.c (try_merge_delay_insns): Likewise.
* tree.c (merge_dllimport_decl_attributes): Likewise.
* config/frv/frv.c (frv_print_operand): Change isalpha to ISALPHA.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@138813 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/sh/sh.c')
-rw-r--r-- | gcc/config/sh/sh.c | 168 |
1 file changed, 84 insertions, 84 deletions
diff --git a/gcc/config/sh/sh.c b/gcc/config/sh/sh.c index 60a940bbc84..74060738ee9 100644 --- a/gcc/config/sh/sh.c +++ b/gcc/config/sh/sh.c @@ -1300,9 +1300,9 @@ prepare_move_operands (rtx operands[], enum machine_mode mode) { /* This is like change_address_1 (operands[0], mode, 0, 1) , except that we can't use that function because it is static. */ - rtx new = change_address (operands[0], mode, 0); - MEM_COPY_ATTRIBUTES (new, operands[0]); - operands[0] = new; + rtx new_rtx = change_address (operands[0], mode, 0); + MEM_COPY_ATTRIBUTES (new_rtx, operands[0]); + operands[0] = new_rtx; } /* This case can happen while generating code to move the result @@ -1915,14 +1915,14 @@ print_slot (rtx insn) const char * output_far_jump (rtx insn, rtx op) { - struct { rtx lab, reg, op; } this; + struct { rtx lab, reg, op; } this_jmp; rtx braf_base_lab = NULL_RTX; const char *jump; int far; int offset = branch_dest (insn) - INSN_ADDRESSES (INSN_UID (insn)); rtx prev; - this.lab = gen_label_rtx (); + this_jmp.lab = gen_label_rtx (); if (TARGET_SH2 && offset >= -32764 @@ -1948,10 +1948,10 @@ output_far_jump (rtx insn, rtx op) if (GET_CODE ((prev = prev_nonnote_insn (insn))) == INSN && INSN_CODE (prev) == CODE_FOR_indirect_jump_scratch) { - this.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0)); - if (REGNO (this.reg) == R0_REG && flag_pic && ! TARGET_SH2) + this_jmp.reg = SET_DEST (XVECEXP (PATTERN (prev), 0, 0)); + if (REGNO (this_jmp.reg) == R0_REG && flag_pic && ! TARGET_SH2) jump = "mov.l r1,@-r15; mova %O0,r0; mov.l @r0,r1; add r1,r0; mov.l @r15+,r1; jmp @%1"; - output_asm_insn (jump, &this.lab); + output_asm_insn (jump, &this_jmp.lab); if (dbr_sequence_length ()) print_slot (final_sequence); else @@ -1963,7 +1963,7 @@ output_far_jump (rtx insn, rtx op) if (dbr_sequence_length ()) print_slot (final_sequence); - this.reg = gen_rtx_REG (SImode, 13); + this_jmp.reg = gen_rtx_REG (SImode, 13); /* We must keep the stack aligned to 8-byte boundaries on SH5. 
Fortunately, MACL is fixed and call-clobbered, and we never need its value across jumps, so save r13 in it instead of in @@ -1972,7 +1972,7 @@ output_far_jump (rtx insn, rtx op) output_asm_insn ("lds r13, macl", 0); else output_asm_insn ("mov.l r13,@-r15", 0); - output_asm_insn (jump, &this.lab); + output_asm_insn (jump, &this_jmp.lab); if (TARGET_SH5) output_asm_insn ("sts macl, r13", 0); else @@ -1986,16 +1986,16 @@ output_far_jump (rtx insn, rtx op) } if (far) output_asm_insn (".align 2", 0); - (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this.lab)); - this.op = op; + (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (this_jmp.lab)); + this_jmp.op = op; if (far && flag_pic) { if (TARGET_SH2) - this.lab = braf_base_lab; - output_asm_insn (".long %O2-%O0", &this.lab); + this_jmp.lab = braf_base_lab; + output_asm_insn (".long %O2-%O0", &this_jmp.lab); } else - output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this.lab); + output_asm_insn (far ? ".long %O2" : ".word %O2-%O0", &this_jmp.lab); return ""; } @@ -2092,14 +2092,14 @@ output_branch (int logic, rtx insn, rtx *operands) } } -/* Output a code sequence for INSN using TEMPLATE with OPERANDS; but before, +/* Output a code sequence for INSN using TEMPL with OPERANDS; but before, fill in operands 9 as a label to the successor insn. We try to use jump threading where possible. IF CODE matches the comparison in the IF_THEN_ELSE of a following jump, we assume the jump is taken. I.e. EQ means follow jmp and bf, NE means follow jmp and bt, if the address is in range. 
*/ const char * -output_branchy_insn (enum rtx_code code, const char *template, +output_branchy_insn (enum rtx_code code, const char *templ, rtx insn, rtx *operands) { rtx next_insn = NEXT_INSN (insn); @@ -2115,7 +2115,7 @@ output_branchy_insn (enum rtx_code code, const char *template, INSN_ADDRESSES_NEW (operands[9], INSN_ADDRESSES (INSN_UID (next_insn)) + get_attr_length (next_insn)); - return template; + return templ; } else { @@ -2127,7 +2127,7 @@ output_branchy_insn (enum rtx_code code, const char *template, /* branch_true */ src = XEXP (src, 1); operands[9] = src; - return template; + return templ; } } } @@ -2136,7 +2136,7 @@ output_branchy_insn (enum rtx_code code, const char *template, INSN_ADDRESSES_NEW (operands[9], INSN_ADDRESSES (INSN_UID (insn)) + get_attr_length (insn)); - return template; + return templ; } const char * @@ -3483,7 +3483,7 @@ static rtx add_constant (rtx x, enum machine_mode mode, rtx last_value) { int i; - rtx lab, new; + rtx lab, new_rtx; label_ref_list_t ref, newref; /* First see if we've already got it. */ @@ -3499,14 +3499,14 @@ add_constant (rtx x, enum machine_mode mode, rtx last_value) } if (rtx_equal_p (x, pool_vector[i].value)) { - lab = new = 0; + lab = new_rtx = 0; if (! last_value || ! i || ! 
rtx_equal_p (last_value, pool_vector[i-1].value)) { - new = gen_label_rtx (); - LABEL_REFS (new) = pool_vector[i].label; - pool_vector[i].label = lab = new; + new_rtx = gen_label_rtx (); + LABEL_REFS (new_rtx) = pool_vector[i].label; + pool_vector[i].label = lab = new_rtx; } if (lab && pool_window_label) { @@ -3516,8 +3516,8 @@ add_constant (rtx x, enum machine_mode mode, rtx last_value) newref->next = ref; pool_vector[pool_window_last].wend = newref; } - if (new) - pool_window_label = new; + if (new_rtx) + pool_window_label = new_rtx; pool_window_last = i; return lab; } @@ -4421,7 +4421,7 @@ gen_block_redirect (rtx jump, int addr, int need_block) rtx scan; /* Don't look for the stack pointer as a scratch register, it would cause trouble if an interrupt occurred. */ - unsigned try = 0x7fff, used; + unsigned attempt = 0x7fff, used; int jump_left = flag_expensive_optimizations + 1; /* It is likely that the most recent eligible instruction is wanted for @@ -4442,7 +4442,7 @@ gen_block_redirect (rtx jump, int addr, int need_block) && GET_CODE (PATTERN (scan)) != CLOBBER && get_attr_in_delay_slot (scan) == IN_DELAY_SLOT_YES) { - try &= ~regs_used (PATTERN (scan), 0); + attempt &= ~regs_used (PATTERN (scan), 0); break; } } @@ -4460,9 +4460,9 @@ gen_block_redirect (rtx jump, int addr, int need_block) if (code == CALL_INSN) used |= regs_used (CALL_INSN_FUNCTION_USAGE (scan), 0); dead |= (used >> 16) & ~used; - if (dead & try) + if (dead & attempt) { - dead &= try; + dead &= attempt; break; } if (code == JUMP_INSN) @@ -8775,14 +8775,14 @@ sh_insn_length_adjustment (rtx insn) { int sum = 0; rtx body = PATTERN (insn); - const char *template; + const char *templ; char c; int maybe_label = 1; if (GET_CODE (body) == ASM_INPUT) - template = XSTR (body, 0); + templ = XSTR (body, 0); else if (asm_noperands (body) >= 0) - template + templ = decode_asm_operands (body, NULL, NULL, NULL, NULL, NULL); else return 0; @@ -8791,20 +8791,20 @@ sh_insn_length_adjustment (rtx insn) int 
ppi_adjust = 0; do - c = *template++; + c = *templ++; while (c == ' ' || c == '\t'); /* all sh-dsp parallel-processing insns start with p. The only non-ppi sh insn starting with p is pref. The only ppi starting with pr is prnd. */ - if ((c == 'p' || c == 'P') && strncasecmp ("re", template, 2)) + if ((c == 'p' || c == 'P') && strncasecmp ("re", templ, 2)) ppi_adjust = 2; /* The repeat pseudo-insn expands two three insns, a total of six bytes in size. */ else if ((c == 'r' || c == 'R') - && ! strncasecmp ("epeat", template, 5)) + && ! strncasecmp ("epeat", templ, 5)) ppi_adjust = 4; while (c && c != '\n' - && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, template)) + && ! IS_ASM_LOGICAL_LINE_SEPARATOR (c, templ)) { /* If this is a label, it is obviously not a ppi insn. */ if (c == ':' && maybe_label) @@ -8814,7 +8814,7 @@ sh_insn_length_adjustment (rtx insn) } else if (c == '\'' || c == '"') maybe_label = 0; - c = *template++; + c = *templ++; } sum += ppi_adjust; maybe_label = c != ':'; @@ -10234,24 +10234,24 @@ sh_expand_binop_v2sf (enum rtx_code code, rtx op0, rtx op1, rtx op2) is invalid. */ bool sh_cannot_change_mode_class (enum machine_mode from, enum machine_mode to, - enum reg_class class) + enum reg_class rclass) { /* We want to enable the use of SUBREGs as a means to VEC_SELECT a single element of a vector. 
*/ if (to == SFmode && VECTOR_MODE_P (from) && GET_MODE_INNER (from) == SFmode) - return (reg_classes_intersect_p (GENERAL_REGS, class)); + return (reg_classes_intersect_p (GENERAL_REGS, rclass)); if (GET_MODE_SIZE (from) != GET_MODE_SIZE (to)) { if (TARGET_LITTLE_ENDIAN) { if (GET_MODE_SIZE (to) < 8 || GET_MODE_SIZE (from) < 8) - return reg_classes_intersect_p (DF_REGS, class); + return reg_classes_intersect_p (DF_REGS, rclass); } else { if (GET_MODE_SIZE (from) < 8) - return reg_classes_intersect_p (DF_HI_REGS, class); + return reg_classes_intersect_p (DF_HI_REGS, rclass); } } return 0; @@ -10371,7 +10371,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, { CUMULATIVE_ARGS cum; int structure_value_byref = 0; - rtx this, this_value, sibcall, insns, funexp; + rtx this_rtx, this_value, sibcall, insns, funexp; tree funtype = TREE_TYPE (function); int simple_add = CONST_OK_FOR_ADD (delta); int did_load = 0; @@ -10399,7 +10399,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, FUNCTION_ARG_ADVANCE (cum, Pmode, ptype, 1); } - this = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1); + this_rtx = FUNCTION_ARG (cum, Pmode, ptr_type_node, 1); /* For SHcompact, we only have r0 for a scratch register: r1 is the static chain pointer (even if you can't have nested virtual functions @@ -10440,7 +10440,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, error ("Need a call-clobbered target register"); } - this_value = plus_constant (this, delta); + this_value = plus_constant (this_rtx, delta); if (vcall_offset && (simple_add || scratch0 != scratch1) && strict_memory_address_p (ptr_mode, this_value)) @@ -10452,11 +10452,11 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, if (!delta) ; /* Do nothing. 
*/ else if (simple_add) - emit_move_insn (this, this_value); + emit_move_insn (this_rtx, this_value); else { emit_move_insn (scratch1, GEN_INT (delta)); - emit_insn (gen_add2_insn (this, scratch1)); + emit_insn (gen_add2_insn (this_rtx, scratch1)); } if (vcall_offset) @@ -10464,7 +10464,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, rtx offset_addr; if (!did_load) - emit_load_ptr (scratch0, this); + emit_load_ptr (scratch0, this_rtx); offset_addr = plus_constant (scratch0, vcall_offset); if (strict_memory_address_p (ptr_mode, offset_addr)) @@ -10474,7 +10474,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, /* scratch0 != scratch1, and we have indexed loads. Get better schedule by loading the offset into r1 and using an indexed load - then the load of r1 can issue before the load from - (this + delta) finishes. */ + (this_rtx + delta) finishes. */ emit_move_insn (scratch1, GEN_INT (vcall_offset)); offset_addr = gen_rtx_PLUS (Pmode, scratch0, scratch1); } @@ -10495,7 +10495,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, if (Pmode != ptr_mode) scratch0 = gen_rtx_TRUNCATE (ptr_mode, scratch0); - emit_insn (gen_add2_insn (this, scratch0)); + emit_insn (gen_add2_insn (this_rtx, scratch0)); } /* Generate a tail call to the target function. 
*/ @@ -10530,7 +10530,7 @@ sh_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, } sibcall = emit_call_insn (sibcall); SIBLING_CALL_P (sibcall) = 1; - use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this); + use_reg (&CALL_INSN_FUNCTION_USAGE (sibcall), this_rtx); emit_barrier (); /* Run just enough of rest_of_compilation to do scheduling and get @@ -10930,19 +10930,19 @@ replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify) if (GET_CODE (x) == SUBREG) { - rtx new = replace_n_hard_rtx (SUBREG_REG (x), replacements, + rtx new_rtx = replace_n_hard_rtx (SUBREG_REG (x), replacements, n_replacements, modify); - if (GET_CODE (new) == CONST_INT) + if (GET_CODE (new_rtx) == CONST_INT) { - x = simplify_subreg (GET_MODE (x), new, + x = simplify_subreg (GET_MODE (x), new_rtx, GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x)); if (! x) abort (); } else if (modify) - SUBREG_REG (x) = new; + SUBREG_REG (x) = new_rtx; return x; } @@ -10990,18 +10990,18 @@ replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify) } else if (GET_CODE (x) == ZERO_EXTEND) { - rtx new = replace_n_hard_rtx (XEXP (x, 0), replacements, + rtx new_rtx = replace_n_hard_rtx (XEXP (x, 0), replacements, n_replacements, modify); - if (GET_CODE (new) == CONST_INT) + if (GET_CODE (new_rtx) == CONST_INT) { x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x), - new, GET_MODE (XEXP (x, 0))); + new_rtx, GET_MODE (XEXP (x, 0))); if (! 
x) abort (); } else if (modify) - XEXP (x, 0) = new; + XEXP (x, 0) = new_rtx; return x; } @@ -11009,26 +11009,26 @@ replace_n_hard_rtx (rtx x, rtx *replacements, int n_replacements, int modify) fmt = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) { - rtx new; + rtx new_rtx; if (fmt[i] == 'e') { - new = replace_n_hard_rtx (XEXP (x, i), replacements, + new_rtx = replace_n_hard_rtx (XEXP (x, i), replacements, n_replacements, modify); - if (!new) + if (!new_rtx) return NULL_RTX; if (modify) - XEXP (x, i) = new; + XEXP (x, i) = new_rtx; } else if (fmt[i] == 'E') for (j = XVECLEN (x, i) - 1; j >= 0; j--) { - new = replace_n_hard_rtx (XVECEXP (x, i, j), replacements, + new_rtx = replace_n_hard_rtx (XVECEXP (x, i, j), replacements, n_replacements, modify); - if (!new) + if (!new_rtx) return NULL_RTX; if (modify) - XVECEXP (x, i, j) = new; + XVECEXP (x, i, j) = new_rtx; } } @@ -11162,12 +11162,12 @@ shmedia_prepare_call_address (rtx fnaddr, int is_sibcall) } enum reg_class -sh_secondary_reload (bool in_p, rtx x, enum reg_class class, +sh_secondary_reload (bool in_p, rtx x, enum reg_class rclass, enum machine_mode mode, secondary_reload_info *sri) { if (in_p) { - if (REGCLASS_HAS_FP_REG (class) + if (REGCLASS_HAS_FP_REG (rclass) && ! TARGET_SHMEDIA && immediate_operand ((x), mode) && ! 
((fp_zero_operand (x) || fp_one_operand (x)) @@ -11187,13 +11187,13 @@ sh_secondary_reload (bool in_p, rtx x, enum reg_class class, default: abort (); } - if (class == FPUL_REGS + if (rclass == FPUL_REGS && ((GET_CODE (x) == REG && (REGNO (x) == MACL_REG || REGNO (x) == MACH_REG || REGNO (x) == T_REG)) || GET_CODE (x) == PLUS)) return GENERAL_REGS; - if (class == FPUL_REGS && immediate_operand (x, mode)) + if (rclass == FPUL_REGS && immediate_operand (x, mode)) { if (satisfies_constraint_I08 (x) || fp_zero_operand (x)) return GENERAL_REGS; @@ -11202,11 +11202,11 @@ sh_secondary_reload (bool in_p, rtx x, enum reg_class class, sri->icode = CODE_FOR_reload_insi__i_fpul; return NO_REGS; } - if (class == FPSCR_REGS + if (rclass == FPSCR_REGS && ((GET_CODE (x) == REG && REGNO (x) >= FIRST_PSEUDO_REGISTER) || (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == PLUS))) return GENERAL_REGS; - if (REGCLASS_HAS_FP_REG (class) + if (REGCLASS_HAS_FP_REG (rclass) && TARGET_SHMEDIA && immediate_operand (x, mode) && x != CONST0_RTX (GET_MODE (x)) @@ -11219,24 +11219,24 @@ sh_secondary_reload (bool in_p, rtx x, enum reg_class class, ? CODE_FOR_reload_inqi : CODE_FOR_reload_inhi); return NO_REGS; } - if (TARGET_SHMEDIA && class == GENERAL_REGS + if (TARGET_SHMEDIA && rclass == GENERAL_REGS && (GET_CODE (x) == LABEL_REF || PIC_DIRECT_ADDR_P (x))) return TARGET_REGS; } /* end of input-only processing. */ - if (((REGCLASS_HAS_FP_REG (class) + if (((REGCLASS_HAS_FP_REG (rclass) && (GET_CODE (x) == REG && (GENERAL_OR_AP_REGISTER_P (REGNO (x)) || (FP_REGISTER_P (REGNO (x)) && mode == SImode && TARGET_FMOVD)))) - || (REGCLASS_HAS_GENERAL_REG (class) + || (REGCLASS_HAS_GENERAL_REG (rclass) && GET_CODE (x) == REG && FP_REGISTER_P (REGNO (x)))) && ! TARGET_SHMEDIA && (mode == SFmode || mode == SImode)) return FPUL_REGS; - if ((class == FPUL_REGS - || (REGCLASS_HAS_FP_REG (class) + if ((rclass == FPUL_REGS + || (REGCLASS_HAS_FP_REG (rclass) && ! 
TARGET_SHMEDIA && mode == SImode)) && (GET_CODE (x) == MEM || (GET_CODE (x) == REG @@ -11244,20 +11244,20 @@ sh_secondary_reload (bool in_p, rtx x, enum reg_class class, || REGNO (x) == T_REG || system_reg_operand (x, VOIDmode))))) { - if (class == FPUL_REGS) + if (rclass == FPUL_REGS) return GENERAL_REGS; return FPUL_REGS; } - if ((class == TARGET_REGS - || (TARGET_SHMEDIA && class == SIBCALL_REGS)) + if ((rclass == TARGET_REGS + || (TARGET_SHMEDIA && rclass == SIBCALL_REGS)) && !satisfies_constraint_Csy (x) && (GET_CODE (x) != REG || ! GENERAL_REGISTER_P (REGNO (x)))) return GENERAL_REGS; - if ((class == MAC_REGS || class == PR_REGS) + if ((rclass == MAC_REGS || rclass == PR_REGS) && GET_CODE (x) == REG && ! GENERAL_REGISTER_P (REGNO (x)) - && class != REGNO_REG_CLASS (REGNO (x))) + && rclass != REGNO_REG_CLASS (REGNO (x))) return GENERAL_REGS; - if (class != GENERAL_REGS && GET_CODE (x) == REG + if (rclass != GENERAL_REGS && GET_CODE (x) == REG && TARGET_REGISTER_P (REGNO (x))) return GENERAL_REGS; return NO_REGS; |