author     schwab <schwab@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-09-30 17:29:16 +0000
committer  schwab <schwab@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-09-30 17:29:16 +0000
commit     facacc46058f32a2b4d1b4a5aa99cd3d428e9ae1 (patch)
tree       c06ba559d6a1c5e89873c542340d7894fe59587e /gcc/config/m68k/m68k.md
parent     4e7744bb0cba5e6f23720b93bdefc863c6dba5f8 (diff)
download   gcc-facacc46058f32a2b4d1b4a5aa99cd3d428e9ae1.tar.gz
	* config/m68k/m68k.md: Add names to bitfield insert and extract
	insns.
	(*insv_8_16_reg): Remove constraints and conditions that assume
	that operand 0 could be a MEM.
	(*extzv_8_16_reg, *extv_8_16_reg): Likewise, for operand 1.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@191872 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/m68k/m68k.md')
-rw-r--r--  gcc/config/m68k/m68k.md | 102
1 file changed, 31 insertions(+), 71 deletions(-)
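
Background, as a hedged sketch: the *insv_8_16_reg, *extzv_8_16_reg and *extv_8_16_reg insns renamed in the diff below match 8- and 16-bit bit-field inserts and extracts at byte-aligned positions when the containing 32-bit word is in a data register, emitting a plain move.b/move.w for the final byte or word and bfins/bfextu/bfexts otherwise. A minimal C example of the kind of access that can exercise them on a 68020 with bit-field instructions; the struct layout and function names are illustrative assumptions, not taken from this change:

/* Illustrative only: byte-aligned 8- and 16-bit fields packed into one
   32-bit word -- the shape the *_8_16_reg patterns can match when the
   word is held in a data register.  */
struct packed_word
{
  unsigned int high16 : 16;   /* 16-bit field at a 16-bit boundary */
  unsigned int mid8   : 8;    /* 8-bit field at an 8-bit boundary  */
  unsigned int low8   : 8;    /* final byte: a plain move.b suffices */
};

unsigned int
get_mid8 (struct packed_word w)
{
  /* zero_extract from a register: bfextu, or clr.l + move.b when the
     field ends at bit 32.  */
  return w.mid8;
}

void
set_low8 (struct packed_word *p, unsigned int v)
{
  struct packed_word w = *p;
  w.low8 = v;                 /* zero_extract store into a register */
  *p = w;
}
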
diff --git a/gcc/config/m68k/m68k.md b/gcc/config/m68k/m68k.md
index 82807d8da6f..31be33edeb1 100644
--- a/gcc/config/m68k/m68k.md
+++ b/gcc/config/m68k/m68k.md
@@ -5603,7 +5603,7 @@
; The move is allowed to be odd byte aligned, because that's still faster
; than an odd byte aligned bit-field instruction.
;
-(define_insn ""
+(define_insn "*insv_32_mem"
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(const_int 32)
(match_operand:SI 1 "const_int_operand" "n"))
@@ -5619,32 +5619,17 @@
return "move%.l %2,%0";
})
-(define_insn ""
- [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+do")
+(define_insn "*insv_8_16_reg"
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))
(match_operand:SI 3 "register_operand" "d"))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[1]) == 8 || INTVAL (operands[1]) == 16)
- && INTVAL (operands[2]) % INTVAL (operands[1]) == 0
- && (GET_CODE (operands[0]) == REG
- || ! mode_dependent_address_p (XEXP (operands[0], 0),
- MEM_ADDR_SPACE (operands[0])))"
+ && INTVAL (operands[2]) % INTVAL (operands[1]) == 0"
{
- if (REG_P (operands[0]))
- {
- if (INTVAL (operands[1]) + INTVAL (operands[2]) != 32)
- return "bfins %3,%0{%b2:%b1}";
- }
- else
- operands[0] = adjust_address (operands[0],
- INTVAL (operands[1]) == 8 ? QImode : HImode,
- INTVAL (operands[2]) / 8);
-
- if (GET_CODE (operands[3]) == MEM)
- operands[3] = adjust_address (operands[3],
- INTVAL (operands[1]) == 8 ? QImode : HImode,
- (32 - INTVAL (operands[1])) / 8);
+ if (INTVAL (operands[1]) + INTVAL (operands[2]) != 32)
+ return "bfins %3,%0{%b2:%b1}";
if (INTVAL (operands[1]) == 8)
return "move%.b %3,%0";
@@ -5659,7 +5644,7 @@
; The move is allowed to be odd byte aligned, because that's still faster
; than an odd byte aligned bit-field instruction.
;
-(define_insn ""
+(define_insn "*extzv_32_mem"
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(zero_extract:SI (match_operand:QI 1 "memory_src_operand" "oS")
(const_int 32)
@@ -5675,34 +5660,20 @@
return "move%.l %1,%0";
})
-(define_insn ""
+(define_insn "*extzv_8_16_reg"
[(set (match_operand:SI 0 "nonimmediate_operand" "=&d")
- (zero_extract:SI (match_operand:SI 1 "register_operand" "do")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "d")
(match_operand:SI 2 "const_int_operand" "n")
(match_operand:SI 3 "const_int_operand" "n")))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
- && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
- && (GET_CODE (operands[1]) == REG
- || ! mode_dependent_address_p (XEXP (operands[1], 0),
- MEM_ADDR_SPACE (operands[1])))"
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0"
{
cc_status.flags |= CC_NOT_NEGATIVE;
- if (REG_P (operands[1]))
- {
- if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
- return "bfextu %1{%b3:%b2},%0";
- }
- else
- operands[1]
- = adjust_address (operands[1], SImode, INTVAL (operands[3]) / 8);
+ if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
+ return "bfextu %1{%b3:%b2},%0";
output_asm_insn ("clr%.l %0", operands);
- if (GET_CODE (operands[0]) == MEM)
- operands[0] = adjust_address (operands[0],
- INTVAL (operands[2]) == 8 ? QImode : HImode,
- (32 - INTVAL (operands[1])) / 8);
-
if (INTVAL (operands[2]) == 8)
return "move%.b %1,%0";
return "move%.w %1,%0";
@@ -5715,7 +5686,7 @@
; The move is allowed to be odd byte aligned, because that's still faster
; than an odd byte aligned bit-field instruction.
;
-(define_insn ""
+(define_insn "*extv_32_mem"
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(sign_extract:SI (match_operand:QI 1 "memory_src_operand" "oS")
(const_int 32)
@@ -5731,28 +5702,17 @@
return "move%.l %1,%0";
})
-(define_insn ""
+(define_insn "*extv_8_16_reg"
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
- (sign_extract:SI (match_operand:SI 1 "register_operand" "do")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "d")
(match_operand:SI 2 "const_int_operand" "n")
(match_operand:SI 3 "const_int_operand" "n")))]
"TARGET_68020 && TARGET_BITFIELD
&& (INTVAL (operands[2]) == 8 || INTVAL (operands[2]) == 16)
- && INTVAL (operands[3]) % INTVAL (operands[2]) == 0
- && (GET_CODE (operands[1]) == REG
- || ! mode_dependent_address_p (XEXP (operands[1], 0),
- MEM_ADDR_SPACE (operands[1])))"
+ && INTVAL (operands[3]) % INTVAL (operands[2]) == 0"
{
- if (REG_P (operands[1]))
- {
- if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
- return "bfexts %1{%b3:%b2},%0";
- }
- else
- operands[1]
- = adjust_address (operands[1],
- INTVAL (operands[2]) == 8 ? QImode : HImode,
- INTVAL (operands[3]) / 8);
+ if (INTVAL (operands[2]) + INTVAL (operands[3]) != 32)
+ return "bfexts %1{%b3:%b2},%0";
if (INTVAL (operands[2]) == 8)
return "move%.b %1,%0\;extb%.l %0";
@@ -5771,7 +5731,7 @@
"TARGET_68020 && TARGET_BITFIELD"
"")
-(define_insn ""
+(define_insn "*extv_bfexts_mem"
[(set (match_operand:SI 0 "register_operand" "=d")
(sign_extract:SI (match_operand:QI 1 "memory_operand" "o")
(match_operand:SI 2 "nonmemory_operand" "dn")
@@ -5787,7 +5747,7 @@
"TARGET_68020 && TARGET_BITFIELD"
"")
-(define_insn ""
+(define_insn "*extzv_bfextu_mem"
[(set (match_operand:SI 0 "register_operand" "=d")
(zero_extract:SI (match_operand:QI 1 "memory_operand" "o")
(match_operand:SI 2 "nonmemory_operand" "dn")
@@ -5806,7 +5766,7 @@
return "bfextu %1{%b3:%b2},%0";
})
-(define_insn ""
+(define_insn "*insv_bfchg_mem"
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "nonmemory_operand" "dn")
(match_operand:SI 2 "nonmemory_operand" "dn"))
@@ -5821,7 +5781,7 @@
return "bfchg %0{%b2:%b1}";
})
-(define_insn ""
+(define_insn "*insv_bfclr_mem"
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "nonmemory_operand" "dn")
(match_operand:SI 2 "nonmemory_operand" "dn"))
@@ -5832,7 +5792,7 @@
return "bfclr %0{%b2:%b1}";
})
-(define_insn ""
+(define_insn "*insv_bfset_mem"
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "general_operand" "dn")
(match_operand:SI 2 "general_operand" "dn"))
@@ -5851,7 +5811,7 @@
"TARGET_68020 && TARGET_BITFIELD"
"")
-(define_insn ""
+(define_insn "*insv_bfins_mem"
[(set (zero_extract:SI (match_operand:QI 0 "memory_operand" "+o")
(match_operand:SI 1 "nonmemory_operand" "dn")
(match_operand:SI 2 "nonmemory_operand" "dn"))
@@ -5862,7 +5822,7 @@
;; Now recognize bit-field insns that operate on registers
;; (or at least were intended to do so).
-(define_insn ""
+(define_insn "*extv_bfexts_reg"
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(sign_extract:SI (match_operand:SI 1 "register_operand" "d")
(match_operand:SI 2 "const_int_operand" "n")
@@ -5870,7 +5830,7 @@
"TARGET_68020 && TARGET_BITFIELD"
"bfexts %1{%b3:%b2},%0")
-(define_insn ""
+(define_insn "*extv_bfextu_reg"
[(set (match_operand:SI 0 "nonimmediate_operand" "=d")
(zero_extract:SI (match_operand:SI 1 "register_operand" "d")
(match_operand:SI 2 "const_int_operand" "n")
@@ -5889,7 +5849,7 @@
return "bfextu %1{%b3:%b2},%0";
})
-(define_insn ""
+(define_insn "*insv_bfclr_reg"
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))
@@ -5900,7 +5860,7 @@
return "bfclr %0{%b2:%b1}";
})
-(define_insn ""
+(define_insn "*insv_bfset_reg"
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))
@@ -5911,7 +5871,7 @@
return "bfset %0{%b2:%b1}";
})
-(define_insn ""
+(define_insn "*insv_bfins_reg"
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+d")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))
@@ -5932,7 +5892,7 @@
;; Special patterns for optimizing bit-field instructions.
-(define_insn ""
+(define_insn "*tst_bftst_mem"
[(set (cc0)
(compare (zero_extract:SI (match_operand:QI 0 "memory_operand" "o")
(match_operand:SI 1 "const_int_operand" "n")
@@ -5958,7 +5918,7 @@
;;; now handle the register cases
-(define_insn ""
+(define_insn "*tst_bftst_reg"
[(set (cc0)
(compare (zero_extract:SI (match_operand:SI 0 "register_operand" "d")
(match_operand:SI 1 "const_int_operand" "n")