author     rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-11-28 20:13:22 +0000
committer  rsandifo <rsandifo@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-11-28 20:13:22 +0000
commit     06bedae0473d8cbcc76725a6f7a7d052f58cd456 (patch)
tree       aaafc068a028daa1ab2ecddaa3afa63e83019d13
parent     5475e2d3733ee98a279d98e9fe15fcff58f1a215 (diff)
download   gcc-06bedae0473d8cbcc76725a6f7a7d052f58cd456.tar.gz
gcc/
        PR middle-end/55438
        * expmed.c (simple_mem_bitfield_p): New function, extracted from
        store_bit_field_1 and extract_bit_field_1.  Use GET_MODE_ALIGNMENT
        rather than bitsize when checking the alignment.
        (store_bit_field_1, extract_bit_field_1): Call it.
        * stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
        Don't limit ALIGN_.  Assume that memory is mapped in chunks of at
        least word size, regardless of BIGGEST_ALIGNMENT.
        (bit_field_mode_iterator::get_mode): Use GET_MODE_ALIGNMENT rather
        than unit when checking the alignment.
        (get_best_mode): Use GET_MODE_ALIGNMENT.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@193905 138bc75d-0d04-0410-961f-82ee72b054a4
-rw-r--r--  gcc/ChangeLog      14
-rw-r--r--  gcc/expmed.c       29
-rw-r--r--  gcc/stor-layout.c  20
3 files changed, 43 insertions, 20 deletions
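
The new predicate collects the memory-reference checks that store_bit_field_1 and extract_bit_field_1 used to open-code, and switches the alignment test from the field's bitsize to GET_MODE_ALIGNMENT. Below is a minimal standalone sketch of that condition, with the RTL queries replaced by plain parameters; simple_mem_bitfield_sketch, mode_bitsize, mode_align, mem_align and slow_unaligned are illustrative names for this sketch only, not GCC interfaces.

/* Illustrative sketch only: mirrors the shape of the check added as
   simple_mem_bitfield_p, with MEM_P/MEM_ALIGN/GET_MODE_* replaced by
   plain parameters so that it compiles on its own.  */
#include <stdbool.h>

#define BITS_PER_UNIT 8

static bool
simple_mem_bitfield_sketch (unsigned long bitsize, unsigned long bitnum,
                            unsigned long mode_bitsize, /* GET_MODE_BITSIZE (mode) */
                            unsigned long mode_align,   /* GET_MODE_ALIGNMENT (mode) */
                            unsigned long mem_align,    /* MEM_ALIGN (op0) */
                            bool slow_unaligned)        /* SLOW_UNALIGNED_ACCESS (...) */
{
  /* The field must start on a byte boundary and fill the mode exactly.
     Beyond that, either unaligned accesses are cheap, or the reference
     must satisfy the mode's natural alignment; the key change is that
     the test uses the mode's alignment rather than the field's bitsize.  */
  return (bitnum % BITS_PER_UNIT == 0
          && bitsize == mode_bitsize
          && (!slow_unaligned
              || (bitnum % mode_align == 0
                  && mem_align >= mode_align)));
}

int
main (void)
{
  /* A 32-bit field at bit 64 of a 32-bit-aligned MEM: accepted even when
     unaligned accesses are slow.  */
  return simple_mem_bitfield_sketch (32, 64, 32, 32, 32, true) ? 0 : 1;
}

The practical difference appears on targets where a mode's natural alignment is smaller than its bitsize: the old bitsize-based test could demand more alignment than the mode itself requires before allowing a plain load or store.
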
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 5dfe94af589..5350355484c 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,17 @@
+2012-11-28 Richard Sandiford <rdsandiford@googlemail.com>
+
+ PR middle-end/55438
+ * expmed.c (simple_mem_bitfield_p): New function, extracted from
+ store_bit_field_1 and extract_bit_field_1. Use GET_MODE_ALIGNMENT
+ rather than bitsize when checking the alignment.
+ (store_bit_field_1, extract_bit_field_1): Call it.
+ * stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
+ Don't limit ALIGN_. Assume that memory is mapped in chunks of at
+ least word size, regardless of BIGGEST_ALIGNMENT.
+ (bit_field_mode_iterator::get_mode): Use GET_MODE_ALIGNMENT rather
+ than unit when checking the alignment.
+ (get_best_mode): Use GET_MODE_ALIGNMENT.
+
2012-11-28 Vladimir Makarov <vmakarov@redhat.com>
PR rtl-optimization/55512
diff --git a/gcc/expmed.c b/gcc/expmed.c
index fc29ac41d72..d75f031a8c3 100644
--- a/gcc/expmed.c
+++ b/gcc/expmed.c
@@ -416,6 +416,21 @@ lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
else
return bitnum % BITS_PER_WORD == 0;
}
+
+/* Return true if OP is a memory and if a bitfield of size BITSIZE at
+   bit number BITNUM can be treated as a simple value of mode MODE. */
+
+static bool
+simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
+                       unsigned HOST_WIDE_INT bitnum, enum machine_mode mode)
+{
+  return (MEM_P (op0)
+          && bitnum % BITS_PER_UNIT == 0
+          && bitsize == GET_MODE_BITSIZE (mode)
+          && (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0))
+              || (bitnum % GET_MODE_ALIGNMENT (mode) == 0
+                  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
+}
/* Try to use instruction INSV to store VALUE into a field of OP0.
BITSIZE and BITNUM are as for store_bit_field. */
@@ -624,12 +639,7 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
/* If the target is memory, storing any naturally aligned field can be
done with a simple store. For targets that support fast unaligned
memory, any naturally sized, unit aligned field can be done directly. */
-  if (MEM_P (op0)
-      && bitnum % BITS_PER_UNIT == 0
-      && bitsize == GET_MODE_BITSIZE (fieldmode)
-      && (!SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
-          || (bitnum % bitsize == 0
-              && MEM_ALIGN (op0) % bitsize == 0)))
+  if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
{
op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
emit_move_insn (op0, value);
@@ -1455,12 +1465,7 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
/* Extraction of a full MODE1 value can be done with a load as long as
the field is on a byte boundary and is sufficiently aligned. */
-  if (MEM_P (op0)
-      && bitnum % BITS_PER_UNIT == 0
-      && bitsize == GET_MODE_BITSIZE (mode1)
-      && (!SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0))
-          || (bitnum % bitsize == 0
-              && MEM_ALIGN (op0) % bitsize == 0)))
+  if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
{
op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
diff --git a/gcc/stor-layout.c b/gcc/stor-layout.c
index d0c093f9364..3d97796da5c 100644
--- a/gcc/stor-layout.c
+++ b/gcc/stor-layout.c
@@ -2643,15 +2643,17 @@ bit_field_mode_iterator
unsigned int align, bool volatilep)
: mode_ (GET_CLASS_NARROWEST_MODE (MODE_INT)), bitsize_ (bitsize),
bitpos_ (bitpos), bitregion_start_ (bitregion_start),
- bitregion_end_ (bitregion_end), align_ (MIN (align, BIGGEST_ALIGNMENT)),
+ bitregion_end_ (bitregion_end), align_ (align),
volatilep_ (volatilep), count_ (0)
{
if (!bitregion_end_)
{
-      /* We can assume that any aligned chunk of ALIGN_ bits that overlaps
+      /* We can assume that any aligned chunk of UNITS bits that overlaps
         the bitfield is mapped and won't trap. */
-      bitregion_end_ = bitpos + bitsize + align_ - 1;
-      bitregion_end_ -= bitregion_end_ % align_ + 1;
+      unsigned HOST_WIDE_INT units = MIN (align, MAX (BIGGEST_ALIGNMENT,
+                                                      BITS_PER_WORD));
+      bitregion_end_ = bitpos + bitsize + units - 1;
+      bitregion_end_ -= bitregion_end_ % units + 1;
}
}
@@ -2694,7 +2696,8 @@ bit_field_mode_iterator::next_mode (enum machine_mode *out_mode)
break;
/* Stop if the mode requires too much alignment. */
-      if (unit > align_ && SLOW_UNALIGNED_ACCESS (mode_, align_))
+      if (GET_MODE_ALIGNMENT (mode_) > align_
+          && SLOW_UNALIGNED_ACCESS (mode_, align_))
break;
*out_mode = mode_;
@@ -2753,8 +2756,9 @@ get_best_mode (int bitsize, int bitpos,
enum machine_mode widest_mode = VOIDmode;
enum machine_mode mode;
while (iter.next_mode (&mode)
- /* ??? For historical reasons, reject modes that are wider than
- the alignment. This has both advantages and disadvantages.
+ /* ??? For historical reasons, reject modes that would normally
+ receive greater alignment, even if unaligned accesses are
+ acceptable. This has both advantages and disadvantages.
Removing this check means that something like:
struct s { unsigned int x; unsigned int y; };
@@ -2808,7 +2812,7 @@ get_best_mode (int bitsize, int bitpos,
causes store_bit_field to keep a 128-bit memory reference,
so that the final bitfield reference still has a MEM_EXPR
and MEM_OFFSET. */
- && GET_MODE_BITSIZE (mode) <= align
+ && GET_MODE_ALIGNMENT (mode) <= align
&& (largest_mode == VOIDmode
|| GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode)))
{
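
In the bit_field_mode_iterator constructor earlier in this patch, when no bit region is supplied, the end of the assumed region is rounded to the last bit of an aligned chunk of UNITS bits, where UNITS is now capped by MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD) rather than by BIGGEST_ALIGNMENT alone. A small self-contained demo of that rounding arithmetic follows; the concrete values (32-bit units, a 10-bit field at bit 17) are chosen purely for illustration and are not taken from any particular target.

/* Worked example of the bitregion_end_ computation from the patch,
   with concrete, illustrative numbers.  */
#include <stdio.h>

int
main (void)
{
  unsigned long units = 32;    /* stands in for MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD)) */
  unsigned long bitpos = 17;   /* bitfield starts at bit 17 */
  unsigned long bitsize = 10;  /* bitfield occupies bits 17..26 */

  /* Same arithmetic as the patch: round the end of the field up to the
     last bit of the aligned 32-bit chunk that contains it.  */
  unsigned long end = bitpos + bitsize + units - 1;
  end -= end % units + 1;

  printf ("bitregion_end_ = %lu\n", end);  /* prints 31 */
  return 0;
}

Because the cap on the chunk size is now MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD) instead of BIGGEST_ALIGNMENT, this reflects the ChangeLog's assumption that memory is mapped in chunks of at least word size.
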