diff options
author | edlinger <edlinger@138bc75d-0d04-0410-961f-82ee72b054a4> | 2015-04-01 16:09:48 +0000 |
---|---|---|
committer | edlinger <edlinger@138bc75d-0d04-0410-961f-82ee72b054a4> | 2015-04-01 16:09:48 +0000 |
commit | dda86e31615f18ecf94831401251fb37f1967735 (patch) | |
tree | 685a24f72b5aab58aa712a1d6f12a24c7ec9f71c /gcc/expmed.c | |
parent | abab7c700188c03510cb9776b15007432816355b (diff) | |
download | gcc-dda86e31615f18ecf94831401251fb37f1967735.tar.gz |
gcc:
2015-04-01 Bernd Edlinger <bernd.edlinger@hotmail.de>
* expmed.c (strict_volatile_bitfield_p): Check that the access will
not cross a MODESIZE boundary.
(store_bit_field, extract_bit_field): Added assertions in the
strict volatile bitfields code path.
testsuite:
2015-04-01 Bernd Edlinger <bernd.edlinger@hotmail.de>
* gcc.dg/pr23623.c: Added aligned attribute.
* gcc.dg/20141029-1.c: Likewise.
* gcc.dg/20150306-1.c: New test.
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@221809 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/expmed.c')
-rw-r--r-- | gcc/expmed.c | 29 |
1 file changed, 20 insertions, 9 deletions
diff --git a/gcc/expmed.c b/gcc/expmed.c index e0b2619034d..6327629d458 100644 --- a/gcc/expmed.c +++ b/gcc/expmed.c @@ -472,9 +472,13 @@ strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize, return false; /* Check for cases of unaligned fields that must be split. */ - if (bitnum % BITS_PER_UNIT + bitsize > modesize - || (STRICT_ALIGNMENT - && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize)) + if (bitnum % modesize + bitsize > modesize) + return false; + + /* The memory must be sufficiently aligned for a MODESIZE access. + This condition guarantees, that the memory access will not + touch anything after the end of the structure. */ + if (MEM_ALIGN (op0) < modesize) return false; /* Check for cases where the C++ memory model applies. */ @@ -973,13 +977,15 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode, bitregion_start, bitregion_end)) { - /* Storing any naturally aligned field can be done with a simple - store. For targets that support fast unaligned memory, any - naturally sized, unit aligned field can be done directly. */ + /* Storing of a full word can be done with a simple store. + We know here that the field can be accessed with one single + instruction. For targets that support unaligned memory, + an unaligned access may be necessary. */ if (bitsize == GET_MODE_BITSIZE (fieldmode)) { str_rtx = adjust_bitfield_address (str_rtx, fieldmode, bitnum / BITS_PER_UNIT); + gcc_assert (bitnum % BITS_PER_UNIT == 0); emit_move_insn (str_rtx, value); } else @@ -988,6 +994,7 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, str_rtx = narrow_bit_field_mem (str_rtx, fieldmode, bitsize, bitnum, &bitnum); + gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (fieldmode)); temp = copy_to_reg (str_rtx); if (!store_bit_field_1 (temp, bitsize, bitnum, 0, 0, fieldmode, value, true)) @@ -1790,17 +1797,21 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0)) { - /* Extraction of a full MODE1 value can be done with a load as long as - the field is on a byte boundary and is sufficiently aligned. */ - if (bitsize == GET_MODE_BITSIZE(mode1)) + /* Extraction of a full MODE1 value can be done with a simple load. + We know here that the field can be accessed with one single + instruction. For targets that support unaligned memory, + an unaligned access may be necessary. */ + if (bitsize == GET_MODE_BITSIZE (mode1)) { rtx result = adjust_bitfield_address (str_rtx, mode1, bitnum / BITS_PER_UNIT); + gcc_assert (bitnum % BITS_PER_UNIT == 0); return convert_extracted_bit_field (result, mode, tmode, unsignedp); } str_rtx = narrow_bit_field_mem (str_rtx, mode1, bitsize, bitnum, &bitnum); + gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (mode1)); str_rtx = copy_to_reg (str_rtx); } |