Commit b6dd42a9 by Bernd Edlinger (committed by Bernd Edlinger)

expmed.c (strict_volatile_bitfield_p): Check that the access will not cross a MODESIZE boundary.

gcc:
2015-04-01  Bernd Edlinger  <bernd.edlinger@hotmail.de>

        * expmed.c (strict_volatile_bitfield_p): Check that the access will
        not cross a MODESIZE boundary.
        (store_bit_field, extract_bit_field): Added assertions in the
        strict volatile bitfields code path.

testsuite:
2015-04-01  Bernd Edlinger  <bernd.edlinger@hotmail.de>

        * gcc.dg/pr23623.c: Added aligned attribute.
        * gcc.dg/20141029-1.c: Likewise.
        * gcc.dg/20150306-1.c: New test.

From-SVN: r221809
parent 14e51ef2
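
For context, the case the new MODESIZE-boundary check targets is a volatile bitfield that cannot be reached with a single access in its declared field mode. A minimal sketch of such a layout (illustrative only; it mirrors the new gcc.dg/20150306-1.c test added below):

    /* With -fstrict-volatile-bitfields, the packed layout puts 'y' at bit
       offset 8 with a width of 31 bits, so a single SImode (32-bit) access
       at its natural position would cross a 32-bit boundary.  The patched
       strict_volatile_bitfield_p now rejects this case and lets the generic
       bitfield code split the access.  */
    struct s
    {
      char x : 8;
      unsigned int y : 31;
    } __attribute__((packed));

    volatile struct s global;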
gcc/expmed.c
@@ -472,9 +472,13 @@ strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
     return false;
 
   /* Check for cases of unaligned fields that must be split.  */
-  if (bitnum % BITS_PER_UNIT + bitsize > modesize
-      || (STRICT_ALIGNMENT
-          && bitnum % GET_MODE_ALIGNMENT (fieldmode) + bitsize > modesize))
+  if (bitnum % modesize + bitsize > modesize)
+    return false;
+
+  /* The memory must be sufficiently aligned for a MODESIZE access.
+     This condition guarantees, that the memory access will not
+     touch anything after the end of the structure.  */
+  if (MEM_ALIGN (op0) < modesize)
     return false;
 
   /* Check for cases where the C++ memory model applies.  */
@@ -973,13 +977,15 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
   if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, fieldmode,
                                   bitregion_start, bitregion_end))
     {
-      /* Storing any naturally aligned field can be done with a simple
-         store.  For targets that support fast unaligned memory, any
-         naturally sized, unit aligned field can be done directly.  */
+      /* Storing of a full word can be done with a simple store.
+         We know here that the field can be accessed with one single
+         instruction.  For targets that support unaligned memory,
+         an unaligned access may be necessary.  */
       if (bitsize == GET_MODE_BITSIZE (fieldmode))
         {
           str_rtx = adjust_bitfield_address (str_rtx, fieldmode,
                                              bitnum / BITS_PER_UNIT);
+          gcc_assert (bitnum % BITS_PER_UNIT == 0);
           emit_move_insn (str_rtx, value);
         }
       else
@@ -988,6 +994,7 @@ store_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
 
           str_rtx = narrow_bit_field_mem (str_rtx, fieldmode, bitsize, bitnum,
                                           &bitnum);
+          gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (fieldmode));
           temp = copy_to_reg (str_rtx);
           if (!store_bit_field_1 (temp, bitsize, bitnum, 0, 0,
                                   fieldmode, value, true))
@@ -1790,17 +1797,21 @@ extract_bit_field (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
 
   if (strict_volatile_bitfield_p (str_rtx, bitsize, bitnum, mode1, 0, 0))
     {
-      /* Extraction of a full MODE1 value can be done with a load as long as
-         the field is on a byte boundary and is sufficiently aligned.  */
-      if (bitsize == GET_MODE_BITSIZE(mode1))
+      /* Extraction of a full MODE1 value can be done with a simple load.
+         We know here that the field can be accessed with one single
+         instruction.  For targets that support unaligned memory,
+         an unaligned access may be necessary.  */
+      if (bitsize == GET_MODE_BITSIZE (mode1))
         {
           rtx result = adjust_bitfield_address (str_rtx, mode1,
                                                 bitnum / BITS_PER_UNIT);
+          gcc_assert (bitnum % BITS_PER_UNIT == 0);
           return convert_extracted_bit_field (result, mode, tmode, unsignedp);
         }
 
       str_rtx = narrow_bit_field_mem (str_rtx, mode1, bitsize, bitnum,
                                       &bitnum);
+      gcc_assert (bitnum + bitsize <= GET_MODE_BITSIZE (mode1));
       str_rtx = copy_to_reg (str_rtx);
     }
 
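A worked illustration of the reworked checks (a sketch, not part of the commit), taking GET_MODE_BITSIZE (SImode) == 32 as modesize:

    /* global.y in gcc.dg/20150306-1.c: bitnum = 8, bitsize = 31.
         bitnum % modesize + bitsize = 8 % 32 + 31 = 39 > 32
       -> strict_volatile_bitfield_p now returns false; the access is
          split by the generic bitfield code instead of being forced
          into one SImode access.

       bf2.b in gcc.dg/pr23623.c (with the added aligned(4)): bitnum = 0,
       bitsize = 1.
         0 % 32 + 1 = 1 <= 32   and   MEM_ALIGN (op0) = 32 >= modesize
       -> the single SImode access path is kept, and the new assertion
          bitnum + bitsize <= GET_MODE_BITSIZE (fieldmode) holds.  */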
gcc/testsuite/gcc.dg/20141029-1.c
@@ -14,7 +14,7 @@ struct system_periph {
       unsigned short :8;
     } BIT;
   } ALL;
-};
+} __attribute__((aligned(2)));
 
 void
 foo()
gcc/testsuite/gcc.dg/20150306-1.c (new file)
/* { dg-do run } */
/* { dg-require-effective-target size32plus } */
/* { dg-options "-fstrict-volatile-bitfields" } */

struct s
{
  char x : 8;
  unsigned int y : 31;
} __attribute__((packed));

volatile struct s global;

int
main ()
{
  global.y = 0x7FFFFFFF;
  if (global.y != 0x7FFFFFFF)
    __builtin_abort ();
  return 0;
}
gcc/testsuite/gcc.dg/pr23623.c
@@ -10,19 +10,19 @@ extern struct
 {
   unsigned int b : 1;
   unsigned int : 31;
-} bf1;
+} __attribute__((aligned(4))) bf1;
 
 extern volatile struct
 {
   unsigned int b : 1;
   unsigned int : 31;
-} bf2;
+} __attribute__((aligned(4))) bf2;
 
 extern struct
 {
   volatile unsigned int b : 1;
   volatile unsigned int : 31;
-} bf3;
+} __attribute__((aligned(4))) bf3;
 
 void writeb(void)
 {
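
The aligned attributes added to the existing tests appear to be there to satisfy the new MEM_ALIGN (op0) >= modesize requirement on targets whose default alignment for these structures is smaller than the field mode, so the tests keep exercising the single-access path. A hedged sketch of the idea (hypothetical declaration, not from the testsuite):

    /* aligned(4) guarantees a 32-bit MEM_ALIGN, matching the SImode field
       mode of the bitfields, so -fstrict-volatile-bitfields can still use
       one SImode access per read or write of 'b'.  */
    extern volatile struct
    {
      unsigned int b : 1;
      unsigned int : 31;
    } __attribute__((aligned(4))) example_bf;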