Commit 00efe3ea by Richard Sandiford Committed by Richard Sandiford

re PR middle-end/55438 (tmpdir-gcc.dg-struct-layout-1/t001 - t028, …

re PR middle-end/55438 (tmpdir-gcc.dg-struct-layout-1/t001 - t028,  gcc.c-torture/execute/991118-1.c, gcc.c-torture/execute/bf64-1.c, ICE)

gcc/
	PR middle-end/55438
	* expmed.c (simple_mem_bitfield_p): New function, extracted from
	store_bit_field_1 and extract_bit_field_1.  Use GET_MODE_ALIGNMENT
	rather than bitsize when checking the alignment.
	(store_bit_field_1, extract_bit_field_1): Call it.
	* stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
	Don't limit ALIGN_.  Assume that memory is mapped in chunks of at
	least word size, regardless of BIGGEST_ALIGNMENT.
	(bit_field_mode_iterator::get_mode): Use GET_MODE_ALIGNMENT rather
	than unit when checking the alignment.
	(get_best_mode): Use GET_MODE_ALIGNMENT.

From-SVN: r193905
parent 1099bb0a
2012-11-28 Richard Sandiford <rdsandiford@googlemail.com>
PR middle-end/55438
* expmed.c (simple_mem_bitfield_p): New function, extracted from
store_bit_field_1 and extract_bit_field_1. Use GET_MODE_ALIGNMENT
rather than bitsize when checking the alignment.
(store_bit_field_1, extract_bit_field_1): Call it.
* stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
Don't limit ALIGN_. Assume that memory is mapped in chunks of at
least word size, regardless of BIGGEST_ALIGNMENT.
(bit_field_mode_iterator::get_mode): Use GET_MODE_ALIGNMENT rather
than unit when checking the alignment.
(get_best_mode): Use GET_MODE_ALIGNMENT.
2012-11-28 Vladimir Makarov <vmakarov@redhat.com>
PR rtl-optimization/55512
...@@ -416,6 +416,21 @@ lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum, ...@@ -416,6 +416,21 @@ lowpart_bit_field_p (unsigned HOST_WIDE_INT bitnum,
else else
return bitnum % BITS_PER_WORD == 0; return bitnum % BITS_PER_WORD == 0;
} }
/* Return true if OP0 is a memory and if a bitfield of size BITSIZE at
   bit number BITNUM can be treated as a simple value of mode MODE.  */

static bool
simple_mem_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
		       unsigned HOST_WIDE_INT bitnum, enum machine_mode mode)
{
  /* Only memory references qualify.  */
  if (!MEM_P (op0))
    return false;

  /* The field must start on a byte boundary and span exactly MODE.  */
  if (bitnum % BITS_PER_UNIT != 0
      || bitsize != GET_MODE_BITSIZE (mode))
    return false;

  /* Fast-unaligned targets can access the value directly.  */
  if (!SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (op0)))
    return true;

  /* Otherwise require natural alignment of both the bit offset and
     the underlying memory.  */
  return (bitnum % GET_MODE_ALIGNMENT (mode) == 0
	  && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode));
}
/* Try to use instruction INSV to store VALUE into a field of OP0. /* Try to use instruction INSV to store VALUE into a field of OP0.
BITSIZE and BITNUM are as for store_bit_field. */ BITSIZE and BITNUM are as for store_bit_field. */
...@@ -624,12 +639,7 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, ...@@ -624,12 +639,7 @@ store_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
/* If the target is memory, storing any naturally aligned field can be /* If the target is memory, storing any naturally aligned field can be
done with a simple store. For targets that support fast unaligned done with a simple store. For targets that support fast unaligned
memory, any naturally sized, unit aligned field can be done directly. */ memory, any naturally sized, unit aligned field can be done directly. */
if (MEM_P (op0) if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode))
&& bitnum % BITS_PER_UNIT == 0
&& bitsize == GET_MODE_BITSIZE (fieldmode)
&& (!SLOW_UNALIGNED_ACCESS (fieldmode, MEM_ALIGN (op0))
|| (bitnum % bitsize == 0
&& MEM_ALIGN (op0) % bitsize == 0)))
{ {
op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT); op0 = adjust_bitfield_address (op0, fieldmode, bitnum / BITS_PER_UNIT);
emit_move_insn (op0, value); emit_move_insn (op0, value);
...@@ -1455,12 +1465,7 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize, ...@@ -1455,12 +1465,7 @@ extract_bit_field_1 (rtx str_rtx, unsigned HOST_WIDE_INT bitsize,
/* Extraction of a full MODE1 value can be done with a load as long as /* Extraction of a full MODE1 value can be done with a load as long as
the field is on a byte boundary and is sufficiently aligned. */ the field is on a byte boundary and is sufficiently aligned. */
if (MEM_P (op0) if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1))
&& bitnum % BITS_PER_UNIT == 0
&& bitsize == GET_MODE_BITSIZE (mode1)
&& (!SLOW_UNALIGNED_ACCESS (mode1, MEM_ALIGN (op0))
|| (bitnum % bitsize == 0
&& MEM_ALIGN (op0) % bitsize == 0)))
{ {
op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT); op0 = adjust_bitfield_address (op0, mode1, bitnum / BITS_PER_UNIT);
return convert_extracted_bit_field (op0, mode, tmode, unsignedp); return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
......
...@@ -2643,15 +2643,17 @@ bit_field_mode_iterator ...@@ -2643,15 +2643,17 @@ bit_field_mode_iterator
unsigned int align, bool volatilep) unsigned int align, bool volatilep)
: mode_ (GET_CLASS_NARROWEST_MODE (MODE_INT)), bitsize_ (bitsize), : mode_ (GET_CLASS_NARROWEST_MODE (MODE_INT)), bitsize_ (bitsize),
bitpos_ (bitpos), bitregion_start_ (bitregion_start), bitpos_ (bitpos), bitregion_start_ (bitregion_start),
bitregion_end_ (bitregion_end), align_ (MIN (align, BIGGEST_ALIGNMENT)), bitregion_end_ (bitregion_end), align_ (align),
volatilep_ (volatilep), count_ (0) volatilep_ (volatilep), count_ (0)
{ {
if (!bitregion_end_) if (!bitregion_end_)
{ {
/* We can assume that any aligned chunk of ALIGN_ bits that overlaps /* We can assume that any aligned chunk of UNITS bits that overlaps
the bitfield is mapped and won't trap. */ the bitfield is mapped and won't trap. */
bitregion_end_ = bitpos + bitsize + align_ - 1; unsigned HOST_WIDE_INT units = MIN (align, MAX (BIGGEST_ALIGNMENT,
bitregion_end_ -= bitregion_end_ % align_ + 1; BITS_PER_WORD));
bitregion_end_ = bitpos + bitsize + units - 1;
bitregion_end_ -= bitregion_end_ % units + 1;
} }
} }
...@@ -2694,7 +2696,8 @@ bit_field_mode_iterator::next_mode (enum machine_mode *out_mode) ...@@ -2694,7 +2696,8 @@ bit_field_mode_iterator::next_mode (enum machine_mode *out_mode)
break; break;
/* Stop if the mode requires too much alignment. */ /* Stop if the mode requires too much alignment. */
if (unit > align_ && SLOW_UNALIGNED_ACCESS (mode_, align_)) if (GET_MODE_ALIGNMENT (mode_) > align_
&& SLOW_UNALIGNED_ACCESS (mode_, align_))
break; break;
*out_mode = mode_; *out_mode = mode_;
...@@ -2753,8 +2756,9 @@ get_best_mode (int bitsize, int bitpos, ...@@ -2753,8 +2756,9 @@ get_best_mode (int bitsize, int bitpos,
enum machine_mode widest_mode = VOIDmode; enum machine_mode widest_mode = VOIDmode;
enum machine_mode mode; enum machine_mode mode;
while (iter.next_mode (&mode) while (iter.next_mode (&mode)
/* ??? For historical reasons, reject modes that are wider than /* ??? For historical reasons, reject modes that would normally
the alignment. This has both advantages and disadvantages. receive greater alignment, even if unaligned accesses are
acceptable. This has both advantages and disadvantages.
Removing this check means that something like: Removing this check means that something like:
struct s { unsigned int x; unsigned int y; }; struct s { unsigned int x; unsigned int y; };
...@@ -2808,7 +2812,7 @@ get_best_mode (int bitsize, int bitpos, ...@@ -2808,7 +2812,7 @@ get_best_mode (int bitsize, int bitpos,
causes store_bit_field to keep a 128-bit memory reference, causes store_bit_field to keep a 128-bit memory reference,
so that the final bitfield reference still has a MEM_EXPR so that the final bitfield reference still has a MEM_EXPR
and MEM_OFFSET. */ and MEM_OFFSET. */
&& GET_MODE_BITSIZE (mode) <= align && GET_MODE_ALIGNMENT (mode) <= align
&& (largest_mode == VOIDmode && (largest_mode == VOIDmode
|| GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode))) || GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode)))
{ {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment