Commit 8c59e5e7 by Richard Sandiford, committed by Richard Sandiford

poly_int: C++ bitfield regions

This patch changes the C++ memory model's bitregion_start/end values from
constants to poly_ints.  Although it's unlikely that the size will need to
be polynomial in practice, the offset could become polynomial with future
language extensions.
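
As a hedged illustration (not taken verbatim from the patch), the typical
call-site change follows the strict_volatile_bitfield_p hunk below: once
bitregion_start/end are poly_uint64, ordinary comparison operators no
longer compile, so each test must say whether a relation might hold
(maybe_ne, maybe_lt, maybe_gt) or must hold (known_eq) for all runtime
values of the polynomial coefficients:

    /* Before: bitregion_end is an unsigned HOST_WIDE_INT.  */
    if (bitregion_end != 0
        && bitnum - bitnum % modesize + modesize - 1 > bitregion_end)
      return false;

    /* After: bitregion_end is a poly_uint64, so the test asks whether
       the bound might be exceeded for some runtime vector length.  */
    if (maybe_ne (bitregion_end, 0U)
        && maybe_gt (bitnum - bitnum % modesize + modesize - 1,
                     bitregion_end))
      return false;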

2017-12-20  Richard Sandiford  <richard.sandiford@linaro.org>
	    Alan Hayward  <alan.hayward@arm.com>
	    David Sherwood  <david.sherwood@arm.com>

gcc/
	* expmed.h (store_bit_field): Change bitregion_start and
	bitregion_end from unsigned HOST_WIDE_INT to poly_uint64.
	* expmed.c (adjust_bit_field_mem_for_reg, strict_volatile_bitfield_p)
	(store_bit_field_1, store_integral_bit_field, store_bit_field)
	(store_fixed_bit_field, store_split_bit_field): Likewise.
	* expr.c (store_constructor_field, store_field): Likewise.
	(optimize_bitfield_assignment_op): Likewise.  Make the same change
	to bitsize and bitpos.
	* machmode.h (bit_field_mode_iterator): Change m_bitregion_start
	and m_bitregion_end from HOST_WIDE_INT to poly_int64.  Make the
	same change in the constructor arguments.
	(get_best_mode): Change bitregion_start and bitregion_end from
	unsigned HOST_WIDE_INT to poly_uint64.
	* stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
	Change bitregion_start and bitregion_end from HOST_WIDE_INT to
	poly_int64.
	(bit_field_mode_iterator::next_mode): Update for new types
	of m_bitregion_start and m_bitregion_end.
	(get_best_mode): Change bitregion_start and bitregion_end from
	unsigned HOST_WIDE_INT to poly_uint64.
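
For illustration only, not part of the commit: given the machmode.h change
above, a caller such as get_best_mode still walks candidate modes the same
way; only the types of the region bounds change.  A minimal sketch,
assuming GCC's internal APIs:

    /* Walk the integer modes that can access the bitfield without
       touching bits outside the (now polynomial) bit region.  */
    bit_field_mode_iterator iter (bitsize, bitpos,
                                  bitregion_start, bitregion_end,
                                  align, volatilep);
    scalar_int_mode mode;
    while (iter.next_mode (&mode))
      /* ...compare GET_MODE_BITSIZE (mode) against the caller's limit
         and remember the best candidate...  */;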

Co-Authored-By: Alan Hayward <alan.hayward@arm.com>
Co-Authored-By: David Sherwood <david.sherwood@arm.com>

From-SVN: r255879
gcc/ChangeLog
@@ -2,6 +2,31 @@
 	    Alan Hayward  <alan.hayward@arm.com>
 	    David Sherwood  <david.sherwood@arm.com>
 
+	* expmed.h (store_bit_field): Change bitregion_start and
+	bitregion_end from unsigned HOST_WIDE_INT to poly_uint64.
+	* expmed.c (adjust_bit_field_mem_for_reg, strict_volatile_bitfield_p)
+	(store_bit_field_1, store_integral_bit_field, store_bit_field)
+	(store_fixed_bit_field, store_split_bit_field): Likewise.
+	* expr.c (store_constructor_field, store_field): Likewise.
+	(optimize_bitfield_assignment_op): Likewise.  Make the same change
+	to bitsize and bitpos.
+	* machmode.h (bit_field_mode_iterator): Change m_bitregion_start
+	and m_bitregion_end from HOST_WIDE_INT to poly_int64.  Make the
+	same change in the constructor arguments.
+	(get_best_mode): Change bitregion_start and bitregion_end from
+	unsigned HOST_WIDE_INT to poly_uint64.
+	* stor-layout.c (bit_field_mode_iterator::bit_field_mode_iterator):
+	Change bitregion_start and bitregion_end from HOST_WIDE_INT to
+	poly_int64.
+	(bit_field_mode_iterator::next_mode): Update for new types
+	of m_bitregion_start and m_bitregion_end.
+	(get_best_mode): Change bitregion_start and bitregion_end from
+	unsigned HOST_WIDE_INT to poly_uint64.
+
+2017-12-20  Richard Sandiford  <richard.sandiford@linaro.org>
+	    Alan Hayward  <alan.hayward@arm.com>
+	    David Sherwood  <david.sherwood@arm.com>
+
 	* rtl.h (simplify_gen_subreg): Add a temporary overload that
 	accepts poly_uint64 offsets.
 	* expmed.h (extract_bit_field): Take bitsize and bitnum as
gcc/expmed.c
@@ -50,14 +50,12 @@ struct target_expmed *this_target_expmed = &default_target_expmed;
 static bool store_integral_bit_field (rtx, opt_scalar_int_mode,
                                       unsigned HOST_WIDE_INT,
                                       unsigned HOST_WIDE_INT,
-                                      unsigned HOST_WIDE_INT,
-                                      unsigned HOST_WIDE_INT,
+                                      poly_uint64, poly_uint64,
                                       machine_mode, rtx, bool, bool);
 static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
-                                   unsigned HOST_WIDE_INT,
-                                   unsigned HOST_WIDE_INT,
+                                   poly_uint64, poly_uint64,
                                    rtx, scalar_int_mode, bool);
 static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
                                      unsigned HOST_WIDE_INT,
@@ -66,8 +64,7 @@ static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
 static void store_split_bit_field (rtx, opt_scalar_int_mode,
                                    unsigned HOST_WIDE_INT,
                                    unsigned HOST_WIDE_INT,
-                                   unsigned HOST_WIDE_INT,
-                                   unsigned HOST_WIDE_INT,
+                                   poly_uint64, poly_uint64,
                                    rtx, scalar_int_mode, bool);
 static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode,
                                        unsigned HOST_WIDE_INT,
@@ -472,8 +469,8 @@ static rtx
 adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
                               rtx op0, HOST_WIDE_INT bitsize,
                               HOST_WIDE_INT bitnum,
-                              unsigned HOST_WIDE_INT bitregion_start,
-                              unsigned HOST_WIDE_INT bitregion_end,
+                              poly_uint64 bitregion_start,
+                              poly_uint64 bitregion_end,
                               machine_mode fieldmode,
                               unsigned HOST_WIDE_INT *new_bitnum)
 {
@@ -537,8 +534,8 @@ static bool
 strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
                             unsigned HOST_WIDE_INT bitnum,
                             scalar_int_mode fieldmode,
-                            unsigned HOST_WIDE_INT bitregion_start,
-                            unsigned HOST_WIDE_INT bitregion_end)
+                            poly_uint64 bitregion_start,
+                            poly_uint64 bitregion_end)
 {
   unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
 
@@ -565,9 +562,10 @@ strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
     return false;
 
   /* Check for cases where the C++ memory model applies.  */
-  if (bitregion_end != 0
-      && (bitnum - bitnum % modesize < bitregion_start
-          || bitnum - bitnum % modesize + modesize - 1 > bitregion_end))
+  if (maybe_ne (bitregion_end, 0U)
+      && (maybe_lt (bitnum - bitnum % modesize, bitregion_start)
+          || maybe_gt (bitnum - bitnum % modesize + modesize - 1,
+                       bitregion_end)))
     return false;
 
   return true;
@@ -731,8 +729,7 @@ store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
 
 static bool
 store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
-                   unsigned HOST_WIDE_INT bitregion_start,
-                   unsigned HOST_WIDE_INT bitregion_end,
+                   poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                    machine_mode fieldmode,
                    rtx value, bool reverse, bool fallback_p)
 {
@@ -859,8 +856,8 @@ static bool
 store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                           unsigned HOST_WIDE_INT bitsize,
                           unsigned HOST_WIDE_INT bitnum,
-                          unsigned HOST_WIDE_INT bitregion_start,
-                          unsigned HOST_WIDE_INT bitregion_end,
+                          poly_uint64 bitregion_start,
+                          poly_uint64 bitregion_end,
                           machine_mode fieldmode,
                           rtx value, bool reverse, bool fallback_p)
 {
@@ -1086,8 +1083,7 @@ store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
 
 void
 store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
-                 unsigned HOST_WIDE_INT bitregion_start,
-                 unsigned HOST_WIDE_INT bitregion_end,
+                 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                  machine_mode fieldmode,
                  rtx value, bool reverse)
 {
@@ -1134,15 +1130,12 @@ store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
   /* Under the C++0x memory model, we must not touch bits outside the
      bit region.  Adjust the address to start at the beginning of the
      bit region.  */
-  if (MEM_P (str_rtx) && bitregion_start > 0)
+  if (MEM_P (str_rtx) && maybe_ne (bitregion_start, 0U))
     {
       scalar_int_mode best_mode;
       machine_mode addr_mode = VOIDmode;
-      HOST_WIDE_INT offset;
-
-      gcc_assert ((bitregion_start % BITS_PER_UNIT) == 0);
-      offset = bitregion_start / BITS_PER_UNIT;
+      poly_uint64 offset = exact_div (bitregion_start, BITS_PER_UNIT);
       bitnum -= bitregion_start;
       poly_int64 size = bits_to_bytes_round_up (bitnum + bitsize);
       bitregion_end -= bitregion_start;
 
@@ -1175,8 +1168,7 @@ static void
 store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                        unsigned HOST_WIDE_INT bitsize,
                        unsigned HOST_WIDE_INT bitnum,
-                       unsigned HOST_WIDE_INT bitregion_start,
-                       unsigned HOST_WIDE_INT bitregion_end,
+                       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                        rtx value, scalar_int_mode value_mode, bool reverse)
 {
   /* There is a case not handled here:
@@ -1331,8 +1323,7 @@ static void
 store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
                        unsigned HOST_WIDE_INT bitsize,
                        unsigned HOST_WIDE_INT bitpos,
-                       unsigned HOST_WIDE_INT bitregion_start,
-                       unsigned HOST_WIDE_INT bitregion_end,
+                       poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                        rtx value, scalar_int_mode value_mode, bool reverse)
 {
   unsigned int unit, total_bits, bitsdone = 0;
@@ -1380,9 +1371,9 @@ store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
          UNIT close to the end of the region as needed.  If op0 is a REG
          or SUBREG of REG, don't do this, as there can't be data races
          on a register and we can expand shorter code in some cases.  */
-      if (bitregion_end
+      if (maybe_ne (bitregion_end, 0U)
          && unit > BITS_PER_UNIT
-         && bitpos + bitsdone - thispos + unit > bitregion_end + 1
+         && maybe_gt (bitpos + bitsdone - thispos + unit, bitregion_end + 1)
          && !REG_P (op0)
          && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
         {
gcc/expmed.h
@@ -719,8 +719,7 @@ extern rtx expand_divmod (int, enum tree_code, machine_mode, rtx, rtx,
 #endif
 
 extern void store_bit_field (rtx, poly_uint64, poly_uint64,
-                             unsigned HOST_WIDE_INT,
-                             unsigned HOST_WIDE_INT,
+                             poly_uint64, poly_uint64,
                              machine_mode, rtx, bool);
 extern rtx extract_bit_field (rtx, poly_uint64, poly_uint64, int, rtx,
                               machine_mode, machine_mode, bool, rtx *);
gcc/expr.c
@@ -79,13 +79,9 @@ static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
 static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
 static rtx_insn *compress_float_constant (rtx, rtx);
 static rtx get_subtarget (rtx);
-static void store_constructor_field (rtx, unsigned HOST_WIDE_INT,
-                                     HOST_WIDE_INT, unsigned HOST_WIDE_INT,
-                                     unsigned HOST_WIDE_INT, machine_mode,
-                                     tree, int, alias_set_type, bool);
 static void store_constructor (tree, rtx, int, HOST_WIDE_INT, bool);
 static rtx store_field (rtx, HOST_WIDE_INT, HOST_WIDE_INT,
-                        unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT,
+                        poly_uint64, poly_uint64,
                         machine_mode, tree, alias_set_type, bool, bool);
 
 static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);
@@ -4616,10 +4612,10 @@ get_subtarget (rtx x)
    and there's nothing else to do.  */
 
 static bool
-optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize,
-                                 unsigned HOST_WIDE_INT bitpos,
-                                 unsigned HOST_WIDE_INT bitregion_start,
-                                 unsigned HOST_WIDE_INT bitregion_end,
+optimize_bitfield_assignment_op (poly_uint64 pbitsize,
+                                 poly_uint64 pbitpos,
+                                 poly_uint64 pbitregion_start,
+                                 poly_uint64 pbitregion_end,
                                  machine_mode mode1, rtx str_rtx,
                                  tree to, tree src, bool reverse)
 {
@@ -4631,7 +4627,12 @@ optimize_bitfield_assignment_op (unsigned HOST_WIDE_INT bitsize,
   gimple *srcstmt;
   enum tree_code code;
 
+  unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
   if (mode1 != VOIDmode
+      || !pbitsize.is_constant (&bitsize)
+      || !pbitpos.is_constant (&bitpos)
+      || !pbitregion_start.is_constant (&bitregion_start)
+      || !pbitregion_end.is_constant (&bitregion_end)
       || bitsize >= BITS_PER_WORD
       || str_bitsize > BITS_PER_WORD
       || TREE_SIDE_EFFECTS (to)
@@ -6104,8 +6105,8 @@ all_zeros_p (const_tree exp)
 static void
 store_constructor_field (rtx target, unsigned HOST_WIDE_INT bitsize,
                          HOST_WIDE_INT bitpos,
-                         unsigned HOST_WIDE_INT bitregion_start,
-                         unsigned HOST_WIDE_INT bitregion_end,
+                         poly_uint64 bitregion_start,
+                         poly_uint64 bitregion_end,
                          machine_mode mode,
                          tree exp, int cleared,
                          alias_set_type alias_set, bool reverse)
@@ -6784,8 +6785,7 @@ store_constructor (tree exp, rtx target, int cleared, HOST_WIDE_INT size,
 
 static rtx
 store_field (rtx target, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
-             unsigned HOST_WIDE_INT bitregion_start,
-             unsigned HOST_WIDE_INT bitregion_end,
+             poly_uint64 bitregion_start, poly_uint64 bitregion_end,
              machine_mode mode, tree exp,
             alias_set_type alias_set, bool nontemporal, bool reverse)
 {
gcc/machmode.h
@@ -760,7 +760,7 @@ class bit_field_mode_iterator
 {
 public:
   bit_field_mode_iterator (HOST_WIDE_INT, HOST_WIDE_INT,
-                           HOST_WIDE_INT, HOST_WIDE_INT,
+                           poly_int64, poly_int64,
                            unsigned int, bool);
   bool next_mode (scalar_int_mode *);
   bool prefer_smaller_modes ();
@@ -771,8 +771,8 @@ private:
      for invalid input such as gcc.dg/pr48335-8.c.  */
   HOST_WIDE_INT m_bitsize;
   HOST_WIDE_INT m_bitpos;
-  HOST_WIDE_INT m_bitregion_start;
-  HOST_WIDE_INT m_bitregion_end;
+  poly_int64 m_bitregion_start;
+  poly_int64 m_bitregion_end;
   unsigned int m_align;
   bool m_volatilep;
   int m_count;
@@ -780,8 +780,7 @@ private:
 
 /* Find the best mode to use to access a bit field.  */
 
-extern bool get_best_mode (int, int, unsigned HOST_WIDE_INT,
-                           unsigned HOST_WIDE_INT, unsigned int,
+extern bool get_best_mode (int, int, poly_uint64, poly_uint64, unsigned int,
                            unsigned HOST_WIDE_INT, bool, scalar_int_mode *);
 
 /* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT.  */
gcc/stor-layout.c
@@ -2751,15 +2751,15 @@ fixup_unsigned_type (tree type)
 
 bit_field_mode_iterator
 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
-                           HOST_WIDE_INT bitregion_start,
-                           HOST_WIDE_INT bitregion_end,
+                           poly_int64 bitregion_start,
+                           poly_int64 bitregion_end,
                            unsigned int align, bool volatilep)
 : m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
   m_bitpos (bitpos), m_bitregion_start (bitregion_start),
   m_bitregion_end (bitregion_end), m_align (align),
   m_volatilep (volatilep), m_count (0)
 {
-  if (!m_bitregion_end)
+  if (known_eq (m_bitregion_end, 0))
     {
       /* We can assume that any aligned chunk of ALIGN bits that overlaps
          the bitfield is mapped and won't trap, provided that ALIGN isn't
@@ -2769,8 +2769,8 @@ bit_field_mode_iterator
         = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
       if (bitsize <= 0)
         bitsize = 1;
-      m_bitregion_end = bitpos + bitsize + units - 1;
-      m_bitregion_end -= m_bitregion_end % units + 1;
+      HOST_WIDE_INT end = bitpos + bitsize + units - 1;
+      m_bitregion_end = end - end % units - 1;
     }
 }
@@ -2807,10 +2807,11 @@ bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
 
       /* Stop if the mode goes outside the bitregion.  */
       HOST_WIDE_INT start = m_bitpos - substart;
-      if (m_bitregion_start && start < m_bitregion_start)
+      if (maybe_ne (m_bitregion_start, 0)
+          && maybe_lt (start, m_bitregion_start))
         break;
       HOST_WIDE_INT end = start + unit;
-      if (end > m_bitregion_end + 1)
+      if (maybe_gt (end, m_bitregion_end + 1))
         break;
 
       /* Stop if the mode requires too much alignment.  */
@@ -2866,8 +2867,7 @@ bit_field_mode_iterator::prefer_smaller_modes ()
 
 bool
 get_best_mode (int bitsize, int bitpos,
-               unsigned HOST_WIDE_INT bitregion_start,
-               unsigned HOST_WIDE_INT bitregion_end,
+               poly_uint64 bitregion_start, poly_uint64 bitregion_end,
                unsigned int align,
                unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
                scalar_int_mode *best_mode)