Commit 4a110e34 by Richard Sandiford, committed by Richard Sandiford

Make tree-ssa-dse.c:normalize_ref return a bool

This patch moves the check for an overlapping byte from normalize_ref's
callers into normalize_ref itself, so that the function is easier to
convert to poly_ints later.  The change is not really worthwhile on its own.
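
In effect, the overlap test that each caller used to perform up front (by
hand or via ranges_overlap_p) now lives inside normalize_ref, which simply
fails when the two ranges are disjoint.  A minimal standalone sketch of the
new contract, with a hypothetical "struct range" standing in for GCC's
ao_ref and "long" standing in for HOST_WIDE_INT (offsets and sizes are in
bits, as in tree-ssa-dse.c):

#include <stdbool.h>

/* Hypothetical stand-in for ao_ref: a range of COPY->SIZE bits starting
   at bit COPY->OFFSET.  */
struct range { long offset; long size; };

/* Clamp COPY to the bits covered by REF, as the patched normalize_ref
   does.  Return false if the two ranges do not overlap at all.  */
static bool
normalize_range (struct range *copy, const struct range *ref)
{
  /* If COPY starts before REF, drop the leading bits; fail if COPY
     ends before REF even begins.  */
  if (copy->offset < ref->offset)
    {
      long diff = ref->offset - copy->offset;
      if (copy->size <= diff)
	return false;
      copy->size -= diff;
      copy->offset = ref->offset;
    }

  /* Fail if COPY starts at or beyond the end of REF.  */
  long diff = copy->offset - ref->offset;
  if (ref->size <= diff)
    return false;

  /* If COPY extends beyond REF, chop off its size appropriately.  */
  long limit = ref->size - diff;
  if (copy->size > limit)
    copy->size = limit;
  return true;
}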

2017-11-01  Richard Sandiford  <richard.sandiford@linaro.org>

gcc/
	* tree-ssa-dse.c (normalize_ref): Check whether the ranges overlap
	and return false if not.
	(clear_bytes_written_by, live_bytes_read): Update accordingly.

From-SVN: r254313
parent 7fc53ba4
gcc/ChangeLog
 2017-11-01  Richard Sandiford  <richard.sandiford@linaro.org>
 
+	* tree-ssa-dse.c (normalize_ref): Check whether the ranges overlap
+	and return false if not.
+	(clear_bytes_written_by, live_bytes_read): Update accordingly.
+
+2017-11-01  Richard Sandiford  <richard.sandiford@linaro.org>
+
 	* tree-ssa-alias.h (ranges_overlap_p): Return false if either
 	range is known to be empty.

gcc/tree-ssa-dse.c
@@ -137,13 +137,11 @@ valid_ao_ref_for_dse (ao_ref *ref)
 	  && (ref->size != -1));
 }
 
-/* Normalize COPY (an ao_ref) relative to REF.  Essentially when we are
-   done COPY will only refer bytes found within REF.
-
-   We have already verified that COPY intersects at least one
-   byte with REF.  */
-
-static void
+/* Try to normalize COPY (an ao_ref) relative to REF.  Essentially when we are
+   done COPY will only refer bytes found within REF.  Return true if COPY
+   is known to intersect at least one byte of REF.  */
+
+static bool
 normalize_ref (ao_ref *copy, ao_ref *ref)
 {
   /* If COPY starts before REF, then reset the beginning of
@@ -151,13 +149,22 @@ normalize_ref (ao_ref *copy, ao_ref *ref)
      number of bytes removed from COPY.  */
   if (copy->offset < ref->offset)
     {
-      copy->size -= (ref->offset - copy->offset);
+      HOST_WIDE_INT diff = ref->offset - copy->offset;
+      if (copy->size <= diff)
+	return false;
+      copy->size -= diff;
       copy->offset = ref->offset;
     }
 
+  HOST_WIDE_INT diff = copy->offset - ref->offset;
+  if (ref->size <= diff)
+    return false;
+
   /* If COPY extends beyond REF, chop off its size appropriately.  */
-  if (copy->offset + copy->size > ref->offset + ref->size)
-    copy->size -= (copy->offset + copy->size - (ref->offset + ref->size));
+  HOST_WIDE_INT limit = ref->size - diff;
+  if (copy->size > limit)
+    copy->size = limit;
+  return true;
 }
 
 /* Clear any bytes written by STMT from the bitmap LIVE_BYTES.  The base
@@ -179,14 +186,10 @@ clear_bytes_written_by (sbitmap live_bytes, gimple *stmt, ao_ref *ref)
   if (valid_ao_ref_for_dse (&write)
       && operand_equal_p (write.base, ref->base, OEP_ADDRESS_OF)
       && write.size == write.max_size
-      && ((write.offset < ref->offset
-	   && write.offset + write.size > ref->offset)
-	  || (write.offset >= ref->offset
-	      && write.offset < ref->offset + ref->size)))
+      && normalize_ref (&write, ref))
     {
-      normalize_ref (&write, ref);
-      bitmap_clear_range (live_bytes,
-			  (write.offset - ref->offset) / BITS_PER_UNIT,
+      HOST_WIDE_INT start = write.offset - ref->offset;
+      bitmap_clear_range (live_bytes, start / BITS_PER_UNIT,
 			  write.size / BITS_PER_UNIT);
     }
 }
@@ -480,21 +483,20 @@ live_bytes_read (ao_ref use_ref, ao_ref *ref, sbitmap live)
 {
   /* We have already verified that USE_REF and REF hit the same object.
      Now verify that there's actually an overlap between USE_REF and REF.  */
-  if (ranges_overlap_p (use_ref.offset, use_ref.size, ref->offset, ref->size))
+  if (normalize_ref (&use_ref, ref))
     {
-      normalize_ref (&use_ref, ref);
+      HOST_WIDE_INT start = use_ref.offset - ref->offset;
+      HOST_WIDE_INT size = use_ref.size;
 
       /* If USE_REF covers all of REF, then it will hit one or more
 	 live bytes.   This avoids useless iteration over the bitmap
 	 below.  */
-      if (use_ref.offset <= ref->offset
-	  && use_ref.offset + use_ref.size >= ref->offset + ref->size)
+      if (start == 0 && size == ref->size)
 	return true;
 
       /* Now check if any of the remaining bits in use_ref are set in LIVE.  */
-      unsigned int start = (use_ref.offset - ref->offset) / BITS_PER_UNIT;
-      unsigned int end = start + (use_ref.size / BITS_PER_UNIT) - 1;
-      return bitmap_bit_in_range_p (live, start, end);
+      return bitmap_bit_in_range_p (live, start / BITS_PER_UNIT,
+				    (start + size - 1) / BITS_PER_UNIT);
     }
   return true;
 }
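
As a quick sanity check of the clamping semantics, continuing the sketch
above (the same hypothetical normalize_range stands in for normalize_ref;
values are bit offsets and sizes):

#include <assert.h>

int
main (void)
{
  /* REF covers bits [64, 192); COPY covers bits [32, 128), so the
     normalized COPY should be the intersection [64, 128).  */
  struct range ref  = { 64, 128 };
  struct range copy = { 32, 96 };
  assert (normalize_range (&copy, &ref));
  assert (copy.offset == 64 && copy.size == 64);

  /* A range that ends before REF begins is rejected outright; this is
     the test the callers previously made before calling normalize_ref.  */
  struct range early = { 0, 32 };
  assert (!normalize_range (&early, &ref));
  return 0;
}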