Commit 1b2253d4 by Richard Biener Committed by Richard Biener

re PR tree-optimization/56817 (ICE in hide_evolution_in_other_loops_than_loop)

2013-04-03  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/56817
	* tree-ssa-loop-ivcanon.c (tree_unroll_loops_completely):
	Split out ...
	(tree_unroll_loops_completely_1): ... new function to manually
	walk the loop tree, properly deferring outer loops of unrolled
	loops to later iterations.

	* g++.dg/torture/pr56817.C: New testcase.

From-SVN: r197411
parent 38000232
2013-04-03 Richard Biener <rguenther@suse.de>
PR tree-optimization/56817
* tree-ssa-loop-ivcanon.c (tree_unroll_loops_completely):
Split out ...
(tree_unroll_loops_completely_1): ... new function to manually
walk the loop tree, properly deferring outer loops of unrolled
loops to later iterations.
2013-04-03 Marc Glisse <marc.glisse@inria.fr>
* tree-vect-stmts.c (vectorizable_store): Accept BIT_FIELD_REF.
......
2013-04-03 Richard Biener <rguenther@suse.de>
PR tree-optimization/56817
* g++.dg/torture/pr56817.C: New testcase.
2013-04-03 Marc Glisse <marc.glisse@inria.fr>
* gcc.dg/vect/bb-slp-31.c: New file.
......
// { dg-do compile }
// { dg-options "--param max-unroll-times=32" }
struct A {};
A **q;
struct B
{
A **j;
B () { j = q; }
A *& operator[] (unsigned long x) { return j[x]; }
};
struct C
{
C (int r) : v (), s (r) {}
A *& operator () (int i, int j) { return v[i * s + j]; }
B v;
int s;
};
struct D
{
D ()
{
unsigned h = 2;
for (int i = 0; i < 1; ++i, h *= 2)
{
C w (h);
for (unsigned j = 0; j < h; ++j)
for (unsigned k = 0; k < h; ++k)
w (j, k) = new A;
}
}
};
void
foo ()
{
for (int i = 0; i < 3; i++)
A (), A (), D ();
}
...@@ -1097,36 +1097,35 @@ propagate_constants_for_unrolling (basic_block bb) ...@@ -1097,36 +1097,35 @@ propagate_constants_for_unrolling (basic_block bb)
} }
} }
/* Unroll LOOPS completely if they iterate just few times. Unless /* Process loops from innermost to outer, stopping at the innermost
MAY_INCREASE_SIZE is true, perform the unrolling only if the loop we unrolled. */
size of the code does not increase. */
unsigned int static bool
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer) tree_unroll_loops_completely_1 (bool may_increase_size, bool unroll_outer,
vec<loop_p, va_stack>& father_stack,
struct loop *loop)
{ {
vec<loop_p, va_stack> father_stack; struct loop *loop_father;
loop_iterator li; bool changed = false;
struct loop *loop; struct loop *inner;
bool changed;
enum unroll_level ul; enum unroll_level ul;
int iteration = 0;
bool irred_invalidated = false;
vec_stack_alloc (loop_p, father_stack, 16);
do
{
changed = false;
bitmap loop_closed_ssa_invalidated = NULL;
if (loops_state_satisfies_p (LOOP_CLOSED_SSA)) /* Process inner loops first. */
loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL); for (inner = loop->inner; inner != NULL; inner = inner->next)
changed |= tree_unroll_loops_completely_1 (may_increase_size,
unroll_outer, father_stack,
inner);
free_numbers_of_iterations_estimates (); /* If we changed an inner loop we cannot process outer loops in this
estimate_numbers_of_iterations (); iteration because SSA form is not up-to-date. Continue with
siblings of outer loops instead. */
if (changed)
return true;
FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST) /* Try to unroll this loop. */
{ loop_father = loop_outer (loop);
struct loop *loop_father = loop_outer (loop); if (!loop_father)
return false;
if (may_increase_size && optimize_loop_nest_for_speed_p (loop) if (may_increase_size && optimize_loop_nest_for_speed_p (loop)
/* Unroll outermost loops only if asked to do so or they do /* Unroll outermost loops only if asked to do so or they do
...@@ -1139,7 +1138,6 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer) ...@@ -1139,7 +1138,6 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
if (canonicalize_loop_induction_variables if (canonicalize_loop_induction_variables
(loop, false, ul, !flag_tree_loop_ivcanon)) (loop, false, ul, !flag_tree_loop_ivcanon))
{ {
changed = true;
/* If we'll continue unrolling, we need to propagate constants /* If we'll continue unrolling, we need to propagate constants
within the new basic blocks to fold away induction variable within the new basic blocks to fold away induction variable
computations; otherwise, the size might blow up before the computations; otherwise, the size might blow up before the
...@@ -1149,9 +1147,40 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer) ...@@ -1149,9 +1147,40 @@ tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
father_stack.safe_push (loop_father); father_stack.safe_push (loop_father);
loop_father->aux = loop_father; loop_father->aux = loop_father;
} }
return true;
} }
}
return false;
}
/* Unroll LOOPS completely if they iterate just few times. Unless
MAY_INCREASE_SIZE is true, perform the unrolling only if the
size of the code does not increase. */
unsigned int
tree_unroll_loops_completely (bool may_increase_size, bool unroll_outer)
{
vec<loop_p, va_stack> father_stack;
bool changed;
int iteration = 0;
bool irred_invalidated = false;
vec_stack_alloc (loop_p, father_stack, 16);
do
{
changed = false;
bitmap loop_closed_ssa_invalidated = NULL;
if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
loop_closed_ssa_invalidated = BITMAP_ALLOC (NULL);
free_numbers_of_iterations_estimates ();
estimate_numbers_of_iterations ();
changed = tree_unroll_loops_completely_1 (may_increase_size,
unroll_outer, father_stack,
current_loops->tree_root);
if (changed) if (changed)
{ {
struct loop **iter; struct loop **iter;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment