Commit b021b385 authored by Jeff Law, committed by Jeff Law

re PR middle-end/36550 (Wrong "may be used uninitialized" warning (conditional PHIs))

	PR tree-optimization/36550
	* tree-ssa-threadupdate.c (count_stmts_and_phis_in_block): New.
	(mark_threaded_blocks): Rewrite code to avoid block copying when
	optimizing for size.  Don't pessimize blocks which will be
	copied, but all the statements will be dead.

	PR tree-optimization/36550
	* gcc.dg/tree-ssa/pr36550.c: New test.

From-SVN: r255731
parent 5806e062
gcc/ChangeLog:

2017-12-15  Jeff Law  <law@redhat.com>

	PR tree-optimization/36550
	* tree-ssa-threadupdate.c (count_stmts_and_phis_in_block): New.
	(mark_threaded_blocks): Rewrite code to avoid block copying when
	optimizing for size.  Don't pessimize blocks which will be
	copied, but all the statements will be dead.

2017-12-15  Alexandre Oliva  <aoliva@redhat.com>

	PR tree-optimization/81165

gcc/testsuite/ChangeLog:

2017-12-15  Jeff Law  <law@redhat.com>

	PR tree-optimization/36550
	* gcc.dg/tree-ssa/pr36550.c: New test.

2017-12-15  Alexandre Oliva  <aoliva@redhat.com>

	PR tree-optimization/81165
gcc/testsuite/gcc.dg/tree-ssa/pr36550.c (new file):

/* { dg-do compile } */
/* { dg-options "-Os -Wuninitialized" } */
void bail(void) __attribute__((noreturn));
unsigned once(void);
int pr(char**argv)
{
  char *bug;
  unsigned check = once();
  if (check) {
    if (*argv)
      bug = *++argv;
  } else {
    bug = *argv++;
    if (!*argv)
      bail();
  }
  /* now bug is set except if (check && !*argv) */
  if (check) {
    if (!*argv)
      return 0;
  }
  /* if we ever get here then bug is set */
  return *bug != 'X';
}
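The tree-ssa-threadupdate.c hunks below implement the rule described in the ChangeLog: under -Os, a jump-thread path is now cancelled only if some copied block on it would still contain live statements after threading. As a reading aid, here is a self-contained C sketch of that decision; it is an editor's illustration, not GCC code, and the hypothetical enum, struct, and function names (cancel_path_for_size, path_block, etc.) merely mirror the concepts used in the patch (EDGE_COPY_SRC_JOINER_BLOCK, EDGE_COPY_SRC_BLOCK, count_stmts_and_phis_in_block, estimate_threading_killed_stmts).

#include <stdbool.h>

/* Illustrative stand-ins for the block classifications on a jump-thread
   path (names modeled on the patch, not actual GCC interfaces).  */
enum copy_kind { REDIRECTION_ONLY, JOINER_BLOCK_COPY, SRC_BLOCK_COPY };

struct path_block
{
  enum copy_kind kind;
  unsigned int stmts_and_phis;       /* like count_stmts_and_phis_in_block () */
  unsigned int killed_by_threading;  /* like estimate_threading_killed_stmts () */
};

/* Under -Os, cancel the whole path if any copied block on it would keep
   live statements after threading; otherwise allow the thread.  */
bool
cancel_path_for_size (const struct path_block *blocks, unsigned int len)
{
  for (unsigned int j = 1; j < len; j++)
    {
      if (blocks[j].kind == REDIRECTION_ONLY)
        continue;     /* Empty redirection blocks cost nothing to copy.  */
      if (blocks[j].kind == JOINER_BLOCK_COPY)
        return true;  /* Joiner blocks are always copied in full.  */
      if (blocks[j].kind == SRC_BLOCK_COPY
          && blocks[j].stmts_and_phis != blocks[j].killed_by_threading)
        return true;  /* The copy would retain some live statements.  */
    }
  return false;
}

In the actual patch the same test runs over the THREAD_PATH (e) entries, and the verdict either deletes the jump-thread path or marks the block in threaded_blocks, as the diff below shows.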
gcc/tree-ssa-threadupdate.c:

@@ -1737,6 +1737,31 @@ phi_args_equal_on_edges (edge e1, edge e2)
   return true;
 }
 
+/* Return the number of non-debug statements and non-virtual PHIs in a
+   block.  */
+
+static unsigned int
+count_stmts_and_phis_in_block (basic_block bb)
+{
+  unsigned int num_stmts = 0;
+
+  gphi_iterator gpi;
+  for (gpi = gsi_start_phis (bb); !gsi_end_p (gpi); gsi_next (&gpi))
+    if (!virtual_operand_p (PHI_RESULT (gpi.phi ())))
+      num_stmts++;
+
+  gimple_stmt_iterator gsi;
+  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+    {
+      gimple *stmt = gsi_stmt (gsi);
+      if (!is_gimple_debug (stmt))
+        num_stmts++;
+    }
+
+  return num_stmts;
+}
+
 /* Walk through the registered jump threads and convert them into a
    form convenient for this pass.
 
@@ -1856,28 +1881,51 @@ mark_threaded_blocks (bitmap threaded_blocks)
         }
     }
 
-  /* If optimizing for size, only thread through block if we don't have
-     to duplicate it or it's an otherwise empty redirection block.  */
+  /* When optimizing for size, prune all thread paths where statement
+     duplication is necessary.
+
+     We walk the jump thread path looking for copied blocks.  There's
+     two types of copied blocks.
+
+       EDGE_COPY_SRC_JOINER_BLOCK is always copied and thus we will
+       cancel the jump threading request when optimizing for size.
+
+       EDGE_COPY_SRC_BLOCK which is copied, but some of its statements
+       will be killed by threading.  If threading does not kill all of
+       its statements, then we should cancel the jump threading request
+       when optimizing for size.  */
   if (optimize_function_for_size_p (cfun))
     {
       EXECUTE_IF_SET_IN_BITMAP (tmp, 0, i, bi)
         {
-          bb = BASIC_BLOCK_FOR_FN (cfun, i);
-          if (EDGE_COUNT (bb->preds) > 1
-              && !redirection_block_p (bb))
-            {
-              FOR_EACH_EDGE (e, ei, bb->preds)
-                {
-                  if (e->aux)
-                    {
-                      vec<jump_thread_edge *> *path = THREAD_PATH (e);
-                      delete_jump_thread_path (path);
-                      e->aux = NULL;
-                    }
-                }
-            }
-          else
-            bitmap_set_bit (threaded_blocks, i);
+          FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, i)->preds)
+            if (e->aux)
+              {
+                vec<jump_thread_edge *> *path = THREAD_PATH (e);
+
+                unsigned int j;
+                for (j = 1; j < path->length (); j++)
+                  {
+                    bb = (*path)[j]->e->src;
+                    if (redirection_block_p (bb))
+                      ;
+                    else if ((*path)[j]->type == EDGE_COPY_SRC_JOINER_BLOCK
+                             || ((*path)[j]->type == EDGE_COPY_SRC_BLOCK
+                                 && (count_stmts_and_phis_in_block (bb)
+                                     != estimate_threading_killed_stmts (bb))))
+                      break;
+                  }
+
+                if (j != path->length ())
+                  {
+                    if (dump_file && (dump_flags & TDF_DETAILS))
+                      dump_jump_thread_path (dump_file, *path, 0);
+                    delete_jump_thread_path (path);
+                    e->aux = NULL;
+                  }
+                else
+                  bitmap_set_bit (threaded_blocks, i);
+              }
         }
     }
   else