Commit 361b51c0 authored by Jeff Law, committed by Jeff Law

tree-ssa-threadupdate.c (struct redirection_data): New field intermediate_edge.


	* tree-ssa-threadupdate.c (struct redirection_data): New field
	intermediate_edge.
	(THREAD_TARGET2): Define.
	(redirection_data_eq): Also check that the intermediate edge is
	equal.
	(lookup_redirection_data): Drop useless argument.  Extract the
	outgoing_edge and intermediate edge from E.  Callers updated.
	(copy_phi_args, update_destination_phis): New functions.
	(fix_duplicate_block_edges): Likewise.
	(create_edge_and_update_destination_phis): Duplicate all the edges
	hung off e->aux.  Use copy_phi_args.
	(create_duplicates): Use fix_duplicate_block_edges.
	(fixup_template_block): Likewise.
	(redirect_edges): If necessary, redirect the joiner block's incoming
	edge to the duplicate of the joiner block.
	(thread_block): Don't muck up loops when threading through a joiner
	block.
	(thread_through_loop_header): Handle threading through a joiner
	block.
	(mark_threaded_blocks, register_jump_thread): Likewise.
	* tree-flow.h (register_jump_thread): Add new argument.  Callers
	updated.
	* tree-ssa-threadedge.c (phi_args_equal_on_edges): New function.
	(thread_across_edge): Handle threading through a joiner block.
	

	
	* gcc.dg/builtin-object-size-1.c: Update to handle changes from
	improved jump threading.
	* gcc.dg/builtin-object-size-2.c: Likewise.
	* gcc.dg/tree-ssa/20030728-1.c: Likewise.

From-SVN: r175114
parent 67a7c837
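The core of this patch is threading "through" a joiner block: a block where several paths merge and which then fans back out on a test that is redundant with the branch that created those paths. A jump thread is now recorded as a triple of edges rather than a pair (see register_jump_thread in the diff below), the joiner block is duplicated, and each incoming edge is redirected through its own copy. The C sketch below is purely illustrative -- the function and names are hypothetical, modeled on the builtin-object-size tests changed further down -- and is not part of the commit.

#include <stdlib.h>

/* Hypothetical sketch (names are mine, not from the commit).  The two
   builtin-object-size tests below have this shape: an if/else that
   produces P, followed by a merge ("joiner") block containing a test
   whose result differs depending on which allocation path was taken.
   The improved threader may duplicate that joiner block onto the two
   paths, so each copy of the test sees only one allocation.  */
void
example (int x)
{
  void *p;

  if (x)
    p = malloc (30);        /* first predecessor of the joiner */
  else
    p = calloc (2, 16);     /* second predecessor of the joiner */

  /* Joiner block: the object size of P is 30 on one incoming path and
     32 on the other, so once the block is duplicated each copy can fold
     to a constant and the redundant test can disappear.  */
  if (__builtin_object_size (p, 0) == 30)
    abort ();
}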
@@ -64,7 +64,11 @@ test1 (void *q, int x)
r = malloc (30);
else
r = calloc (2, 16);
- if (__builtin_object_size (r, 0) != 2 * 16)
+ /* We may duplicate this test onto the two exit paths. On one path
+ the size will be 32, the other it will be 30. If we don't duplicate
+ this test, then the size will be 32. */
+ if (__builtin_object_size (r, 0) != 2 * 16
+ && __builtin_object_size (r, 0) != 30)
abort ();
if (x < 20)
r = malloc (30);
......
@@ -60,7 +60,11 @@ test1 (void *q, int x)
r = malloc (30);
else
r = calloc (2, 16);
- if (__builtin_object_size (r, 1) != 2 * 16)
+ /* We may duplicate this test onto the two exit paths. On one path
+ the size will be 32, the other it will be 30. If we don't duplicate
+ this test, then the size will be 32. */
+ if (__builtin_object_size (r, 1) != 2 * 16
+ && __builtin_object_size (r, 1) != 30)
abort ();
if (x < 20)
r = malloc (30);
......
@@ -41,7 +41,9 @@ objects_must_conflict_p (t1, t2)
return foo (t2 ? get_alias_set (t2) : 0);
}
- /* There should be two assignments of variables to the value zero. */
- /* { dg-final { scan-rtl-dump-times "PART.. = 0" 2 "expand"} } */
+ /* There should be one assignment of variables to the value zero. There
+ used to be two assignments, but improvements in threading allowed the
+ second to be propagated into all its uses and eliminated. */
+ /* { dg-final { scan-rtl-dump-times "PART.. = 0" 1 "expand"} } */
/* { dg-final { cleanup-rtl-dump "expand" } } */
@@ -808,7 +808,7 @@ bool may_be_nonaddressable_p (tree expr);
/* In tree-ssa-threadupdate.c. */
extern bool thread_through_all_blocks (bool);
- extern void register_jump_thread (edge, edge);
+ extern void register_jump_thread (edge, edge, edge);
/* In gimplify.c */
tree force_gimple_operand_1 (tree, gimple_seq *, gimple_predicate, tree);
......
/* SSA Jump Threading
- Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
+ Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Jeff Law <law@redhat.com>
@@ -652,6 +652,27 @@ thread_around_empty_block (edge taken_edge,
return NULL;
}
/* E1 and E2 are edges into the same basic block. Return TRUE if the
PHI arguments associated with those edges are equal or there are no
PHI arguments, otherwise return FALSE. */
static bool
phi_args_equal_on_edges (edge e1, edge e2)
{
gimple_stmt_iterator gsi;
int indx1 = e1->dest_idx;
int indx2 = e2->dest_idx;
for (gsi = gsi_start_phis (e1->dest); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
if (!operand_equal_p (gimple_phi_arg_def (phi, indx1),
gimple_phi_arg_def (phi, indx2), 0))
return false;
}
return true;
}
/* We are exiting E->src, see if E->dest ends with a conditional
jump which has a known value when reached via E.
@@ -770,11 +791,73 @@ thread_across_edge (gimple dummy_cond,
}
remove_temporary_equivalences (stack);
- register_jump_thread (e, taken_edge);
+ register_jump_thread (e, taken_edge, NULL);
return;
}
}
/* We were unable to determine what out edge from E->dest is taken. However,
we might still be able to thread through successors of E->dest. This
often occurs when E->dest is a joiner block which then fans back out
based on redundant tests.
If so, we'll copy E->dest and redirect the appropriate predecessor to
the copy. Within the copy of E->dest, we'll thread one or more edges
to points deeper in the CFG.
This is a stopgap until we have a more structured approach to path
isolation. */
{
edge e2, e3, taken_edge;
edge_iterator ei;
bool found = false;
bitmap visited = BITMAP_ALLOC (NULL);
/* Look at each successor of E->dest to see if we can thread through it. */
FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
{
/* Avoid threading to any block we have already visited. */
bitmap_clear (visited);
bitmap_set_bit (visited, taken_edge->dest->index);
bitmap_set_bit (visited, e->dest->index);
/* Record whether or not we were able to thread through a successor
of E->dest. */
found = false;
e3 = taken_edge;
do
{
e2 = thread_around_empty_block (e3,
dummy_cond,
handle_dominating_asserts,
simplify,
visited);
if (e2)
{
e3 = e2;
found = true;
}
}
while (e2);
/* If we were able to thread through a successor of E->dest, then
record the jump threading opportunity. */
if (found)
{
edge tmp;
/* If there is already an edge from the block to be duplicated
(E2->src) to the final target (E3->dest), then make sure that
the PHI args associated with the edges E2 and E3 are the
same. */
tmp = find_edge (taken_edge->src, e3->dest);
if (!tmp || phi_args_equal_on_edges (tmp, e3))
register_jump_thread (e, taken_edge, e3);
}
}
BITMAP_FREE (visited);
}
fail:
remove_temporary_equivalences (stack);
}
/* Thread edges through blocks and update the control flow and SSA graphs.
- Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010 Free Software Foundation,
- Inc.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011
+ Free Software Foundation, Inc.
This file is part of GCC.
@@ -121,6 +121,8 @@ struct redirection_data
its single successor. */
edge outgoing_edge;
edge intermediate_edge;
/* A list of incoming edges which we want to thread to
OUTGOING_EDGE->dest. */
struct el *incoming_edges;
@@ -153,6 +155,7 @@ static VEC(edge,heap) *threaded_edges;
threading is attached to the AUX field for the incoming edge. Use these
macros to access the underlying structure attached to the AUX field. */
#define THREAD_TARGET(E) ((edge *)(E)->aux)[0]
#define THREAD_TARGET2(E) ((edge *)(E)->aux)[1]
/* Jump threading statistics. */
@@ -231,8 +234,10 @@ redirection_data_eq (const void *p1, const void *p2)
{
edge e1 = ((const struct redirection_data *)p1)->outgoing_edge;
edge e2 = ((const struct redirection_data *)p2)->outgoing_edge;
+ edge e3 = ((const struct redirection_data *)p1)->intermediate_edge;
+ edge e4 = ((const struct redirection_data *)p2)->intermediate_edge;
- return e1 == e2;
+ return e1 == e2 && e3 == e4;
}
/* Given an outgoing edge E lookup and return its entry in our hash table.
@@ -242,7 +247,7 @@ redirection_data_eq (const void *p1, const void *p2)
edges associated with E in the hash table. */
static struct redirection_data *
- lookup_redirection_data (edge e, edge incoming_edge, enum insert_option insert)
+ lookup_redirection_data (edge e, enum insert_option insert)
{
void **slot;
struct redirection_data *elt;
@@ -250,7 +255,9 @@ lookup_redirection_data (edge e, enum insert_option insert)
/* Build a hash table element so we can see if E is already
in the table. */
elt = XNEW (struct redirection_data);
- elt->outgoing_edge = e;
+ elt->intermediate_edge = THREAD_TARGET2 (e) ? THREAD_TARGET (e) : NULL;
+ elt->outgoing_edge = THREAD_TARGET2 (e) ? THREAD_TARGET2 (e)
+ : THREAD_TARGET (e);
elt->dup_block = NULL;
elt->incoming_edges = NULL;
@@ -270,7 +277,7 @@ lookup_redirection_data (edge e, enum insert_option insert)
{
*slot = (void *)elt;
elt->incoming_edges = XNEW (struct el);
- elt->incoming_edges->e = incoming_edge;
+ elt->incoming_edges->e = e;
elt->incoming_edges->next = NULL;
return elt;
}
@@ -290,7 +297,7 @@ lookup_redirection_data (edge e, enum insert_option insert)
{
struct el *el = XNEW (struct el);
el->next = elt->incoming_edges;
- el->e = incoming_edge;
+ el->e = e;
elt->incoming_edges = el;
}
@@ -298,6 +305,41 @@ lookup_redirection_data (edge e, enum insert_option insert)
}
}
/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E. */
static void
copy_phi_args (basic_block bb, edge src_e, edge tgt_e)
{
gimple_stmt_iterator gsi;
int src_indx = src_e->dest_idx;
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gimple phi = gsi_stmt (gsi);
source_location locus = gimple_phi_arg_location (phi, src_indx);
add_phi_arg (phi, gimple_phi_arg_def (phi, src_indx), tgt_e, locus);
}
}
/* We have recently made a copy of ORIG_BB, including its outgoing
edges. The copy is NEW_BB. Every PHI node in every direct successor of
ORIG_BB has a new argument associated with edge from NEW_BB to the
successor. Initialize the PHI argument so that it is equal to the PHI
argument associated with the edge from ORIG_BB to the successor. */
static void
update_destination_phis (basic_block orig_bb, basic_block new_bb)
{
edge_iterator ei;
edge e;
FOR_EACH_EDGE (e, ei, orig_bb->succs)
{
edge e2 = find_edge (new_bb, e->dest);
copy_phi_args (e->dest, e, e2);
}
}
/* Given a duplicate block and its single destination (both stored
in RD). Create an edge between the duplicate and its single
destination.
@@ -310,7 +352,6 @@ create_edge_and_update_destination_phis (struct redirection_data *rd,
basic_block bb)
{
edge e = make_edge (bb, rd->outgoing_edge->dest, EDGE_FALLTHRU);
- gimple_stmt_iterator gsi;
rescan_loop_exit (e, true, false);
e->probability = REG_BR_PROB_BASE;
@@ -318,8 +359,9 @@ create_edge_and_update_destination_phis (struct redirection_data *rd,
if (rd->outgoing_edge->aux)
{
- e->aux = (edge *) XNEWVEC (edge, 1);
+ e->aux = (edge *) XNEWVEC (edge, 2);
THREAD_TARGET(e) = THREAD_TARGET (rd->outgoing_edge);
THREAD_TARGET2(e) = THREAD_TARGET2 (rd->outgoing_edge);
}
else
{
@@ -330,17 +372,43 @@ create_edge_and_update_destination_phis (struct redirection_data *rd,
from the duplicate block, then we will need to add a new argument
to them. The argument should have the same value as the argument
associated with the outgoing edge stored in RD. */
- for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
+ copy_phi_args (e->dest, rd->outgoing_edge, e);
}
+ /* Wire up the outgoing edges from the duplicate block and
+ update any PHIs as needed. */
+ static void
+ fix_duplicate_block_edges (struct redirection_data *rd,
+ struct local_info *local_info)
+ {
+ /* If we were threading through a joiner block, then we want
+ to keep its control statement and redirect an outgoing edge.
+ Else we want to remove the control statement & edges, then create
+ a new outgoing edge. In both cases we may need to update PHIs. */
+ if (THREAD_TARGET2 (rd->incoming_edges->e) == rd->outgoing_edge)
+ {
- gimple phi = gsi_stmt (gsi);
- source_location locus;
- int indx = rd->outgoing_edge->dest_idx;
+ edge victim;
+ edge e2;
+ edge e = rd->incoming_edges->e;
+ /* This updates the PHIs at the destination of the duplicate
+ block. */
+ update_destination_phis (local_info->bb, rd->dup_block);
- locus = gimple_phi_arg_location (phi, indx);
- add_phi_arg (phi, gimple_phi_arg_def (phi, indx), e, locus);
+ /* Find the edge from the duplicate block to the block we're
+ threading through. That's the edge we want to redirect. */
+ victim = find_edge (rd->dup_block, THREAD_TARGET (e)->dest);
+ e2 = redirect_edge_and_branch (victim, THREAD_TARGET2 (e)->dest);
+ /* This updates the PHI at the target of the threaded edge. */
+ copy_phi_args (e2->dest, THREAD_TARGET2 (e), e2);
+ }
+ else
+ {
+ remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
+ create_edge_and_update_destination_phis (rd, rd->dup_block);
+ }
+ }
/* Hash table traversal callback routine to create duplicate blocks. */
static int
@@ -365,9 +433,8 @@ create_duplicates (void **slot, void *data)
create_block_for_threading (local_info->template_block, rd);
/* Go ahead and wire up outgoing edges and update PHIs for the duplicate
- block. */
- remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
- create_edge_and_update_destination_phis (rd, rd->dup_block);
+ block. */
+ fix_duplicate_block_edges (rd, local_info);
}
/* Keep walking the hash table. */
@@ -384,12 +451,16 @@ fixup_template_block (void **slot, void *data)
struct redirection_data *rd = (struct redirection_data *) *slot;
struct local_info *local_info = (struct local_info *)data;
- /* If this is the template block, then create its outgoing edges
- and halt the hash table traversal. */
+ /* If this is the template block, halt the traversal after updating
+ it appropriately.
+ If we were threading through a joiner block, then we want
+ to keep its control statement and redirect an outgoing edge.
+ Else we want to remove the control statement & edges, then create
+ a new outgoing edge. In both cases we may need to update PHIs. */
if (rd->dup_block && rd->dup_block == local_info->template_block)
{
- remove_ctrl_stmt_and_useless_edges (rd->dup_block, NULL);
- create_edge_and_update_destination_phis (rd, rd->dup_block);
+ fix_duplicate_block_edges (rd, local_info);
return 0;
}
@@ -419,8 +490,18 @@ redirect_edges (void **slot, void *data)
free (el);
thread_stats.num_threaded_edges++;
+ /* If we are threading through a joiner block, then we have to
+ find the edge we want to redirect and update some PHI nodes. */
+ if (THREAD_TARGET2 (e))
+ {
+ edge e2;
- if (rd->dup_block)
+ /* We want to redirect the incoming edge to the joiner block (E)
+ to instead reach the duplicate of the joiner block. */
+ e2 = redirect_edge_and_branch (e, rd->dup_block);
+ flush_pending_stmts (e2);
+ }
+ else if (rd->dup_block)
{
edge e2;
@@ -546,14 +627,18 @@ thread_block (basic_block bb, bool noloop_only)
if (e->aux == NULL)
continue;
- e2 = THREAD_TARGET (e);
+ if (THREAD_TARGET2 (e))
+ e2 = THREAD_TARGET2 (e);
+ else
+ e2 = THREAD_TARGET (e);
if (!e2
/* If NOLOOP_ONLY is true, we only allow threading through the
header of a loop to exit edges. */
|| (noloop_only
&& bb == bb->loop_father->header
- && (!loop_exit_edge_p (bb->loop_father, e2))))
+ && (!loop_exit_edge_p (bb->loop_father, e2
+ || THREAD_TARGET2 (e))))
continue;
if (e->dest == e2->src)
@@ -562,7 +647,7 @@
/* Insert the outgoing edge into the hash table if it is not
already in the hash table. */
- lookup_redirection_data (e2, e, INSERT);
+ lookup_redirection_data (e, INSERT);
}
/* We do not update dominance info. */
@@ -817,6 +902,8 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
if (latch->aux)
{
if (THREAD_TARGET2 (latch))
goto fail;
tgt_edge = THREAD_TARGET (latch);
tgt_bb = tgt_edge->dest;
}
@@ -840,6 +927,8 @@ thread_through_loop_header (struct loop *loop, bool may_peel_loop_headers)
goto fail;
}
if (THREAD_TARGET2 (e))
goto fail;
tgt_edge = THREAD_TARGET (e);
atgt_bb = tgt_edge->dest;
if (!tgt_bb)
@@ -967,13 +1056,14 @@ mark_threaded_blocks (bitmap threaded_blocks)
edge e;
edge_iterator ei;
- for (i = 0; i < VEC_length (edge, threaded_edges); i += 2)
+ for (i = 0; i < VEC_length (edge, threaded_edges); i += 3)
{
edge e = VEC_index (edge, threaded_edges, i);
- edge *x = (edge *) XNEWVEC (edge, 1);
+ edge *x = (edge *) XNEWVEC (edge, 2);
e->aux = x;
THREAD_TARGET (e) = VEC_index (edge, threaded_edges, i + 1);
THREAD_TARGET2 (e) = VEC_index (edge, threaded_edges, i + 2);
bitmap_set_bit (tmp, e->dest->index);
}
@@ -1085,7 +1175,7 @@ thread_through_all_blocks (bool may_peel_loop_headers)
after fixing the SSA graph. */
void
- register_jump_thread (edge e, edge e2)
+ register_jump_thread (edge e, edge e2, edge e3)
{
/* This can occur if we're jumping to a constant address or
something similar. Just get out now. */
@@ -1102,4 +1192,5 @@ register_jump_thread (edge e, edge e2)
VEC_safe_push (edge, heap, threaded_edges, e);
VEC_safe_push (edge, heap, threaded_edges, e2);
VEC_safe_push (edge, heap, threaded_edges, e3);
}
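For reference, the caller-side convention introduced by the new third argument can be summarized as follows. This is an illustrative fragment written against the patch, not code from it; it assumes GCC's internal edge type and the register_jump_thread declaration from tree-flow.h shown above, so it only makes sense inside the GCC tree, and the wrapper function and parameter names are hypothetical.

/* Illustrative only: the two ways a thread is now registered.  */
static void
record_thread_examples (edge e, edge taken_edge, edge final_edge)
{
  /* Ordinary jump thread: the conditional in E->dest is known when the
     block is entered via E, so only two edges matter and the new third
     argument is NULL.  */
  register_jump_thread (e, taken_edge, NULL);

  /* Thread through a joiner block: E enters the joiner, TAKEN_EDGE
     leaves it, and FINAL_EDGE is the edge reached after threading
     around the empty blocks beyond TAKEN_EDGE.  The updater copies the
     joiner, redirects E to the copy, and redirects the copy's
     counterpart of TAKEN_EDGE to FINAL_EDGE->dest.  */
  register_jump_thread (e, taken_edge, final_edge);
}

Recording the triple is what lets mark_threaded_blocks hang a two-element array off E->aux and lets lookup_redirection_data recover both intermediate_edge and outgoing_edge through THREAD_TARGET and THREAD_TARGET2 without any extra parameters.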