Commit da68b8f4 by Sebastian Pop Committed by Sebastian Pop

remove dead code of commutative_reductions

This code is not used anymore after we removed the previous loop optimizer (not
based on the ISL scheduler).  We will add back the detection of commutative
reductions after we improve the code generation of scalar dependences (by not
going out of SSA for scalar dependences just to expose them to the data
dependence graph).

Patch passed bootstrap and check on x86_64-linux with ISL-0.15.

2015-09-29  Sebastian Pop  <s.pop@samsung.com>
            Aditya Kumar  <aditya.k7@samsung.com>

            * graphite-sese-to-poly.c (gsi_for_phi_node): Remove.
            (nb_data_writes_in_bb): Remove.
            (split_pbb): Remove.
            (split_reduction_stmt): Remove.
            (is_reduction_operation_p): Remove.
            (phi_contains_arg): Remove.
            (follow_ssa_with_commutative_ops): Remove.
            (detect_commutative_reduction_arg): Remove.
            (detect_commutative_reduction_assign): Remove.
            (follow_inital_value_to_phi): Remove.
            (edge_initial_value_for_loop_phi): Remove.
            (initial_value_for_loop_phi): Remove.
            (used_outside_reduction): Remove.
            (detect_commutative_reduction): Remove.
            (translate_scalar_reduction_to_array_for_stmt): Remove.
            (remove_phi): Remove.
            (dr_indices_valid_in_loop): Remove.
            (close_phi_written_to_memory): Remove.
            (translate_scalar_reduction_to_array): Remove.
            (rewrite_commutative_reductions_out_of_ssa_close_phi): Remove.
            (rewrite_commutative_reductions_out_of_ssa_loop): Remove.
            (rewrite_commutative_reductions_out_of_ssa): Remove.
            (build_poly_scop): Remove call to rewrite_commutative_reductions_out_of_ssa.

Co-Authored-By: Aditya Kumar <aditya.k7@samsung.com>

From-SVN: r228255
parent a53378d2
2015-09-29 Sebastian Pop <s.pop@samsung.com>
Aditya Kumar <aditya.k7@samsung.com>
* graphite-sese-to-poly.c (gsi_for_phi_node): Remove.
(nb_data_writes_in_bb): Remove.
(split_pbb): Remove.
(split_reduction_stmt): Remove.
(is_reduction_operation_p): Remove.
(phi_contains_arg): Remove.
(follow_ssa_with_commutative_ops): Remove.
(detect_commutative_reduction_arg): Remove.
(detect_commutative_reduction_assign): Remove.
(follow_inital_value_to_phi): Remove.
(edge_initial_value_for_loop_phi): Remove.
(initial_value_for_loop_phi): Remove.
(used_outside_reduction): Remove.
(detect_commutative_reduction): Remove.
(translate_scalar_reduction_to_array_for_stmt): Remove.
(remove_phi): Remove.
(dr_indices_valid_in_loop): Remove.
(close_phi_written_to_memory): Remove.
(translate_scalar_reduction_to_array): Remove.
(rewrite_commutative_reductions_out_of_ssa_close_phi): Remove.
(rewrite_commutative_reductions_out_of_ssa_loop): Remove.
(rewrite_commutative_reductions_out_of_ssa): Remove.
(build_poly_scop): Remove call to rewrite_commutative_reductions_out_of_ssa.
2015-09-29 Evandro Menezes <e.menezes@samsung.com>
* config/arm/types.md (neon_ldp, neon_ldp_q, neon_stp, neon_stp_q):
...@@ -1919,22 +1919,6 @@ build_scop_drs (scop_p scop)
build_pbb_drs (pbb);
}
/* Locate the phi node STMT in its containing basic block and return a
   phi iterator positioned at it.  Aborts when STMT is not found among
   the phis of its block.  */

static gphi_iterator
gsi_for_phi_node (gphi *stmt)
{
  basic_block bb = gimple_bb (stmt);
  gphi_iterator it = gsi_start_phis (bb);

  while (!gsi_end_p (it))
    {
      if (it.phi () == stmt)
	return it;
      gsi_next (&it);
    }

  gcc_unreachable ();
  return it;
}
/* Analyze all the data references of STMTS and add them to the
   GBB_DATA_REFS vector of BB. */
...@@ -2515,590 +2499,6 @@ nb_pbbs_in_loops (scop_p scop)
return res;
}
/* Count the statements of BB that write to memory, i.e. those carrying
   a virtual definition.  */

static int
nb_data_writes_in_bb (basic_block bb)
{
  int count = 0;

  for (gimple_stmt_iterator it = gsi_start_bb (bb); !gsi_end_p (it);
       gsi_next (&it))
    if (gimple_vdef (gsi_stmt (it)))
      count++;

  return count;
}
/* Split BB, represented by PBB in the polyhedral model of SCOP, right
   after STMT; register a new poly_bb for the second half.  Return the
   edge connecting the two halves.  */

static edge
split_pbb (scop_p scop, poly_bb_p pbb, basic_block bb, gimple *stmt)
{
  edge split_edge = split_block (bb, stmt);

  new_pbb_from_pbb (scop, pbb, split_edge->dest);
  return split_edge;
}
/* Splits STMT out of its current BB. This is done for reduction
statements for which we want to ignore data dependences.
Returns the basic block that ends up containing the code following
the reduction statement.  */
static basic_block
split_reduction_stmt (scop_p scop, gimple *stmt)
{
basic_block bb = gimple_bb (stmt);
poly_bb_p pbb = pbb_from_bb (bb);
gimple_bb_p gbb = gbb_from_bb (bb);
edge e1;
int i;
data_reference_p dr;
/* Do not split basic blocks with no writes to memory: the reduction
will be the only write to memory. */
if (nb_data_writes_in_bb (bb) == 0
/* Or if we have already marked BB as a reduction. */
|| PBB_IS_REDUCTION (pbb_from_bb (bb)))
return bb;
/* First split: everything after STMT moves to a fresh BB/PBB.  */
e1 = split_pbb (scop, pbb, bb, stmt);
/* Split once more only when the reduction stmt is not the only one
left in the original BB. */
if (!gsi_one_before_end_p (gsi_start_nondebug_bb (bb)))
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gsi_prev (&gsi);
/* Second split: isolate STMT (now the last stmt of BB) in its own
BB/PBB; E1 is updated to the edge entering that block.  */
e1 = split_pbb (scop, pbb, bb, gsi_stmt (gsi));
}
/* A part of the data references will end in a different basic block
after the split: move the DRs from the original GBB to the newly
created GBB1. */
FOR_EACH_VEC_ELT (GBB_DATA_REFS (gbb), i, dr)
{
basic_block bb1 = gimple_bb (DR_STMT (dr));
if (bb1 != bb)
{
gimple_bb_p gbb1 = gbb_from_bb (bb1);
GBB_DATA_REFS (gbb1).safe_push (dr);
/* ordered_remove shifts the tail down by one; step I back so the
next iteration revisits the current slot.  */
GBB_DATA_REFS (gbb).ordered_remove (i);
i--;
}
}
return e1->dest;
}
/* Return true when the assignment STMT performs an operation usable as
   a commutative and associative reduction: the rhs code must be both
   commutative and associative, and reassociating must be legal for the
   result type (only with -fassociative-math for floating point, and
   only when overflow wraps or cannot happen for integral types).  */

static inline bool
is_reduction_operation_p (gimple *stmt)
{
  gcc_assert (is_gimple_assign (stmt));

  enum tree_code code = gimple_assign_rhs_code (stmt);
  if (!commutative_tree_code (code) || !associative_tree_code (code))
    return false;

  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (FLOAT_TYPE_P (type))
    return flag_associative_math;

  if (ANY_INTEGRAL_TYPE_P (type))
    return TYPE_OVERFLOW_WRAPS (type) || !operation_can_overflow (code);

  return false;
}
/* Return true when ARG appears among the arguments of PHI.  */

static bool
phi_contains_arg (gphi *phi, tree arg)
{
  for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
    {
      if (operand_equal_p (arg, gimple_phi_arg_def (phi, i), 0))
	return true;
    }

  return false;
}
/* Walk backwards from ARG through single-operand copies and reduction
   operations, looking for a phi node that has LHS among its arguments.
   Return that phi when the chain closes a reduction cycle, otherwise
   NULL.  */

static gphi *
follow_ssa_with_commutative_ops (tree arg, tree lhs)
{
  if (TREE_CODE (arg) != SSA_NAME)
    return NULL;

  gimple *def = SSA_NAME_DEF_STMT (arg);

  if (gimple_code (def) == GIMPLE_NOP
      || gimple_code (def) == GIMPLE_CALL)
    return NULL;

  if (gphi *phi = dyn_cast <gphi *> (def))
    return phi_contains_arg (phi, lhs) ? phi : NULL;

  if (!is_gimple_assign (def))
    return NULL;

  /* A plain copy: follow its single source operand.  */
  if (gimple_num_ops (def) == 2)
    return follow_ssa_with_commutative_ops (gimple_assign_rhs1 (def), lhs);

  if (is_reduction_operation_p (def))
    {
      gphi *found
	= follow_ssa_with_commutative_ops (gimple_assign_rhs1 (def), lhs);

      if (found)
	return found;
      return follow_ssa_with_commutative_ops (gimple_assign_rhs2 (def), lhs);
    }

  return NULL;
}
/* If following ARG reaches a phi of a reduction cycle containing LHS,
   record STMT on both the IN and OUT vectors and return that phi;
   otherwise return NULL.  */

static gphi *
detect_commutative_reduction_arg (tree lhs, gimple *stmt, tree arg,
				  vec<gimple *> *in,
				  vec<gimple *> *out)
{
  gphi *loop_phi = follow_ssa_with_commutative_ops (arg, lhs);

  if (loop_phi == NULL)
    return NULL;

  in->safe_push (stmt);
  out->safe_push (stmt);
  return loop_phi;
}
/* Detect a commutative and associative scalar reduction going through
   the assignment STMT, trying each relevant rhs operand in turn.
   Return the phi node of the reduction cycle, or NULL.  */

static gphi *
detect_commutative_reduction_assign (gimple *stmt, vec<gimple *> *in,
				     vec<gimple *> *out)
{
  tree lhs = gimple_assign_lhs (stmt);

  /* A single-operand assignment: follow its only source.  */
  if (gimple_num_ops (stmt) == 2)
    return detect_commutative_reduction_arg (lhs, stmt,
					     gimple_assign_rhs1 (stmt),
					     in, out);

  if (is_reduction_operation_p (stmt))
    {
      gphi *found
	= detect_commutative_reduction_arg (lhs, stmt,
					    gimple_assign_rhs1 (stmt),
					    in, out);

      if (found)
	return found;
      return detect_commutative_reduction_arg (lhs, stmt,
					       gimple_assign_rhs2 (stmt),
					       in, out);
    }

  return NULL;
}
/* If ARG is an SSA name whose defining statement is a phi node having
   LHS among its arguments, return that phi; otherwise return NULL.
   (The function name keeps its historical spelling.)  */

static gphi *
follow_inital_value_to_phi (tree arg, tree lhs)
{
  if (!arg || TREE_CODE (arg) != SSA_NAME)
    return NULL;

  gimple *def = SSA_NAME_DEF_STMT (arg);

  if (gphi *phi = dyn_cast <gphi *> (def))
    {
      if (phi_contains_arg (phi, lhs))
	return phi;
    }

  return NULL;
}
/* Return the incoming edge of the loop PHI whose source block belongs
   to a shallower loop than its destination, i.e. the edge carrying the
   initial value from outside the loop, or NULL when there is none.  */

static edge
edge_initial_value_for_loop_phi (gphi *phi)
{
  for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge incoming = gimple_phi_arg_edge (phi, i);

      if (loop_depth (incoming->src->loop_father)
	  < loop_depth (incoming->dest->loop_father))
	return incoming;
    }

  return NULL;
}
/* Return the argument of the loop PHI flowing in over the edge that
   comes from outside the loop (the initial value), or NULL_TREE when
   no such edge exists.  */

static tree
initial_value_for_loop_phi (gphi *phi)
{
  for (size_t i = 0; i < gimple_phi_num_args (phi); i++)
    {
      edge incoming = gimple_phi_arg_edge (phi, i);

      if (loop_depth (incoming->src->loop_father)
	  < loop_depth (incoming->dest->loop_father))
	return gimple_phi_arg_def (phi, i);
    }

  return NULL_TREE;
}
/* Return true when DEF has a use inside the loop of LOOP_PHI other
   than LOOP_PHI itself (debug statements excluded), i.e. when DEF
   escapes the reduction cycle of LOOP_PHI.  */

static bool
used_outside_reduction (tree def, gimple *loop_phi)
{
  use_operand_p use_p;
  imm_use_iterator it;
  loop_p loop = loop_containing_stmt (loop_phi);

  /* Within LOOP, the only legitimate use of DEF is LOOP_PHI.  */
  FOR_EACH_IMM_USE_FAST (use_p, it, def)
    {
      gimple *use_stmt = USE_STMT (use_p);

      if (use_stmt != loop_phi
	  && !is_gimple_debug (use_stmt)
	  && flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
	return true;
    }

  return false;
}
/* Detect commutative and associative scalar reductions belonging to
the SCOP starting at the loop closed phi node STMT. Return the phi
node of the reduction cycle, or NULL.
Recurses through nested close phis, pushing the loop phi of each
level on IN and the close phi on OUT; the innermost statement is
handled by detect_commutative_reduction_assign. */
static gphi *
detect_commutative_reduction (scop_p scop, gimple *stmt, vec<gimple *> *in,
vec<gimple *> *out)
{
if (scalar_close_phi_node_p (stmt))
{
gimple *def;
gphi *loop_phi, *phi, *close_phi = as_a <gphi *> (stmt);
tree init, lhs, arg = gimple_phi_arg_def (close_phi, 0);
if (TREE_CODE (arg) != SSA_NAME)
return NULL;
/* Note that loop close phi nodes should have a single argument
because we translated the representation into a canonical form
before Graphite: see canonicalize_loop_closed_ssa_form. */
gcc_assert (gimple_phi_num_args (close_phi) == 1);
/* Recurse into the statement feeding the close phi: it must be in
the SCoP region and itself part of a reduction cycle.  */
def = SSA_NAME_DEF_STMT (arg);
if (!stmt_in_sese_p (def, SCOP_REGION (scop))
|| !(loop_phi = detect_commutative_reduction (scop, def, in, out)))
return NULL;
lhs = gimple_phi_result (close_phi);
init = initial_value_for_loop_phi (loop_phi);
phi = follow_inital_value_to_phi (init, lhs);
/* Reject the reduction when its result is used inside the outer
cycle or the outer phi result has more than one use.  */
if (phi && (used_outside_reduction (lhs, phi)
|| !has_single_use (gimple_phi_result (phi))))
return NULL;
in->safe_push (loop_phi);
out->safe_push (close_phi);
return phi;
}
if (gimple_code (stmt) == GIMPLE_ASSIGN)
return detect_commutative_reduction_assign (stmt, in, out);
return NULL;
}
/* Rewrite the reduction statement STMT, whose recursive phi node is
   LOOP_PHI, to go through the memory cell RED: load RED into the phi
   result at the top of the phi's block, and store the value computed
   by STMT back into RED immediately after STMT.  */

static void
translate_scalar_reduction_to_array_for_stmt (scop_p scop, tree red,
					      gimple *stmt, gphi *loop_phi)
{
  /* phi_result = red;  inserted after the labels of the phi's block.  */
  tree phi_res = gimple_phi_result (loop_phi);
  gassign *load = gimple_build_assign (phi_res, unshare_expr (red));
  insert_stmts (scop, load, NULL, gsi_after_labels (gimple_bb (loop_phi)));

  /* red = <lhs of STMT>;  inserted right after STMT.  */
  gassign *store
    = gimple_build_assign (unshare_expr (red), gimple_assign_lhs (stmt));
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gsi_next (&gsi);
  insert_stmts (scop, store, NULL, gsi);
}
/* Removes the PHI node and resets all the debug stmts that are using
the PHI_RESULT. */
static void
remove_phi (gphi *phi)
{
imm_use_iterator imm_iter;
tree def;
use_operand_p use_p;
gimple_stmt_iterator gsi;
auto_vec<gimple *, 3> update;
unsigned int i;
gimple *stmt;
def = PHI_RESULT (phi);
/* Reset the debug binds referencing DEF; collect them first and call
update_stmt after the walk so the immediate-use list is not changed
while FOR_EACH_IMM_USE_FAST iterates over it.  */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
{
stmt = USE_STMT (use_p);
if (is_gimple_debug (stmt))
{
gimple_debug_bind_reset_value (stmt);
update.safe_push (stmt);
}
}
FOR_EACH_VEC_ELT (update, i, stmt)
update_stmt (stmt);
/* Finally unlink PHI itself (second argument false: the phi result's
SSA name is not released here).  */
gsi = gsi_for_phi_node (phi);
remove_phi_node (&gsi, false);
}
/* Callback for for_each_index: DATA points to a loop_p.  Return true
   when *INDEX is usable before that loop, i.e. it is not an SSA name,
   or its definition block dominates the loop header (the definition is
   available outside the loop).  */

static bool
dr_indices_valid_in_loop (tree ref ATTRIBUTE_UNUSED, tree *index, void *data)
{
  if (TREE_CODE (*index) != SSA_NAME)
    return true;

  loop_p loop = *((loop_p *) data);
  gimple *def = SSA_NAME_DEF_STMT (*index);

  if (!def)
    return true;

  basic_block def_bb = gimple_bb (def);

  if (!def_bb)
    return true;

  return dominated_by_p (CDI_DOMINATORS, loop->header, def_bb);
}
/* When the result of a CLOSE_PHI is written to a memory location,
return a pointer to that memory reference, otherwise return
NULL_TREE. */
static tree
close_phi_written_to_memory (gphi *close_phi)
{
imm_use_iterator imm_iter;
use_operand_p use_p;
gimple *stmt;
tree res, def = gimple_phi_result (close_phi);
/* Scan the uses of the close phi result for an assignment whose lhs
names a memory object we could reuse for the reduction.  */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, def)
if ((stmt = USE_STMT (use_p))
&& gimple_code (stmt) == GIMPLE_ASSIGN
&& (res = gimple_assign_lhs (stmt)))
{
switch (TREE_CODE (res))
{
case VAR_DECL:
case PARM_DECL:
case RESULT_DECL:
/* A named scalar object: usable as-is.  */
return res;
case ARRAY_REF:
case MEM_REF:
{
tree arg = gimple_phi_arg_def (close_phi, 0);
loop_p nest = loop_containing_stmt (SSA_NAME_DEF_STMT (arg));
/* FIXME: this restriction is for id-{24,25}.f and
could be handled by duplicating the computation of
array indices before the loop of the close_phi. */
if (for_each_index (&res, dr_indices_valid_in_loop, &nest))
return res;
}
/* Fallthru. */
default:
/* Not a reusable memory reference; the continue applies to the
enclosing FOR_EACH_IMM_USE_FAST loop, not to the switch.  */
continue;
}
}
return NULL_TREE;
}
/* Rewrite out of SSA the reduction described by the loop phi nodes
IN, and the close phi nodes OUT. IN and OUT are structured by loop
levels like this:
IN: stmt, loop_n, ..., loop_0
OUT: stmt, close_n, ..., close_0
the first element is the reduction statement, and the next elements
are the loop and close phi nodes of each of the outer loops. */
static void
translate_scalar_reduction_to_array (scop_p scop,
vec<gimple *> in,
vec<gimple *> out)
{
gimple *loop_stmt;
/* Pick the memory cell RED: either an existing store target of the
outermost close phi, or a fresh zero-dimensional array.  */
unsigned int i = out.length () - 1;
tree red = close_phi_written_to_memory (as_a <gphi *> (out[i]));
FOR_EACH_VEC_ELT (in, i, loop_stmt)
{
gimple *close_stmt = out[i];
/* Index 0 holds the reduction statement itself (same element in IN
and OUT); the following indices hold phi nodes.  */
if (i == 0)
{
basic_block bb = split_reduction_stmt (scop, loop_stmt);
poly_bb_p pbb = pbb_from_bb (bb);
PBB_IS_REDUCTION (pbb) = true;
gcc_assert (close_stmt == loop_stmt);
if (!red)
red = create_zero_dim_array
(gimple_assign_lhs (loop_stmt), "Commutative_Associative_Reduction");
/* in[1] is the innermost loop phi of the reduction.  */
translate_scalar_reduction_to_array_for_stmt (scop, red, loop_stmt,
as_a <gphi *> (in[1]));
continue;
}
gphi *loop_phi = as_a <gphi *> (loop_stmt);
gphi *close_phi = as_a <gphi *> (close_stmt);
/* At the outermost level, copy RED out after the loop and copy the
initial value into RED on the loop-entry edge.  */
if (i == in.length () - 1)
{
insert_out_of_ssa_copy (scop, gimple_phi_result (close_phi),
unshare_expr (red), close_phi);
insert_out_of_ssa_copy_on_edge
(scop, edge_initial_value_for_loop_phi (loop_phi),
unshare_expr (red), initial_value_for_loop_phi (loop_phi));
}
/* The phis of this level are now dead: the reduction goes through
memory.  */
remove_phi (loop_phi);
remove_phi (close_phi);
}
}
/* Rewrite out of SSA the commutative reduction that ends at CLOSE_PHI
   in SCOP.  Return true when something has been changed.  */

static bool
rewrite_commutative_reductions_out_of_ssa_close_phi (scop_p scop,
						     gphi *close_phi)
{
  auto_vec<gimple *, 10> in;
  auto_vec<gimple *, 10> out;

  detect_commutative_reduction (scop, close_phi, &in, &out);

  /* A real reduction records more than just the reduction statement.  */
  bool changed = in.length () > 1;
  if (changed)
    translate_scalar_reduction_to_array (scop, in, out);

  return changed;
}
/* Rewrite out of SSA all the commutative reductions of LOOP in SCOP,
   scanning the close phis at the single exit of LOOP.  Return true
   when something has been changed.  */

static bool
rewrite_commutative_reductions_out_of_ssa_loop (scop_p scop,
						loop_p loop)
{
  edge exit = single_exit (loop);

  /* Only loops with a single exit are handled.  */
  if (!exit)
    return false;

  bool changed = false;

  for (gphi_iterator psi = gsi_start_phis (exit->dest); !gsi_end_p (psi);
       gsi_next (&psi))
    {
      tree res = gimple_phi_result (psi.phi ());

      if (res
	  && !virtual_operand_p (res)
	  && !scev_analyzable_p (res, SCOP_REGION (scop)))
	changed |= rewrite_commutative_reductions_out_of_ssa_close_phi
	  (scop, psi.phi ());
    }

  return changed;
}
/* Rewrite out of SSA all the commutative reductions of every loop in
   SCOP, then repair the scalar evolution cache and SSA form when
   anything was rewritten.  */

static void
rewrite_commutative_reductions_out_of_ssa (scop_p scop)
{
  sese region = SCOP_REGION (scop);
  bool changed = false;
  loop_p loop;

  FOR_EACH_LOOP (loop, 0)
    if (loop_in_sese_p (loop, region))
      changed |= rewrite_commutative_reductions_out_of_ssa_loop (scop, loop);

  if (!changed)
    return;

  scev_reset_htab ();
  gsi_commit_edge_inserts ();
  update_ssa (TODO_update_ssa);
#ifdef ENABLE_CHECKING
  verify_loop_closed_ssa (true);
#endif
}
/* Can all ivs be represented by a signed integer?
   As ISL might generate negative values in its expressions, signed loop ivs
   are required in the backend. */
...@@ -3154,8 +2554,6 @@ build_poly_scop (scop_p scop)
if (!scop_ivs_can_be_represented (scop))
return;
rewrite_commutative_reductions_out_of_ssa (scop);
build_sese_loop_nests (region);
/* Record all conditions in REGION. */
sese_dom_walker (CDI_DOMINATORS, region).walk (cfun->cfg->x_entry_block_ptr);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment