Commit 59fd17e3 by Richard Biener (committed by Richard Biener)

revert: [multiple changes]

2013-01-03  Richard Biener  <rguenther@suse.de>

        Revert
        2013-01-03  Richard Biener  <rguenther@suse.de>

        PR tree-optimization/55857
        * tree-vect-stmts.c (vectorizable_load): Do not setup
        re-alignment for invariant loads.

        2013-01-02  Richard Biener  <rguenther@suse.de>

        * tree-vect-stmts.c (vectorizable_load): When vectorizing an
        invariant load do not generate a vector load from the scalar
        location.

From-SVN: r194856
parent fc883b84
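
For context: the vectorizer calls a load "invariant" when its address does not change across loop iterations, that is, when the step of its data reference (DR_STEP) is zero; this is the condition the first hunk below tests with integer_zerop (DR_STEP (dr)). A minimal C sketch of such a loop follows; the names are illustrative and not taken from the PR 55857 testcase.

/* Sketch only.  The read of 'x' happens at the same address on
   every iteration, so vectorizable_load classifies it as an
   invariant load.  The reverted 2013-01-02 change built the vector
   operand for it by splatting the scalar value instead of emitting
   a vector load at &x; the reverted 2013-01-03 change additionally
   skipped the re-alignment setup for such loads.  */
int a[1024];
int x;

void
foo (void)
{
  for (int i = 0; i < 1024; i++)
    a[i] = x;
}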
gcc/ChangeLog
 2013-01-03  Richard Biener  <rguenther@suse.de>
 
+	Revert
+	2013-01-03  Richard Biener  <rguenther@suse.de>
+
+	PR tree-optimization/55857
+	* tree-vect-stmts.c (vectorizable_load): Do not setup
+	re-alignment for invariant loads.
+
+	2013-01-02  Richard Biener  <rguenther@suse.de>
+
+	* tree-vect-stmts.c (vectorizable_load): When vectorizing an
+	invariant load do not generate a vector load from the scalar
+	location.
+
+2013-01-03  Richard Biener  <rguenther@suse.de>
+
 	* tree-vect-loop.c (vect_analyze_loop_form): Clarify reason
 	for not vectorizing.
 	* tree-vect-data-refs.c (vect_create_addr_base_for_vector_ref): Do
gcc/tree-vect-stmts.c
@@ -4927,8 +4927,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
   if ((alignment_support_scheme == dr_explicit_realign_optimized
        || alignment_support_scheme == dr_explicit_realign)
-      && !compute_in_loop
-      && !integer_zerop (DR_STEP (dr)))
+      && !compute_in_loop)
     {
       msq = vect_setup_realignment (first_stmt, gsi, &realignment_token,
                                     alignment_support_scheme, NULL_TREE,
@@ -4989,19 +4988,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
           /* Record the mapping between SSA_NAMEs and statements.  */
           vect_record_grouped_load_vectors (stmt, dr_chain);
         }
-      /* Handle invariant-load.  */
-      else if (inv_p && !bb_vinfo)
-        {
-          gimple_stmt_iterator gsi2 = *gsi;
-          gcc_assert (!grouped_load && !slp_perm);
-          gsi_next (&gsi2);
-          new_temp = vect_init_vector (stmt, scalar_dest,
-                                       vectype, &gsi2);
-          new_stmt = SSA_NAME_DEF_STMT (new_temp);
-          /* Store vector loads in the corresponding SLP_NODE.  */
-          if (slp)
-            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
-        }
       else
         {
           for (i = 0; i < vec_num; i++)
@@ -5149,6 +5135,17 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
         }
     }
 
+  /* 4. Handle invariant-load.  */
+  if (inv_p && !bb_vinfo)
+    {
+      gimple_stmt_iterator gsi2 = *gsi;
+      gcc_assert (!grouped_load);
+      gsi_next (&gsi2);
+      new_temp = vect_init_vector (stmt, scalar_dest,
+                                   vectype, &gsi2);
+      new_stmt = SSA_NAME_DEF_STMT (new_temp);
+    }
+
   if (negative)
     {
       tree perm_mask = perm_mask_for_reverse (vectype);
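
The two code shapes named in the reverted 2013-01-02 entry can be sketched with GCC's generic vector extensions. This is an illustration under the assumption of a 4-lane int vector, not the vectorizer's actual output:

typedef int v4si __attribute__ ((vector_size (16)));

extern int x;

/* Shape the reverted change produced: load the scalar once and
   broadcast it into all lanes (roughly what vect_init_vector
   builds from scalar_dest in the hunks above).  */
v4si
splat_invariant (void)
{
  int t = x;
  v4si v = { t, t, t, t };
  return v;
}

/* Shape the revert reinstates: a vector load from the scalar
   location itself.  Note that this reads 16 bytes starting at &x
   even though only 4 of them belong to the scalar object.  */
v4si
load_invariant (void)
{
  return *(v4si *) &x;
}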