Commit 86a91c0a by Richard Sandiford

[28/46] Use stmt_vec_info instead of gimple stmts internally (part 1)

This first part makes functions use stmt_vec_infos instead of
gimple stmts in cases where the stmt_vec_info was already available
and where the change is mechanical.  Most of it is just replacing
"stmt" with "stmt_info".

2018-07-31  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* tree-vect-data-refs.c (vect_slp_analyze_node_dependences)
	(vect_check_gather_scatter, vect_create_data_ref_ptr, bump_vector_ptr)
	(vect_permute_store_chain, vect_setup_realignment)
	(vect_permute_load_chain, vect_shift_permute_load_chain)
	(vect_transform_grouped_load): Use stmt_vec_info rather than gimple
	stmts internally, and when passing values to other vectorizer routines.
	* tree-vect-loop-manip.c (vect_can_advance_ivs_p): Likewise.
	* tree-vect-loop.c (vect_analyze_scalar_cycles_1)
	(vect_analyze_loop_operations, get_initial_def_for_reduction)
	(vect_create_epilog_for_reduction, vectorize_fold_left_reduction)
	(vectorizable_reduction, vectorizable_induction)
	(vectorizable_live_operation, vect_transform_loop_stmt)
	(vect_transform_loop): Likewise.
	* tree-vect-patterns.c (vect_reassociating_reduction_p)
	(vect_recog_widen_op_pattern, vect_recog_mixed_size_cond_pattern)
	(vect_recog_bool_pattern, vect_recog_gather_scatter_pattern): Likewise.
	* tree-vect-slp.c (vect_analyze_slp_instance): Likewise.
	(vect_slp_analyze_node_operations_1): Likewise.
	* tree-vect-stmts.c (vect_mark_relevant, process_use)
	(exist_non_indexing_operands_for_use_p, vect_init_vector_1)
	(vect_mark_stmts_to_be_vectorized, vect_get_vec_def_for_operand)
	(vect_finish_stmt_generation_1, get_group_load_store_type)
	(get_load_store_type, vect_build_gather_load_calls)
	(vectorizable_bswap, vectorizable_call, vectorizable_simd_clone_call)
	(vect_create_vectorized_demotion_stmts, vectorizable_conversion)
	(vectorizable_assignment, vectorizable_shift, vectorizable_operation)
	(vectorizable_store, vectorizable_load, vectorizable_condition)
	(vectorizable_comparison, vect_analyze_stmt, vect_transform_stmt)
	(supportable_widening_operation): Likewise.
	(vect_get_vector_types_for_stmt): Likewise.
	* tree-vectorizer.h (vect_dr_behavior): Likewise.

From-SVN: r263143
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -712,7 +712,7 @@ vect_slp_analyze_node_dependences (slp_instance instance, slp_tree node,
 	 been sunk to (and we verify if we can do that as well).  */
       if (gimple_visited_p (stmt))
 	{
-	  if (stmt != last_store)
+	  if (stmt_info != last_store)
 	    continue;
 	  unsigned i;
 	  stmt_vec_info store_info;
@@ -3666,7 +3666,7 @@ vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
   /* See whether this is already a call to a gather/scatter internal function.
      If not, see whether it's a masked load or store.  */
-  gcall *call = dyn_cast <gcall *> (stmt);
+  gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
   if (call && gimple_call_internal_p (call))
     {
       ifn = gimple_call_internal_fn (call);
@@ -4677,8 +4677,8 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
   if (loop_vinfo)
     {
       loop = LOOP_VINFO_LOOP (loop_vinfo);
-      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
-      containing_loop = (gimple_bb (stmt))->loop_father;
+      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
+      containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
       pe = loop_preheader_edge (loop);
     }
   else
@@ -4786,7 +4786,7 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
   /* Create: (&(base[init_val+offset]+byte_offset) in the loop preheader.  */
-  new_temp = vect_create_addr_base_for_vector_ref (stmt, &new_stmt_list,
+  new_temp = vect_create_addr_base_for_vector_ref (stmt_info, &new_stmt_list,
                                                    offset, byte_offset);
   if (new_stmt_list)
     {
@@ -4934,7 +4934,7 @@ bump_vector_ptr (tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
   new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
   incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
                                    dataref_ptr, update);
-  vect_finish_stmt_generation (stmt, incr_stmt, gsi);
+  vect_finish_stmt_generation (stmt_info, incr_stmt, gsi);
   /* Copy the points-to information if it exists.  */
   if (DR_PTR_INFO (dr))
@@ -5282,7 +5282,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
                                        vect2, perm3_mask_low);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       vect1 = data_ref;
       vect2 = dr_chain[2];
@@ -5293,7 +5293,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect1,
                                        vect2, perm3_mask_high);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[j] = data_ref;
     }
 }
@@ -5332,7 +5332,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
       high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
       perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1,
                                        vect2, perm_mask_high);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[2*j] = high;
       /* Create interleaving stmt:
@@ -5342,7 +5342,7 @@ vect_permute_store_chain (vec<tree> dr_chain,
       low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
       perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1,
                                        vect2, perm_mask_low);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[2*j+1] = low;
     }
   memcpy (dr_chain.address (), result_chain->address (),
@@ -5415,7 +5415,7 @@ vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
   struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
   struct loop *loop = NULL;
   edge pe = NULL;
-  tree scalar_dest = gimple_assign_lhs (stmt);
+  tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
   tree vec_dest;
   gimple *inc;
   tree ptr;
@@ -5429,13 +5429,13 @@ vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
   bool inv_p;
   bool compute_in_loop = false;
   bool nested_in_vect_loop = false;
-  struct loop *containing_loop = (gimple_bb (stmt))->loop_father;
+  struct loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
   struct loop *loop_for_initial_load = NULL;
   if (loop_vinfo)
     {
       loop = LOOP_VINFO_LOOP (loop_vinfo);
-      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+      nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
     }
   gcc_assert (alignment_support_scheme == dr_explicit_realign
@@ -5518,9 +5518,9 @@ vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
       gcc_assert (!compute_in_loop);
       vec_dest = vect_create_destination_var (scalar_dest, vectype);
-      ptr = vect_create_data_ref_ptr (stmt, vectype, loop_for_initial_load,
-                                      NULL_TREE, &init_addr, NULL, &inc,
-                                      true, &inv_p);
+      ptr = vect_create_data_ref_ptr (stmt_info, vectype,
+                                      loop_for_initial_load, NULL_TREE,
+                                      &init_addr, NULL, &inc, true, &inv_p);
       if (TREE_CODE (ptr) == SSA_NAME)
         new_temp = copy_ssa_name (ptr);
       else
@@ -5562,7 +5562,7 @@ vect_setup_realignment (gimple *stmt, gimple_stmt_iterator *gsi,
       if (!init_addr)
         {
           /* Generate the INIT_ADDR computation outside LOOP.  */
-          init_addr = vect_create_addr_base_for_vector_ref (stmt, &stmts,
+          init_addr = vect_create_addr_base_for_vector_ref (stmt_info, &stmts,
                                                             NULL_TREE);
           if (loop)
             {
@@ -5890,7 +5890,7 @@ vect_permute_load_chain (vec<tree> dr_chain,
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_low");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
                                        second_vect, perm3_mask_low);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       /* Create interleaving stmt (high part of):
          high = VEC_PERM_EXPR <first_vect, second_vect2, {k, 3 + k, 6 + k,
@@ -5900,7 +5900,7 @@ vect_permute_load_chain (vec<tree> dr_chain,
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle3_high");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, first_vect,
                                        second_vect, perm3_mask_high);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[k] = data_ref;
     }
 }
@@ -5935,7 +5935,7 @@ vect_permute_load_chain (vec<tree> dr_chain,
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        first_vect, second_vect,
                                        perm_mask_even);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[j/2] = data_ref;
       /* data_ref = permute_odd (first_data_ref, second_data_ref);  */
@@ -5943,7 +5943,7 @@ vect_permute_load_chain (vec<tree> dr_chain,
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        first_vect, second_vect,
                                        perm_mask_odd);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[j/2+length/2] = data_ref;
     }
   memcpy (dr_chain.address (), result_chain->address (),
@@ -6143,26 +6143,26 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        first_vect, first_vect,
                                        perm2_mask1);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       vect[0] = data_ref;
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shuffle2");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        second_vect, second_vect,
                                        perm2_mask2);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       vect[1] = data_ref;
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        vect[0], vect[1], shift1_mask);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[j/2 + length/2] = data_ref;
       data_ref = make_temp_ssa_name (vectype, NULL, "vect_select");
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        vect[0], vect[1], select_mask);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       (*result_chain)[j/2] = data_ref;
     }
   memcpy (dr_chain.address (), result_chain->address (),
@@ -6259,7 +6259,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        dr_chain[k], dr_chain[k],
                                        perm3_mask);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       vect[k] = data_ref;
     }
@@ -6269,7 +6269,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
       perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR,
                                        vect[k % 3], vect[(k + 1) % 3],
                                        shift1_mask);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       vect_shift[k] = data_ref;
     }
@@ -6280,7 +6280,7 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
                                        vect_shift[(4 - k) % 3],
                                        vect_shift[(3 - k) % 3],
                                        shift2_mask);
-      vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+      vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
       vect[k] = data_ref;
     }
@@ -6289,13 +6289,13 @@ vect_shift_permute_load_chain (vec<tree> dr_chain,
   data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift3");
   perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[0],
                                    vect[0], shift3_mask);
-  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
   (*result_chain)[nelt % 3] = data_ref;
   data_ref = make_temp_ssa_name (vectype, NULL, "vect_shift4");
   perm_stmt = gimple_build_assign (data_ref, VEC_PERM_EXPR, vect[1],
                                    vect[1], shift4_mask);
-  vect_finish_stmt_generation (stmt, perm_stmt, gsi);
+  vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
   (*result_chain)[0] = data_ref;
   return true;
 }
@@ -6328,10 +6328,10 @@ vect_transform_grouped_load (gimple *stmt, vec<tree> dr_chain, int size,
   mode = TYPE_MODE (STMT_VINFO_VECTYPE (stmt_info));
   if (targetm.sched.reassociation_width (VEC_PERM_EXPR, mode) > 1
       || pow2p_hwi (size)
-      || !vect_shift_permute_load_chain (dr_chain, size, stmt,
+      || !vect_shift_permute_load_chain (dr_chain, size, stmt_info,
                                          gsi, &result_chain))
-    vect_permute_load_chain (dr_chain, size, stmt, gsi, &result_chain);
-  vect_record_grouped_load_vectors (stmt, result_chain);
+    vect_permute_load_chain (dr_chain, size, stmt_info, gsi, &result_chain);
+  vect_record_grouped_load_vectors (stmt_info, result_chain);
   result_chain.release ();
 }
--- a/gcc/tree-vect-loop-manip.c
+++ b/gcc/tree-vect-loop-manip.c
@@ -1381,7 +1381,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
       if (dump_enabled_p ())
         {
           dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
-          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
+          dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi_info->stmt, 0);
         }
       /* Skip virtual phi's.  The data dependences that are associated with
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -526,7 +526,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
           || (LOOP_VINFO_LOOP (loop_vinfo) != loop
               && TREE_CODE (step) != INTEGER_CST))
         {
-          worklist.safe_push (phi);
+          worklist.safe_push (stmt_vinfo);
           continue;
         }
@@ -1595,11 +1595,12 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
           need_to_vectorize = true;
           if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
               && ! PURE_SLP_STMT (stmt_info))
-            ok = vectorizable_induction (phi, NULL, NULL, NULL, &cost_vec);
+            ok = vectorizable_induction (stmt_info, NULL, NULL, NULL,
+                                         &cost_vec);
           else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
                     || STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
                    && ! PURE_SLP_STMT (stmt_info))
-            ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL,
+            ok = vectorizable_reduction (stmt_info, NULL, NULL, NULL, NULL,
                                          &cost_vec);
         }
@@ -1607,7 +1608,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo)
       if (ok
           && STMT_VINFO_LIVE_P (stmt_info)
           && !PURE_SLP_STMT (stmt_info))
-        ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL,
+        ok = vectorizable_live_operation (stmt_info, NULL, NULL, -1, NULL,
                                           &cost_vec);
       if (!ok)
@@ -4045,7 +4046,7 @@ get_initial_def_for_reduction (gimple *stmt, tree init_val,
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   tree scalar_type = TREE_TYPE (init_val);
   tree vectype = get_vectype_for_scalar_type (scalar_type);
-  enum tree_code code = gimple_assign_rhs_code (stmt);
+  enum tree_code code = gimple_assign_rhs_code (stmt_vinfo->stmt);
   tree def_for_init;
   tree init_def;
   REAL_VALUE_TYPE real_init_val = dconst0;
@@ -4057,8 +4058,8 @@ get_initial_def_for_reduction (gimple *stmt, tree init_val,
   gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
               || SCALAR_FLOAT_TYPE_P (scalar_type));
-  gcc_assert (nested_in_vect_loop_p (loop, stmt)
-              || loop == (gimple_bb (stmt))->loop_father);
+  gcc_assert (nested_in_vect_loop_p (loop, stmt_vinfo)
+              || loop == (gimple_bb (stmt_vinfo->stmt))->loop_father);
   vect_reduction_type reduction_type
     = STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
@@ -4127,7 +4128,7 @@ get_initial_def_for_reduction (gimple *stmt, tree init_val,
       if (reduction_type != COND_REDUCTION
           && reduction_type != EXTRACT_LAST_REDUCTION)
         {
-          init_def = vect_get_vec_def_for_operand (init_val, stmt);
+          init_def = vect_get_vec_def_for_operand (init_val, stmt_vinfo);
           break;
         }
     }
@@ -4406,7 +4407,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
   tree vec_dest;
   tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
   gimple *epilog_stmt = NULL;
-  enum tree_code code = gimple_assign_rhs_code (stmt);
+  enum tree_code code = gimple_assign_rhs_code (stmt_info->stmt);
   gimple *exit_phi;
   tree bitsize;
   tree adjustment_def = NULL;
@@ -4435,7 +4436,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
   if (slp_node)
     group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
-  if (nested_in_vect_loop_p (loop, stmt))
+  if (nested_in_vect_loop_p (loop, stmt_info))
     {
       outer_loop = loop;
       loop = loop->inner;
@@ -4504,10 +4505,12 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
           /* Do not use an adjustment def as that case is not supported
              correctly if ncopies is not one.  */
           vect_is_simple_use (initial_def, loop_vinfo, &initial_def_dt);
-          vec_initial_def = vect_get_vec_def_for_operand (initial_def, stmt);
+          vec_initial_def = vect_get_vec_def_for_operand (initial_def,
+                                                          stmt_info);
         }
       else
-        vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
-                                                         &adjustment_def);
+        vec_initial_def
+          = get_initial_def_for_reduction (stmt_info, initial_def,
+                                           &adjustment_def);
       vec_initial_defs.create (1);
       vec_initial_defs.quick_push (vec_initial_def);
@@ -5676,7 +5679,7 @@ vect_finalize_reduction:
               preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
                                 loop_preheader_edge (outer_loop));
               vect_phi_init = get_initial_def_for_reduction
-                                (stmt, preheader_arg, NULL);
+                                (stmt_info, preheader_arg, NULL);
               /* Update phi node arguments with vs0 and vs2.  */
               add_phi_arg (vect_phi, vect_phi_init,
@@ -5841,7 +5844,7 @@ vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   else
     ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
-  gcc_assert (!nested_in_vect_loop_p (loop, stmt));
+  gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));
   gcc_assert (ncopies == 1);
   gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
   gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
@@ -5859,13 +5862,14 @@ vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   auto_vec<tree> vec_oprnds0;
   if (slp_node)
     {
-      vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
+      vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
+                         slp_node);
       group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
       scalar_dest_def_info = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
     }
   else
     {
-      tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt);
+      tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt_info);
       vec_oprnds0.create (1);
       vec_oprnds0.quick_push (loop_vec_def0);
       scalar_dest_def_info = stmt_info;
@@ -6099,7 +6103,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       && STMT_VINFO_DEF_TYPE (stmt_info) != vect_nested_cycle)
     return false;
-  if (nested_in_vect_loop_p (loop, stmt))
+  if (nested_in_vect_loop_p (loop, stmt_info))
     {
       loop = loop->inner;
       nested_cycle = true;
@@ -6109,7 +6113,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
     gcc_assert (slp_node
                 && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info);
-  if (gphi *phi = dyn_cast <gphi *> (stmt))
+  if (gphi *phi = dyn_cast <gphi *> (stmt_info->stmt))
     {
       tree phi_result = gimple_phi_result (phi);
       /* Analysis is fully done on the reduction stmt invocation.  */
@@ -6164,7 +6168,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
           && STMT_VINFO_RELEVANT (reduc_stmt_info) <= vect_used_only_live
           && (use_stmt_info = loop_vinfo->lookup_single_use (phi_result))
           && (use_stmt_info == reduc_stmt_info
-              || STMT_VINFO_RELATED_STMT (use_stmt_info) == reduc_stmt))
+              || STMT_VINFO_RELATED_STMT (use_stmt_info) == reduc_stmt_info))
         single_defuse_cycle = true;
       /* Create the destination vector  */
@@ -6548,7 +6552,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
     {
       /* Only call during the analysis stage, otherwise we'll lose
          STMT_VINFO_TYPE.  */
-      if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
+      if (!vec_stmt && !vectorizable_condition (stmt_info, gsi, NULL,
                                                 ops[reduc_index], 0, NULL,
                                                 cost_vec))
         {
@@ -6935,7 +6939,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       && (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
       && (use_stmt_info = loop_vinfo->lookup_single_use (reduc_phi_result))
       && (use_stmt_info == stmt_info
-          || STMT_VINFO_RELATED_STMT (use_stmt_info) == stmt))
+          || STMT_VINFO_RELATED_STMT (use_stmt_info) == stmt_info))
     {
       single_defuse_cycle = true;
       epilog_copies = 1;
@@ -7015,13 +7019,13 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   if (reduction_type == FOLD_LEFT_REDUCTION)
     return vectorize_fold_left_reduction
-      (stmt, gsi, vec_stmt, slp_node, reduc_def_phi, code,
+      (stmt_info, gsi, vec_stmt, slp_node, reduc_def_phi, code,
        reduc_fn, ops, vectype_in, reduc_index, masks);
   if (reduction_type == EXTRACT_LAST_REDUCTION)
     {
       gcc_assert (!slp_node);
-      return vectorizable_condition (stmt, gsi, vec_stmt,
+      return vectorizable_condition (stmt_info, gsi, vec_stmt,
                                      NULL, reduc_index, NULL, NULL);
     }
@@ -7053,7 +7057,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
       if (code == COND_EXPR)
         {
           gcc_assert (!slp_node);
-          vectorizable_condition (stmt, gsi, vec_stmt,
+          vectorizable_condition (stmt_info, gsi, vec_stmt,
                                   PHI_RESULT (phis[0]->stmt),
                                   reduc_index, NULL, NULL);
           /* Multiple types are not supported for condition.  */
@@ -7090,12 +7094,12 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
           else
             {
               vec_oprnds0.quick_push
-                (vect_get_vec_def_for_operand (ops[0], stmt));
+                (vect_get_vec_def_for_operand (ops[0], stmt_info));
               vec_oprnds1.quick_push
-                (vect_get_vec_def_for_operand (ops[1], stmt));
+                (vect_get_vec_def_for_operand (ops[1], stmt_info));
               if (op_type == ternary_op)
                 vec_oprnds2.quick_push
-                  (vect_get_vec_def_for_operand (ops[2], stmt));
+                  (vect_get_vec_def_for_operand (ops[2], stmt_info));
             }
         }
       else
@@ -7144,7 +7148,8 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
               new_temp = make_ssa_name (vec_dest, call);
               gimple_call_set_lhs (call, new_temp);
               gimple_call_set_nothrow (call, true);
-              new_stmt_info = vect_finish_stmt_generation (stmt, call, gsi);
+              new_stmt_info
+                = vect_finish_stmt_generation (stmt_info, call, gsi);
             }
           else
             {
@@ -7156,7 +7161,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
               new_temp = make_ssa_name (vec_dest, new_stmt);
               gimple_assign_set_lhs (new_stmt, new_temp);
               new_stmt_info
-                = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
             }
           if (slp_node)
@@ -7184,7 +7189,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
     vect_defs[0] = gimple_get_lhs ((*vec_stmt)->stmt);
-  vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_phi,
+  vect_create_epilog_for_reduction (vect_defs, stmt_info, reduc_def_phi,
                                     epilog_copies, reduc_fn, phis,
                                     double_reduc, slp_node, slp_node_instance,
                                     cond_reduc_val, cond_reduc_op_code,
@@ -7293,7 +7298,7 @@ vectorizable_induction (gimple *phi,
   gcc_assert (ncopies >= 1);
   /* FORNOW. These restrictions should be relaxed.  */
-  if (nested_in_vect_loop_p (loop, phi))
+  if (nested_in_vect_loop_p (loop, stmt_info))
     {
       imm_use_iterator imm_iter;
       use_operand_p use_p;
@@ -7443,10 +7448,10 @@ vectorizable_induction (gimple *phi,
       new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                               expr, step_expr);
       if (! CONSTANT_CLASS_P (new_name))
-        new_name = vect_init_vector (phi, new_name,
+        new_name = vect_init_vector (stmt_info, new_name,
                                      TREE_TYPE (step_expr), NULL);
       new_vec = build_vector_from_val (vectype, new_name);
-      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
+      vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
       /* Now generate the IVs.  */
       unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
@@ -7513,10 +7518,10 @@ vectorizable_induction (gimple *phi,
       new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
                               expr, step_expr);
       if (! CONSTANT_CLASS_P (new_name))
-        new_name = vect_init_vector (phi, new_name,
+        new_name = vect_init_vector (stmt_info, new_name,
                                      TREE_TYPE (step_expr), NULL);
       new_vec = build_vector_from_val (vectype, new_name);
-      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
+      vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
       for (; ivn < nvects; ++ivn)
         {
           gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs]->stmt;
@@ -7549,7 +7554,7 @@ vectorizable_induction (gimple *phi,
       /* iv_loop is nested in the loop to be vectorized.  init_expr had already
          been created during vectorization of previous stmts.  We obtain it
          from the STMT_VINFO_VEC_STMT of the defining stmt.  */
-      vec_init = vect_get_vec_def_for_operand (init_expr, phi);
+      vec_init = vect_get_vec_def_for_operand (init_expr, stmt_info);
       /* If the initial value is not of proper type, convert it.  */
       if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
         {
@@ -7651,7 +7656,7 @@ vectorizable_induction (gimple *phi,
   gcc_assert (CONSTANT_CLASS_P (new_name)
               || TREE_CODE (new_name) == SSA_NAME);
   new_vec = build_vector_from_val (vectype, t);
-  vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
+  vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
   /* Create the following def-use cycle:
@@ -7717,7 +7722,7 @@ vectorizable_induction (gimple *phi,
       gcc_assert (CONSTANT_CLASS_P (new_name)
                   || TREE_CODE (new_name) == SSA_NAME);
       new_vec = build_vector_from_val (vectype, t);
-      vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
+      vec_step = vect_init_vector (stmt_info, new_vec, vectype, NULL);
       vec_def = induc_def;
       prev_stmt_vinfo = induction_phi_info;
@@ -7815,7 +7820,7 @@ vectorizable_live_operation (gimple *stmt,
     return false;
   /* FORNOW. CHECKME.  */
-  if (nested_in_vect_loop_p (loop, stmt))
+  if (nested_in_vect_loop_p (loop, stmt_info))
     return false;
   /* If STMT is not relevant and it is a simple assignment and its inputs are
@@ -7823,7 +7828,7 @@ vectorizable_live_operation (gimple *stmt,
      scalar value that it computes will be used.  */
   if (!STMT_VINFO_RELEVANT_P (stmt_info))
     {
-      gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
+      gcc_assert (is_simple_and_all_uses_invariant (stmt_info, loop_vinfo));
       if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "statement is simple and uses invariant.  Leaving in "
@@ -8222,11 +8227,11 @@ vect_transform_loop_stmt (loop_vec_info loop_vinfo, gimple *stmt,
    {
      dump_printf_loc (MSG_NOTE, vect_location,
                       "------>vectorizing statement: ");
-     dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+     dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
    }
  if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
-   vect_loop_kill_debug_uses (loop, stmt);
+   vect_loop_kill_debug_uses (loop, stmt_info);
  if (!STMT_VINFO_RELEVANT_P (stmt_info)
      && !STMT_VINFO_LIVE_P (stmt_info))
@@ -8267,7 +8272,7 @@ vect_transform_loop_stmt (loop_vec_info loop_vinfo, gimple *stmt,
    dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
  bool grouped_store = false;
- if (vect_transform_stmt (stmt, gsi, &grouped_store, NULL, NULL))
+ if (vect_transform_stmt (stmt_info, gsi, &grouped_store, NULL, NULL))
    *seen_store = stmt_info;
 }
@@ -8422,7 +8427,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
            continue;
          if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
-           vect_loop_kill_debug_uses (loop, phi);
+           vect_loop_kill_debug_uses (loop, stmt_info);
          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_LIVE_P (stmt_info))
@@ -8441,7 +8446,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
            {
              if (dump_enabled_p ())
                dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
-             vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
+             vect_transform_stmt (stmt_info, NULL, NULL, NULL, NULL);
            }
        }
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -842,7 +842,7 @@ vect_reassociating_reduction_p (stmt_vec_info stmt_info, tree_code code,
   /* We don't allow changing the order of the computation in the inner-loop
      when doing outer-loop vectorization.  */
   struct loop *loop = LOOP_VINFO_LOOP (loop_info);
-  if (loop && nested_in_vect_loop_p (loop, assign))
+  if (loop && nested_in_vect_loop_p (loop, stmt_info))
     return false;
   if (!vect_reassociating_reduction_p (stmt_info))
@@ -1196,7 +1196,7 @@ vect_recog_widen_op_pattern (stmt_vec_info last_stmt_info, tree *type_out,
   auto_vec<tree> dummy_vec;
   if (!vectype
       || !vecitype
-      || !supportable_widening_operation (wide_code, last_stmt,
+      || !supportable_widening_operation (wide_code, last_stmt_info,
                                           vecitype, vectype,
                                           &dummy_code, &dummy_code,
                                           &dummy_int, &dummy_vec))
@@ -3118,10 +3118,10 @@ vect_recog_mixed_size_cond_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
     return NULL;
   if ((TREE_CODE (then_clause) != INTEGER_CST
-       && !type_conversion_p (then_clause, last_stmt, false, &orig_type0,
+       && !type_conversion_p (then_clause, stmt_vinfo, false, &orig_type0,
                               &def_stmt0, &promotion))
       || (TREE_CODE (else_clause) != INTEGER_CST
-          && !type_conversion_p (else_clause, last_stmt, false, &orig_type1,
+          && !type_conversion_p (else_clause, stmt_vinfo, false, &orig_type1,
                                  &def_stmt1, &promotion)))
     return NULL;
@@ -3709,7 +3709,7 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
       if (check_bool_pattern (var, vinfo, bool_stmts))
         {
-          rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (lhs), last_stmt);
+          rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (lhs), stmt_vinfo);
           lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
           if (useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
             pattern_stmt = gimple_build_assign (lhs, SSA_NAME, rhs);
@@ -3776,7 +3776,7 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
       if (!check_bool_pattern (var, vinfo, bool_stmts))
         return NULL;
-      rhs = adjust_bool_stmts (bool_stmts, type, last_stmt);
+      rhs = adjust_bool_stmts (bool_stmts, type, stmt_vinfo);
       lhs = vect_recog_temp_ssa_var (TREE_TYPE (lhs), NULL);
       pattern_stmt
@@ -3800,7 +3800,7 @@ vect_recog_bool_pattern (stmt_vec_info stmt_vinfo, tree *type_out)
         return NULL;
       if (check_bool_pattern (var, vinfo, bool_stmts))
-        rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (vectype), last_stmt);
+        rhs = adjust_bool_stmts (bool_stmts, TREE_TYPE (vectype), stmt_vinfo);
       else
         {
          tree type = search_type_for_mask (var, vinfo);
@@ -4234,13 +4234,12 @@ vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out)
   /* Get the boolean that controls whether the load or store happens.
      This is null if the operation is unconditional.  */
-  gimple *stmt = stmt_info->stmt;
-  tree mask = vect_get_load_store_mask (stmt);
+  tree mask = vect_get_load_store_mask (stmt_info);
   /* Make sure that the target supports an appropriate internal
      function for the gather/scatter operation.  */
   gather_scatter_info gs_info;
-  if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info)
+  if (!vect_check_gather_scatter (stmt_info, loop_vinfo, &gs_info)
       || gs_info.decl)
     return NULL;
@@ -4273,7 +4272,7 @@ vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out)
     }
   else
     {
-      tree rhs = vect_get_store_rhs (stmt);
+      tree rhs = vect_get_store_rhs (stmt_info);
       if (mask != NULL)
        pattern_stmt = gimple_build_call_internal (IFN_MASK_SCATTER_STORE, 5,
                                                   base, offset, scale, rhs,
@@ -4295,7 +4294,7 @@ vect_recog_gather_scatter_pattern (stmt_vec_info stmt_info, tree *type_out)
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   *type_out = vectype;
-  vect_pattern_detected ("gather/scatter pattern", stmt);
+  vect_pattern_detected ("gather/scatter pattern", stmt_info->stmt);
   return pattern_stmt;
 }
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -2097,7 +2097,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
                                "Build SLP failed: unsupported load "
                                "permutation ");
               dump_gimple_stmt (MSG_MISSED_OPTIMIZATION,
-                                TDF_SLIM, stmt, 0);
+                                TDF_SLIM, stmt_info->stmt, 0);
             }
           vect_free_slp_instance (new_instance, false);
           return false;
@@ -2172,8 +2172,9 @@ vect_analyze_slp_instance (vec_info *vinfo,
       gcc_assert ((const_nunits & (const_nunits - 1)) == 0);
       unsigned group1_size = i & ~(const_nunits - 1);
-      gimple *rest = vect_split_slp_store_group (stmt, group1_size);
-      bool res = vect_analyze_slp_instance (vinfo, stmt, max_tree_size);
+      gimple *rest = vect_split_slp_store_group (stmt_info, group1_size);
+      bool res = vect_analyze_slp_instance (vinfo, stmt_info,
+                                            max_tree_size);
       /* If the first non-match was in the middle of a vector,
          skip the rest of that vector.  */
       if (group1_size < i)
@@ -2513,7 +2514,6 @@ vect_slp_analyze_node_operations_1 (vec_info *vinfo, slp_tree node,
                                    stmt_vector_for_cost *cost_vec)
 {
   stmt_vec_info stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
-  gimple *stmt = stmt_info->stmt;
   gcc_assert (STMT_SLP_TYPE (stmt_info) != loop_vect);
   /* For BB vectorization vector types are assigned here.
@@ -2567,7 +2567,7 @@ vect_slp_analyze_node_operations_1 (vec_info *vinfo, slp_tree node,
     }
   bool dummy;
-  return vect_analyze_stmt (stmt, &dummy, node, node_instance, cost_vec);
+  return vect_analyze_stmt (stmt_info, &dummy, node, node_instance, cost_vec);
 }
 /* Analyze statements contained in SLP tree NODE after recursively analyzing
......
...@@ -205,7 +205,7 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt, ...@@ -205,7 +205,7 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
{ {
dump_printf_loc (MSG_NOTE, vect_location, dump_printf_loc (MSG_NOTE, vect_location,
"mark relevant %d, live %d: ", relevant, live_p); "mark relevant %d, live %d: ", relevant, live_p);
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0); dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
} }
/* If this stmt is an original stmt in a pattern, we might need to mark its /* If this stmt is an original stmt in a pattern, we might need to mark its
...@@ -244,7 +244,7 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt, ...@@ -244,7 +244,7 @@ vect_mark_relevant (vec<gimple *> *worklist, gimple *stmt,
return; return;
} }
worklist->safe_push (stmt); worklist->safe_push (stmt_info);
} }
@@ -389,10 +389,10 @@ exist_non_indexing_operands_for_use_p (tree use, gimple *stmt)
      Therefore, all we need to check is if STMT falls into the
      first case, and whether var corresponds to USE.  */

-  gassign *assign = dyn_cast <gassign *> (stmt);
+  gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
   if (!assign || !gimple_assign_copy_p (assign))
     {
-      gcall *call = dyn_cast <gcall *> (stmt);
+      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
       if (call && gimple_call_internal_p (call))
         {
           internal_fn ifn = gimple_call_internal_fn (call);
@@ -463,7 +463,7 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
   /* case 1: we are only interested in uses that need to be vectorized.  Uses
      that are used for address computation are not considered relevant.  */
-  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt))
+  if (!force && !exist_non_indexing_operands_for_use_p (use, stmt_vinfo))
     return true;

   if (!vect_is_simple_use (use, loop_vinfo, &dt, &dstmt_vinfo))
@@ -484,8 +484,8 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
          only way that STMT, which is a reduction-phi, was put in the worklist,
          as there should be no other uses for DSTMT_VINFO in the loop.  So we just
          check that everything is as expected, and we are done.  */
-      bb = gimple_bb (stmt);
-      if (gimple_code (stmt) == GIMPLE_PHI
+      bb = gimple_bb (stmt_vinfo->stmt);
+      if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
          && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
          && gimple_code (dstmt_vinfo->stmt) != GIMPLE_PHI
          && STMT_VINFO_DEF_TYPE (dstmt_vinfo) == vect_reduction_def
@@ -576,10 +576,11 @@ process_use (gimple *stmt, tree use, loop_vec_info loop_vinfo,
          inductions.  Otherwise we'll needlessly vectorize the IV increment
          and cause hybrid SLP for SLP inductions.  Unless the PHI is live
          of course.  */
-      else if (gimple_code (stmt) == GIMPLE_PHI
+      else if (gimple_code (stmt_vinfo->stmt) == GIMPLE_PHI
               && STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_induction_def
               && ! STMT_VINFO_LIVE_P (stmt_vinfo)
-              && (PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (bb->loop_father))
+              && (PHI_ARG_DEF_FROM_EDGE (stmt_vinfo->stmt,
+                                         loop_latch_edge (bb->loop_father))
                   == use))
        {
          if (dump_enabled_p ())
@@ -740,7 +741,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
          /* Pattern statements are not inserted into the code, so
             FOR_EACH_PHI_OR_STMT_USE optimizes their operands out, and we
             have to scan the RHS or function arguments instead.  */
-         if (gassign *assign = dyn_cast <gassign *> (stmt))
+         if (gassign *assign = dyn_cast <gassign *> (stmt_vinfo->stmt))
           {
             enum tree_code rhs_code = gimple_assign_rhs_code (assign);
             tree op = gimple_assign_rhs1 (assign);
@@ -748,10 +749,10 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
             i = 1;
             if (rhs_code == COND_EXPR && COMPARISON_CLASS_P (op))
               {
-                if (!process_use (stmt, TREE_OPERAND (op, 0), loop_vinfo,
-                                  relevant, &worklist, false)
-                    || !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
-                                     relevant, &worklist, false))
+                if (!process_use (stmt_vinfo, TREE_OPERAND (op, 0),
+                                  loop_vinfo, relevant, &worklist, false)
+                    || !process_use (stmt_vinfo, TREE_OPERAND (op, 1),
+                                     loop_vinfo, relevant, &worklist, false))
                   return false;
                 i = 2;
               }
@@ -759,27 +760,27 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
               {
                 op = gimple_op (assign, i);
                 if (TREE_CODE (op) == SSA_NAME
-                    && !process_use (stmt, op, loop_vinfo, relevant,
+                    && !process_use (stmt_vinfo, op, loop_vinfo, relevant,
                                      &worklist, false))
                   return false;
               }
           }
-         else if (gcall *call = dyn_cast <gcall *> (stmt))
+         else if (gcall *call = dyn_cast <gcall *> (stmt_vinfo->stmt))
           {
             for (i = 0; i < gimple_call_num_args (call); i++)
               {
                 tree arg = gimple_call_arg (call, i);
-                if (!process_use (stmt, arg, loop_vinfo, relevant,
+                if (!process_use (stmt_vinfo, arg, loop_vinfo, relevant,
                                   &worklist, false))
                   return false;
               }
           }
        }
      else
-       FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
+       FOR_EACH_PHI_OR_STMT_USE (use_p, stmt_vinfo->stmt, iter, SSA_OP_USE)
          {
            tree op = USE_FROM_PTR (use_p);
-           if (!process_use (stmt, op, loop_vinfo, relevant,
+           if (!process_use (stmt_vinfo, op, loop_vinfo, relevant,
                              &worklist, false))
              return false;
          }
@@ -787,9 +788,9 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
      if (STMT_VINFO_GATHER_SCATTER_P (stmt_vinfo))
        {
          gather_scatter_info gs_info;
-         if (!vect_check_gather_scatter (stmt, loop_vinfo, &gs_info))
+         if (!vect_check_gather_scatter (stmt_vinfo, loop_vinfo, &gs_info))
           gcc_unreachable ();
-         if (!process_use (stmt, gs_info.offset, loop_vinfo, relevant,
+         if (!process_use (stmt_vinfo, gs_info.offset, loop_vinfo, relevant,
                            &worklist, true))
           return false;
        }
@@ -1362,7 +1363,7 @@ vect_init_vector_1 (gimple *stmt, gimple *new_stmt, gimple_stmt_iterator *gsi)
      basic_block new_bb;
      edge pe;

-     if (nested_in_vect_loop_p (loop, stmt))
+     if (nested_in_vect_loop_p (loop, stmt_vinfo))
       loop = loop->inner;

      pe = loop_preheader_edge (loop);
@@ -1573,7 +1574,7 @@ vect_get_vec_def_for_operand (tree op, gimple *stmt, tree vectype)
        vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
      gcc_assert (vector_type);
-     return vect_init_vector (stmt, op, vector_type, NULL);
+     return vect_init_vector (stmt_vinfo, op, vector_type, NULL);
    }
  else
    return vect_get_vec_def_for_operand_1 (def_stmt_info, dt);
@@ -1740,12 +1741,12 @@ vect_finish_stmt_generation_1 (gimple *stmt, gimple *vec_stmt)
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
    }

-  gimple_set_location (vec_stmt, gimple_location (stmt));
+  gimple_set_location (vec_stmt, gimple_location (stmt_info->stmt));

  /* While EH edges will generally prevent vectorization, stmt might
     e.g. be in a must-not-throw region.  Ensure newly created stmts
     that could throw are part of the same region.  */
-  int lp_nr = lookup_stmt_eh_lp (stmt);
+  int lp_nr = lookup_stmt_eh_lp (stmt_info->stmt);
  if (lp_nr != 0 && stmt_could_throw_p (vec_stmt))
    add_stmt_to_eh_lp (vec_stmt, lp_nr);
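For vect_finish_stmt_generation_1, the two raw-statement uses that remain are exactly the properties a new vector statement must inherit from the scalar one: its source location and, when the scalar statement sat in an EH region, membership in the same landing-pad region. A toy model of that bookkeeping, with simplified fields rather than GCC's API:

    // Toy model: copy the location, and the EH region only if the original
    // was in one and the new statement can actually throw.
    #include <cassert>

    struct location_t { int line = 0; };
    struct stmt {
      location_t loc;
      int eh_lp_nr = 0;        // 0 == not in an EH landing-pad region
      bool can_throw = false;
    };

    static void
    finish_vec_stmt (const stmt &scalar, stmt &vec)
    {
      vec.loc = scalar.loc;                       // gimple_set_location analogue
      if (scalar.eh_lp_nr != 0 && vec.can_throw)  // lookup_stmt_eh_lp analogue
        vec.eh_lp_nr = scalar.eh_lp_nr;           // add_stmt_to_eh_lp analogue
    }

    int main ()
    {
      stmt scalar;  scalar.loc.line = 42;  scalar.eh_lp_nr = 7;
      stmt vec;     vec.can_throw = true;
      finish_vec_stmt (scalar, vec);
      assert (vec.loc.line == 42 && vec.eh_lp_nr == 7);
      return 0;
    }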
@@ -2269,7 +2270,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
      if (!STMT_VINFO_STRIDED_P (stmt_info)
          && (can_overrun_p || !would_overrun_p)
-         && compare_step_with_zero (stmt) > 0)
+         && compare_step_with_zero (stmt_info) > 0)
        {
          /* First cope with the degenerate case of a single-element
             vector.  */
@@ -2309,7 +2310,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
  if (*memory_access_type == VMAT_ELEMENTWISE
      && single_element_p
      && loop_vinfo
-      && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
+      && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
                                             masked_p, gs_info))
    *memory_access_type = VMAT_GATHER_SCATTER;
 }
@@ -2421,7 +2422,7 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
  if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
    {
      *memory_access_type = VMAT_GATHER_SCATTER;
-      if (!vect_check_gather_scatter (stmt, loop_vinfo, gs_info))
+      if (!vect_check_gather_scatter (stmt_info, loop_vinfo, gs_info))
        gcc_unreachable ();
      else if (!vect_is_simple_use (gs_info->offset, vinfo,
                                    &gs_info->offset_dt,
@@ -2436,15 +2437,15 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
    }
  else if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
    {
-      if (!get_group_load_store_type (stmt, vectype, slp, masked_p, vls_type,
-                                      memory_access_type, gs_info))
+      if (!get_group_load_store_type (stmt_info, vectype, slp, masked_p,
+                                      vls_type, memory_access_type, gs_info))
        return false;
    }
  else if (STMT_VINFO_STRIDED_P (stmt_info))
    {
      gcc_assert (!slp);
      if (loop_vinfo
-         && vect_use_strided_gather_scatters_p (stmt, loop_vinfo,
+         && vect_use_strided_gather_scatters_p (stmt_info, loop_vinfo,
                                                 masked_p, gs_info))
        *memory_access_type = VMAT_GATHER_SCATTER;
      else
@@ -2452,10 +2453,10 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
    }
  else
    {
-      int cmp = compare_step_with_zero (stmt);
+      int cmp = compare_step_with_zero (stmt_info);
      if (cmp < 0)
        *memory_access_type = get_negative_load_store_type
-         (stmt, vectype, vls_type, ncopies);
+         (stmt_info, vectype, vls_type, ncopies);
      else if (cmp == 0)
        {
          gcc_assert (vls_type == VLS_LOAD);
@@ -2742,8 +2743,8 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
  else
    gcc_unreachable ();

-  tree vec_dest = vect_create_destination_var (gimple_get_lhs (stmt),
-                                               vectype);
+  tree scalar_dest = gimple_get_lhs (stmt_info->stmt);
+  tree vec_dest = vect_create_destination_var (scalar_dest, vectype);

  tree ptr = fold_convert (ptrtype, gs_info->base);
  if (!is_gimple_min_invariant (ptr))
@@ -2765,8 +2766,8 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,

  if (!mask)
    {
-      src_op = vect_build_zero_merge_argument (stmt, rettype);
-      mask_op = vect_build_all_ones_mask (stmt, masktype);
+      src_op = vect_build_zero_merge_argument (stmt_info, rettype);
+      mask_op = vect_build_all_ones_mask (stmt_info, masktype);
    }

  for (int j = 0; j < ncopies; ++j)
@@ -2774,10 +2775,10 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
      tree op, var;
      if (modifier == WIDEN && (j & 1))
        op = permute_vec_elements (vec_oprnd0, vec_oprnd0,
-                                  perm_mask, stmt, gsi);
+                                  perm_mask, stmt_info, gsi);
      else if (j == 0)
        op = vec_oprnd0
-         = vect_get_vec_def_for_operand (gs_info->offset, stmt);
+         = vect_get_vec_def_for_operand (gs_info->offset, stmt_info);
      else
        op = vec_oprnd0
          = vect_get_vec_def_for_stmt_copy (gs_info->offset_dt, vec_oprnd0);
@@ -2789,7 +2790,7 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
          var = vect_get_new_ssa_name (idxtype, vect_simple_var);
          op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
          gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
-         vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          op = var;
        }
@@ -2797,11 +2798,11 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
        {
          if (mask_perm_mask && (j & 1))
            mask_op = permute_vec_elements (mask_op, mask_op,
-                                           mask_perm_mask, stmt, gsi);
+                                           mask_perm_mask, stmt_info, gsi);
          else
            {
              if (j == 0)
-               vec_mask = vect_get_vec_def_for_operand (mask, stmt);
+               vec_mask = vect_get_vec_def_for_operand (mask, stmt_info);
              else
                vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
@@ -2815,7 +2816,7 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
                  mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
                  gassign *new_stmt
                    = gimple_build_assign (var, VIEW_CONVERT_EXPR, mask_op);
-                 vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                 vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                  mask_op = var;
                }
            }
@@ -2832,17 +2833,19 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
                                    TYPE_VECTOR_SUBPARTS (rettype)));
          op = vect_get_new_ssa_name (rettype, vect_simple_var);
          gimple_call_set_lhs (new_call, op);
-         vect_finish_stmt_generation (stmt, new_call, gsi);
+         vect_finish_stmt_generation (stmt_info, new_call, gsi);
          var = make_ssa_name (vec_dest);
          op = build1 (VIEW_CONVERT_EXPR, vectype, op);
          gassign *new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
-         new_stmt_info = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         new_stmt_info
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
        }
      else
        {
          var = make_ssa_name (vec_dest, new_call);
          gimple_call_set_lhs (new_call, var);
-         new_stmt_info = vect_finish_stmt_generation (stmt, new_call, gsi);
+         new_stmt_info
+           = vect_finish_stmt_generation (stmt_info, new_call, gsi);
        }

      if (modifier == NARROW)
@@ -2852,7 +2855,8 @@ vect_build_gather_load_calls (gimple *stmt, gimple_stmt_iterator *gsi,
              prev_res = var;
              continue;
            }
-         var = permute_vec_elements (prev_res, var, perm_mask, stmt, gsi);
+         var = permute_vec_elements (prev_res, var, perm_mask,
+                                     stmt_info, gsi);
          new_stmt_info = loop_vinfo->lookup_def (var);
        }
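In the NARROW case of vect_build_gather_load_calls above, each gather call fills a vector of the wider element type, and only every second copy (odd j) emits a statement: a permute that combines the previous result with the current one into a single vector of the narrow vectype. A toy model of that pairing; the concrete lane counts are illustrative only:

    // Toy model: two 4-lane "wide" gather results are combined into one
    // 8-lane "narrow" vector; even iterations just remember their result.
    #include <array>
    #include <cstdio>
    #include <vector>

    using wide4   = std::array<long, 4>;   // one gather call's result
    using narrow8 = std::array<int, 8>;    // the narrow vectype

    static narrow8
    permute_concat (const wide4 &lo, const wide4 &hi)
    {
      narrow8 out{};
      for (int i = 0; i < 4; ++i)
        {
          out[i] = (int) lo[i];        // lanes from the previous copy
          out[4 + i] = (int) hi[i];    // lanes from the current copy
        }
      return out;
    }

    int main ()
    {
      std::vector<wide4> gathers = { {0, 1, 2, 3}, {4, 5, 6, 7} };
      wide4 prev_res{};
      for (size_t j = 0; j < gathers.size (); ++j)
        if ((j & 1) == 0)
          prev_res = gathers[j];       // even j: stash and continue
        else
          {
            narrow8 v = permute_concat (prev_res, gathers[j]);
            for (int x : v) std::printf ("%d ", x);   // 0 1 2 3 4 5 6 7
            std::printf ("\n");
          }
      return 0;
    }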
@@ -3027,7 +3031,7 @@ vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
    {
      /* Handle uses.  */
      if (j == 0)
-       vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
+       vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
@@ -3040,15 +3044,16 @@ vectorizable_bswap (gimple *stmt, gimple_stmt_iterator *gsi,
          tree tem = make_ssa_name (char_vectype);
          new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
                                                       char_vectype, vop));
-         vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          tree tem2 = make_ssa_name (char_vectype);
          new_stmt = gimple_build_assign (tem2, VEC_PERM_EXPR,
                                          tem, tem, bswap_vconst);
-         vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          tem = make_ssa_name (vectype);
          new_stmt = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
                                                       vectype, tem2));
-         new_stmt_info = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         new_stmt_info
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
        }
@@ -3137,8 +3142,8 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
      && ! vec_stmt)
    return false;

-  /* Is GS a vectorizable call?   */
-  stmt = dyn_cast <gcall *> (gs);
+  /* Is STMT_INFO a vectorizable call?   */
+  stmt = dyn_cast <gcall *> (stmt_info->stmt);
  if (!stmt)
    return false;
@@ -3307,7 +3312,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
      && (gimple_call_builtin_p (stmt, BUILT_IN_BSWAP16)
          || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP32)
          || gimple_call_builtin_p (stmt, BUILT_IN_BSWAP64)))
-    return vectorizable_bswap (stmt, gsi, vec_stmt, slp_node,
+    return vectorizable_bswap (stmt_info, gsi, vec_stmt, slp_node,
                               vectype_in, dt, cost_vec);
  else
    {
@@ -3400,7 +3405,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
                      gimple_call_set_lhs (call, half_res);
                      gimple_call_set_nothrow (call, true);
                      new_stmt_info
-                       = vect_finish_stmt_generation (stmt, call, gsi);
+                       = vect_finish_stmt_generation (stmt_info, call, gsi);
                      if ((i & 1) == 0)
                        {
                          prev_res = half_res;
@@ -3411,7 +3416,8 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
                        = gimple_build_assign (new_temp, convert_code,
                                               prev_res, half_res);
                      new_stmt_info
-                       = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                       = vect_finish_stmt_generation (stmt_info, new_stmt,
+                                                      gsi);
                    }
                  else
                    {
@@ -3435,7 +3441,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
                  gimple_call_set_lhs (call, new_temp);
                  gimple_call_set_nothrow (call, true);
                  new_stmt_info
-                   = vect_finish_stmt_generation (stmt, call, gsi);
+                   = vect_finish_stmt_generation (stmt_info, call, gsi);
                }
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
            }
@@ -3453,7 +3459,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
              op = gimple_call_arg (stmt, i);
              if (j == 0)
                vec_oprnd0
-                 = vect_get_vec_def_for_operand (op, stmt);
+                 = vect_get_vec_def_for_operand (op, stmt_info);
              else
                vec_oprnd0
                  = vect_get_vec_def_for_stmt_copy (dt[i], orig_vargs[i]);
@@ -3476,11 +3482,11 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
              tree new_var
                = vect_get_new_ssa_name (vectype_out, vect_simple_var, "cst_");
              gimple *init_stmt = gimple_build_assign (new_var, cst);
-             vect_init_vector_1 (stmt, init_stmt, NULL);
+             vect_init_vector_1 (stmt_info, init_stmt, NULL);
              new_temp = make_ssa_name (vec_dest);
              gimple *new_stmt = gimple_build_assign (new_temp, new_var);
              new_stmt_info
-               = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+               = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
            }
          else if (modifier == NARROW)
            {
@@ -3491,7 +3497,8 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
              gcall *call = gimple_build_call_internal_vec (ifn, vargs);
              gimple_call_set_lhs (call, half_res);
              gimple_call_set_nothrow (call, true);
-             new_stmt_info = vect_finish_stmt_generation (stmt, call, gsi);
+             new_stmt_info
+               = vect_finish_stmt_generation (stmt_info, call, gsi);
              if ((j & 1) == 0)
                {
                  prev_res = half_res;
@@ -3501,7 +3508,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
              gassign *new_stmt = gimple_build_assign (new_temp, convert_code,
                                                       prev_res, half_res);
              new_stmt_info
-               = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+               = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
            }
          else
            {
@@ -3513,7 +3520,8 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
              new_temp = make_ssa_name (vec_dest, call);
              gimple_call_set_lhs (call, new_temp);
              gimple_call_set_nothrow (call, true);
-             new_stmt_info = vect_finish_stmt_generation (stmt, call, gsi);
+             new_stmt_info
+               = vect_finish_stmt_generation (stmt_info, call, gsi);
            }

          if (j == (modifier == NARROW ? 1 : 0))
@@ -3566,7 +3574,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
              gimple_call_set_lhs (call, new_temp);
              gimple_call_set_nothrow (call, true);
              new_stmt_info
-               = vect_finish_stmt_generation (stmt, call, gsi);
+               = vect_finish_stmt_generation (stmt_info, call, gsi);
              SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
            }
@@ -3584,7 +3592,7 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
              if (j == 0)
                {
                  vec_oprnd0
-                   = vect_get_vec_def_for_operand (op, stmt);
+                   = vect_get_vec_def_for_operand (op, stmt_info);
                  vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (dt[i], vec_oprnd0);
                }
@@ -3605,7 +3613,8 @@ vectorizable_call (gimple *gs, gimple_stmt_iterator *gsi,
          gcall *new_stmt = gimple_build_call_vec (fndecl, vargs);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_call_set_lhs (new_stmt, new_temp);
-         new_stmt_info = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         new_stmt_info
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          if (j == 0)
            STMT_VINFO_VEC_STMT (stmt_info) = new_stmt_info;
@@ -3793,7 +3802,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
  vectype = STMT_VINFO_VECTYPE (stmt_info);

-  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt))
+  if (loop_vinfo && nested_in_vect_loop_p (loop, stmt_info))
    return false;

  /* FORNOW */
@@ -4098,7 +4107,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                  gcc_assert ((k & (k - 1)) == 0);
                  if (m == 0)
                    vec_oprnd0
-                     = vect_get_vec_def_for_operand (op, stmt);
+                     = vect_get_vec_def_for_operand (op, stmt_info);
                  else
                    {
                      vec_oprnd0 = arginfo[i].op;
@@ -4115,7 +4124,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                      gassign *new_stmt
                        = gimple_build_assign (make_ssa_name (atype),
                                               vec_oprnd0);
-                     vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                     vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                      vargs.safe_push (gimple_assign_lhs (new_stmt));
                    }
                  else
@@ -4132,7 +4141,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                        {
                          if (m == 0 && l == 0)
                            vec_oprnd0
-                             = vect_get_vec_def_for_operand (op, stmt);
+                             = vect_get_vec_def_for_operand (op, stmt_info);
                          else
                            vec_oprnd0
                              = vect_get_vec_def_for_stmt_copy (arginfo[i].dt,
@@ -4151,7 +4160,8 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                          gassign *new_stmt
                            = gimple_build_assign (make_ssa_name (atype),
                                                   vec_oprnd0);
-                         vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                         vect_finish_stmt_generation (stmt_info, new_stmt,
+                                                      gsi);
                          vargs.safe_push (gimple_assign_lhs (new_stmt));
                        }
                    }
@@ -4220,7 +4230,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
              gassign *new_stmt
                = gimple_build_assign (new_temp, code,
                                       arginfo[i].op, tcst);
-             vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
              vargs.safe_push (new_temp);
            }
            break;
@@ -4249,7 +4259,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
          gimple_call_set_lhs (new_call, new_temp);
        }
      stmt_vec_info new_stmt_info
-       = vect_finish_stmt_generation (stmt, new_call, gsi);
+       = vect_finish_stmt_generation (stmt_info, new_call, gsi);

      if (vec_dest)
        {
@@ -4275,7 +4285,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                  gimple *new_stmt
                    = gimple_build_assign (make_ssa_name (vectype), t);
                  new_stmt_info
-                   = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

                  if (j == 0 && l == 0)
                    STMT_VINFO_VEC_STMT (stmt_info)
@@ -4287,7 +4297,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                }

              if (ratype)
-               vect_clobber_variable (stmt, gsi, new_temp);
+               vect_clobber_variable (stmt_info, gsi, new_temp);
              continue;
            }
          else if (simd_clone_subparts (vectype) > nunits)
@@ -4307,11 +4317,12 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
                      gimple *new_stmt
                        = gimple_build_assign (make_ssa_name (rtype), tem);
                      new_stmt_info
-                       = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                       = vect_finish_stmt_generation (stmt_info, new_stmt,
+                                                      gsi);
                      CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE,
                                              gimple_assign_lhs (new_stmt));
                    }
-                 vect_clobber_variable (stmt, gsi, new_temp);
+                 vect_clobber_variable (stmt_info, gsi, new_temp);
                }
              else
                CONSTRUCTOR_APPEND_ELT (ret_ctor_elts, NULL_TREE, new_temp);
@@ -4321,7 +4332,7 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
              gimple *new_stmt
                = gimple_build_assign (make_ssa_name (vec_dest), vec_oprnd0);
              new_stmt_info
-               = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+               = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

              if ((unsigned) j == k - 1)
                STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
@@ -4339,8 +4350,8 @@ vectorizable_simd_clone_call (gimple *stmt, gimple_stmt_iterator *gsi,
          gimple *new_stmt
            = gimple_build_assign (make_ssa_name (vec_dest), t);
          new_stmt_info
-           = vect_finish_stmt_generation (stmt, new_stmt, gsi);
-         vect_clobber_variable (stmt, gsi, new_temp);
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
+         vect_clobber_variable (stmt_info, gsi, new_temp);
        }
    }
@@ -4493,7 +4504,7 @@ vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
      new_tmp = make_ssa_name (vec_dest, new_stmt);
      gimple_assign_set_lhs (new_stmt, new_tmp);
      stmt_vec_info new_stmt_info
-       = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+       = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

      if (multi_step_cvt)
        /* Store the resulting vector for next recursive call.  */
@@ -4527,8 +4538,8 @@ vect_create_vectorized_demotion_stmts (vec<tree> *vec_oprnds,
         previous level.  */
      vec_oprnds->truncate ((i+1)/2);
      vect_create_vectorized_demotion_stmts (vec_oprnds, multi_step_cvt - 1,
-                                            stmt, vec_dsts, gsi, slp_node,
-                                            VEC_PACK_TRUNC_EXPR,
+                                            stmt_info, vec_dsts, gsi,
+                                            slp_node, VEC_PACK_TRUNC_EXPR,
                                             prev_stmt_info);
    }
@@ -4793,9 +4804,9 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
      return false;

    case WIDEN:
-      if (supportable_widening_operation (code, stmt, vectype_out, vectype_in,
-                                          &code1, &code2, &multi_step_cvt,
-                                          &interm_types))
+      if (supportable_widening_operation (code, stmt_info, vectype_out,
+                                          vectype_in, &code1, &code2,
+                                          &multi_step_cvt, &interm_types))
        {
          /* Binary widening operation can only be supported directly by the
             architecture.  */
@@ -4826,15 +4837,16 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                                           cvt_type, &decl1, &codecvt1))
                goto unsupported;
            }
-         else if (!supportable_widening_operation (code, stmt, vectype_out,
-                                                   cvt_type, &codecvt1,
-                                                   &codecvt2, &multi_step_cvt,
+         else if (!supportable_widening_operation (code, stmt_info,
+                                                   vectype_out, cvt_type,
+                                                   &codecvt1, &codecvt2,
+                                                   &multi_step_cvt,
                                                    &interm_types))
            continue;
          else
            gcc_assert (multi_step_cvt == 0);

-         if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
+         if (supportable_widening_operation (NOP_EXPR, stmt_info, cvt_type,
                                              vectype_in, &code1, &code2,
                                              &multi_step_cvt, &interm_types))
            {
@@ -4973,7 +4985,8 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
      for (j = 0; j < ncopies; j++)
        {
          if (j == 0)
-           vect_get_vec_defs (op0, NULL, stmt, &vec_oprnds0, NULL, slp_node);
+           vect_get_vec_defs (op0, NULL, stmt_info, &vec_oprnds0,
+                              NULL, slp_node);
          else
            vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
@@ -4987,7 +5000,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                  new_stmt_info
-                   = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                }
              else
                {
@@ -4997,7 +5010,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_assign_set_lhs (new_stmt, new_temp);
                  new_stmt_info
-                   = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
                }

              if (slp_node)
@@ -5038,23 +5051,24 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                  for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
                    vec_oprnds1.quick_push (vec_oprnd1);

-                 vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
-                                    slp_node);
+                 vect_get_vec_defs (op0, NULL_TREE, stmt_info,
+                                    &vec_oprnds0, NULL, slp_node);
                }
              else
-               vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0,
+               vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
                                   &vec_oprnds1, slp_node);
            }
          else
            {
-             vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt);
+             vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt_info);
              vec_oprnds0.quick_push (vec_oprnd0);
              if (op_type == binary_op)
                {
                  if (code == WIDEN_LSHIFT_EXPR)
                    vec_oprnd1 = op1;
                  else
-                   vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt);
+                   vec_oprnd1
+                     = vect_get_vec_def_for_operand (op1, stmt_info);
                  vec_oprnds1.quick_push (vec_oprnd1);
                }
            }
@@ -5087,8 +5101,8 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                  c2 = codecvt2;
                }
              vect_create_vectorized_promotion_stmts (&vec_oprnds0,
-                                                     &vec_oprnds1,
-                                                     stmt, this_dest, gsi,
+                                                     &vec_oprnds1, stmt_info,
+                                                     this_dest, gsi,
                                                      c1, c2, decl1, decl2,
                                                      op_type);
            }
@@ -5104,7 +5118,8 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                  new_temp = make_ssa_name (vec_dest, new_stmt);
                  gimple_call_set_lhs (new_stmt, new_temp);
                  new_stmt_info
-                   = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   = vect_finish_stmt_generation (stmt_info, new_stmt,
+                                                  gsi);
                }
              else
                {
@@ -5113,7 +5128,8 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
                  gassign *new_stmt
                    = gimple_build_assign (new_temp, codecvt1, vop0);
                  new_stmt_info
-                   = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+                   = vect_finish_stmt_generation (stmt_info, new_stmt,
                                                   gsi);
                }
            }
          else
@@ -5144,12 +5160,13 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
        {
          /* Handle uses.  */
          if (slp_node)
-           vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+           vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
                               slp_node);
          else
            {
              vec_oprnds0.truncate (0);
-             vect_get_loop_based_defs (&last_oprnd, stmt, dt[0], &vec_oprnds0,
+             vect_get_loop_based_defs (&last_oprnd, stmt_info, dt[0],
+                                       &vec_oprnds0,
                                        vect_pow2 (multi_step_cvt) - 1);
            }
@@ -5162,7 +5179,7 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
              gcall *new_stmt = gimple_build_call (decl1, 1, vop0);
              new_temp = make_ssa_name (vec_dest, new_stmt);
              gimple_call_set_lhs (new_stmt, new_temp);
-             vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
            }
          else
            {
@@ -5170,14 +5187,14 @@ vectorizable_conversion (gimple *stmt, gimple_stmt_iterator *gsi,
              new_temp = make_ssa_name (vec_dest);
              gassign *new_stmt
                = gimple_build_assign (new_temp, codecvt1, vop0);
-             vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
            }

          vec_oprnds0[i] = new_temp;
        }

      vect_create_vectorized_demotion_stmts (&vec_oprnds0, multi_step_cvt,
-                                            stmt, vec_dsts, gsi,
+                                            stmt_info, vec_dsts, gsi,
                                             slp_node, code1,
                                             &prev_stmt_info);
    }
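The recursion in vect_create_vectorized_demotion_stmts above halves the number of vectors per level: each VEC_PACK_TRUNC_EXPR packs two source vectors into one vector of a narrower element type, and the function recurses multi_step_cvt times on the intermediate results. A toy model of the level-by-level packing; lane contents stand in for real truncations:

    // Toy model: each level packs pairs of vectors into half as many
    // vectors with twice the lanes, mimicking VEC_PACK_TRUNC_EXPR chains.
    #include <cstdio>
    #include <vector>

    using vec = std::vector<int>;

    static std::vector<vec>
    pack_level (const std::vector<vec> &in)
    {
      std::vector<vec> out;
      for (size_t i = 0; i + 1 < in.size (); i += 2)
        {
          vec packed (in[i]);          // two sources ...
          packed.insert (packed.end (), in[i + 1].begin (), in[i + 1].end ());
          out.push_back (packed);      // ... one (narrower-element) dest
        }
      return out;
    }

    int main ()
    {
      // Four 2-lane vectors of a wide type -> one 8-lane narrow vector.
      std::vector<vec> ops = { {0, 1}, {2, 3}, {4, 5}, {6, 7} };
      for (int level = 0; level < 2; ++level)   // multi_step_cvt == 2 levels
        ops = pack_level (ops);
      std::printf ("%zu vector(s) of %zu lanes\n", ops.size (), ops[0].size ());
      return 0;
    }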
@@ -5324,7 +5341,7 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
    {
      /* Handle uses.  */
      if (j == 0)
-       vect_get_vec_defs (op, NULL, stmt, &vec_oprnds, NULL, slp_node);
+       vect_get_vec_defs (op, NULL, stmt_info, &vec_oprnds, NULL, slp_node);
      else
        vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds, NULL);
@@ -5338,7 +5355,8 @@ vectorizable_assignment (gimple *stmt, gimple_stmt_iterator *gsi,
          gassign *new_stmt = gimple_build_assign (vec_dest, vop);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
-         new_stmt_info = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         new_stmt_info
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
        }
@@ -5623,7 +5641,7 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
          if (vec_stmt && !slp_node)
            {
              op1 = fold_convert (TREE_TYPE (vectype), op1);
-             op1 = vect_init_vector (stmt, op1,
+             op1 = vect_init_vector (stmt_info, op1,
                                      TREE_TYPE (vectype), NULL);
            }
        }
@@ -5722,10 +5740,10 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
             (a special case for certain kind of vector shifts); otherwise,
             operand 1 should be of a vector type (the usual case).  */
          if (vec_oprnd1)
-           vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+           vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
                               slp_node);
          else
-           vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
+           vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
        }
      else
@@ -5739,7 +5757,8 @@ vectorizable_shift (gimple *stmt, gimple_stmt_iterator *gsi,
          gassign *new_stmt = gimple_build_assign (vec_dest, code, vop0, vop1);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
-         new_stmt_info = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         new_stmt_info
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
        }
@@ -6076,7 +6095,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
      if (j == 0)
        {
          if (op_type == binary_op)
-           vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
+           vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0, &vec_oprnds1,
                               slp_node);
          else if (op_type == ternary_op)
            {
@@ -6094,14 +6113,14 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
            }
          else
            {
-             vect_get_vec_defs (op0, op1, stmt, &vec_oprnds0, &vec_oprnds1,
-                                NULL);
-             vect_get_vec_defs (op2, NULL_TREE, stmt, &vec_oprnds2, NULL,
-                                NULL);
+             vect_get_vec_defs (op0, op1, stmt_info, &vec_oprnds0,
+                                &vec_oprnds1, NULL);
+             vect_get_vec_defs (op2, NULL_TREE, stmt_info, &vec_oprnds2,
+                                NULL, NULL);
            }
        }
      else
-       vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL,
+       vect_get_vec_defs (op0, NULL_TREE, stmt_info, &vec_oprnds0, NULL,
                           slp_node);
    }
  else
@@ -6127,7 +6146,8 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
                                           vop0, vop1, vop2);
          new_temp = make_ssa_name (vec_dest, new_stmt);
          gimple_assign_set_lhs (new_stmt, new_temp);
-         new_stmt_info = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+         new_stmt_info
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
          if (vec_cvt_dest)
            {
              new_temp = build1 (VIEW_CONVERT_EXPR, vectype_out, new_temp);
@@ -6137,7 +6157,7 @@ vectorizable_operation (gimple *stmt, gimple_stmt_iterator *gsi,
              new_temp = make_ssa_name (vec_cvt_dest, new_stmt);
              gimple_assign_set_lhs (new_stmt, new_temp);
              new_stmt_info
-               = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+               = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
            }
          if (slp_node)
            SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
@@ -6275,7 +6295,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
  /* Is vectorizable store? */

  tree mask = NULL_TREE, mask_vectype = NULL_TREE;
-  if (gassign *assign = dyn_cast <gassign *> (stmt))
+  if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
    {
      tree scalar_dest = gimple_assign_lhs (assign);
      if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
@@ -6292,7 +6312,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
    }
  else
    {
-      gcall *call = dyn_cast <gcall *> (stmt);
+      gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
      if (!call || !gimple_call_internal_p (call))
        return false;
@@ -6312,13 +6332,13 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
      if (mask_index >= 0)
        {
          mask = gimple_call_arg (call, mask_index);
-         if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
+         if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
                                           &mask_vectype))
            return false;
        }
    }

-  op = vect_get_store_rhs (stmt);
+  op = vect_get_store_rhs (stmt_info);

  /* Cannot have hybrid store SLP -- that would mean storing to the
     same location twice.  */
@@ -6346,7 +6366,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
  gcc_assert (ncopies >= 1);

  /* FORNOW.  This restriction should be relaxed.  */
-  if (loop && nested_in_vect_loop_p (loop, stmt) && ncopies > 1)
+  if (loop && nested_in_vect_loop_p (loop, stmt_info) && ncopies > 1)
    {
      if (dump_enabled_p ())
        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -6354,7 +6374,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
      return false;
    }

-  if (!vect_check_store_rhs (stmt, op, &rhs_dt, &rhs_vectype, &vls_type))
+  if (!vect_check_store_rhs (stmt_info, op, &rhs_dt, &rhs_vectype, &vls_type))
    return false;

  elem_type = TREE_TYPE (vectype);
@@ -6364,7 +6384,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
    return false;

  vect_memory_access_type memory_access_type;
-  if (!get_load_store_type (stmt, vectype, slp, mask, vls_type, ncopies,
+  if (!get_load_store_type (stmt_info, vectype, slp, mask, vls_type, ncopies,
                            &memory_access_type, &gs_info))
    return false;
@@ -6501,7 +6521,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
      /* Currently we support only unconditional scatter stores,
         so mask should be all ones.  */
      mask = build_int_cst (masktype, -1);
-      mask = vect_init_vector (stmt, mask, masktype, NULL);
+      mask = vect_init_vector (stmt_info, mask, masktype, NULL);

      scale = build_int_cst (scaletype, gs_info.scale);
@@ -6511,9 +6531,9 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
          if (j == 0)
            {
              src = vec_oprnd1
-               = vect_get_vec_def_for_operand (op, stmt);
+               = vect_get_vec_def_for_operand (op, stmt_info);
              op = vec_oprnd0
-               = vect_get_vec_def_for_operand (gs_info.offset, stmt);
+               = vect_get_vec_def_for_operand (gs_info.offset, stmt_info);
            }
          else if (modifier != NONE && (j & 1))
            {
@@ -6522,12 +6542,12 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
                  src = vec_oprnd1
                    = vect_get_vec_def_for_stmt_copy (rhs_dt, vec_oprnd1);
                  op = permute_vec_elements (vec_oprnd0, vec_oprnd0, perm_mask,
-                                            stmt, gsi);
+                                            stmt_info, gsi);
                }
              else if (modifier == NARROW)
                {
                  src = permute_vec_elements (vec_oprnd1, vec_oprnd1, perm_mask,
-                                             stmt, gsi);
+                                             stmt_info, gsi);
                  op = vec_oprnd0
                    = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
                                                      vec_oprnd0);
@@ -6552,7 +6572,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
              src = build1 (VIEW_CONVERT_EXPR, srctype, src);
              gassign *new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
-             vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
              src = var;
            }
@@ -6564,14 +6584,14 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
              op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
              gassign *new_stmt
                = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
-             vect_finish_stmt_generation (stmt, new_stmt, gsi);
+             vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
              op = var;
            }

          gcall *new_stmt
            = gimple_build_call (gs_info.decl, 5, ptr, mask, op, src, scale);
          stmt_vec_info new_stmt_info
-           = vect_finish_stmt_generation (stmt, new_stmt, gsi);
+           = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);

          if (prev_stmt_info == NULL_STMT_VEC_INFO)
            STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt_info;
@@ -6588,7 +6608,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
  if (grouped_store)
    {
      /* FORNOW */
-      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt));
+      gcc_assert (!loop || !nested_in_vect_loop_p (loop, stmt_info));

      /* We vectorize all the stmts of the interleaving group when we
         reach the last stmt in the group.  */
@@ -6642,7 +6662,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
      unsigned int const_nunits = nunits.to_constant ();

      gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
-      gcc_assert (!nested_in_vect_loop_p (loop, stmt));
+      gcc_assert (!nested_in_vect_loop_p (loop, stmt_info));

      stride_base
        = fold_build_pointer_plus
@@ -6768,7 +6788,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
              tree newoff = copy_ssa_name (running_off, NULL);
              incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                          running_off, pos);
-             vect_finish_stmt_generation (stmt, incr, gsi);
+             vect_finish_stmt_generation (stmt_info, incr, gsi);
              running_off = newoff;
            }
          unsigned int group_el = 0;
@@ -6782,8 +6802,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
            {
              if (slp)
                {
-                 vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds, NULL,
-                                    slp_node);
+                 vect_get_vec_defs (op, NULL_TREE, stmt_info,
+                                    &vec_oprnds, NULL, slp_node);
                  vec_oprnd = vec_oprnds[0];
                }
              else
@@ -6811,7 +6831,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
                  gimple *pun
                    = gimple_build_assign (tem, build1 (VIEW_CONVERT_EXPR,
                                                        lvectype, vec_oprnd));
-                 vect_finish_stmt_generation (stmt, pun, gsi);
+                 vect_finish_stmt_generation (stmt_info, pun, gsi);
                  vec_oprnd = tem;
                }
              for (i = 0; i < nstores; i++)
@@ -6838,7 +6858,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
                  /* And store it to *running_off.  */
                  assign = gimple_build_assign (newref, elem);
                  stmt_vec_info assign_info
-                   = vect_finish_stmt_generation (stmt, assign, gsi);
+                   = vect_finish_stmt_generation (stmt_info, assign, gsi);
                  group_el += lnel;
                  if (! slp
@@ -6847,7 +6867,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
                      newoff = copy_ssa_name (running_off, NULL);
                      incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
                                                  running_off, stride_step);
-                     vect_finish_stmt_generation (stmt, incr, gsi);
+                     vect_finish_stmt_generation (stmt_info, incr, gsi);
                      running_off = newoff;
                      group_el = 0;
@@ -6905,7 +6925,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
  else if (memory_access_type == VMAT_GATHER_SCATTER)
    {
      aggr_type = elem_type;
-      vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
+      vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
                                       &bump, &vec_offset);
    }
  else
@@ -6969,7 +6989,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
          if (slp)
            {
              /* Get vectorized arguments for SLP_NODE.  */
-             vect_get_vec_defs (op, NULL_TREE, stmt, &vec_oprnds,
+             vect_get_vec_defs (op, NULL_TREE, stmt_info, &vec_oprnds,
                                 NULL, slp_node);
              vec_oprnd = vec_oprnds[0];
@@ -6999,7 +7019,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
              next_stmt_info = DR_GROUP_NEXT_ELEMENT (next_stmt_info);
            }
          if (mask)
-           vec_mask = vect_get_vec_def_for_operand (mask, stmt,
+           vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
                                                     mask_vectype);
        }
@@ -7022,7 +7042,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
        }
      else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
        {
-         vect_get_gather_scatter_ops (loop, stmt, &gs_info,
+         vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
                                       &dataref_ptr, &vec_offset);
          inv_p = false;
        }
@@ -7061,8 +7081,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt, vec_offset = vect_get_vec_def_for_stmt_copy (gs_info.offset_dt,
vec_offset); vec_offset);
else else
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, stmt, dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
bump); stmt_info, bump);
} }
if (memory_access_type == VMAT_LOAD_STORE_LANES) if (memory_access_type == VMAT_LOAD_STORE_LANES)
...@@ -7075,13 +7095,13 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, ...@@ -7075,13 +7095,13 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
/* Invalidate the current contents of VEC_ARRAY. This should /* Invalidate the current contents of VEC_ARRAY. This should
become an RTL clobber too, which prevents the vector registers become an RTL clobber too, which prevents the vector registers
from being upward-exposed. */ from being upward-exposed. */
vect_clobber_variable (stmt, gsi, vec_array); vect_clobber_variable (stmt_info, gsi, vec_array);
/* Store the individual vectors into the array. */ /* Store the individual vectors into the array. */
for (i = 0; i < vec_num; i++) for (i = 0; i < vec_num; i++)
{ {
vec_oprnd = dr_chain[i]; vec_oprnd = dr_chain[i];
write_vector_array (stmt, gsi, vec_oprnd, vec_array, i); write_vector_array (stmt_info, gsi, vec_oprnd, vec_array, i);
} }
tree final_mask = NULL; tree final_mask = NULL;
...@@ -7114,10 +7134,10 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, ...@@ -7114,10 +7134,10 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
gimple_call_set_lhs (call, data_ref); gimple_call_set_lhs (call, data_ref);
} }
gimple_call_set_nothrow (call, true); gimple_call_set_nothrow (call, true);
new_stmt_info = vect_finish_stmt_generation (stmt, call, gsi); new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
/* Record that VEC_ARRAY is now dead. */ /* Record that VEC_ARRAY is now dead. */
vect_clobber_variable (stmt, gsi, vec_array); vect_clobber_variable (stmt_info, gsi, vec_array);
} }
else else
{ {
...@@ -7127,7 +7147,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, ...@@ -7127,7 +7147,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
if (j == 0) if (j == 0)
result_chain.create (group_size); result_chain.create (group_size);
/* Permute. */ /* Permute. */
vect_permute_store_chain (dr_chain, group_size, stmt, gsi, vect_permute_store_chain (dr_chain, group_size, stmt_info, gsi,
&result_chain); &result_chain);
} }
...@@ -7159,14 +7179,14 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, ...@@ -7159,14 +7179,14 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
scale, vec_oprnd); scale, vec_oprnd);
gimple_call_set_nothrow (call, true); gimple_call_set_nothrow (call, true);
new_stmt_info new_stmt_info
= vect_finish_stmt_generation (stmt, call, gsi); = vect_finish_stmt_generation (stmt_info, call, gsi);
break; break;
} }
if (i > 0) if (i > 0)
/* Bump the vector pointer. */ /* Bump the vector pointer. */
dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi, dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
stmt, bump); stmt_info, bump);
if (slp) if (slp)
vec_oprnd = vec_oprnds[i]; vec_oprnd = vec_oprnds[i];
...@@ -7193,16 +7213,15 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, ...@@ -7193,16 +7213,15 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
if (memory_access_type == VMAT_CONTIGUOUS_REVERSE) if (memory_access_type == VMAT_CONTIGUOUS_REVERSE)
{ {
tree perm_mask = perm_mask_for_reverse (vectype); tree perm_mask = perm_mask_for_reverse (vectype);
tree perm_dest tree perm_dest = vect_create_destination_var
= vect_create_destination_var (vect_get_store_rhs (stmt), (vect_get_store_rhs (stmt_info), vectype);
vectype);
tree new_temp = make_ssa_name (perm_dest); tree new_temp = make_ssa_name (perm_dest);
/* Generate the permute statement. */ /* Generate the permute statement. */
gimple *perm_stmt gimple *perm_stmt
= gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd, = gimple_build_assign (new_temp, VEC_PERM_EXPR, vec_oprnd,
vec_oprnd, perm_mask); vec_oprnd, perm_mask);
vect_finish_stmt_generation (stmt, perm_stmt, gsi); vect_finish_stmt_generation (stmt_info, perm_stmt, gsi);
perm_stmt = SSA_NAME_DEF_STMT (new_temp); perm_stmt = SSA_NAME_DEF_STMT (new_temp);
vec_oprnd = new_temp; vec_oprnd = new_temp;
...@@ -7219,7 +7238,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, ...@@ -7219,7 +7238,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
final_mask, vec_oprnd); final_mask, vec_oprnd);
gimple_call_set_nothrow (call, true); gimple_call_set_nothrow (call, true);
new_stmt_info new_stmt_info
= vect_finish_stmt_generation (stmt, call, gsi); = vect_finish_stmt_generation (stmt_info, call, gsi);
} }
else else
{ {
...@@ -7242,7 +7261,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, ...@@ -7242,7 +7261,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi,
gassign *new_stmt gassign *new_stmt
= gimple_build_assign (data_ref, vec_oprnd); = gimple_build_assign (data_ref, vec_oprnd);
new_stmt_info new_stmt_info
= vect_finish_stmt_generation (stmt, new_stmt, gsi); = vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
} }
if (slp) if (slp)
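The vectorizable_store hunks above show the shape of the whole patch: helpers such as vect_finish_stmt_generation, bump_vector_ptr and vect_clobber_variable now take the statement's stmt_vec_info rather than the raw gimple statement. The following is a self-contained toy sketch of that calling-convention change, with invented types and names; it is not GCC code.

  #include <cstdio>

  struct gimple { const char *repr; };    // stand-in for the raw IR statement

  struct _stmt_vec_info
  {
    gimple *stmt;                         // the scalar statement being vectorized
    int num_copies_emitted;               // stand-in for the extra vectorizer state
  };
  typedef _stmt_vec_info *stmt_vec_info;

  // Before: the helper took "gimple *stmt" and had to look the info up.
  // After:  it receives the stmt_vec_info directly and reaches the raw
  //         statement through stmt_info->stmt only when it needs it.
  void finish_stmt_generation (stmt_vec_info stmt_info, gimple *vec_stmt)
  {
    ++stmt_info->num_copies_emitted;
    std::printf ("emitted \"%s\" for \"%s\"\n",
                 vec_stmt->repr, stmt_info->stmt->repr);
  }

  int main ()
  {
    gimple scalar = { "a[i] = x" };
    gimple vec = { "vect_a = vx" };
    _stmt_vec_info info = { &scalar, 0 };
    finish_stmt_generation (&info, &vec); // callers now pass stmt_info
    return 0;
  }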
@@ -7446,7 +7465,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 return false;
 tree mask = NULL_TREE, mask_vectype = NULL_TREE;
-if (gassign *assign = dyn_cast <gassign *> (stmt))
+if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))
 {
 scalar_dest = gimple_assign_lhs (assign);
 if (TREE_CODE (scalar_dest) != SSA_NAME)
@@ -7465,7 +7484,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 }
 else
 {
-gcall *call = dyn_cast <gcall *> (stmt);
+gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
 if (!call || !gimple_call_internal_p (call))
 return false;
@@ -7489,7 +7508,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 if (mask_index >= 0)
 {
 mask = gimple_call_arg (call, mask_index);
-if (!vect_check_load_store_mask (stmt, mask, &mask_dt,
+if (!vect_check_load_store_mask (stmt_info, mask, &mask_dt,
 &mask_vectype))
 return false;
 }
@@ -7504,7 +7523,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 if (loop_vinfo)
 {
 loop = LOOP_VINFO_LOOP (loop_vinfo);
-nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt);
+nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
 }
 else
@@ -7601,7 +7620,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 group_size = 1;
 vect_memory_access_type memory_access_type;
-if (!get_load_store_type (stmt, vectype, slp, mask, VLS_LOAD, ncopies,
+if (!get_load_store_type (stmt_info, vectype, slp, mask, VLS_LOAD, ncopies,
 &memory_access_type, &gs_info))
 return false;
@@ -7669,7 +7688,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 if (memory_access_type == VMAT_GATHER_SCATTER && gs_info.decl)
 {
-vect_build_gather_load_calls (stmt, gsi, vec_stmt, &gs_info, mask,
+vect_build_gather_load_calls (stmt_info, gsi, vec_stmt, &gs_info, mask,
 mask_dt);
 return true;
 }
@@ -7712,7 +7731,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 if (grouped_load)
 cst_offset
 = (tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (vectype)))
-* vect_get_place_in_interleaving_chain (stmt,
+* vect_get_place_in_interleaving_chain (stmt_info,
 first_stmt_info));
 group_size = 1;
 ref_type = reference_alias_ptr_type (DR_REF (dr));
@@ -7857,7 +7876,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 gassign *new_stmt
 = gimple_build_assign (make_ssa_name (ltype), data_ref);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 if (nloads > 1)
 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE,
 gimple_assign_lhs (new_stmt));
@@ -7869,7 +7888,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 tree newoff = copy_ssa_name (running_off);
 gimple *incr = gimple_build_assign (newoff, POINTER_PLUS_EXPR,
 running_off, stride_step);
-vect_finish_stmt_generation (stmt, incr, gsi);
+vect_finish_stmt_generation (stmt_info, incr, gsi);
 running_off = newoff;
 group_el = 0;
@@ -7878,7 +7897,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 if (nloads > 1)
 {
 tree vec_inv = build_constructor (lvectype, v);
-new_temp = vect_init_vector (stmt, vec_inv, lvectype, gsi);
+new_temp = vect_init_vector (stmt_info, vec_inv, lvectype, gsi);
 new_stmt_info = vinfo->lookup_def (new_temp);
 if (lvectype != vectype)
 {
@@ -7888,7 +7907,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 build1 (VIEW_CONVERT_EXPR,
 vectype, new_temp));
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 }
 }
@@ -8145,7 +8164,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 else if (memory_access_type == VMAT_GATHER_SCATTER)
 {
 aggr_type = elem_type;
-vect_get_strided_load_store_ops (stmt, loop_vinfo, &gs_info,
+vect_get_strided_load_store_ops (stmt_info, loop_vinfo, &gs_info,
 &bump, &vec_offset);
 }
 else
@@ -8198,11 +8217,11 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 DR_INIT (first_dr),
 DR_INIT (ptrdr)));
 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
-stmt, diff);
+stmt_info, diff);
 }
 else if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
 {
-vect_get_gather_scatter_ops (loop, stmt, &gs_info,
+vect_get_gather_scatter_ops (loop, stmt_info, &gs_info,
 &dataref_ptr, &vec_offset);
 inv_p = false;
 }
@@ -8213,7 +8232,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 simd_lane_access_p, &inv_p,
 byte_offset, bump);
 if (mask)
-vec_mask = vect_get_vec_def_for_operand (mask, stmt,
+vec_mask = vect_get_vec_def_for_operand (mask, stmt_info,
 mask_vectype);
 }
 else
@@ -8226,7 +8245,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 vec_offset);
 else
 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
-stmt, bump);
+stmt_info, bump);
 if (mask)
 vec_mask = vect_get_vec_def_for_stmt_copy (mask_dt, vec_mask);
 }
@@ -8269,21 +8288,21 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 }
 gimple_call_set_lhs (call, vec_array);
 gimple_call_set_nothrow (call, true);
-new_stmt_info = vect_finish_stmt_generation (stmt, call, gsi);
+new_stmt_info = vect_finish_stmt_generation (stmt_info, call, gsi);
 /* Extract each vector into an SSA_NAME. */
 for (i = 0; i < vec_num; i++)
 {
-new_temp = read_vector_array (stmt, gsi, scalar_dest,
+new_temp = read_vector_array (stmt_info, gsi, scalar_dest,
 vec_array, i);
 dr_chain.quick_push (new_temp);
 }
 /* Record the mapping between SSA_NAMEs and statements. */
-vect_record_grouped_load_vectors (stmt, dr_chain);
+vect_record_grouped_load_vectors (stmt_info, dr_chain);
 /* Record that VEC_ARRAY is now dead. */
-vect_clobber_variable (stmt, gsi, vec_array);
+vect_clobber_variable (stmt_info, gsi, vec_array);
 }
 else
 {
@@ -8301,7 +8320,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 if (i > 0)
 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
-stmt, bump);
+stmt_info, bump);
 /* 2. Create the vector-load in the loop. */
 gimple *new_stmt = NULL;
@@ -8402,7 +8421,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 build_int_cst
 (TREE_TYPE (dataref_ptr),
 -(HOST_WIDE_INT) align));
-vect_finish_stmt_generation (stmt, new_stmt, gsi);
+vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 data_ref
 = build2 (MEM_REF, vectype, ptr,
 build_int_cst (ref_type, 0));
@@ -8412,22 +8431,23 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 new_stmt = gimple_build_assign (vec_dest, data_ref);
 new_temp = make_ssa_name (vec_dest, new_stmt);
 gimple_assign_set_lhs (new_stmt, new_temp);
-gimple_set_vdef (new_stmt, gimple_vdef (stmt));
-gimple_set_vuse (new_stmt, gimple_vuse (stmt));
-vect_finish_stmt_generation (stmt, new_stmt, gsi);
+gimple_set_vdef (new_stmt, gimple_vdef (stmt_info->stmt));
+gimple_set_vuse (new_stmt, gimple_vuse (stmt_info->stmt));
+vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 msq = new_temp;
 bump = size_binop (MULT_EXPR, vs,
 TYPE_SIZE_UNIT (elem_type));
 bump = size_binop (MINUS_EXPR, bump, size_one_node);
-ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
+ptr = bump_vector_ptr (dataref_ptr, NULL, gsi,
+stmt_info, bump);
 new_stmt = gimple_build_assign
 (NULL_TREE, BIT_AND_EXPR, ptr,
 build_int_cst
 (TREE_TYPE (ptr), -(HOST_WIDE_INT) align));
 ptr = copy_ssa_name (ptr, new_stmt);
 gimple_assign_set_lhs (new_stmt, ptr);
-vect_finish_stmt_generation (stmt, new_stmt, gsi);
+vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 data_ref
 = build2 (MEM_REF, vectype, ptr,
 build_int_cst (ref_type, 0));
@@ -8444,7 +8464,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 (new_temp, BIT_AND_EXPR, dataref_ptr,
 build_int_cst (TREE_TYPE (dataref_ptr),
 -(HOST_WIDE_INT) align));
-vect_finish_stmt_generation (stmt, new_stmt, gsi);
+vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 data_ref
 = build2 (MEM_REF, vectype, new_temp,
 build_int_cst (ref_type, 0));
@@ -8463,7 +8483,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 new_temp = make_ssa_name (vec_dest, new_stmt);
 gimple_set_lhs (new_stmt, new_temp);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 /* 3. Handle explicit realignment if necessary/supported.
 Create in loop:
@@ -8480,7 +8500,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 new_temp = make_ssa_name (vec_dest, new_stmt);
 gimple_assign_set_lhs (new_stmt, new_temp);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 if (alignment_support_scheme == dr_explicit_realign_optimized)
 {
@@ -8503,7 +8523,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 thus we can insert it on the preheader edge. */
 if (LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo)
 && !nested_in_vect_loop
-&& hoist_defs_of_uses (stmt, loop))
+&& hoist_defs_of_uses (stmt_info, loop))
 {
 if (dump_enabled_p ())
 {
@@ -8518,7 +8538,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 gimple_build_assign (tem,
 unshare_expr
 (gimple_assign_rhs1 (stmt))));
-new_temp = vect_init_vector (stmt, tem, vectype, NULL);
+new_temp = vect_init_vector (stmt_info, tem,
+vectype, NULL);
 new_stmt = SSA_NAME_DEF_STMT (new_temp);
 new_stmt_info = vinfo->add_stmt (new_stmt);
 }
@@ -8526,7 +8547,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 {
 gimple_stmt_iterator gsi2 = *gsi;
 gsi_next (&gsi2);
-new_temp = vect_init_vector (stmt, scalar_dest,
+new_temp = vect_init_vector (stmt_info, scalar_dest,
 vectype, &gsi2);
 new_stmt_info = vinfo->lookup_def (new_temp);
 }
@@ -8536,7 +8557,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 {
 tree perm_mask = perm_mask_for_reverse (vectype);
 new_temp = permute_vec_elements (new_temp, new_temp,
-perm_mask, stmt, gsi);
+perm_mask, stmt_info, gsi);
 new_stmt_info = vinfo->lookup_def (new_temp);
 }
@@ -8562,7 +8583,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 * group_gap_adj);
 tree bump = wide_int_to_tree (sizetype, bump_val);
 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
-stmt, bump);
+stmt_info, bump);
 group_elt = 0;
 }
 }
@@ -8575,7 +8596,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 * group_gap_adj);
 tree bump = wide_int_to_tree (sizetype, bump_val);
 dataref_ptr = bump_vector_ptr (dataref_ptr, ptr_incr, gsi,
-stmt, bump);
+stmt_info, bump);
 }
 }
@@ -8598,7 +8619,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi,
 if (grouped_load)
 {
 if (memory_access_type != VMAT_LOAD_STORE_LANES)
-vect_transform_grouped_load (stmt, dr_chain, group_size, gsi);
+vect_transform_grouped_load (stmt_info, dr_chain,
+group_size, gsi);
 *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
 }
 else
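In vectorizable_load the other half of the convention is visible: where the raw statement really is required (the dyn_cast checks, copying the virtual operands), the code now spells stmt_info->stmt explicitly instead of passing the gimple pointer around. A toy model of that dereference pattern follows, with standard C++ RTTI standing in for GCC's gimple class hierarchy; it is not GCC code.

  #include <cstdio>

  struct gimple { virtual ~gimple () {} };      // base of the toy statement hierarchy
  struct gassign : gimple { const char *lhs; }; // assignment statement
  struct gcall : gimple { const char *fn; };    // call statement

  struct _stmt_vec_info { gimple *stmt; };
  typedef _stmt_vec_info *stmt_vec_info;

  // Mirrors "if (gassign *assign = dyn_cast <gassign *> (stmt_info->stmt))":
  // the wrapper is passed around, and the underlying statement is fetched
  // only at the point where its concrete kind matters.
  const char *scalar_dest_of (stmt_vec_info stmt_info)
  {
    if (gassign *assign = dynamic_cast<gassign *> (stmt_info->stmt))
      return assign->lhs;                       // plain load: lhs of the assignment
    if (gcall *call = dynamic_cast<gcall *> (stmt_info->stmt))
      return call->fn;                          // e.g. a masked-load internal call
    return nullptr;                             // not something this routine handles
  }

  int main ()
  {
    gassign a;
    a.lhs = "dest_4";
    _stmt_vec_info info = { &a };
    std::printf ("%s\n", scalar_dest_of (&info));
    return 0;
  }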
@@ -8942,7 +8964,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 if (masked)
 {
 vec_cond_lhs
-= vect_get_vec_def_for_operand (cond_expr, stmt,
+= vect_get_vec_def_for_operand (cond_expr, stmt_info,
 comp_vectype);
 vect_is_simple_use (cond_expr, stmt_info->vinfo, &dts[0]);
 }
@@ -8950,12 +8972,12 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 {
 vec_cond_lhs
 = vect_get_vec_def_for_operand (cond_expr0,
-stmt, comp_vectype);
+stmt_info, comp_vectype);
 vect_is_simple_use (cond_expr0, loop_vinfo, &dts[0]);
 vec_cond_rhs
 = vect_get_vec_def_for_operand (cond_expr1,
-stmt, comp_vectype);
+stmt_info, comp_vectype);
 vect_is_simple_use (cond_expr1, loop_vinfo, &dts[1]);
 }
 if (reduc_index == 1)
@@ -8963,7 +8985,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 else
 {
 vec_then_clause = vect_get_vec_def_for_operand (then_clause,
-stmt);
+stmt_info);
 vect_is_simple_use (then_clause, loop_vinfo, &dts[2]);
 }
 if (reduc_index == 2)
@@ -8971,7 +8993,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 else
 {
 vec_else_clause = vect_get_vec_def_for_operand (else_clause,
-stmt);
+stmt_info);
 vect_is_simple_use (else_clause, loop_vinfo, &dts[3]);
 }
 }
@@ -9026,7 +9048,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 new_stmt
 = gimple_build_assign (new_temp, bitop1, vec_cond_lhs,
 vec_cond_rhs);
-vect_finish_stmt_generation (stmt, new_stmt, gsi);
+vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 if (bitop2 == NOP_EXPR)
 vec_compare = new_temp;
 else if (bitop2 == BIT_NOT_EXPR)
@@ -9041,7 +9063,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 new_stmt
 = gimple_build_assign (vec_compare, bitop2,
 vec_cond_lhs, new_temp);
-vect_finish_stmt_generation (stmt, new_stmt, gsi);
+vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 }
 }
 }
@@ -9052,7 +9074,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 tree vec_compare_name = make_ssa_name (vec_cmp_type);
 gassign *new_stmt = gimple_build_assign (vec_compare_name,
 vec_compare);
-vect_finish_stmt_generation (stmt, new_stmt, gsi);
+vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 vec_compare = vec_compare_name;
 }
 gcc_assert (reduc_index == 2);
@@ -9061,17 +9083,18 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 vec_then_clause);
 gimple_call_set_lhs (new_stmt, scalar_dest);
 SSA_NAME_DEF_STMT (scalar_dest) = new_stmt;
-if (stmt == gsi_stmt (*gsi))
-new_stmt_info = vect_finish_replace_stmt (stmt, new_stmt);
+if (stmt_info->stmt == gsi_stmt (*gsi))
+new_stmt_info = vect_finish_replace_stmt (stmt_info, new_stmt);
 else
 {
 /* In this case we're moving the definition to later in the
 block.  That doesn't matter because the only uses of the
 lhs are in phi statements. */
-gimple_stmt_iterator old_gsi = gsi_for_stmt (stmt);
+gimple_stmt_iterator old_gsi
+= gsi_for_stmt (stmt_info->stmt);
 gsi_remove (&old_gsi, true);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 }
 }
 else
@@ -9081,7 +9104,7 @@ vectorizable_condition (gimple *stmt, gimple_stmt_iterator *gsi,
 = gimple_build_assign (new_temp, VEC_COND_EXPR, vec_compare,
 vec_then_clause, vec_else_clause);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 }
 if (slp_node)
 SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info);
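One non-mechanical spot above is the reduction case in vectorizable_condition: if the statement being vectorized is the one the iterator currently points at, it is replaced in place; otherwise the old copy is removed and the new statement is emitted at the iterator, which is safe because the lhs is only used in PHIs. Below is a toy version of that replace-or-move decision, with std::list standing in for a gimple statement sequence; it is not GCC code.

  #include <cstdio>
  #include <iterator>
  #include <list>
  #include <string>

  typedef std::list<std::string> stmt_seq;  // toy basic block
  typedef stmt_seq::iterator stmt_iterator; // toy gimple_stmt_iterator

  void finish_replace_or_move (stmt_seq &seq, stmt_iterator gsi,
                               stmt_iterator old_pos,
                               const std::string &new_stmt)
  {
    if (old_pos == gsi)
      *gsi = new_stmt;                      // vect_finish_replace_stmt analogue
    else
      {
        seq.erase (old_pos);                // gsi_remove analogue
        seq.insert (gsi, new_stmt);         // emit at the insertion point
      }
  }

  int main ()
  {
    stmt_seq block = { "cmp", "cond_expr", "use" };
    stmt_iterator gsi = std::next (block.begin ()); // points at "cond_expr"
    finish_replace_or_move (block, gsi, gsi, "vec_cond");
    for (const std::string &s : block)
      std::printf ("%s\n", s.c_str ());
    return 0;
  }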
@@ -9307,8 +9330,10 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
 }
 else
 {
-vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt, vectype);
-vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt, vectype);
+vec_rhs1 = vect_get_vec_def_for_operand (rhs1, stmt_info,
+vectype);
+vec_rhs2 = vect_get_vec_def_for_operand (rhs2, stmt_info,
+vectype);
 }
 }
 else
@@ -9336,7 +9361,7 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
 gassign *new_stmt = gimple_build_assign (new_temp, code,
 vec_rhs1, vec_rhs2);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 }
 else
 {
@@ -9347,7 +9372,7 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
 new_stmt = gimple_build_assign (new_temp, bitop1, vec_rhs1,
 vec_rhs2);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 if (bitop2 != NOP_EXPR)
 {
 tree res = make_ssa_name (mask);
@@ -9357,7 +9382,7 @@ vectorizable_comparison (gimple *stmt, gimple_stmt_iterator *gsi,
 new_stmt = gimple_build_assign (res, bitop2, vec_rhs1,
 new_temp);
 new_stmt_info
-= vect_finish_stmt_generation (stmt, new_stmt, gsi);
+= vect_finish_stmt_generation (stmt_info, new_stmt, gsi);
 }
 }
 if (slp_node)
@@ -9427,10 +9452,10 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 if (dump_enabled_p ())
 {
 dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
-dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
 }
-if (gimple_has_volatile_ops (stmt))
+if (gimple_has_volatile_ops (stmt_info->stmt))
 {
 if (dump_enabled_p ())
 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -9447,7 +9472,6 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 for (si = gsi_start (pattern_def_seq); !gsi_end_p (si); gsi_next (&si))
 {
-gimple *pattern_def_stmt = gsi_stmt (si);
 stmt_vec_info pattern_def_stmt_info
 = vinfo->lookup_stmt (gsi_stmt (si));
 if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
@@ -9458,10 +9482,11 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 {
 dump_printf_loc (MSG_NOTE, vect_location,
 "==> examining pattern def statement: ");
-dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
+dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
+pattern_def_stmt_info->stmt, 0);
 }
-if (!vect_analyze_stmt (pattern_def_stmt,
+if (!vect_analyze_stmt (pattern_def_stmt_info,
 need_to_vectorize, node, node_instance,
 cost_vec))
 return false;
@@ -9499,7 +9524,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 {
 dump_printf_loc (MSG_NOTE, vect_location,
 "==> examining pattern statement: ");
-dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt_info->stmt, 0);
 }
 }
 else
@@ -9521,7 +9546,7 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 {
 dump_printf_loc (MSG_NOTE, vect_location,
 "==> examining pattern statement: ");
-dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
+dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt_info->stmt, 0);
 }
 if (!vect_analyze_stmt (pattern_stmt_info, need_to_vectorize, node,
@@ -9557,8 +9582,9 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 if (STMT_VINFO_RELEVANT_P (stmt_info))
 {
-gcc_assert (!VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))));
-gcall *call = dyn_cast <gcall *> (stmt);
+tree type = gimple_expr_type (stmt_info->stmt);
+gcc_assert (!VECTOR_MODE_P (TYPE_MODE (type)));
+gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
 gcc_assert (STMT_VINFO_VECTYPE (stmt_info)
 || (call && gimple_call_lhs (call) == NULL_TREE));
 *need_to_vectorize = true;
@@ -9575,34 +9601,40 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 if (!bb_vinfo
 && (STMT_VINFO_RELEVANT_P (stmt_info)
 || STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def))
-ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_conversion (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_shift (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_operation (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_assignment (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_load (stmt, NULL, NULL, node, node_instance, cost_vec)
-|| vectorizable_call (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_store (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_reduction (stmt, NULL, NULL, node, node_instance,
-cost_vec)
-|| vectorizable_induction (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_condition (stmt, NULL, NULL, NULL, 0, node, cost_vec)
-|| vectorizable_comparison (stmt, NULL, NULL, NULL, node, cost_vec));
+ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_conversion (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_assignment (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
+cost_vec)
+|| vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_reduction (stmt_info, NULL, NULL, node,
+node_instance, cost_vec)
+|| vectorizable_induction (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
+cost_vec)
+|| vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
+cost_vec));
 else
 {
 if (bb_vinfo)
-ok = (vectorizable_simd_clone_call (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_conversion (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_shift (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_operation (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_assignment (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_load (stmt, NULL, NULL, node, node_instance,
-cost_vec)
-|| vectorizable_call (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_store (stmt, NULL, NULL, node, cost_vec)
-|| vectorizable_condition (stmt, NULL, NULL, NULL, 0, node,
-cost_vec)
-|| vectorizable_comparison (stmt, NULL, NULL, NULL, node,
-cost_vec));
+ok = (vectorizable_simd_clone_call (stmt_info, NULL, NULL, node,
+cost_vec)
+|| vectorizable_conversion (stmt_info, NULL, NULL, node,
+cost_vec)
+|| vectorizable_shift (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_operation (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_assignment (stmt_info, NULL, NULL, node,
+cost_vec)
+|| vectorizable_load (stmt_info, NULL, NULL, node, node_instance,
+cost_vec)
+|| vectorizable_call (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_store (stmt_info, NULL, NULL, node, cost_vec)
+|| vectorizable_condition (stmt_info, NULL, NULL, NULL, 0, node,
+cost_vec)
+|| vectorizable_comparison (stmt_info, NULL, NULL, NULL, node,
+cost_vec));
 }
@@ -9613,7 +9645,8 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 "not vectorized: relevant stmt not ");
 dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
-dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+stmt_info->stmt, 0);
 }
 return false;
@@ -9623,13 +9656,14 @@ vect_analyze_stmt (gimple *stmt, bool *need_to_vectorize, slp_tree node,
 need extra handling, except for vectorizable reductions. */
 if (!bb_vinfo
 && STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type
-&& !can_vectorize_live_stmts (stmt, NULL, node, NULL, cost_vec))
+&& !can_vectorize_live_stmts (stmt_info, NULL, node, NULL, cost_vec))
 {
 if (dump_enabled_p ())
 {
 dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 "not vectorized: live stmt not supported: ");
-dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
+dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+stmt_info->stmt, 0);
 }
 return false;
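The long ok = (vectorizable_* || ...) chains above implement analysis by trial: each routine inspects the statement, returns false if it does not apply, and otherwise claims it by recording its STMT_VINFO_TYPE for the later transform phase. A toy of the short-circuit dispatch, with invented predicates; not GCC code:

  #include <cstdio>

  enum stmt_kind { LOAD, STORE, OTHER };

  struct _stmt_vec_info { stmt_kind kind; const char *type; };
  typedef _stmt_vec_info *stmt_vec_info;

  // Each routine either rejects the statement or claims it by recording
  // the type that the transform phase will later dispatch on.
  static bool try_load (stmt_vec_info si)
  {
    if (si->kind != LOAD)
      return false;
    si->type = "load_vec_info_type";
    return true;
  }

  static bool try_store (stmt_vec_info si)
  {
    if (si->kind != STORE)
      return false;
    si->type = "store_vec_info_type";
    return true;
  }

  bool analyze (stmt_vec_info stmt_info)
  {
    // Mirrors "ok = (vectorizable_load (stmt_info, ...) || ...)".
    return try_load (stmt_info) || try_store (stmt_info);
  }

  int main ()
  {
    _stmt_vec_info s = { STORE, "undef" };
    std::printf ("%s -> %s\n", analyze (&s) ? "ok" : "unsupported", s.type);
    return 0;
  }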
@@ -9660,45 +9694,49 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
 bool nested_p = (STMT_VINFO_LOOP_VINFO (stmt_info)
 && nested_in_vect_loop_p
 (LOOP_VINFO_LOOP (STMT_VINFO_LOOP_VINFO (stmt_info)),
-stmt));
+stmt_info));
 switch (STMT_VINFO_TYPE (stmt_info))
 {
 case type_demotion_vec_info_type:
 case type_promotion_vec_info_type:
 case type_conversion_vec_info_type:
-done = vectorizable_conversion (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_conversion (stmt_info, gsi, &vec_stmt, slp_node,
+NULL);
 gcc_assert (done);
 break;
 case induc_vec_info_type:
-done = vectorizable_induction (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_induction (stmt_info, gsi, &vec_stmt, slp_node,
+NULL);
 gcc_assert (done);
 break;
 case shift_vec_info_type:
-done = vectorizable_shift (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_shift (stmt_info, gsi, &vec_stmt, slp_node, NULL);
 gcc_assert (done);
 break;
 case op_vec_info_type:
-done = vectorizable_operation (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_operation (stmt_info, gsi, &vec_stmt, slp_node,
+NULL);
 gcc_assert (done);
 break;
 case assignment_vec_info_type:
-done = vectorizable_assignment (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_assignment (stmt_info, gsi, &vec_stmt, slp_node,
+NULL);
 gcc_assert (done);
 break;
 case load_vec_info_type:
-done = vectorizable_load (stmt, gsi, &vec_stmt, slp_node,
+done = vectorizable_load (stmt_info, gsi, &vec_stmt, slp_node,
 slp_node_instance, NULL);
 gcc_assert (done);
 break;
 case store_vec_info_type:
-done = vectorizable_store (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_store (stmt_info, gsi, &vec_stmt, slp_node, NULL);
 gcc_assert (done);
 if (STMT_VINFO_GROUPED_ACCESS (stmt_info) && !slp_node)
 {
@@ -9716,27 +9754,30 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
 break;
 case condition_vec_info_type:
-done = vectorizable_condition (stmt, gsi, &vec_stmt, NULL, 0, slp_node, NULL);
+done = vectorizable_condition (stmt_info, gsi, &vec_stmt, NULL, 0,
+slp_node, NULL);
 gcc_assert (done);
 break;
 case comparison_vec_info_type:
-done = vectorizable_comparison (stmt, gsi, &vec_stmt, NULL, slp_node, NULL);
+done = vectorizable_comparison (stmt_info, gsi, &vec_stmt, NULL,
+slp_node, NULL);
 gcc_assert (done);
 break;
 case call_vec_info_type:
-done = vectorizable_call (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_call (stmt_info, gsi, &vec_stmt, slp_node, NULL);
 stmt = gsi_stmt (*gsi);
 break;
 case call_simd_clone_vec_info_type:
-done = vectorizable_simd_clone_call (stmt, gsi, &vec_stmt, slp_node, NULL);
+done = vectorizable_simd_clone_call (stmt_info, gsi, &vec_stmt,
+slp_node, NULL);
 stmt = gsi_stmt (*gsi);
 break;
 case reduc_vec_info_type:
-done = vectorizable_reduction (stmt, gsi, &vec_stmt, slp_node,
+done = vectorizable_reduction (stmt_info, gsi, &vec_stmt, slp_node,
 slp_node_instance, NULL);
 gcc_assert (done);
 break;
@@ -9797,7 +9838,8 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
 being vectorized. */
 if (STMT_VINFO_TYPE (stmt_info) != reduc_vec_info_type)
 {
-done = can_vectorize_live_stmts (stmt, gsi, slp_node, &vec_stmt, NULL);
+done = can_vectorize_live_stmts (stmt_info, gsi, slp_node, &vec_stmt,
+NULL);
 gcc_assert (done);
 }
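At transform time the trial chain is gone: vect_transform_stmt switches directly on the STMT_VINFO_TYPE recorded during analysis, and every case now forwards the stmt_vec_info. A toy of that second phase follows; it is not GCC code.

  #include <cassert>
  #include <cstdio>

  enum vec_info_type { load_vec_info_type, store_vec_info_type };

  struct _stmt_vec_info { vec_info_type type; };
  typedef _stmt_vec_info *stmt_vec_info;

  static bool transform_load (stmt_vec_info)
  { std::puts ("emit vector load"); return true; }
  static bool transform_store (stmt_vec_info)
  { std::puts ("emit vector store"); return true; }

  void transform_stmt (stmt_vec_info stmt_info)
  {
    bool done = false;
    switch (stmt_info->type)
      {
      case load_vec_info_type:
        done = transform_load (stmt_info);
        break;
      case store_vec_info_type:
        done = transform_store (stmt_info);
        break;
      }
    assert (done);  // mirrors the gcc_assert (done) after each case above
  }

  int main ()
  {
    _stmt_vec_info s = { store_vec_info_type };
    transform_stmt (&s);
    return 0;
  }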
@@ -10344,18 +10386,18 @@ supportable_widening_operation (enum tree_code code, gimple *stmt,
 a VEC_WIDEN_MULT_LO/HI_EXPR check. */
 if (vect_loop
 && STMT_VINFO_RELEVANT (stmt_info) == vect_used_by_reduction
-&& !nested_in_vect_loop_p (vect_loop, stmt)
+&& !nested_in_vect_loop_p (vect_loop, stmt_info)
 && supportable_widening_operation (VEC_WIDEN_MULT_EVEN_EXPR,
-stmt, vectype_out, vectype_in,
-code1, code2, multi_step_cvt,
-interm_types))
+stmt_info, vectype_out,
+vectype_in, code1, code2,
+multi_step_cvt, interm_types))
 {
 /* Elements in a vector with vect_used_by_reduction property cannot
 be reordered if the use chain with this property does not have the
 same operation.  One such an example is s += a * b, where elements
 in a and b cannot be reordered.  Here we check if the vector defined
 by STMT is only directly used in the reduction statement. */
-tree lhs = gimple_assign_lhs (stmt);
+tree lhs = gimple_assign_lhs (stmt_info->stmt);
 stmt_vec_info use_stmt_info = loop_info->lookup_single_use (lhs);
 if (use_stmt_info
 && STMT_VINFO_DEF_TYPE (use_stmt_info) == vect_reduction_def)
@@ -10827,7 +10869,8 @@ vect_get_vector_types_for_stmt (stmt_vec_info stmt_info,
 if (*stmt_vectype_out != boolean_type_node)
 {
 HOST_WIDE_INT dummy;
-scalar_type = vect_get_smallest_scalar_type (stmt, &dummy, &dummy);
+scalar_type = vect_get_smallest_scalar_type (stmt_info,
+&dummy, &dummy);
 }
 if (dump_enabled_p ())
 {
gcc/tree-vectorizer.h
@@ -1325,7 +1325,7 @@ vect_dr_behavior (data_reference *dr)
 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
 loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
 if (loop_vinfo == NULL
-|| !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt))
+|| !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
 return &DR_INNERMOST (dr);
 else
 return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);