Commit 82570274 by Richard Sandiford (committed by Richard Sandiford)

[31/46] Use stmt_vec_info in function interfaces (part 1)

This first (less mechanical) part handles cases that involve changes in
the callers or non-trivial changes in the functions themselves.
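
The shape of the conversion is the same throughout: callees that used to
take a raw gimple statement (and look up the stmt_vec_info themselves)
now take the stmt_vec_info and recover the typed statement from it where
needed.  A minimal sketch of the before/after pattern, using only
constructs that appear in the patch (example_iv_p is a hypothetical
function, not part of this series):

    /* Before: the callee takes the raw phi and looks up its
       stmt_vec_info itself.  */
    static bool
    example_iv_p (gphi *phi)
    {
      stmt_vec_info stmt_info = vinfo_for_stmt (phi);
      return (!virtual_operand_p (PHI_RESULT (phi))
	      && STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
    }

    /* After: the caller passes the stmt_vec_info and the callee recovers
       the typed statement from its "stmt" field when it needs one.  */
    static bool
    example_iv_p (stmt_vec_info stmt_info)
    {
      gphi *phi = as_a <gphi *> (stmt_info->stmt);
      return (!virtual_operand_p (PHI_RESULT (phi))
	      && STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def);
    }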

2018-07-31  Richard Sandiford  <richard.sandiford@arm.com>

gcc/
	* tree-vect-data-refs.c (vect_describe_gather_scatter_call): Take
	a stmt_vec_info instead of a gcall.
	(vect_check_gather_scatter): Update call accordingly.
	* tree-vect-loop-manip.c (iv_phi_p): Take a stmt_vec_info instead
	of a gphi.
	(vect_can_advance_ivs_p, vect_update_ivs_after_vectorizer)
	(slpeel_update_phi_nodes_for_loops): Update calls accordingly.
	* tree-vect-loop.c (vect_transform_loop_stmt): Take a stmt_vec_info
	instead of a gimple stmt.
	(vect_transform_loop): Update calls accordingly.
	* tree-vect-slp.c (vect_split_slp_store_group): Take and return
	stmt_vec_infos instead of gimple stmts.
	(vect_analyze_slp_instance): Update use accordingly.
	* tree-vect-stmts.c (read_vector_array, write_vector_array)
	(vect_clobber_variable, vect_stmt_relevant_p, permute_vec_elements)
	(vect_use_strided_gather_scatters_p, vect_build_all_ones_mask)
	(vect_build_zero_merge_argument, vect_get_gather_scatter_ops)
	(vect_gen_widened_results_half, vect_get_loop_based_defs)
	(vect_create_vectorized_promotion_stmts, can_vectorize_live_stmts):
	Take a stmt_vec_info instead of a gimple stmt and pass stmt_vec_infos
	down to subroutines.

From-SVN: r263146
gcc/tree-vect-data-refs.c

@@ -3621,13 +3621,14 @@ vect_gather_scatter_fn_p (bool read_p, bool masked_p, tree vectype,
   return true;
 }
 
-/* CALL is a call to an internal gather load or scatter store function.
+/* STMT_INFO is a call to an internal gather load or scatter store function.
    Describe the operation in INFO.  */
 
 static void
-vect_describe_gather_scatter_call (gcall *call, gather_scatter_info *info)
+vect_describe_gather_scatter_call (stmt_vec_info stmt_info,
+				   gather_scatter_info *info)
 {
-  stmt_vec_info stmt_info = vinfo_for_stmt (call);
+  gcall *call = as_a <gcall *> (stmt_info->stmt);
   tree vectype = STMT_VINFO_VECTYPE (stmt_info);
   data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);

@@ -3672,7 +3673,7 @@ vect_check_gather_scatter (gimple *stmt, loop_vec_info loop_vinfo,
       ifn = gimple_call_internal_fn (call);
       if (internal_gather_scatter_fn_p (ifn))
 	{
-	  vect_describe_gather_scatter_call (call, info);
+	  vect_describe_gather_scatter_call (stmt_info, info);
 	  return true;
 	}
       masked_p = (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE);
gcc/tree-vect-loop-manip.c

@@ -1335,16 +1335,16 @@ find_loop_location (struct loop *loop)
   return dump_user_location_t ();
 }
 
-/* Return true if PHI defines an IV of the loop to be vectorized.  */
+/* Return true if the phi described by STMT_INFO defines an IV of the
+   loop to be vectorized.  */
 
 static bool
-iv_phi_p (gphi *phi)
+iv_phi_p (stmt_vec_info stmt_info)
 {
+  gphi *phi = as_a <gphi *> (stmt_info->stmt);
   if (virtual_operand_p (PHI_RESULT (phi)))
     return false;
 
-  stmt_vec_info stmt_info = vinfo_for_stmt (phi);
-  gcc_assert (stmt_info != NULL_STMT_VEC_INFO);
   if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
       || STMT_VINFO_DEF_TYPE (stmt_info) == vect_double_reduction_def)
     return false;

@@ -1388,7 +1388,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
	 virtual defs/uses (i.e., memory accesses) are analyzed elsewhere.
	 Skip reduction phis.  */
-      if (!iv_phi_p (phi))
+      if (!iv_phi_p (phi_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,

@@ -1509,7 +1509,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo,
	}
 
       /* Skip reduction and virtual phis.  */
-      if (!iv_phi_p (phi))
+      if (!iv_phi_p (phi_info))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,

@@ -2088,7 +2088,8 @@ slpeel_update_phi_nodes_for_loops (loop_vec_info loop_vinfo,
       tree arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, first_latch_e);
       /* Generate lcssa PHI node for the first loop.  */
       gphi *vect_phi = (loop == first) ? orig_phi : update_phi;
-      if (create_lcssa_for_iv_phis || !iv_phi_p (vect_phi))
+      stmt_vec_info vect_phi_info = loop_vinfo->lookup_stmt (vect_phi);
+      if (create_lcssa_for_iv_phis || !iv_phi_p (vect_phi_info))
	{
	  tree new_res = copy_ssa_name (PHI_RESULT (orig_phi));
	  gphi *lcssa_phi = create_phi_node (new_res, between_bb);
gcc/tree-vect-loop.c

@@ -8207,21 +8207,18 @@ scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
   scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
 }
 
-/* Vectorize STMT if relevant, inserting any new instructions before GSI.
-   When vectorizing STMT as a store, set *SEEN_STORE to its stmt_vec_info.
+/* Vectorize STMT_INFO if relevant, inserting any new instructions before GSI.
+   When vectorizing STMT_INFO as a store, set *SEEN_STORE to its stmt_vec_info.
    *SLP_SCHEDULE is a running record of whether we have called
    vect_schedule_slp.  */
 
 static void
-vect_transform_loop_stmt (loop_vec_info loop_vinfo, gimple *stmt,
+vect_transform_loop_stmt (loop_vec_info loop_vinfo, stmt_vec_info stmt_info,
			  gimple_stmt_iterator *gsi,
			  stmt_vec_info *seen_store, bool *slp_scheduled)
 {
   struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
   poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
-  stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
-  if (!stmt_info)
-    return;
 
   if (dump_enabled_p ())
     {

@@ -8476,15 +8473,19 @@ vect_transform_loop (loop_vec_info loop_vinfo)
	      gimple *def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
	      for (gimple_stmt_iterator subsi = gsi_start (def_seq);
		   !gsi_end_p (subsi); gsi_next (&subsi))
-		vect_transform_loop_stmt (loop_vinfo,
-					  gsi_stmt (subsi), &si,
-					  &seen_store,
-					  &slp_scheduled);
-	      gimple *pat_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
-	      vect_transform_loop_stmt (loop_vinfo, pat_stmt, &si,
+		{
+		  stmt_vec_info pat_stmt_info
+		    = loop_vinfo->lookup_stmt (gsi_stmt (subsi));
+		  vect_transform_loop_stmt (loop_vinfo, pat_stmt_info,
+					    &si, &seen_store,
+					    &slp_scheduled);
+		}
+	      stmt_vec_info pat_stmt_info
+		= STMT_VINFO_RELATED_STMT (stmt_info);
+	      vect_transform_loop_stmt (loop_vinfo, pat_stmt_info, &si,
					&seen_store, &slp_scheduled);
	    }
-	  vect_transform_loop_stmt (loop_vinfo, stmt, &si,
+	  vect_transform_loop_stmt (loop_vinfo, stmt_info, &si,
				    &seen_store, &slp_scheduled);
	}
 
       if (seen_store)
gcc/tree-vect-slp.c

@@ -1856,16 +1856,15 @@ vect_find_last_scalar_stmt_in_slp (slp_tree node)
   return last;
 }
 
-/* Splits a group of stores, currently beginning at FIRST_STMT, into two groups:
-   one (still beginning at FIRST_STMT) of size GROUP1_SIZE (also containing
-   the first GROUP1_SIZE stmts, since stores are consecutive), the second
-   containing the remainder.
+/* Splits a group of stores, currently beginning at FIRST_VINFO, into
+   two groups: one (still beginning at FIRST_VINFO) of size GROUP1_SIZE
+   (also containing the first GROUP1_SIZE stmts, since stores are
+   consecutive), the second containing the remainder.
    Return the first stmt in the second group.  */
 
-static gimple *
-vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
+static stmt_vec_info
+vect_split_slp_store_group (stmt_vec_info first_vinfo, unsigned group1_size)
 {
-  stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
   gcc_assert (DR_GROUP_FIRST_ELEMENT (first_vinfo) == first_vinfo);
   gcc_assert (group1_size > 0);
   int group2_size = DR_GROUP_SIZE (first_vinfo) - group1_size;

@@ -2174,7 +2173,8 @@ vect_analyze_slp_instance (vec_info *vinfo,
	  gcc_assert ((const_nunits & (const_nunits - 1)) == 0);
	  unsigned group1_size = i & ~(const_nunits - 1);
 
-	  gimple *rest = vect_split_slp_store_group (stmt_info, group1_size);
+	  stmt_vec_info rest = vect_split_slp_store_group (stmt_info,
+							   group1_size);
	  bool res = vect_analyze_slp_instance (vinfo, stmt_info,
						max_tree_size);
	  /* If the first non-match was in the middle of a vector,