Commit 2c53b149 authored and committed by Richard Biener

tree-vectorizer.h (STMT_VINFO_GROUP_*, GROUP_*): Remove.

2018-05-25  Richard Biener  <rguenther@suse.de>

	* tree-vectorizer.h (STMT_VINFO_GROUP_*, GROUP_*): Remove.
	(DR_GROUP_*): New, assert we have non-NULL ->data_ref_info.
	(REDUC_GROUP_*): New, assert we have NULL ->data_ref_info.
	(STMT_VINFO_GROUPED_ACCESS): Adjust.
	* tree-vect-data-refs.c (everywhere): Adjust users.
	* tree-vect-loop.c (everywhere): Likewise.
	* tree-vect-slp.c (everywhere): Likewise.
	* tree-vect-stmts.c (everywhere): Likewise.
	* tree-vect-patterns.c (vect_reassociating_reduction_p): Likewise.

From-SVN: r260709
parent f8d70404
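
The tree-vectorizer.h hunk itself is not expanded in this view, but the ChangeLog pins down its shape: the shared GROUP_*/STMT_VINFO_GROUP_* accessors are split into DR_GROUP_* variants that are only valid on statements with a data reference and REDUC_GROUP_* variants that are only valid on reduction chains, each asserting the expected state of ->data_ref_info before touching the shared group fields. A minimal sketch of that idea follows; the underlying field names (first_element, next_element, size, gap, same_dr_stmt) and the use of gcc_checking_assert are assumptions about the stmt_vec_info of the time, not text copied from the committed header:

/* Sketch only; field names below are assumptions, not copied from the
   committed tree-vectorizer.h.  The comma expression keeps each accessor
   an lvalue, so assignments such as "DR_GROUP_GAP (info) = diff" in the
   hunks below keep working after the rename.  */
#define DR_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert ((S)->data_ref_info), (S)->first_element)
#define DR_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert ((S)->data_ref_info), (S)->next_element)
#define DR_GROUP_SIZE(S) \
  (gcc_checking_assert ((S)->data_ref_info), (S)->size)
#define DR_GROUP_GAP(S) \
  (gcc_checking_assert ((S)->data_ref_info), (S)->gap)
#define DR_GROUP_SAME_DR_STMT(S) \
  (gcc_checking_assert ((S)->data_ref_info), (S)->same_dr_stmt)

#define REDUC_GROUP_FIRST_ELEMENT(S) \
  (gcc_checking_assert (!(S)->data_ref_info), (S)->first_element)
#define REDUC_GROUP_NEXT_ELEMENT(S) \
  (gcc_checking_assert (!(S)->data_ref_info), (S)->next_element)
#define REDUC_GROUP_SIZE(S) \
  (gcc_checking_assert (!(S)->data_ref_info), (S)->size)

With this split, mixing up the two group kinds turns from a silent wrong answer into a checking-time failure, while every call site in the files below needs only a mechanical rename.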
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,15 @@
+2018-05-25  Richard Biener  <rguenther@suse.de>
+
+	* tree-vectorizer.h (STMT_VINFO_GROUP_*, GROUP_*): Remove.
+	(DR_GROUP_*): New, assert we have non-NULL ->data_ref_info.
+	(REDUC_GROUP_*): New, assert we have NULL ->data_ref_info.
+	(STMT_VINFO_GROUPED_ACCESS): Adjust.
+	* tree-vect-data-refs.c (everywhere): Adjust users.
+	* tree-vect-loop.c (everywhere): Likewise.
+	* tree-vect-slp.c (everywhere): Likewise.
+	* tree-vect-stmts.c (everywhere): Likewise.
+	* tree-vect-patterns.c (vect_reassociating_reduction_p): Likewise.
+
 2018-05-25  Rainer Orth  <ro@CeBiTec.Uni-Bielefeld.DE>
 
 	* configure.ac (gcc_cv_as_section_has_e): Move to common section.
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -307,8 +307,9 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
   /* We do not have to consider dependences between accesses that belong
      to the same group, unless the stride could be smaller than the
      group size.  */
-  if (GROUP_FIRST_ELEMENT (stmtinfo_a)
-      && GROUP_FIRST_ELEMENT (stmtinfo_a) == GROUP_FIRST_ELEMENT (stmtinfo_b)
+  if (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
+      && (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
+	  == DR_GROUP_FIRST_ELEMENT (stmtinfo_b))
       && !STMT_VINFO_STRIDED_P (stmtinfo_a))
     return false;
@@ -614,8 +615,8 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
   /* If dra and drb are part of the same interleaving chain consider
      them independent.  */
   if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (DR_STMT (dra)))
-      && (GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
-	  == GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
+      && (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dra)))
+	  == DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (drb)))))
     return false;

   /* Unknown data dependence.  */
@@ -1056,9 +1057,9 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
   /* For interleaved data accesses the step in the loop must be multiplied by
      the size of the interleaving group.  */
   if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
-    dr_size *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
+    dr_size *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info)));
   if (STMT_VINFO_GROUPED_ACCESS (peel_stmt_info))
-    dr_peel_size *= GROUP_SIZE (peel_stmt_info);
+    dr_peel_size *= DR_GROUP_SIZE (peel_stmt_info);

   /* It can be assumed that the data refs with the same alignment as dr_peel
      are aligned in the vector loop.  */
@@ -1151,7 +1152,7 @@ vect_verify_datarefs_alignment (loop_vec_info vinfo)
       /* For interleaving, only the alignment of the first access matters.  */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-	  && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
	continue;

       /* Strided accesses perform only component accesses, alignment is
@@ -1208,7 +1209,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
       elem_size = vector_element_size (vector_size, nelements);
       mis_in_elements = DR_MISALIGNMENT (dr) / elem_size;

-      if (!multiple_p (nelements - mis_in_elements, GROUP_SIZE (stmt_info)))
+      if (!multiple_p (nelements - mis_in_elements, DR_GROUP_SIZE (stmt_info)))
	return false;
     }
@@ -1396,7 +1397,7 @@ vect_get_peeling_costs_all_drs (vec<data_reference_p> datarefs,
       /* For interleaving, only the alignment of the first access
	  matters.  */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-	  && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
	continue;

       /* Strided accesses perform only component accesses, alignment is
@@ -1530,7 +1531,7 @@ vect_peeling_supportable (loop_vec_info loop_vinfo, struct data_reference *dr0,
       /* For interleaving, only the alignment of the first access
	  matters.  */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-	  && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
	continue;

       /* Strided accesses perform only component accesses, alignment is
@@ -1718,7 +1719,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
       /* For interleaving, only the alignment of the first access
	  matters.  */
       if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-	  && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+	  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
	continue;

       /* For invariant accesses there is nothing to enhance.  */
@@ -1764,7 +1765,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
	      if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
		{
		  poly_uint64 nscalars = (STMT_SLP_TYPE (stmt_info)
-					  ? vf * GROUP_SIZE (stmt_info) : vf);
+					  ? vf * DR_GROUP_SIZE (stmt_info) : vf);
		  possible_npeel_number
		    = vect_get_num_vectors (nscalars, vectype);
@@ -2027,7 +2028,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
	     by the group size.  */
	  stmt_info = vinfo_for_stmt (DR_STMT (dr0));
	  if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
-	    npeel /= GROUP_SIZE (stmt_info);
+	    npeel /= DR_GROUP_SIZE (stmt_info);

	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, vect_location,
@@ -2155,7 +2156,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
	     matters.  */
	  if (aligned_access_p (dr)
	      || (STMT_VINFO_GROUPED_ACCESS (stmt_info)
-		  && GROUP_FIRST_ELEMENT (stmt_info) != stmt))
+		  && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt))
	    continue;

	  if (STMT_VINFO_STRIDED_P (stmt_info))
@@ -2380,7 +2381,7 @@ vect_slp_analyze_and_verify_node_alignment (slp_tree node)
   gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
   data_reference_p first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
   if (SLP_TREE_LOAD_PERMUTATION (node).exists ())
-    first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
+    first_stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));

   data_reference_p dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
   if (! vect_compute_data_ref_alignment (dr)
@@ -2455,10 +2456,10 @@ vect_analyze_group_access_1 (struct data_reference *dr)
       dr_step = tree_to_shwi (step);
       /* Check that STEP is a multiple of type size.  Otherwise there is
	  a non-element-sized gap at the end of the group which we
-	  cannot represent in GROUP_GAP or GROUP_SIZE.
+	  cannot represent in DR_GROUP_GAP or DR_GROUP_SIZE.
	  ??? As we can handle non-constant step fine here we should
-	  simply remove uses of GROUP_GAP between the last and first
-	  element and instead rely on DR_STEP.  GROUP_SIZE then would
+	  simply remove uses of DR_GROUP_GAP between the last and first
+	  element and instead rely on DR_STEP.  DR_GROUP_SIZE then would
	  simply not include that gap.  */
       if ((dr_step % type_size) != 0)
	{
@@ -2480,7 +2481,7 @@ vect_analyze_group_access_1 (struct data_reference *dr)
     groupsize = 0;

   /* Not consecutive access is possible only if it is a part of interleaving.  */
-  if (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+  if (!DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
     {
       /* Check if it this DR is a part of interleaving, and is a single
	  element of the group that is accessed in the loop.  */
@@ -2491,9 +2492,9 @@ vect_analyze_group_access_1 (struct data_reference *dr)
	  && (dr_step % type_size) == 0
	  && groupsize > 0)
	{
-	  GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
-	  GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
-	  GROUP_GAP (stmt_info) = groupsize - 1;
+	  DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = stmt;
+	  DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
+	  DR_GROUP_GAP (stmt_info) = groupsize - 1;
	  if (dump_enabled_p ())
	    {
	      dump_printf_loc (MSG_NOTE, vect_location,
@@ -2526,10 +2527,10 @@ vect_analyze_group_access_1 (struct data_reference *dr)
       return true;
     }

-  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
+  if (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) == stmt)
     {
       /* First stmt in the interleaving chain. Check the chain.  */
-      gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+      gimple *next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
       struct data_reference *data_ref = dr;
       unsigned int count = 1;
       tree prev_init = DR_INIT (data_ref);
@@ -2560,10 +2561,10 @@ vect_analyze_group_access_1 (struct data_reference *dr)
			     "Two or more load stmts share the same dr.\n");

	      /* For load use the same data-ref load.  */
-	      GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;
+	      DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next)) = prev;

	      prev = next;
-	      next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+	      next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	      continue;
	    }
@@ -2595,11 +2596,11 @@ vect_analyze_group_access_1 (struct data_reference *dr)
	  last_accessed_element += diff;

	  /* Store the gap from the previous member of the group. If there is no
-	     gap in the access, GROUP_GAP is always 1.  */
-	  GROUP_GAP (vinfo_for_stmt (next)) = diff;
+	     gap in the access, DR_GROUP_GAP is always 1.  */
+	  DR_GROUP_GAP (vinfo_for_stmt (next)) = diff;

	  prev_init = DR_INIT (data_ref);
-	  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+	  next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	  /* Count the number of data-refs in the chain.  */
	  count++;
	}
@@ -2632,9 +2633,9 @@ vect_analyze_group_access_1 (struct data_reference *dr)
	  difference between the groupsize and the last accessed
	  element.
	  When there is no gap, this difference should be 0.  */
-      GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
-      GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
+      DR_GROUP_GAP (vinfo_for_stmt (stmt)) = groupsize - last_accessed_element;
+      DR_GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
       if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, vect_location,
@@ -2646,10 +2647,10 @@ vect_analyze_group_access_1 (struct data_reference *dr)
	  dump_printf (MSG_NOTE, "of size %u starting with ",
		       (unsigned)groupsize);
	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
-	  if (GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
+	  if (DR_GROUP_GAP (vinfo_for_stmt (stmt)) != 0)
	    dump_printf_loc (MSG_NOTE, vect_location,
			     "There is a gap of %u elements after the group\n",
-			     GROUP_GAP (vinfo_for_stmt (stmt)));
+			     DR_GROUP_GAP (vinfo_for_stmt (stmt)));
	}

       /* SLP: create an SLP data structure for every interleaving group of
@@ -2678,13 +2679,13 @@ vect_analyze_group_access (struct data_reference *dr)
     {
       /* Dissolve the group if present.  */
       gimple *next;
-      gimple *stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
+      gimple *stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (DR_STMT (dr)));
       while (stmt)
	{
	  stmt_vec_info vinfo = vinfo_for_stmt (stmt);
-	  next = GROUP_NEXT_ELEMENT (vinfo);
-	  GROUP_FIRST_ELEMENT (vinfo) = NULL;
-	  GROUP_NEXT_ELEMENT (vinfo) = NULL;
+	  next = DR_GROUP_NEXT_ELEMENT (vinfo);
+	  DR_GROUP_FIRST_ELEMENT (vinfo) = NULL;
+	  DR_GROUP_NEXT_ELEMENT (vinfo) = NULL;
	  stmt = next;
	}
       return false;
@@ -2723,7 +2724,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
   /* Allow loads with zero step in inner-loop vectorization.  */
   if (loop_vinfo && integer_zerop (step))
     {
-      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+      DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
       if (!nested_in_vect_loop_p (loop, stmt))
	return DR_IS_READ (dr);
       /* Allow references with zero step for outer loops marked
@@ -2742,7 +2743,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
     {
       /* Interleaved accesses are not yet supported within outer-loop
	  vectorization for references in the inner-loop.  */
-      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+      DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;

       /* For the rest of the analysis we use the outer-loop step.  */
       step = STMT_VINFO_DR_STEP (stmt_info);
@@ -2764,7 +2765,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
	  && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
	{
	  /* Mark that it is not interleaving.  */
-	  GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
+	  DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
	  return true;
	}
     }
@@ -3052,13 +3053,13 @@ vect_analyze_data_ref_accesses (vec_info *vinfo)
	    }

	  /* Link the found element into the group list.  */
-	  if (!GROUP_FIRST_ELEMENT (stmtinfo_a))
+	  if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
	    {
-	      GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
+	      DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = DR_STMT (dra);
	      lastinfo = stmtinfo_a;
	    }
-	  GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
-	  GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
+	  DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = DR_STMT (dra);
+	  DR_GROUP_NEXT_ELEMENT (lastinfo) = DR_STMT (drb);
	  lastinfo = stmtinfo_b;
	}
     }
@@ -3119,10 +3120,10 @@ vect_vfa_access_size (data_reference *dr)
   tree ref_type = TREE_TYPE (DR_REF (dr));
   unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
   unsigned HOST_WIDE_INT access_size = ref_size;
-  if (GROUP_FIRST_ELEMENT (stmt_vinfo))
+  if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
     {
-      gcc_assert (GROUP_FIRST_ELEMENT (stmt_vinfo) == DR_STMT (dr));
-      access_size *= GROUP_SIZE (stmt_vinfo) - GROUP_GAP (stmt_vinfo);
+      gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == DR_STMT (dr));
+      access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
     }
   if (STMT_VINFO_VEC_STMT (stmt_vinfo)
       && (vect_supportable_dr_alignment (dr, false)
@@ -3292,8 +3293,8 @@ vect_small_gap_p (loop_vec_info loop_vinfo, data_reference *dr, poly_int64 gap)
   stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
   HOST_WIDE_INT count
     = estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
-  if (GROUP_FIRST_ELEMENT (stmt_info))
-    count *= GROUP_SIZE (vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info)));
+  if (DR_GROUP_FIRST_ELEMENT (stmt_info))
+    count *= DR_GROUP_SIZE (vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info)));
   return estimated_poly_value (gap) <= count * vect_get_scalar_dr_size (dr);
 }
@@ -3481,14 +3482,14 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
	      continue;
	    }

-	  dr_group_first_a = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
+	  dr_group_first_a = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_a));
	  if (dr_group_first_a)
	    {
	      stmt_a = dr_group_first_a;
	      dr_a = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt_a));
	    }

-	  dr_group_first_b = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
+	  dr_group_first_b = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt_b));
	  if (dr_group_first_b)
	    {
	      stmt_b = dr_group_first_b;
@@ -4784,9 +4785,9 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
				    get_alias_set (DR_REF (dr))))
     need_ref_all = true;
   /* Likewise for any of the data references in the stmt group.  */
-  else if (STMT_VINFO_GROUP_SIZE (stmt_info) > 1)
+  else if (DR_GROUP_SIZE (stmt_info) > 1)
     {
-      gimple *orig_stmt = STMT_VINFO_GROUP_FIRST_ELEMENT (stmt_info);
+      gimple *orig_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
       do
	{
	  stmt_vec_info sinfo = vinfo_for_stmt (orig_stmt);
@@ -4797,7 +4798,7 @@ vect_create_data_ref_ptr (gimple *stmt, tree aggr_type, struct loop *at_loop,
	      need_ref_all = true;
	      break;
	    }
-	  orig_stmt = STMT_VINFO_GROUP_NEXT_ELEMENT (sinfo);
+	  orig_stmt = DR_GROUP_NEXT_ELEMENT (sinfo);
	}
       while (orig_stmt);
     }
@@ -6395,7 +6396,7 @@ vect_transform_grouped_load (gimple *stmt, vec<tree> dr_chain, int size,
 void
 vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
 {
-  gimple *first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
+  gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
   gimple *next_stmt, *new_stmt;
   unsigned int i, gap_count;
   tree tmp_data_ref;
@@ -6413,11 +6414,11 @@ vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
       /* Skip the gaps.  Loads created for the gaps will be removed by dead
	  code elimination pass later.  No need to check for the first stmt in
	  the group, since it always exists.
-	  GROUP_GAP is the number of steps in elements from the previous
-	  access (if there is no gap GROUP_GAP is 1).  We skip loads that
+	  DR_GROUP_GAP is the number of steps in elements from the previous
+	  access (if there is no gap DR_GROUP_GAP is 1).  We skip loads that
	  correspond to the gaps.  */
       if (next_stmt != first_stmt
-	  && gap_count < GROUP_GAP (vinfo_for_stmt (next_stmt)))
+	  && gap_count < DR_GROUP_GAP (vinfo_for_stmt (next_stmt)))
	{
	  gap_count++;
	  continue;
@@ -6433,7 +6434,7 @@ vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
	    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt)) = new_stmt;
	  else
	    {
-	      if (!GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+	      if (!DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
		{
		  gimple *prev_stmt =
		    STMT_VINFO_VEC_STMT (vinfo_for_stmt (next_stmt));
@@ -6451,12 +6452,12 @@ vect_record_grouped_load_vectors (gimple *stmt, vec<tree> result_chain)
		}
	    }

-	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+	  next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	  gap_count = 1;
	  /* If NEXT_STMT accesses the same DR as the previous statement,
	     put the same TMP_DATA_REF as its vectorized statement; otherwise
	     get the next data-ref from RESULT_CHAIN.  */
-	  if (!next_stmt || !GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
+	  if (!next_stmt || !DR_GROUP_SAME_DR_STMT (vinfo_for_stmt (next_stmt)))
	    break;
	}
     }
@@ -6598,8 +6599,8 @@ vect_supportable_dr_alignment (struct data_reference *dr,
       if (loop_vinfo
	  && STMT_SLP_TYPE (stmt_info)
	  && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
-			  * GROUP_SIZE (vinfo_for_stmt
-					(GROUP_FIRST_ELEMENT (stmt_info))),
+			  * DR_GROUP_SIZE (vinfo_for_stmt
+					   (DR_GROUP_FIRST_ELEMENT (stmt_info))),
			  TYPE_VECTOR_SUBPARTS (vectype)))
	;
       else if (!loop_vinfo
--- a/gcc/tree-vect-loop.c
+++ b/gcc/tree-vect-loop.c
@@ -598,7 +598,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
	      /* Store the reduction cycles for possible vectorization in
		 loop-aware SLP if it was not detected as reduction
		 chain.  */
-	      if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
+	      if (! REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
		LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
	    }
	}
@@ -659,16 +659,17 @@ vect_fixup_reduc_chain (gimple *stmt)
 {
   gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
   gimple *stmtp;
-  gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
-	      && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
-  GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
+  gcc_assert (!REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
+	      && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
+  REDUC_GROUP_SIZE (vinfo_for_stmt (firstp))
+    = REDUC_GROUP_SIZE (vinfo_for_stmt (stmt));
   do
     {
       stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
-      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
-      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+      REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
+      stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
       if (stmt)
-	GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
+	REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
	  = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
     }
   while (stmt);
@@ -686,12 +687,12 @@ vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
   FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
     if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
       {
-	gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
+	gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
	while (next)
	  {
	    if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
	      break;
-	    next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+	    next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
	  }
	/* If not all stmt in the chain are patterns try to handle
	   the chain without patterns.  */
@@ -2194,8 +2195,8 @@ again:
	 (SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
       if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
	continue;
-      vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
-      unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
+      vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
+      unsigned int size = DR_GROUP_SIZE (vinfo);
       tree vectype = STMT_VINFO_VECTYPE (vinfo);
       if (! vect_store_lanes_supported (vectype, size, false)
	  && ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
@@ -2204,9 +2205,9 @@ again:
       FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
	{
	  vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
-	  vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
-	  bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo);
-	  size = STMT_VINFO_GROUP_SIZE (vinfo);
+	  vinfo = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (vinfo));
+	  bool single_element_p = !DR_GROUP_NEXT_ELEMENT (vinfo);
+	  size = DR_GROUP_SIZE (vinfo);
	  tree vectype = STMT_VINFO_VECTYPE (vinfo);
	  if (! vect_load_lanes_supported (vectype, size, false)
	      && ! vect_grouped_load_supported (vectype, single_element_p,
@@ -2577,12 +2578,12 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
	  if (current_stmt)
	    {
	      current_stmt_info = vinfo_for_stmt (current_stmt);
-	      GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
-	      GROUP_FIRST_ELEMENT (use_stmt_info)
-		= GROUP_FIRST_ELEMENT (current_stmt_info);
+	      REDUC_GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
+	      REDUC_GROUP_FIRST_ELEMENT (use_stmt_info)
+		= REDUC_GROUP_FIRST_ELEMENT (current_stmt_info);
	    }
	  else
-	    GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
+	    REDUC_GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;

	  lhs = gimple_assign_lhs (loop_use_stmt);
	  current_stmt = loop_use_stmt;
@@ -2595,7 +2596,7 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
   /* Swap the operands, if needed, to make the reduction operand be the second
      operand.  */
   lhs = PHI_RESULT (phi);
-  next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
+  next_stmt = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
   while (next_stmt)
     {
       if (gimple_assign_rhs2 (next_stmt) == lhs)
@@ -2622,7 +2623,7 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
		  && !is_loop_header_bb_p (gimple_bb (def_stmt)))))
	    {
	      lhs = gimple_assign_lhs (next_stmt);
-	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+	      next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
	      continue;
	    }
@@ -2670,13 +2671,13 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
	}

       lhs = gimple_assign_lhs (next_stmt);
-      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+      next_stmt = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
     }

   /* Save the chain for further analysis in SLP detection.  */
-  first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
+  first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
   LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
-  GROUP_SIZE (vinfo_for_stmt (first)) = size;
+  REDUC_GROUP_SIZE (vinfo_for_stmt (first)) = size;

   return true;
 }
@@ -3278,12 +3279,12 @@ vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
	}

       /* Dissolve group eventually half-built by vect_is_slp_reduction.  */
-      gimple *first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
+      gimple *first = REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
       while (first)
	{
-	  gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
-	  GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
-	  GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
+	  gimple *next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
+	  REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
+	  REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
	  first = next;
	}
@@ -4270,8 +4271,8 @@ get_initial_defs_for_reduction (slp_tree slp_node,
   two copies of each scalar operand: {s1, s2, s1, s2}.  (NUMBER_OF_COPIES
   will be 2).

-   If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
-   containing the operands.
+   If REDUC_GROUP_SIZE > NUNITS, the scalars will be split into several
+   vectors containing the operands.

   For example, NUNITS is four as before, and the group size is 8
   (s1, s2, ..., s8).  We will create two vectors {s1, s2, s3, s4} and
@@ -4551,7 +4552,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
       vec_initial_defs.reserve (vec_num);
       get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
				      &vec_initial_defs, vec_num,
-				      GROUP_FIRST_ELEMENT (stmt_info),
+				      REDUC_GROUP_FIRST_ELEMENT (stmt_info),
				      neutral_op);
     }
   else
@@ -4857,7 +4858,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
        # b1 = phi <b2, b0>
        a2 = operation (a1)
        b2 = operation (b1)  */
-  slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
+  slp_reduc = (slp_node && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));

   /* True if we should implement SLP_REDUC using native reduction operations
      instead of scalar operations.  */
@@ -4872,7 +4873,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
      we may end up with more than one vector result.  Here we reduce them to
      one vector.  */
-  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
+  if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
     {
       tree first_vect = PHI_RESULT (new_phis[0]);
       gassign *new_vec_stmt = NULL;
@@ -5165,7 +5166,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
     }
   else if (direct_slp_reduc)
     {
-      /* Here we create one vector for each of the GROUP_SIZE results,
+      /* Here we create one vector for each of the REDUC_GROUP_SIZE results,
	  with the elements for other SLP statements replaced with the
	  neutral value.  We can then do a normal reduction on each vector.  */
@@ -5185,7 +5186,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
       tree mask_type = build_same_sized_truth_vector_type (index_type);

       /* Create a vector that, for each element, identifies which of
-	  the GROUP_SIZE results should use it.  */
+	  the REDUC_GROUP_SIZE results should use it.  */
       tree index_mask = build_int_cst (index_elt_type, group_size - 1);
       index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
			     build_vector_from_val (index_type, index_mask));
@@ -5493,8 +5494,8 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
       /* The only case where we need to reduce scalar results in SLP, is
	  unrolling.  If the size of SCALAR_RESULTS is greater than
-	  GROUP_SIZE, we reduce them combining elements modulo
-	  GROUP_SIZE.  */
+	  REDUC_GROUP_SIZE, we reduce them combining elements modulo
+	  REDUC_GROUP_SIZE.  */
       if (slp_reduc)
	{
	  tree res, first_res, new_res;
@@ -5614,10 +5615,10 @@ vect_finalize_reduction:
   /* In SLP reduction chain we reduce vector results into one vector if
-     necessary, hence we set here GROUP_SIZE to 1.  SCALAR_DEST is the LHS of
-     the last stmt in the reduction chain, since we are looking for the loop
-     exit phi node.  */
-  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+     necessary, hence we set here REDUC_GROUP_SIZE to 1.  SCALAR_DEST is the
+     LHS of the last stmt in the reduction chain, since we are looking for
+     the loop exit phi node.  */
+  if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
     {
       gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
       /* Handle reduction patterns.  */
@@ -5628,12 +5629,12 @@ vect_finalize_reduction:
       group_size = 1;
     }

   /* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
-     case that GROUP_SIZE is greater than vectorization factor).  Therefore, we
-     need to match SCALAR_RESULTS with corresponding statements.  The first
-     (GROUP_SIZE / number of new vector stmts) scalar results correspond to
-     the first vector stmt, etc.
-     (RATIO is equal to (GROUP_SIZE / number of new vector stmts)).  */
+     case that REDUC_GROUP_SIZE is greater than vectorization factor).
+     Therefore, we need to match SCALAR_RESULTS with corresponding statements.
+     The first (REDUC_GROUP_SIZE / number of new vector stmts) scalar results
+     correspond to the first vector stmt, etc.
+     (RATIO is equal to (REDUC_GROUP_SIZE / number of new vector stmts)).  */
   if (group_size > new_phis.length ())
     {
       ratio = group_size / new_phis.length ();
@@ -6184,10 +6185,10 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   /* In case of reduction chain we switch to the first stmt in the chain, but
      we don't update STMT_INFO, since only the last stmt is marked as reduction
      and has reduction properties.  */
-  if (GROUP_FIRST_ELEMENT (stmt_info)
-      && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+  if (REDUC_GROUP_FIRST_ELEMENT (stmt_info)
+      && REDUC_GROUP_FIRST_ELEMENT (stmt_info) != stmt)
     {
-      stmt = GROUP_FIRST_ELEMENT (stmt_info);
+      stmt = REDUC_GROUP_FIRST_ELEMENT (stmt_info);
       first_p = false;
     }
@@ -6299,7 +6300,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   /* Not supportable if the reduction variable is used in the loop, unless
      it's a reduction chain.  */
   if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
-      && !GROUP_FIRST_ELEMENT (stmt_info))
+      && !REDUC_GROUP_FIRST_ELEMENT (stmt_info))
     return false;

   /* Reductions that are not used even in an enclosing outer-loop,
@@ -6604,12 +6605,13 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   if (orig_stmt)
     gcc_assert (tmp == orig_stmt
-		|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt);
+		|| (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp))
+		    == orig_stmt));
   else
     /* We changed STMT to be the first stmt in reduction chain, hence we
        check that in this case the first element in the chain is STMT.  */
     gcc_assert (stmt == tmp
-		|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
+		|| REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);

   if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
     return false;
@@ -6843,9 +6845,9 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   /* For SLP reductions, see if there is a neutral value we can use.  */
   tree neutral_op = NULL_TREE;
   if (slp_node)
-    neutral_op
-      = neutral_op_for_slp_reduction (slp_node_instance->reduc_phis, code,
-				      GROUP_FIRST_ELEMENT (stmt_info) != NULL);
+    neutral_op = neutral_op_for_slp_reduction
+      (slp_node_instance->reduc_phis, code,
+       REDUC_GROUP_FIRST_ELEMENT (stmt_info) != NULL);

   if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
     {
@@ -6872,7 +6874,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   if (reduction_type == FOLD_LEFT_REDUCTION
       && slp_node
-      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+      && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
     {
       /* We cannot use in-order reductions in this case because there is
	  an implicit reassociation of the operations involved.  */
@@ -6901,7 +6903,7 @@ vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
   /* Check extra constraints for variable-length unchained SLP reductions.  */
   if (STMT_SLP_TYPE (stmt_info)
-      && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
+      && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
       && !nunits_out.is_constant ())
     {
       /* We checked above that we could build the initial vector when
@@ -8635,7 +8637,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
		     interleaving chain was completed - free all the stores in
		     the chain.  */
		  gsi_next (&si);
-		  vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
+		  vect_remove_stores (DR_GROUP_FIRST_ELEMENT (stmt_info));
		}
	      else
		{
--- a/gcc/tree-vect-patterns.c
+++ b/gcc/tree-vect-patterns.c
@@ -231,7 +231,7 @@ vect_reassociating_reduction_p (stmt_vec_info stmt_vinfo)
 {
   return (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def
	   ? STMT_VINFO_REDUC_TYPE (stmt_vinfo) != FOLD_LEFT_REDUCTION
-	   : GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL);
+	   : REDUC_GROUP_FIRST_ELEMENT (stmt_vinfo) != NULL);
 }

 /* Function vect_recog_dot_prod_pattern
...@@ -195,16 +195,16 @@ vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt) ...@@ -195,16 +195,16 @@ vect_get_place_in_interleaving_chain (gimple *stmt, gimple *first_stmt)
gimple *next_stmt = first_stmt; gimple *next_stmt = first_stmt;
int result = 0; int result = 0;
if (first_stmt != GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) if (first_stmt != DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
return -1; return -1;
do do
{ {
if (next_stmt == stmt) if (next_stmt == stmt)
return result; return result;
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt)); next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
if (next_stmt) if (next_stmt)
result += GROUP_GAP (vinfo_for_stmt (next_stmt)); result += DR_GROUP_GAP (vinfo_for_stmt (next_stmt));
} }
while (next_stmt); while (next_stmt);
...@@ -906,7 +906,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap, ...@@ -906,7 +906,7 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
else else
{ {
/* Load. */ /* Load. */
first_load = GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)); first_load = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt));
if (prev_first_load) if (prev_first_load)
{ {
/* Check that there are no loads from different interleaving /* Check that there are no loads from different interleaving
...@@ -1182,8 +1182,8 @@ vect_build_slp_tree_2 (vec_info *vinfo, ...@@ -1182,8 +1182,8 @@ vect_build_slp_tree_2 (vec_info *vinfo,
FOR_EACH_VEC_ELT (stmts, i, stmt) FOR_EACH_VEC_ELT (stmts, i, stmt)
{ {
/* But for reduction chains only check on the first stmt. */ /* But for reduction chains only check on the first stmt. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) if (REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
&& GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt) && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) != stmt)
continue; continue;
if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != def_type) if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != def_type)
return NULL; return NULL;
...@@ -1660,12 +1660,12 @@ vect_attempt_slp_rearrange_stmts (slp_instance slp_instn) ...@@ -1660,12 +1660,12 @@ vect_attempt_slp_rearrange_stmts (slp_instance slp_instn)
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node) FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
{ {
gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0]; gimple *first_stmt = SLP_TREE_SCALAR_STMTS (node)[0];
first_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)); first_stmt = DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt));
/* But we have to keep those permutations that are required because /* But we have to keep those permutations that are required because
of handling of gaps. */ of handling of gaps. */
if (known_eq (unrolling_factor, 1U) if (known_eq (unrolling_factor, 1U)
|| (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt)) || (group_size == DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
&& GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)) && DR_GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0))
SLP_TREE_LOAD_PERMUTATION (node).release (); SLP_TREE_LOAD_PERMUTATION (node).release ();
else else
for (j = 0; j < SLP_TREE_LOAD_PERMUTATION (node).length (); ++j) for (j = 0; j < SLP_TREE_LOAD_PERMUTATION (node).length (); ++j)
...@@ -1718,7 +1718,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn) ...@@ -1718,7 +1718,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
/* Reduction (there are no data-refs in the root). /* Reduction (there are no data-refs in the root).
In reduction chain the order of the loads is not important. */ In reduction chain the order of the loads is not important. */
if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt)) if (!STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt))
&& !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))) && !REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
vect_attempt_slp_rearrange_stmts (slp_instn); vect_attempt_slp_rearrange_stmts (slp_instn);
/* In basic block vectorization we allow any subchain of an interleaving /* In basic block vectorization we allow any subchain of an interleaving
...@@ -1738,12 +1738,12 @@ vect_supported_load_permutation_p (slp_instance slp_instn) ...@@ -1738,12 +1738,12 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
{ {
if (j != 0 if (j != 0
&& (next_load != load && (next_load != load
|| GROUP_GAP (vinfo_for_stmt (load)) != 1)) || DR_GROUP_GAP (vinfo_for_stmt (load)) != 1))
{ {
subchain_p = false; subchain_p = false;
break; break;
} }
next_load = GROUP_NEXT_ELEMENT (vinfo_for_stmt (load)); next_load = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (load));
} }
if (subchain_p) if (subchain_p)
SLP_TREE_LOAD_PERMUTATION (node).release (); SLP_TREE_LOAD_PERMUTATION (node).release ();
...@@ -1751,17 +1751,17 @@ vect_supported_load_permutation_p (slp_instance slp_instn) ...@@ -1751,17 +1751,17 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
{ {
stmt_vec_info group_info stmt_vec_info group_info
	  = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
-	group_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (group_info));
+	group_info = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (group_info));
 	unsigned HOST_WIDE_INT nunits;
 	unsigned k, maxk = 0;
 	FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (node), j, k)
 	  if (k > maxk)
 	    maxk = k;
 	/* In BB vectorization we may not actually use a loaded vector
-	   accessing elements in excess of GROUP_SIZE.  */
+	   accessing elements in excess of DR_GROUP_SIZE.  */
 	tree vectype = STMT_VINFO_VECTYPE (group_info);
 	if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
-	    || maxk >= (GROUP_SIZE (group_info) & ~(nunits - 1)))
+	    || maxk >= (DR_GROUP_SIZE (group_info) & ~(nunits - 1)))
 	  {
 	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 			     "BB vectorization with gaps at the end of "
@@ -1834,36 +1834,36 @@ static gimple *
 vect_split_slp_store_group (gimple *first_stmt, unsigned group1_size)
 {
   stmt_vec_info first_vinfo = vinfo_for_stmt (first_stmt);
-  gcc_assert (GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
+  gcc_assert (DR_GROUP_FIRST_ELEMENT (first_vinfo) == first_stmt);
   gcc_assert (group1_size > 0);
-  int group2_size = GROUP_SIZE (first_vinfo) - group1_size;
+  int group2_size = DR_GROUP_SIZE (first_vinfo) - group1_size;
   gcc_assert (group2_size > 0);
-  GROUP_SIZE (first_vinfo) = group1_size;
+  DR_GROUP_SIZE (first_vinfo) = group1_size;

   gimple *stmt = first_stmt;
   for (unsigned i = group1_size; i > 1; i--)
     {
-      stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
-      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
+      stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+      gcc_assert (DR_GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
     }
   /* STMT is now the last element of the first group.  */
-  gimple *group2 = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
-  GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;
+  gimple *group2 = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
+  DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)) = 0;

-  GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
-  for (stmt = group2; stmt; stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
+  DR_GROUP_SIZE (vinfo_for_stmt (group2)) = group2_size;
+  for (stmt = group2; stmt; stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt)))
     {
-      GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
-      gcc_assert (GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
+      DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = group2;
+      gcc_assert (DR_GROUP_GAP (vinfo_for_stmt (stmt)) == 1);
     }

-  /* For the second group, the GROUP_GAP is that before the original group,
+  /* For the second group, the DR_GROUP_GAP is that before the original group,
      plus skipping over the first vector.  */
-  GROUP_GAP (vinfo_for_stmt (group2)) =
-    GROUP_GAP (first_vinfo) + group1_size;
+  DR_GROUP_GAP (vinfo_for_stmt (group2))
+    = DR_GROUP_GAP (first_vinfo) + group1_size;

-  /* GROUP_GAP of the first group now has to skip over the second group too.  */
-  GROUP_GAP (first_vinfo) += group2_size;
+  /* DR_GROUP_GAP of the first group now has to skip over the second group too.  */
+  DR_GROUP_GAP (first_vinfo) += group2_size;

   if (dump_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location, "Split group into %d and %d\n",
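The two gap updates at the end of vect_split_slp_store_group are easier to follow with concrete numbers. Below is a minimal standalone sketch (a toy `elem` type of my own, not GCC's stmt_vec_info) that splits a chain of 6 consecutive stores into 4 + 2 and applies the same bookkeeping; each subgroup's leader ends up skipping the other subgroup's elements:

#include <cstdio>

struct elem { elem *first, *next; unsigned size, gap; };

int main ()
{
  /* A group of 6 consecutive stores: leader gap 0, element gaps 1.  */
  elem e[6] = {};
  for (int i = 0; i < 6; i++)
    {
      e[i].first = &e[0];
      e[i].next = i + 1 < 6 ? &e[i + 1] : nullptr;
      e[i].gap = i ? 1 : 0;
    }
  e[0].size = 6;

  unsigned group1_size = 4;
  unsigned group2_size = e[0].size - group1_size;
  e[0].size = group1_size;
  e[group1_size - 1].next = nullptr;    /* cut the chain after element 3 */
  elem *group2 = &e[group1_size];
  group2->size = group2_size;
  for (elem *p = group2; p; p = p->next)
    p->first = group2;
  /* Second group: gap before the original group plus the first vector.  */
  group2->gap = e[0].gap + group1_size;
  /* First group now skips over the second group too.  */
  e[0].gap += group2_size;
  printf ("sizes %u/%u, gaps %u/%u\n",
	  e[0].size, group2->size, e[0].gap, group2->gap);
  /* prints: sizes 4/2, gaps 2/4 */
  return 0;
}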
@@ -1891,7 +1891,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
 {
   slp_instance new_instance;
   slp_tree node;
-  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
+  unsigned int group_size;
   tree vectype, scalar_type = NULL_TREE;
   gimple *next;
   unsigned int i;
@@ -1899,20 +1899,17 @@ vect_analyze_slp_instance (vec_info *vinfo,
   struct data_reference *dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (stmt));
   vec<gimple *> scalar_stmts;

-  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
     {
-      if (dr)
-	{
-	  scalar_type = TREE_TYPE (DR_REF (dr));
-	  vectype = get_vectype_for_scalar_type (scalar_type);
-	}
-      else
-	{
-	  gcc_assert (is_a <loop_vec_info> (vinfo));
-	  vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
-	}
-      group_size = GROUP_SIZE (vinfo_for_stmt (stmt));
+      scalar_type = TREE_TYPE (DR_REF (dr));
+      vectype = get_vectype_for_scalar_type (scalar_type);
+      group_size = DR_GROUP_SIZE (vinfo_for_stmt (stmt));
+    }
+  else if (!dr && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+    {
+      gcc_assert (is_a <loop_vec_info> (vinfo));
+      vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
+      group_size = REDUC_GROUP_SIZE (vinfo_for_stmt (stmt));
     }
   else
     {
@@ -1938,7 +1935,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
   /* Create a node (a root of the SLP tree) for the packed grouped stores.  */
   scalar_stmts.create (group_size);
   next = stmt;
-  if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+  if (STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
     {
       /* Collect the stores and store them in SLP_TREE_SCALAR_STMTS.  */
       while (next)
@@ -1949,13 +1946,27 @@ vect_analyze_slp_instance (vec_info *vinfo,
 		STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
 	  else
 	    scalar_stmts.safe_push (next);
-	  next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+	  next = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
+	}
+    }
+  else if (!dr && REDUC_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
+    {
+      /* Collect the reduction stmts and store them in
+	 SLP_TREE_SCALAR_STMTS.  */
+      while (next)
+	{
+	  if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next))
+	      && STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)))
+	    scalar_stmts.safe_push (
+		STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next)));
+	  else
+	    scalar_stmts.safe_push (next);
+	  next = REDUC_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
 	}
       /* Mark the first element of the reduction chain as reduction to properly
 	 transform the node.  In the reduction analysis phase only the last
 	 element of the chain is marked as reduction.  */
-      if (!STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt)))
-	STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
+      STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) = vect_reduction_def;
     }
   else
     {
@@ -2022,7 +2033,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
 	  gimple *load, *first_stmt;
 	  bool this_load_permuted = false;
 	  load_permutation.create (group_size);
-	  first_stmt = GROUP_FIRST_ELEMENT
+	  first_stmt = DR_GROUP_FIRST_ELEMENT
 	    (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
 	  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (load_node), j, load)
 	    {
@@ -2038,8 +2049,8 @@ vect_analyze_slp_instance (vec_info *vinfo,
 		 a gap either because the group is larger than the SLP
 		 group-size or because there is a gap between the groups.  */
 	      && (known_eq (unrolling_factor, 1U)
-		  || (group_size == GROUP_SIZE (vinfo_for_stmt (first_stmt))
-		      && GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
+		  || (group_size == DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
+		      && DR_GROUP_GAP (vinfo_for_stmt (first_stmt)) == 0)))
 	    {
 	      load_permutation.release ();
 	      continue;
@@ -2074,7 +2085,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
 	  slp_tree load_node;
 	  FOR_EACH_VEC_ELT (loads, i, load_node)
 	    {
-	      gimple *first_stmt = GROUP_FIRST_ELEMENT
+	      gimple *first_stmt = DR_GROUP_FIRST_ELEMENT
 		(vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (load_node)[0]));
 	      stmt_vec_info stmt_vinfo = vinfo_for_stmt (first_stmt);
 	      /* Use SLP for strided accesses (or if we
@@ -2082,7 +2093,7 @@ vect_analyze_slp_instance (vec_info *vinfo,
 	      if (STMT_VINFO_STRIDED_P (stmt_vinfo)
 		  || ! vect_load_lanes_supported
 			(STMT_VINFO_VECTYPE (stmt_vinfo),
-			 GROUP_SIZE (stmt_vinfo), false))
+			 DR_GROUP_SIZE (stmt_vinfo), false))
 		break;
 	    }
 	  if (i == loads.length ())
@@ -2120,8 +2131,8 @@ vect_analyze_slp_instance (vec_info *vinfo,
      vector size.  */
   unsigned HOST_WIDE_INT const_nunits;
   if (is_a <bb_vec_info> (vinfo)
-      && GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
       && STMT_VINFO_GROUPED_ACCESS (vinfo_for_stmt (stmt))
+      && DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
       && nunits.is_constant (&const_nunits))
     {
       /* We consider breaking the group only on VF boundaries from the existing
@@ -2187,9 +2198,9 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size)
 	  while (stmt)
 	    {
 	      stmt_vec_info vinfo = vinfo_for_stmt (stmt);
-	      next = GROUP_NEXT_ELEMENT (vinfo);
-	      GROUP_FIRST_ELEMENT (vinfo) = NULL;
-	      GROUP_NEXT_ELEMENT (vinfo) = NULL;
+	      next = REDUC_GROUP_NEXT_ELEMENT (vinfo);
+	      REDUC_GROUP_FIRST_ELEMENT (vinfo) = NULL;
+	      REDUC_GROUP_NEXT_ELEMENT (vinfo) = NULL;
 	      stmt = next;
 	    }
 	  STMT_VINFO_DEF_TYPE (vinfo_for_stmt (first_element))
@@ -2521,10 +2532,10 @@ vect_slp_analyze_node_operations_1 (vec_info *vinfo, slp_tree node,
      scalar stmts in this node.  For SLP reductions it is equal to the
      number of vector statements in the children (which has already been
      calculated by the recursive call).  Otherwise it is the number of
-     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by
+     scalar elements in one scalar iteration (DR_GROUP_SIZE) multiplied by
      VF divided by the number of elements in a vector.  */
-  if (GROUP_FIRST_ELEMENT (stmt_info)
-      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
+  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
+      && REDUC_GROUP_FIRST_ELEMENT (stmt_info))
     SLP_TREE_NUMBER_OF_VEC_STMTS (node)
       = SLP_TREE_NUMBER_OF_VEC_STMTS (SLP_TREE_CHILDREN (node)[0]);
   else
@@ -3654,7 +3665,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
   if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
     return false;

-  stmt_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
+  stmt_info = vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));

   mode = TYPE_MODE (vectype);
@@ -3711,7 +3722,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
       for (int k = 0; k < group_size; k++)
 	{
 	  unsigned int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
-			    + j * STMT_VINFO_GROUP_SIZE (stmt_info));
+			    + j * DR_GROUP_SIZE (stmt_info));
 	  vec_index = i / nunits;
 	  mask_element = i % nunits;
 	  if (vec_index == first_vec_index
@@ -3878,8 +3889,9 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
   /* Mark the first element of the reduction chain as reduction to properly
      transform the node.  In the analysis phase only the last element of the
      chain is marked as reduction.  */
-  if (GROUP_FIRST_ELEMENT (stmt_info) && !STMT_VINFO_GROUPED_ACCESS (stmt_info)
-      && GROUP_FIRST_ELEMENT (stmt_info) == stmt)
+  if (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
+      && REDUC_GROUP_FIRST_ELEMENT (stmt_info)
+      && REDUC_GROUP_FIRST_ELEMENT (stmt_info) == stmt)
     {
       STMT_VINFO_DEF_TYPE (stmt_info) = vect_reduction_def;
       STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
@@ -1017,7 +1017,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
      so we want the DR for the first statement.  */
   if (!slp_node && grouped_access_p)
     {
-      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+      first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
       dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
     }
@@ -1027,7 +1027,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
   bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

   /* We assume that the cost of a single store-lanes instruction is
-     equivalent to the cost of GROUP_SIZE separate stores.  If a grouped
+     equivalent to the cost of DR_GROUP_SIZE separate stores.  If a grouped
      access is instead being provided by a permute-and-store operation,
      include the cost of the permutes.  */
   if (first_stmt_p
@@ -1035,7 +1035,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
     {
       /* Uses a high and low interleave or shuffle operations for each
 	 needed permute.  */
-      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+      int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
       int nstmts = ncopies * ceil_log2 (group_size) * group_size;
       inside_cost = record_stmt_cost (cost_vec, nstmts, vec_perm,
 				      stmt_info, 0, vect_body);
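As a sanity check on the permute-cost formula, here is a small sketch of the same arithmetic (`permute_cost` is an illustrative helper of mine, not a GCC function): interleaving a group of 4 needs ceil_log2(4) == 2 stages of group_size permutes each, so 8 vec_perm stmts per copy.

#include <cstdio>

/* nstmts == ncopies * ceil_log2 (group_size) * group_size.  */
static int
permute_cost (int ncopies, int group_size)
{
  int stages = 0;
  for (int n = group_size - 1; n > 0; n >>= 1)
    stages++;			/* ceil (log2 (group_size)) */
  return ncopies * stages * group_size;
}

int main ()
{
  /* group_size 4, ncopies 2: 2 stages of 4 permutes, twice -> 16.  */
  printf ("%d\n", permute_cost (2, 4));
  return 0;
}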
@@ -1159,7 +1159,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
     {
       /* If the load is permuted then the alignment is determined by
 	 the first group element not by the first scalar stmt DR.  */
-      gimple *stmt = GROUP_FIRST_ELEMENT (stmt_info);
+      gimple *stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
       stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
       /* Record the cost for the permutation.  */
       unsigned n_perms;
@@ -1173,14 +1173,14 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
 				       stmt_info, 0, vect_body);
       /* And adjust the number of loads performed.  This handles
 	 redundancies as well as loads that are later dead.  */
-      auto_sbitmap perm (GROUP_SIZE (stmt_info));
+      auto_sbitmap perm (DR_GROUP_SIZE (stmt_info));
       bitmap_clear (perm);
       for (unsigned i = 0;
 	   i < SLP_TREE_LOAD_PERMUTATION (slp_node).length (); ++i)
 	bitmap_set_bit (perm, SLP_TREE_LOAD_PERMUTATION (slp_node)[i]);
       ncopies = 0;
       bool load_seen = false;
-      for (unsigned i = 0; i < GROUP_SIZE (stmt_info); ++i)
+      for (unsigned i = 0; i < DR_GROUP_SIZE (stmt_info); ++i)
 	{
 	  if (i % assumed_nunits == 0)
 	    {
@@ -1194,7 +1194,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
       if (load_seen)
 	ncopies++;
       gcc_assert (ncopies
-		  <= (GROUP_SIZE (stmt_info) - GROUP_GAP (stmt_info)
+		  <= (DR_GROUP_SIZE (stmt_info) - DR_GROUP_GAP (stmt_info)
 		      + assumed_nunits - 1) / assumed_nunits);
     }
@@ -1205,7 +1205,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
      so we want the DR for the first statement.  */
   if (!slp_node && grouped_access_p)
     {
-      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+      first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
       dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
     }
@@ -1215,7 +1215,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
   bool first_stmt_p = (first_stmt == STMT_VINFO_STMT (stmt_info));

   /* We assume that the cost of a single load-lanes instruction is
-     equivalent to the cost of GROUP_SIZE separate loads.  If a grouped
+     equivalent to the cost of DR_GROUP_SIZE separate loads.  If a grouped
      access is instead being provided by a load-and-permute operation,
      include the cost of the permutes.  */
   if (first_stmt_p
@@ -1223,7 +1223,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, unsigned ncopies,
     {
       /* Uses an even and odd extract operations or shuffle operations
 	 for each needed permute.  */
-      int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+      int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
       int nstmts = ncopies * ceil_log2 (group_size) * group_size;
       inside_cost += record_stmt_cost (cost_vec, nstmts, vec_perm,
 				       stmt_info, 0, vect_body);
@@ -2211,12 +2211,12 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
   vec_info *vinfo = stmt_info->vinfo;
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
-  gimple *first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+  gimple *first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
   data_reference *first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
-  unsigned int group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+  unsigned int group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
   bool single_element_p = (stmt == first_stmt
-			   && !GROUP_NEXT_ELEMENT (stmt_info));
-  unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
+			   && !DR_GROUP_NEXT_ELEMENT (stmt_info));
+  unsigned HOST_WIDE_INT gap = DR_GROUP_GAP (vinfo_for_stmt (first_stmt));
   poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);

   /* True if the vectorized statements would access beyond the last
@@ -2241,7 +2241,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
     {
       if (STMT_VINFO_STRIDED_P (stmt_info))
 	{
-	  /* Try to use consecutive accesses of GROUP_SIZE elements,
+	  /* Try to use consecutive accesses of DR_GROUP_SIZE elements,
 	     separated by the stride, until we have a complete vector.
 	     Fall back to scalar accesses if that isn't possible.  */
 	  if (multiple_p (nunits, group_size))
@@ -2347,7 +2347,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
     {
       /* STMT is the leader of the group.  Check the operands of all the
 	 stmts of the group.  */
-      gimple *next_stmt = GROUP_NEXT_ELEMENT (stmt_info);
+      gimple *next_stmt = DR_GROUP_NEXT_ELEMENT (stmt_info);
       while (next_stmt)
 	{
 	  tree op = vect_get_store_rhs (next_stmt);
@@ -2360,7 +2360,7 @@ get_group_load_store_type (gimple *stmt, tree vectype, bool slp,
 			     "use not simple.\n");
 	      return false;
 	    }
-	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+	  next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
 	}
     }
@@ -2512,9 +2512,9 @@ get_load_store_type (gimple *stmt, tree vectype, bool slp, bool masked_p,
      traditional behavior until that can be fixed.  */
   if (*memory_access_type == VMAT_ELEMENTWISE
       && !STMT_VINFO_STRIDED_P (stmt_info)
-      && !(stmt == GROUP_FIRST_ELEMENT (stmt_info)
-	   && !GROUP_NEXT_ELEMENT (stmt_info)
-	   && !pow2p_hwi (GROUP_SIZE (stmt_info))))
+      && !(stmt == DR_GROUP_FIRST_ELEMENT (stmt_info)
+	   && !DR_GROUP_NEXT_ELEMENT (stmt_info)
+	   && !pow2p_hwi (DR_GROUP_SIZE (stmt_info))))
     {
       if (dump_enabled_p ())
 	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -6182,7 +6182,7 @@ get_group_alias_ptr_type (gimple *first_stmt)
   gimple *next_stmt;

   first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
-  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
+  next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (first_stmt));
   while (next_stmt)
     {
       next_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (next_stmt));
@@ -6194,7 +6194,7 @@ get_group_alias_ptr_type (gimple *first_stmt)
 			     "conflicting alias set types.\n");
 	  return ptr_type_node;
 	}
-      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+      next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
     }
   return reference_alias_ptr_type (DR_REF (first_dr));
 }
@@ -6385,9 +6385,9 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	     && (slp || memory_access_type != VMAT_CONTIGUOUS));
   if (grouped_store)
     {
-      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+      first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
       first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
-      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
     }
   else
     {
@@ -6568,8 +6568,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
     {
-      gimple *group_stmt = GROUP_FIRST_ELEMENT (stmt_info);
-      GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
+      gimple *group_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+      DR_GROUP_STORE_COUNT (vinfo_for_stmt (group_stmt))++;
     }

   if (grouped_store)
@@ -6579,8 +6579,8 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       /* We vectorize all the stmts of the interleaving group when we
 	 reach the last stmt in the group.  */
-      if (GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
-	  < GROUP_SIZE (vinfo_for_stmt (first_stmt))
+      if (DR_GROUP_STORE_COUNT (vinfo_for_stmt (first_stmt))
+	  < DR_GROUP_SIZE (vinfo_for_stmt (first_stmt))
 	  && !slp)
 	{
 	  *vec_stmt = NULL;
@@ -6594,7 +6594,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	 group.  */
       vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
       first_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[0];
-      gcc_assert (GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
+      gcc_assert (DR_GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_stmt)) == first_stmt);
       first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
       op = vect_get_store_rhs (first_stmt);
     }
@@ -6848,7 +6848,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 		    }
 		}
 	    }
-	  next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+	  next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
 	  if (slp)
 	    break;
 	}
@@ -6965,21 +6965,21 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	     used as an input to vect_permute_store_chain(), and OPRNDS as
 	     an input to vect_get_vec_def_for_stmt_copy() for the next copy.
-	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
+	     If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
 	     OPRNDS are of size 1.  */
 	  next_stmt = first_stmt;
 	  for (i = 0; i < group_size; i++)
 	    {
 	      /* Since gaps are not supported for interleaved stores,
-		 GROUP_SIZE is the exact number of stmts in the chain.
+		 DR_GROUP_SIZE is the exact number of stmts in the chain.
 		 Therefore, NEXT_STMT can't be NULL_TREE.  In case that
-		 there is no interleaving, GROUP_SIZE is 1, and only one
+		 there is no interleaving, DR_GROUP_SIZE is 1, and only one
 		 iteration of the loop will be executed.  */
 	      op = vect_get_store_rhs (next_stmt);
 	      vec_oprnd = vect_get_vec_def_for_operand (op, next_stmt);
 	      dr_chain.quick_push (vec_oprnd);
 	      oprnds.quick_push (vec_oprnd);
-	      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+	      next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
 	    }
 	  if (mask)
 	    vec_mask = vect_get_vec_def_for_operand (mask, stmt,
@@ -7025,7 +7025,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	     DR_CHAIN is then used as an input to vect_permute_store_chain(),
 	     and OPRNDS as an input to vect_get_vec_def_for_stmt_copy() for the
 	     next copy.
-	     If the store is not grouped, GROUP_SIZE is 1, and DR_CHAIN and
+	     If the store is not grouped, DR_GROUP_SIZE is 1, and DR_CHAIN and
 	     OPRNDS are of size 1.  */
 	  for (i = 0; i < group_size; i++)
 	    {
@@ -7229,7 +7229,7 @@ vectorizable_store (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       if (slp)
 	continue;

-      next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
+      next_stmt = DR_GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
       if (!next_stmt)
 	break;
     }
@@ -7545,8 +7545,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
       gcc_assert (!nested_in_vect_loop);
       gcc_assert (!STMT_VINFO_GATHER_SCATTER_P (stmt_info));

-      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
-      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+      first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));

       if (slp && SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
 	slp_perm = true;
@@ -7568,10 +7568,10 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	  /* Similarly when the stmt is a load that is both part of a SLP
 	     instance and a loop vectorized stmt via the same-dr mechanism
 	     we have to give up.  */
-	  if (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)
+	  if (DR_GROUP_SAME_DR_STMT (stmt_info)
 	      && (STMT_SLP_TYPE (stmt_info)
 		  != STMT_SLP_TYPE (vinfo_for_stmt
-				      (STMT_VINFO_GROUP_SAME_DR_STMT (stmt_info)))))
+				      (DR_GROUP_SAME_DR_STMT (stmt_info)))))
 	    {
 	      if (dump_enabled_p ())
 		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
@@ -7676,7 +7676,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   if (grouped_load)
     {
-      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+      first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
       first_dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_stmt));
     }
   else
@@ -7686,7 +7686,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
     }

   if (slp && grouped_load)
     {
-      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
       ref_type = get_group_alias_ptr_type (first_stmt);
     }
   else
@@ -7899,8 +7899,8 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
   if (grouped_load)
     {
-      first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
-      group_size = GROUP_SIZE (vinfo_for_stmt (first_stmt));
+      first_stmt = DR_GROUP_FIRST_ELEMENT (stmt_info);
+      group_size = DR_GROUP_SIZE (vinfo_for_stmt (first_stmt));
       /* For SLP vectorization we directly vectorize a subchain
 	 without permutation.  */
       if (slp && ! SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
@@ -8525,7 +8525,7 @@ vectorizable_load (gimple *stmt, gimple_stmt_iterator *gsi, gimple **vec_stmt,
 	      /* With SLP permutation we load the gaps as well, without
 		 we need to skip the gaps after we manage to fully load
-		 all elements.  group_gap_adj is GROUP_SIZE here.  */
+		 all elements.  group_gap_adj is DR_GROUP_SIZE here.  */
 	      group_elt += nunits;
 	      if (maybe_ne (group_gap_adj, 0U)
 		  && !slp_perm
@@ -9685,8 +9685,8 @@ vect_transform_stmt (gimple *stmt, gimple_stmt_iterator *gsi,
 	 meanwhile.  */
       *grouped_store = true;
       stmt_vec_info group_info
-	= vinfo_for_stmt (GROUP_FIRST_ELEMENT (stmt_info));
-      if (GROUP_STORE_COUNT (group_info) == GROUP_SIZE (group_info))
+	= vinfo_for_stmt (DR_GROUP_FIRST_ELEMENT (stmt_info));
+      if (DR_GROUP_STORE_COUNT (group_info) == DR_GROUP_SIZE (group_info))
 	is_store = true;
     }
   else
@@ -9802,7 +9802,7 @@ vect_remove_stores (gimple *first_stmt)
     {
       stmt_vec_info stmt_info = vinfo_for_stmt (next);
-      tmp = GROUP_NEXT_ELEMENT (stmt_info);
+      tmp = DR_GROUP_NEXT_ELEMENT (stmt_info);
       if (is_pattern_stmt_p (stmt_info))
 	next = STMT_VINFO_RELATED_STMT (stmt_info);
       /* Free the attached stmt_vec_info and remove the stmt.  */
@@ -9851,12 +9851,12 @@ new_stmt_vec_info (gimple *stmt, vec_info *vinfo)
   STMT_SLP_TYPE (res) = loop_vect;
   STMT_VINFO_NUM_SLP_USES (res) = 0;
-  GROUP_FIRST_ELEMENT (res) = NULL;
-  GROUP_NEXT_ELEMENT (res) = NULL;
-  GROUP_SIZE (res) = 0;
-  GROUP_STORE_COUNT (res) = 0;
-  GROUP_GAP (res) = 0;
-  GROUP_SAME_DR_STMT (res) = NULL;
+  res->first_element = NULL;	/* GROUP_FIRST_ELEMENT */
+  res->next_element = NULL;	/* GROUP_NEXT_ELEMENT */
+  res->size = 0;		/* GROUP_SIZE */
+  res->store_count = 0;		/* GROUP_STORE_COUNT */
+  res->gap = 0;			/* GROUP_GAP */
+  res->same_dr_stmt = NULL;	/* GROUP_SAME_DR_STMT */

   return res;
 }
@@ -951,13 +951,7 @@ STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
 #define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
 #define STMT_VINFO_SIMD_CLONE_INFO(S)      (S)->simd_clone_info
 #define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
-#define STMT_VINFO_GROUP_FIRST_ELEMENT(S)  (S)->first_element
-#define STMT_VINFO_GROUP_NEXT_ELEMENT(S)   (S)->next_element
-#define STMT_VINFO_GROUP_SIZE(S)           (S)->size
-#define STMT_VINFO_GROUP_STORE_COUNT(S)    (S)->store_count
-#define STMT_VINFO_GROUP_GAP(S)            (S)->gap
-#define STMT_VINFO_GROUP_SAME_DR_STMT(S)   (S)->same_dr_stmt
-#define STMT_VINFO_GROUPED_ACCESS(S)       ((S)->first_element != NULL && (S)->data_ref_info)
+#define STMT_VINFO_GROUPED_ACCESS(S)       ((S)->data_ref_info && DR_GROUP_FIRST_ELEMENT(S))
 #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
 #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
 #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
@@ -965,12 +959,16 @@ STMT_VINFO_BB_VINFO (stmt_vec_info stmt_vinfo)
 #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type
 #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def

-#define GROUP_FIRST_ELEMENT(S)  (S)->first_element
-#define GROUP_NEXT_ELEMENT(S)   (S)->next_element
-#define GROUP_SIZE(S)           (S)->size
-#define GROUP_STORE_COUNT(S)    (S)->store_count
-#define GROUP_GAP(S)            (S)->gap
-#define GROUP_SAME_DR_STMT(S)   (S)->same_dr_stmt
+#define DR_GROUP_FIRST_ELEMENT(S)  (gcc_checking_assert ((S)->data_ref_info), (S)->first_element)
+#define DR_GROUP_NEXT_ELEMENT(S)   (gcc_checking_assert ((S)->data_ref_info), (S)->next_element)
+#define DR_GROUP_SIZE(S)           (gcc_checking_assert ((S)->data_ref_info), (S)->size)
+#define DR_GROUP_STORE_COUNT(S)    (gcc_checking_assert ((S)->data_ref_info), (S)->store_count)
+#define DR_GROUP_GAP(S)            (gcc_checking_assert ((S)->data_ref_info), (S)->gap)
+#define DR_GROUP_SAME_DR_STMT(S)   (gcc_checking_assert ((S)->data_ref_info), (S)->same_dr_stmt)
+#define REDUC_GROUP_FIRST_ELEMENT(S) (gcc_checking_assert (!(S)->data_ref_info), (S)->first_element)
+#define REDUC_GROUP_NEXT_ELEMENT(S)  (gcc_checking_assert (!(S)->data_ref_info), (S)->next_element)
+#define REDUC_GROUP_SIZE(S)          (gcc_checking_assert (!(S)->data_ref_info), (S)->size)

 #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
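The new accessors rely on the comma operator: gcc_checking_assert runs first, and the expression then yields the field itself, which in C++ remains an lvalue, so writes such as DR_GROUP_SIZE (first_vinfo) = group1_size in the hunks above still compile. A minimal standalone sketch of the pattern, using plain assert and a toy type of my own (not the real stmt_vec_info); it also shows why new_stmt_vec_info above has to write the fields directly:

#include <cassert>

struct toy_stmt_info
{
  void *data_ref_info;
  unsigned size;
};

/* Assert-then-access: the comma expression's result is the field,
   and in C++ it keeps the field's lvalue-ness.  */
#define TOY_DR_GROUP_SIZE(S)    (assert ((S)->data_ref_info), (S)->size)
#define TOY_REDUC_GROUP_SIZE(S) (assert (!(S)->data_ref_info), (S)->size)

int main ()
{
  int dr;
  toy_stmt_info load = { &dr, 0 };
  TOY_DR_GROUP_SIZE (&load) = 4;	/* reads and writes both check */
  unsigned n = TOY_DR_GROUP_SIZE (&load);

  toy_stmt_info reduc = { nullptr, 0 };
  TOY_REDUC_GROUP_SIZE (&reduc) = 8;	/* ok: no data_ref_info */
  /* TOY_DR_GROUP_SIZE (&reduc) = 8;	   would abort at runtime, which
     is why the initializer above bypasses the accessor macros while
     data_ref_info is still NULL.  */
  return n == 4 ? 0 : 1;
}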