Commit a5b50aa1 by Richard Biener

tree-vectorizer.h (vect_slp_analyze_and_verify_instance_alignment): Declare.

2015-11-11  Richard Biener  <rguenther@suse.de>

	* tree-vectorizer.h (vect_slp_analyze_and_verify_instance_alignment):
	Declare.
	(vect_analyze_data_refs_alignment): Make loop vect specific.
	(vect_verify_datarefs_alignment): Likewise.
	* tree-vect-data-refs.c (vect_slp_analyze_data_ref_dependences):
	Add missing continue.
	(vect_compute_data_ref_alignment): Export.
	(vect_compute_data_refs_alignment): Merge into...
	(vect_analyze_data_refs_alignment): ... this.
	(verify_data_ref_alignment): Split out from ...
	(vect_verify_datarefs_alignment): ... here.
	(vect_slp_analyze_and_verify_node_alignment): New function.
	(vect_slp_analyze_and_verify_instance_alignment): Likewise.
	* tree-vect-slp.c (vect_supported_load_permutation_p): Remove
	misplaced checks on alignment.
	(vect_slp_analyze_bb_1): Add fatal output parameter.  Do
	alignment analysis after SLP discovery and do it per instance.
	(vect_slp_bb): When vect_slp_analyze_bb_1 fatally failed do not
	bother to re-try using different vector sizes.

From-SVN: r230173
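
As illustration (not part of this commit or the GCC testsuite), the kind of
straight-line code basic-block SLP operates on is a group of adjacent stores
fed by isomorphic computations; the function below is hypothetical:

/* Four adjacent stores that SLP discovery can combine into a single
   vector store on a target with a suitable vector mode.  With this
   commit, alignment of the data references is analyzed per SLP
   instance after discovery, so a misaligned instance is dropped
   individually instead of the whole region being rejected up front.  */
void
foo (double *restrict out, double x, double y)
{
  out[0] = x + 1.0;
  out[1] = x + 2.0;
  out[2] = y + 3.0;
  out[3] = y + 4.0;
}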
tree-vect-data-refs.c

@@ -645,6 +645,7 @@ vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
 	       (SLP_INSTANCE_TREE (instance))[0], 0);
 	  vect_free_slp_instance (instance);
 	  BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
+	  continue;
 	}
       i++;
     }
@@ -668,7 +669,7 @@ vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
    FOR NOW: No analysis is actually performed.  Misalignment is calculated
    only for trivial cases.  TODO.  */

-static bool
+bool
 vect_compute_data_ref_alignment (struct data_reference *dr)
 {
   gimple *stmt = DR_STMT (dr);
@@ -838,45 +839,6 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
 }

-/* Function vect_compute_data_refs_alignment
-
-   Compute the misalignment of data references in the loop.
-   Return FALSE if a data reference is found that cannot be vectorized.  */
-
-static bool
-vect_compute_data_refs_alignment (vec_info *vinfo)
-{
-  vec<data_reference_p> datarefs = vinfo->datarefs;
-  struct data_reference *dr;
-  unsigned int i;
-
-  FOR_EACH_VEC_ELT (datarefs, i, dr)
-    {
-      stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
-      if (STMT_VINFO_VECTORIZABLE (stmt_info)
-	  && !vect_compute_data_ref_alignment (dr))
-	{
-	  /* Strided accesses perform only component accesses, misalignment
-	     information is irrelevant for them.  */
-	  if (STMT_VINFO_STRIDED_P (stmt_info)
-	      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
-	    continue;
-
-	  if (is_a <bb_vec_info> (vinfo))
-	    {
-	      /* Mark unsupported statement as unvectorizable.  */
-	      STMT_VINFO_VECTORIZABLE (vinfo_for_stmt (DR_STMT (dr))) = false;
-	      continue;
-	    }
-	  else
-	    return false;
-	}
-    }
-
-  return true;
-}
-
 /* Function vect_update_misalignment_for_peel

    DR - the data reference whose misalignment is to be adjusted.
@@ -936,63 +898,76 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
 }

+/* Function verify_data_ref_alignment
+
+   Return TRUE if DR can be handled with respect to alignment.  */
+
+static bool
+verify_data_ref_alignment (data_reference_p dr)
+{
+  enum dr_alignment_support supportable_dr_alignment;
+  gimple *stmt = DR_STMT (dr);
+  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+
+  if (!STMT_VINFO_RELEVANT_P (stmt_info))
+    return true;
+
+  /* For interleaving, only the alignment of the first access matters.
+     Skip statements marked as not vectorizable.  */
+  if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
+       && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
+      || !STMT_VINFO_VECTORIZABLE (stmt_info))
+    return true;
+
+  /* Strided accesses perform only component accesses, alignment is
+     irrelevant for them.  */
+  if (STMT_VINFO_STRIDED_P (stmt_info)
+      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
+    return true;
+
+  supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
+  if (!supportable_dr_alignment)
+    {
+      if (dump_enabled_p ())
+	{
+	  if (DR_IS_READ (dr))
+	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			     "not vectorized: unsupported unaligned load.");
+	  else
+	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			     "not vectorized: unsupported unaligned "
+			     "store.");
+
+	  dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
+			     DR_REF (dr));
+	  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
+	}
+      return false;
+    }
+
+  if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
+    dump_printf_loc (MSG_NOTE, vect_location,
+		     "Vectorizing an unaligned access.\n");
+
+  return true;
+}
+
 /* Function vect_verify_datarefs_alignment

    Return TRUE if all data references in the loop can be
    handled with respect to alignment.  */

 bool
-vect_verify_datarefs_alignment (vec_info *vinfo)
+vect_verify_datarefs_alignment (loop_vec_info vinfo)
 {
   vec<data_reference_p> datarefs = vinfo->datarefs;
   struct data_reference *dr;
-  enum dr_alignment_support supportable_dr_alignment;
   unsigned int i;

   FOR_EACH_VEC_ELT (datarefs, i, dr)
-    {
-      gimple *stmt = DR_STMT (dr);
-      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
-
-      if (!STMT_VINFO_RELEVANT_P (stmt_info))
-	continue;
-
-      /* For interleaving, only the alignment of the first access matters.
-	 Skip statements marked as not vectorizable.  */
-      if ((STMT_VINFO_GROUPED_ACCESS (stmt_info)
-	   && GROUP_FIRST_ELEMENT (stmt_info) != stmt)
-	  || !STMT_VINFO_VECTORIZABLE (stmt_info))
-	continue;
-
-      /* Strided accesses perform only component accesses, alignment is
-	 irrelevant for them.  */
-      if (STMT_VINFO_STRIDED_P (stmt_info)
-	  && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
-	continue;
-
-      supportable_dr_alignment = vect_supportable_dr_alignment (dr, false);
-      if (!supportable_dr_alignment)
-	{
-	  if (dump_enabled_p ())
-	    {
-	      if (DR_IS_READ (dr))
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "not vectorized: unsupported unaligned load.");
-	      else
-		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-				 "not vectorized: unsupported unaligned "
-				 "store.");
-
-	      dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				 DR_REF (dr));
-	      dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-	    }
-	  return false;
-	}
-      if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
-	dump_printf_loc (MSG_NOTE, vect_location,
-			 "Vectorizing an unaligned access.\n");
-    }
+    if (! verify_data_ref_alignment (dr))
+      return false;
+
   return true;
 }
@@ -2064,7 +2039,7 @@ vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
    Return FALSE if a data reference is found that cannot be vectorized.  */

 bool
-vect_analyze_data_refs_alignment (vec_info *vinfo)
+vect_analyze_data_refs_alignment (loop_vec_info vinfo)
 {
   if (dump_enabled_p ())
     dump_printf_loc (MSG_NOTE, vect_location,
@@ -2072,28 +2047,100 @@ vect_analyze_data_refs_alignment (vinfo)

   /* Mark groups of data references with same alignment using
      data dependence information.  */
-  if (is_a <loop_vec_info> (vinfo))
-    {
-      vec<ddr_p> ddrs = vinfo->ddrs;
-      struct data_dependence_relation *ddr;
-      unsigned int i;
-
-      FOR_EACH_VEC_ELT (ddrs, i, ddr)
-	vect_find_same_alignment_drs (ddr, as_a <loop_vec_info> (vinfo));
-    }
-
-  if (!vect_compute_data_refs_alignment (vinfo))
-    {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: can't calculate alignment "
-			 "for data ref.\n");
-      return false;
-    }
+  vec<ddr_p> ddrs = vinfo->ddrs;
+  struct data_dependence_relation *ddr;
+  unsigned int i;
+
+  FOR_EACH_VEC_ELT (ddrs, i, ddr)
+    vect_find_same_alignment_drs (ddr, vinfo);
+
+  vec<data_reference_p> datarefs = vinfo->datarefs;
+  struct data_reference *dr;
+
+  FOR_EACH_VEC_ELT (datarefs, i, dr)
+    {
+      stmt_vec_info stmt_info = vinfo_for_stmt (DR_STMT (dr));
+      if (STMT_VINFO_VECTORIZABLE (stmt_info)
+	  && !vect_compute_data_ref_alignment (dr))
+	{
+	  /* Strided accesses perform only component accesses, misalignment
+	     information is irrelevant for them.  */
+	  if (STMT_VINFO_STRIDED_P (stmt_info)
+	      && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
+	    continue;
+
+	  if (dump_enabled_p ())
+	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			     "not vectorized: can't calculate alignment "
+			     "for data ref.\n");
+
+	  return false;
+	}
+    }

   return true;
 }
+
+/* Analyze alignment of DRs of stmts in NODE.  */
+
+static bool
+vect_slp_analyze_and_verify_node_alignment (slp_tree node)
+{
+  unsigned i;
+  gimple *stmt;
+  FOR_EACH_VEC_ELT (SLP_TREE_SCALAR_STMTS (node), i, stmt)
+    {
+      stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
+
+      /* Strided accesses perform only component accesses, misalignment
+	 information is irrelevant for them.  */
+      if (STMT_VINFO_STRIDED_P (stmt_info)
+	  && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
+	continue;
+
+      data_reference_p dr = STMT_VINFO_DATA_REF (stmt_info);
+      if (! vect_compute_data_ref_alignment (dr)
+	  || ! verify_data_ref_alignment (dr))
+	{
+	  if (dump_enabled_p ())
+	    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+			     "not vectorized: bad data alignment in basic "
+			     "block.\n");
+	  return false;
+	}
+    }
+
+  return true;
+}
+
+/* Function vect_slp_analyze_instance_alignment
+
+   Analyze the alignment of the data-references in the SLP instance.
+   Return FALSE if a data reference is found that cannot be vectorized.  */
+
+bool
+vect_slp_analyze_and_verify_instance_alignment (slp_instance instance)
+{
+  if (dump_enabled_p ())
+    dump_printf_loc (MSG_NOTE, vect_location,
+		     "=== vect_slp_analyze_and_verify_instance_alignment ===\n");
+
+  slp_tree node;
+  unsigned i;
+  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
+    if (! vect_slp_analyze_and_verify_node_alignment (node))
+      return false;
+
+  node = SLP_INSTANCE_TREE (instance);
+  if (STMT_VINFO_DATA_REF (vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]))
+      && ! vect_slp_analyze_and_verify_node_alignment
+	     (SLP_INSTANCE_TREE (instance)))
+    return false;
+
+  return true;
+}
+
 /* Analyze groups of accesses: check that DR belongs to a group of
    accesses of legal size, step, etc.  Detect gaps, single element
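
A side note on the "strided accesses" exemption retained above: a
non-unit-stride access is expanded into scalar component accesses, so its
misalignment is never computed or verified.  A hypothetical example (not from
the commit) of such an access:

/* p[i * stride] is a strided access (STMT_VINFO_STRIDED_P) without a
   grouped access, so the alignment code above skips it.  */
void
gather_strided (double *restrict out, double *restrict p,
                int stride, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = p[i * stride];
}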
tree-vect-slp.c

@@ -1282,8 +1282,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
   unsigned int group_size = SLP_INSTANCE_GROUP_SIZE (slp_instn);
   unsigned int i, j, k, next;
   slp_tree node;
-  gimple *stmt, *load, *next_load, *first_load;
-  struct data_reference *dr;
+  gimple *stmt, *load, *next_load;

   if (dump_enabled_p ())
     {
@@ -1365,33 +1364,6 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
	    }
	}
     }

-  /* Check that the alignment of the first load in every subchain, i.e.,
-     the first statement in every load node, is supported.
-     ??? This belongs in alignment checking.  */
-  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (slp_instn), i, node)
-    {
-      first_load = SLP_TREE_SCALAR_STMTS (node)[0];
-      if (first_load != GROUP_FIRST_ELEMENT (vinfo_for_stmt (first_load)))
-	{
-	  dr = STMT_VINFO_DATA_REF (vinfo_for_stmt (first_load));
-	  if (vect_supportable_dr_alignment (dr, false)
-	      == dr_unaligned_unsupported)
-	    {
-	      if (dump_enabled_p ())
-		{
-		  dump_printf_loc (MSG_MISSED_OPTIMIZATION,
-				   vect_location,
-				   "unsupported unaligned load ");
-		  dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
-				    first_load, 0);
-		  dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
-		}
-	      return false;
-	    }
-	}
-    }
-
   return true;
 }
@@ -2311,12 +2283,15 @@ vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
   return true;
 }

-/* Check if the basic block can be vectorized.  */
+/* Check if the basic block can be vectorized.  Returns a bb_vec_info
+   if so and sets fatal to true if failure is independent of
+   current_vector_size.  */

 static bb_vec_info
 vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
		       gimple_stmt_iterator region_end,
-		       vec<data_reference_p> datarefs, int n_stmts)
+		       vec<data_reference_p> datarefs, int n_stmts,
+		       bool &fatal)
 {
   bb_vec_info bb_vinfo;
   vec<slp_instance> slp_instances;
@@ -2324,6 +2299,9 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
   int i;
   int min_vf = 2;

+  /* The first group of checks is independent of the vector size.  */
+  fatal = true;
+
   if (n_stmts > PARAM_VALUE (PARAM_SLP_MAX_INSNS_IN_BB))
     {
       if (dump_enabled_p ())
@@ -2375,19 +2353,25 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
       return NULL;
     }

-  vect_pattern_recog (bb_vinfo);
-
-  if (!vect_analyze_data_refs_alignment (bb_vinfo))
+  /* If there are no grouped stores in the region there is no need
+     to continue with pattern recog as vect_analyze_slp will fail
+     anyway.  */
+  if (bb_vinfo->grouped_stores.is_empty ())
     {
       if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: bad data alignment in basic "
-			 "block.\n");
+			 "not vectorized: no grouped stores in "
+			 "basic block.\n");
       destroy_bb_vec_info (bb_vinfo);
       return NULL;
     }

+  /* While the rest of the analysis below depends on it in some way.  */
+  fatal = false;
+
+  vect_pattern_recog (bb_vinfo);
+
   /* Check the SLP opportunities in the basic block, analyze and build SLP
      trees.  */
   if (!vect_analyze_slp (bb_vinfo, n_stmts))
@@ -2405,6 +2389,30 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
       return NULL;
     }

+  /* Analyze and verify the alignment of data references in the SLP
+     instances.  */
+  for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); )
+    {
+      if (! vect_slp_analyze_and_verify_instance_alignment (instance))
+	{
+	  dump_printf_loc (MSG_NOTE, vect_location,
+			   "removing SLP instance operations starting from: ");
+	  dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
+			    SLP_TREE_SCALAR_STMTS
+			      (SLP_INSTANCE_TREE (instance))[0], 0);
+	  vect_free_slp_instance (instance);
+	  BB_VINFO_SLP_INSTANCES (bb_vinfo).ordered_remove (i);
+	  continue;
+	}
+      i++;
+    }
+
+  if (! BB_VINFO_SLP_INSTANCES (bb_vinfo).length ())
+    {
+      destroy_bb_vec_info (bb_vinfo);
+      return NULL;
+    }
+
   slp_instances = BB_VINFO_SLP_INSTANCES (bb_vinfo);

   /* Mark all the statements that we want to vectorize as pure SLP and
@@ -2427,23 +2435,13 @@ vect_slp_analyze_bb_1 (gimple_stmt_iterator region_begin,
   /* Analyze dependences.  At this point all stmts not participating in
      vectorization have to be marked.  Dependence analysis assumes
      that we either vectorize all SLP instances or none at all.  */
-  if (!vect_slp_analyze_data_ref_dependences (bb_vinfo))
-    {
-      if (dump_enabled_p ())
-	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: unhandled data dependence "
-			 "in basic block.\n");
-      destroy_bb_vec_info (bb_vinfo);
-      return NULL;
-    }
-
-  if (!vect_verify_datarefs_alignment (bb_vinfo))
+  if (! vect_slp_analyze_data_ref_dependences (bb_vinfo))
     {
       if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
-			 "not vectorized: unsupported alignment in basic "
-			 "block.\n");
+			 "not vectorized: unhandled data dependence "
+			 "in basic block.\n");
       destroy_bb_vec_info (bb_vinfo);
       return NULL;
     }
@@ -2533,8 +2531,9 @@ vect_slp_bb (basic_block bb)
       gimple_stmt_iterator region_end = gsi;

       bool vectorized = false;
+      bool fatal = false;
       bb_vinfo = vect_slp_analyze_bb_1 (region_begin, region_end,
-					datarefs, insns);
+					datarefs, insns, fatal);
       if (bb_vinfo
	  && dbg_cnt (vect_slp))
	{
@@ -2559,7 +2558,10 @@ vect_slp_bb (basic_block bb)
       vector_sizes &= ~current_vector_size;
       if (vectorized
	  || vector_sizes == 0
-	  || current_vector_size == 0)
+	  || current_vector_size == 0
+	  /* If vect_slp_analyze_bb_1 signaled that analysis for all
+	     vector sizes will fail do not bother iterating.  */
+	  || fatal)
	{
	  if (gsi_end_p (region_end))
	    break;
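
The check removed from vect_supported_load_permutation_p above concerned SLP
instances with permuted loads, where the first statement of a load node is
not the first element of its interleaving group.  A hypothetical example
(again not from the commit) of such a permutation:

/* The swapped element loads form a load permutation; whether an
   unaligned first load is supported is now decided by the per-instance
   alignment verification rather than by the permutation check.  */
void
swap_pairs (double *restrict dst, double *restrict src)
{
  dst[0] = src[1];
  dst[1] = src[0];
  dst[2] = src[3];
  dst[3] = src[2];
}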
tree-vectorizer.h

@@ -1011,8 +1011,9 @@ extern tree vect_get_smallest_scalar_type (gimple *, HOST_WIDE_INT *,
 extern bool vect_analyze_data_ref_dependences (loop_vec_info, int *);
 extern bool vect_slp_analyze_data_ref_dependences (bb_vec_info);
 extern bool vect_enhance_data_refs_alignment (loop_vec_info);
-extern bool vect_analyze_data_refs_alignment (vec_info *);
-extern bool vect_verify_datarefs_alignment (vec_info *);
+extern bool vect_analyze_data_refs_alignment (loop_vec_info);
+extern bool vect_verify_datarefs_alignment (loop_vec_info);
+extern bool vect_slp_analyze_and_verify_instance_alignment (slp_instance);
 extern bool vect_analyze_data_ref_accesses (vec_info *);
 extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
 extern tree vect_check_gather_scatter (gimple *, loop_vec_info, tree *, tree *,