Commit e9dbe7bb by Ira Rosen Committed by Ira Rosen

re PR tree-optimization/39129 (The meaning of 'BB' in "too many BBs in loop")


	PR tree-optimization/39129
	* tree-vect-loop-manip.c (conservative_cost_threshold): Change the 
	printed message.
	(vect_do_peeling_for_loop_bound): Use 
	LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT and
	LOOP_REQUIRES_VERSIONING_FOR_ALIAS macros.
	(vect_loop_versioning): Likewise.
	(vect_create_cond_for_alias_checks): Fix indentation.
	* tree-vectorizer.h (struct _loop_vec_info): Fix indentation of the 
	macros.
	(LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT): Define.
	(LOOP_REQUIRES_VERSIONING_FOR_ALIAS): Likewise.
	* tree-vect-loop.c (vect_analyze_loop_form): Change "too many BBs" to 
	"control flow in loop".
	(vect_estimate_min_profitable_iters): Use 
	LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT and
	LOOP_REQUIRES_VERSIONING_FOR_ALIAS macros.
	* tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Likewise.
	(vect_create_data_ref_ptr): Don't mention array dimension in printing.
	* tree-vect-stmts.c (vectorizable_store): Replace the check that the 
	statement belongs to a group of strided accesses with the exact code 
	check.
	(vectorizable_load): Likewise.
	* tree-vect-slp.c (vect_analyze_slp_instance): Spell out "basic block".
	(vect_slp_analyze_bb, vect_slp_transform_bb): Likewise.

From-SVN: r148036
parent a7ad6c2d
2009-06-01 Ira Rosen <irar@il.ibm.com>
PR tree-optimization/39129
* tree-vect-loop-manip.c (conservative_cost_threshold): Change the
printed message.
(vect_do_peeling_for_loop_bound): Use
LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT and
LOOP_REQUIRES_VERSIONING_FOR_ALIAS macros.
(vect_loop_versioning): Likewise.
(vect_create_cond_for_alias_checks): Fix indentation.
* tree-vectorizer.h (struct _loop_vec_info): Fix indentation of the
macros.
(LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT): Define.
(LOOP_REQUIRES_VERSIONING_FOR_ALIAS): Likewise.
* tree-vect-loop.c (vect_analyze_loop_form): Change "too many BBs" to
"control flow in loop".
(vect_estimate_min_profitable_iters): Use
LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT and
LOOP_REQUIRES_VERSIONING_FOR_ALIAS macros.
* tree-vect-data-refs.c (vect_enhance_data_refs_alignment): Likewise.
(vect_create_data_ref_ptr): Don't mention array dimension in printing.
* tree-vect-stmts.c (vectorizable_store): Replace the check that the
statement belongs to a group of strided accesses with the exact code
check.
(vectorizable_load): Likewise.
* tree-vect-slp.c (vect_analyze_slp_instance): Spell out "basic block".
(vect_slp_analyze_bb, vect_slp_transform_bb): Likewise.
2009-06-01 Gerald Pfeifer <gerald@pfeifer.com>
* config/freebsd-stdint.h: New file.
......
...@@ -1196,15 +1196,15 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) ...@@ -1196,15 +1196,15 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
} }
} }
vect_versioning_for_alias_required = vect_versioning_for_alias_required
(VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)) > 0); = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
/* Temporarily, if versioning for alias is required, we disable peeling /* Temporarily, if versioning for alias is required, we disable peeling
until we support peeling and versioning. Often peeling for alignment until we support peeling and versioning. Often peeling for alignment
will require peeling for loop-bound, which in turn requires that we will require peeling for loop-bound, which in turn requires that we
know how to adjust the loop ivs after the loop. */ know how to adjust the loop ivs after the loop. */
if (vect_versioning_for_alias_required if (vect_versioning_for_alias_required
|| !vect_can_advance_ivs_p (loop_vinfo) || !vect_can_advance_ivs_p (loop_vinfo)
|| !slpeel_can_duplicate_loop_p (loop, single_exit (loop))) || !slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
do_peeling = false; do_peeling = false;
...@@ -1366,7 +1366,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo) ...@@ -1366,7 +1366,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
} }
/* Versioning requires at least one misaligned data reference. */ /* Versioning requires at least one misaligned data reference. */
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) == 0) if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
do_versioning = false; do_versioning = false;
else if (!do_versioning) else if (!do_versioning)
VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0); VEC_truncate (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo), 0);
...@@ -2356,10 +2356,9 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop, ...@@ -2356,10 +2356,9 @@ vect_create_data_ref_ptr (gimple stmt, struct loop *at_loop,
tree data_ref_base = base_name; tree data_ref_base = base_name;
fprintf (vect_dump, "create vector-pointer variable to type: "); fprintf (vect_dump, "create vector-pointer variable to type: ");
print_generic_expr (vect_dump, vectype, TDF_SLIM); print_generic_expr (vect_dump, vectype, TDF_SLIM);
if (TREE_CODE (data_ref_base) == VAR_DECL) if (TREE_CODE (data_ref_base) == VAR_DECL
fprintf (vect_dump, " vectorizing a one dimensional array ref: "); || TREE_CODE (data_ref_base) == ARRAY_REF)
else if (TREE_CODE (data_ref_base) == ARRAY_REF) fprintf (vect_dump, " vectorizing an array ref: ");
fprintf (vect_dump, " vectorizing a multidimensional array ref: ");
else if (TREE_CODE (data_ref_base) == COMPONENT_REF) else if (TREE_CODE (data_ref_base) == COMPONENT_REF)
fprintf (vect_dump, " vectorizing a record based array ref: "); fprintf (vect_dump, " vectorizing a record based array ref: ");
else if (TREE_CODE (data_ref_base) == SSA_NAME) else if (TREE_CODE (data_ref_base) == SSA_NAME)
......
...@@ -1680,7 +1680,7 @@ conservative_cost_threshold (loop_vec_info loop_vinfo, ...@@ -1680,7 +1680,7 @@ conservative_cost_threshold (loop_vec_info loop_vinfo,
th = (unsigned) min_profitable_iters; th = (unsigned) min_profitable_iters;
if (th && vect_print_dump_info (REPORT_COST)) if (th && vect_print_dump_info (REPORT_COST))
fprintf (vect_dump, "Vectorization may not be profitable."); fprintf (vect_dump, "Profitability threshold is %u loop iterations.", th);
return th; return th;
} }
...@@ -1730,8 +1730,8 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio, ...@@ -1730,8 +1730,8 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
/* If cost model check not done during versioning and /* If cost model check not done during versioning and
peeling for alignment. */ peeling for alignment. */
if (!VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
&& !VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo)) && !LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo)
&& !LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo) && !LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo)
&& !cond_expr) && !cond_expr)
{ {
...@@ -2280,10 +2280,10 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo, ...@@ -2280,10 +2280,10 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo,
else else
*cond_expr = part_cond_expr; *cond_expr = part_cond_expr;
} }
if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
fprintf (vect_dump, "created %u versioning for alias checks.\n",
VEC_length (ddr_p, may_alias_ddrs));
if (vect_print_dump_info (REPORT_VECTORIZED_LOCATIONS))
fprintf (vect_dump, "created %u versioning for alias checks.\n",
VEC_length (ddr_p, may_alias_ddrs));
} }
...@@ -2339,11 +2339,11 @@ vect_loop_versioning (loop_vec_info loop_vinfo, bool do_versioning, ...@@ -2339,11 +2339,11 @@ vect_loop_versioning (loop_vec_info loop_vinfo, bool do_versioning,
*cond_expr = force_gimple_operand (*cond_expr, cond_expr_stmt_list, *cond_expr = force_gimple_operand (*cond_expr, cond_expr_stmt_list,
false, NULL_TREE); false, NULL_TREE);
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))) if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
vect_create_cond_for_align_checks (loop_vinfo, cond_expr, vect_create_cond_for_align_checks (loop_vinfo, cond_expr,
cond_expr_stmt_list); cond_expr_stmt_list);
if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
vect_create_cond_for_alias_checks (loop_vinfo, cond_expr, vect_create_cond_for_alias_checks (loop_vinfo, cond_expr,
cond_expr_stmt_list); cond_expr_stmt_list);
......
...@@ -846,7 +846,7 @@ vect_analyze_loop_form (struct loop *loop) ...@@ -846,7 +846,7 @@ vect_analyze_loop_form (struct loop *loop)
if (loop->num_nodes != 2) if (loop->num_nodes != 2)
{ {
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS)) if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
fprintf (vect_dump, "not vectorized: too many BBs in loop."); fprintf (vect_dump, "not vectorized: control flow in loop.");
return NULL; return NULL;
} }
...@@ -908,7 +908,7 @@ vect_analyze_loop_form (struct loop *loop) ...@@ -908,7 +908,7 @@ vect_analyze_loop_form (struct loop *loop)
if (loop->num_nodes != 5) if (loop->num_nodes != 5)
{ {
if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS)) if (vect_print_dump_info (REPORT_BAD_FORM_LOOPS))
fprintf (vect_dump, "not vectorized: too many BBs in loop."); fprintf (vect_dump, "not vectorized: control flow in loop.");
destroy_loop_vec_info (inner_loop_vinfo, true); destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL; return NULL;
} }
...@@ -1756,7 +1756,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) ...@@ -1756,7 +1756,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
} }
/* Requires loop versioning tests to handle misalignment. */ /* Requires loop versioning tests to handle misalignment. */
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo))) if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
{ {
/* FIXME: Make cost depend on complexity of individual check. */ /* FIXME: Make cost depend on complexity of individual check. */
vec_outside_cost += vec_outside_cost +=
...@@ -1766,7 +1766,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) ...@@ -1766,7 +1766,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
"versioning to treat misalignment.\n"); "versioning to treat misalignment.\n");
} }
if (VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) /* Requires loop versioning with alias checks. */
if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
{ {
/* FIXME: Make cost depend on complexity of individual check. */ /* FIXME: Make cost depend on complexity of individual check. */
vec_outside_cost += vec_outside_cost +=
...@@ -1776,11 +1777,9 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) ...@@ -1776,11 +1777,9 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
"versioning aliasing.\n"); "versioning aliasing.\n");
} }
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
|| VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
{ vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
vec_outside_cost += TARG_COND_TAKEN_BRANCH_COST;
}
/* Count statements in scalar loop. Using this as scalar cost for a single /* Count statements in scalar loop. Using this as scalar cost for a single
iteration for now. iteration for now.
...@@ -1946,12 +1945,12 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo) ...@@ -1946,12 +1945,12 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo)
decide whether to vectorize at compile time. Hence the scalar version decide whether to vectorize at compile time. Hence the scalar version
do not carry cost model guard costs. */ do not carry cost model guard costs. */
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
|| VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) || LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
|| VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
{ {
/* Cost model check occurs at versioning. */ /* Cost model check occurs at versioning. */
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
|| VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST; scalar_outside_cost += TARG_COND_NOT_TAKEN_BRANCH_COST;
else else
{ {
...@@ -3648,8 +3647,8 @@ vect_transform_loop (loop_vec_info loop_vinfo) ...@@ -3648,8 +3647,8 @@ vect_transform_loop (loop_vec_info loop_vinfo)
|| (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) || (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0)); && LOOP_VINFO_INT_NITERS (loop_vinfo) % vectorization_factor != 0));
if (VEC_length (gimple, LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo)) if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo)
|| VEC_length (ddr_p, LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo))) || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
vect_loop_versioning (loop_vinfo, vect_loop_versioning (loop_vinfo,
!do_peeling_for_loop_bound, !do_peeling_for_loop_bound,
&cond_expr, &cond_expr_stmt_list); &cond_expr, &cond_expr_stmt_list);
......
...@@ -912,7 +912,8 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo, ...@@ -912,7 +912,8 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (unrolling_factor != 1 && !loop_vinfo) if (unrolling_factor != 1 && !loop_vinfo)
{ {
if (vect_print_dump_info (REPORT_SLP)) if (vect_print_dump_info (REPORT_SLP))
fprintf (vect_dump, "Build SLP failed: unrolling required in BB SLP"); fprintf (vect_dump, "Build SLP failed: unrolling required in basic"
" block SLP");
return false; return false;
} }
...@@ -1367,7 +1368,7 @@ vect_slp_analyze_bb (basic_block bb) ...@@ -1367,7 +1368,7 @@ vect_slp_analyze_bb (basic_block bb)
} }
if (vect_print_dump_info (REPORT_DETAILS)) if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "BB will be vectorized using SLP\n"); fprintf (vect_dump, "Basic block will be vectorized using SLP\n");
return bb_vinfo; return bb_vinfo;
} }
...@@ -2088,7 +2089,7 @@ vect_slp_transform_bb (basic_block bb) ...@@ -2088,7 +2089,7 @@ vect_slp_transform_bb (basic_block bb)
update_ssa (TODO_update_ssa); update_ssa (TODO_update_ssa);
if (vect_print_dump_info (REPORT_DETAILS)) if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "BB VECTORIZED\n"); fprintf (vect_dump, "BASIC BLOCK VECTORIZED\n");
destroy_bb_vec_info (bb_vinfo); destroy_bb_vec_info (bb_vinfo);
} }
......
...@@ -2903,7 +2903,9 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, ...@@ -2903,7 +2903,9 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
scalar_dest = gimple_assign_lhs (stmt); scalar_dest = gimple_assign_lhs (stmt);
if (TREE_CODE (scalar_dest) != ARRAY_REF if (TREE_CODE (scalar_dest) != ARRAY_REF
&& TREE_CODE (scalar_dest) != INDIRECT_REF && TREE_CODE (scalar_dest) != INDIRECT_REF
&& !STMT_VINFO_STRIDED_ACCESS (stmt_info)) && TREE_CODE (scalar_dest) != COMPONENT_REF
&& TREE_CODE (scalar_dest) != IMAGPART_EXPR
&& TREE_CODE (scalar_dest) != REALPART_EXPR)
return false; return false;
gcc_assert (gimple_assign_single_p (stmt)); gcc_assert (gimple_assign_single_p (stmt));
...@@ -3285,7 +3287,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, ...@@ -3285,7 +3287,9 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
code = gimple_assign_rhs_code (stmt); code = gimple_assign_rhs_code (stmt);
if (code != ARRAY_REF if (code != ARRAY_REF
&& code != INDIRECT_REF && code != INDIRECT_REF
&& !STMT_VINFO_STRIDED_ACCESS (stmt_info)) && code != COMPONENT_REF
&& code != IMAGPART_EXPR
&& code != REALPART_EXPR)
return false; return false;
if (!STMT_VINFO_DATA_REF (stmt_info)) if (!STMT_VINFO_DATA_REF (stmt_info))
......
...@@ -239,33 +239,38 @@ typedef struct _loop_vec_info { ...@@ -239,33 +239,38 @@ typedef struct _loop_vec_info {
} *loop_vec_info; } *loop_vec_info;
/* Access Functions. */ /* Access Functions. */
#define LOOP_VINFO_LOOP(L) (L)->loop #define LOOP_VINFO_LOOP(L) (L)->loop
#define LOOP_VINFO_BBS(L) (L)->bbs #define LOOP_VINFO_BBS(L) (L)->bbs
#define LOOP_VINFO_NITERS(L) (L)->num_iters #define LOOP_VINFO_NITERS(L) (L)->num_iters
/* Since LOOP_VINFO_NITERS can change after prologue peeling /* Since LOOP_VINFO_NITERS can change after prologue peeling
retain total unchanged scalar loop iterations for cost model. */ retain total unchanged scalar loop iterations for cost model. */
#define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
#define LOOP_VINFO_COST_MODEL_MIN_ITERS(L) (L)->min_profitable_iters #define LOOP_VINFO_COST_MODEL_MIN_ITERS(L) (L)->min_profitable_iters
#define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
#define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
#define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
#define LOOP_VINFO_DATAREFS(L) (L)->datarefs #define LOOP_VINFO_DATAREFS(L) (L)->datarefs
#define LOOP_VINFO_DDRS(L) (L)->ddrs #define LOOP_VINFO_DDRS(L) (L)->ddrs
#define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters)) #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment #define LOOP_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
#define LOOP_VINFO_LOC(L) (L)->loop_line_number #define LOOP_VINFO_LOC(L) (L)->loop_line_number
#define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
#define LOOP_VINFO_STRIDED_STORES(L) (L)->strided_stores #define LOOP_VINFO_STRIDED_STORES(L) (L)->strided_stores
#define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
VEC_length (gimple, (L)->may_misalign_stmts) > 0
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
VEC_length (ddr_p, (L)->may_alias_ddrs) > 0
#define NITERS_KNOWN_P(n) \ #define NITERS_KNOWN_P(n) \
(host_integerp ((n),0) \ (host_integerp ((n),0) \
&& TREE_INT_CST_LOW ((n)) > 0) && TREE_INT_CST_LOW ((n)) > 0)
#define LOOP_VINFO_NITERS_KNOWN_P(L) \ #define LOOP_VINFO_NITERS_KNOWN_P(L) \
NITERS_KNOWN_P((L)->num_iters) NITERS_KNOWN_P((L)->num_iters)
static inline loop_vec_info static inline loop_vec_info
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment