Commit e645e942 by Teresa Johnson (committed by Teresa Johnson)

dumpfile.c (dump_loc): Remove newline emission.

2013-09-16  Teresa Johnson  <tejohnson@google.com>

	* dumpfile.c (dump_loc): Remove newline emission.
	* tree-vect-data-refs.c (vect_lanes_optab_supported_p): Add newline
	emission to dump_printf_loc calls where missing.
	(vect_mark_for_runtime_alias_test): Ditto.
	(vect_analyze_data_ref_dependence): Ditto.
	(vect_analyze_data_ref_dependences): Ditto.
	(vect_slp_analyze_data_ref_dependence): Ditto.
	(vect_slp_analyze_data_ref_dependences): Ditto.
	(vect_compute_data_ref_alignment): Ditto.
	(vect_update_misalignment_for_peel): Ditto.
	(vect_verify_datarefs_alignment): Ditto.
	(vector_alignment_reachable_p): Ditto.
	(vect_get_data_access_cost): Ditto.
	(vect_enhance_data_refs_alignment): Ditto.
	(vect_find_same_alignment_drs): Ditto.
	(vect_analyze_data_refs_alignment): Ditto.
	(vect_analyze_group_access): Ditto.
	(vect_analyze_data_ref_access): Ditto.
	(vect_analyze_data_ref_accesses): Ditto.
	(vect_prune_runtime_alias_test_list): Ditto.
	(vect_analyze_data_refs): Ditto.
	(vect_create_addr_base_for_vector_ref): Ditto.
	(vect_create_data_ref_ptr): Ditto.
	(vect_grouped_store_supported): Ditto.
	(vect_grouped_load_supported): Ditto.
	* value-prof.c (check_counter): Ditto.
	(check_ic_target): Ditto.
	* tree-vect-patterns.c (vect_recog_dot_prod_pattern): Ditto.
	(vect_recog_widen_mult_pattern): Ditto.
	(vect_recog_widen_sum_pattern): Ditto.
	(vect_recog_over_widening_pattern): Ditto.
	(vect_recog_widen_shift_pattern): Ditto.
	(vect_recog_rotate_pattern): Ditto.
	(vect_recog_vector_vector_shift_pattern): Ditto.
	(vect_recog_divmod_pattern): Ditto.
	(vect_recog_mixed_size_cond_pattern): Ditto.
	(vect_recog_bool_pattern): Ditto.
	(vect_pattern_recog_1): Ditto.
	(vect_pattern_recog): Ditto.
	* tree-vect-loop.c (vect_determine_vectorization_factor): Ditto.
	(vect_is_simple_iv_evolution): Ditto.
	(vect_analyze_scalar_cycles_1): Ditto.
	(vect_get_loop_niters): Ditto.
	(vect_analyze_loop_1): Ditto.
	(vect_analyze_loop_form): Ditto.
	(vect_analyze_loop_operations): Ditto.
	(vect_analyze_loop_2): Ditto.
	(vect_analyze_loop): Ditto.
	(report_vect_op): Ditto.
	(vect_is_slp_reduction): Ditto.
	(vect_is_simple_reduction_1): Ditto.
	(vect_get_known_peeling_cost): Ditto.
	(vect_estimate_min_profitable_iters): Ditto.
	(vect_model_reduction_cost): Ditto.
	(vect_model_induction_cost): Ditto.
	(get_initial_def_for_induction): Ditto.
	(vect_create_epilog_for_reduction): Ditto.
	(vectorizable_reduction): Ditto.
	(vectorizable_induction): Ditto.
	(vectorizable_live_operation): Ditto.
	(vect_loop_kill_debug_uses): Ditto.
	(vect_transform_loop): Ditto.
	* tree-vect-stmts.c (vect_mark_relevant): Ditto.
	(vect_stmt_relevant_p): Ditto.
	(process_use): Ditto.
	(vect_mark_stmts_to_be_vectorized): Ditto.
	(vect_model_simple_cost): Ditto.
	(vect_model_promotion_demotion_cost): Ditto.
	(vect_model_store_cost): Ditto.
	(vect_get_store_cost): Ditto.
	(vect_model_load_cost): Ditto.
	(vect_get_load_cost): Ditto.
	(vect_init_vector_1): Ditto.
	(vect_get_vec_def_for_operand): Ditto.
	(vect_finish_stmt_generation): Ditto.
	(vectorizable_call): Ditto.
	(vectorizable_conversion): Ditto.
	(vectorizable_assignment): Ditto.
	(vectorizable_shift): Ditto.
	(vectorizable_operation): Ditto.
	(vectorizable_store): Ditto.
	(vectorizable_load): Ditto.
	(vectorizable_condition): Ditto.
	(vect_analyze_stmt): Ditto.
	(vect_transform_stmt): Ditto.
	(vect_is_simple_use): Ditto.
	* tree-vect-loop-manip.c (slpeel_make_loop_iterate_ntimes): Ditto.
	(vect_can_advance_ivs_p): Ditto.
	(vect_update_ivs_after_vectorizer): Ditto.
	(vect_do_peeling_for_loop_bound): Ditto.
	(vect_gen_niters_for_prolog_loop): Ditto.
	(vect_update_inits_of_drs): Ditto.
	(vect_create_cond_for_alias_checks): Ditto.
	* tree-vect-slp.c (vect_get_and_check_slp_defs): Ditto.
	(vect_build_slp_tree_1): Ditto.
	(vect_supported_load_permutation_p): Ditto.
	(vect_analyze_slp_instance): Ditto.
	(vect_analyze_slp): Ditto.
	(vect_make_slp_decision): Ditto.
	(vect_detect_hybrid_slp): Ditto.
	(vect_bb_vectorization_profitable_p): Ditto.
	(vect_slp_analyze_bb_1): Ditto.
	(vect_update_slp_costs_according_to_vf): Ditto.
	(vect_get_mask_element): Ditto.
	(vect_transform_slp_perm_load): Ditto.
	(vect_schedule_slp_instance): Ditto.
	(vect_schedule_slp): Ditto.
	(vect_slp_transform_bb): Ditto.
	* profile.c (read_profile_edge_counts): Ditto.
	(compute_branch_probabilities): Ditto.
	* coverage.c (get_coverage_counts): Ditto.

From-SVN: r202628
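For context, the mechanical pattern of this change: dump_loc () used to begin its "file:line:col: note: " prefix with a leading newline, so the many dump_printf_loc () call sites in the vectorizer, profile and coverage code never terminated their messages. With that newline dropped from dump_loc (), every message must now end in "\n" itself, which is what the bulk of the diff below adds (including trailing dump_printf (MSG_NOTE, "\n") calls after dump_generic_expr/dump_gimple_stmt sequences). The stand-alone sketch below illustrates the before/after convention only; my_dump_loc and my_dump_printf_loc are hypothetical stand-ins, not GCC's real dumpfile.h API.

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical stand-in for dump_loc: print the "file:line:col: note: "
   prefix.  Before r202628 the real dump_loc started this prefix with
   "\n"; after this commit it does not.  */
static void
my_dump_loc (FILE *f, const char *file, int line, int col)
{
  fprintf (f, "%s:%d:%d: note: ", file, line, col);
}

/* Hypothetical stand-in for dump_printf_loc: location prefix followed by
   a formatted message.  */
static void
my_dump_printf_loc (FILE *f, const char *file, int line, int col,
                    const char *fmt, ...)
{
  va_list ap;

  my_dump_loc (f, file, line, col);
  va_start (ap, fmt);
  vfprintf (f, fmt, ap);
  va_end (ap);
}

int
main (void)
{
  /* With no leading newline in the prefix, each message now has to carry
     its own trailing "\n", mirroring the call-site edits in this patch.  */
  my_dump_printf_loc (stdout, "foo.c", 42, 3,
                      "dependence distance = %d.\n", 0);
  my_dump_printf_loc (stdout, "foo.c", 57, 7, "Detected induction.\n");
  return 0;
}

A call site that forgets the trailing "\n" now runs its note into the next line's location prefix, which is why so many dump_printf_loc and dump_printf calls are touched in the diff below.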
......@@ -349,7 +349,7 @@ get_coverage_counts (unsigned counter, unsigned expected,
(flag_guess_branch_prob
? "file %s not found, execution counts estimated"
: "file %s not found, execution counts assumed to "
"be zero"),
"be zero\n"),
da_file_name);
return NULL;
}
......@@ -379,20 +379,20 @@ get_coverage_counts (unsigned counter, unsigned expected,
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, input_location,
"use -Wno-error=coverage-mismatch to tolerate "
"the mismatch but performance may drop if the "
"function is hot");
"function is hot\n");
if (!seen_error ()
&& !warned++)
{
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, input_location,
"coverage mismatch ignored");
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, input_location,
flag_guess_branch_prob
? G_("execution counts estimated")
: G_("execution counts assumed to be zero"));
"coverage mismatch ignored\n");
dump_printf (MSG_OPTIMIZED_LOCATIONS,
flag_guess_branch_prob
? G_("execution counts estimated\n")
: G_("execution counts assumed to be zero\n"));
if (!flag_guess_branch_prob)
dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, input_location,
"this can result in poorly optimized code");
dump_printf (MSG_OPTIMIZED_LOCATIONS,
"this can result in poorly optimized code\n");
}
}
......
......@@ -262,10 +262,10 @@ dump_loc (int dump_kind, FILE *dfile, source_location loc)
if (dump_kind)
{
if (LOCATION_LOCUS (loc) > BUILTINS_LOCATION)
fprintf (dfile, "\n%s:%d:%d: note: ", LOCATION_FILE (loc),
fprintf (dfile, "%s:%d:%d: note: ", LOCATION_FILE (loc),
LOCATION_LINE (loc), LOCATION_COLUMN (loc));
else if (current_function_decl)
fprintf (dfile, "\n%s:%d:%d: note: ",
fprintf (dfile, "%s:%d:%d: note: ",
DECL_SOURCE_FILE (current_function_decl),
DECL_SOURCE_LINE (current_function_decl),
DECL_SOURCE_COLUMN (current_function_decl));
......
......@@ -434,7 +434,8 @@ read_profile_edge_counts (gcov_type *exec_counts)
static bool informed = 0;
if (dump_enabled_p () && !informed)
dump_printf_loc (MSG_NOTE, input_location,
"corrupted profile info: edge count exceeds maximal count");
"corrupted profile info: edge count"
" exceeds maximal count\n");
informed = 1;
}
else
......@@ -696,7 +697,7 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
{
informed = 1;
dump_printf_loc (MSG_NOTE, input_location,
"correcting inconsistent profile data");
"correcting inconsistent profile data\n");
}
correct_negative_edge_counts ();
/* Set bb counts to the sum of the outgoing edge counts */
......
......@@ -59,8 +59,8 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab,
if (array_mode == BLKmode)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]",
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no array mode for %s[" HOST_WIDE_INT_PRINT_DEC "]\n",
GET_MODE_NAME (mode), count);
return false;
}
......@@ -69,14 +69,14 @@ vect_lanes_optab_supported_p (const char *name, convert_optab optab,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cannot use %s<%s><%s>", name,
"cannot use %s<%s><%s>\n", name,
GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
return false;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"can use %s<%s><%s>", name, GET_MODE_NAME (array_mode),
"can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
GET_MODE_NAME (mode));
return true;
......@@ -182,13 +182,15 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr)));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr)));
dump_printf (MSG_NOTE, "\n");
}
if (optimize_loop_nest_for_size_p (loop))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not supported when optimizing for size.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not supported when optimizing"
" for size.\n");
return false;
}
......@@ -196,8 +198,8 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for outer-loops.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for outer-loops.\n");
return false;
}
......@@ -207,9 +209,9 @@ vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
|| TREE_CODE (DR_STEP (DDR_B (ddr))) != INTEGER_CST)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning not yet supported for non-constant "
"step");
"step\n");
return false;
}
......@@ -276,6 +278,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
......@@ -290,6 +293,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Add to list of ddrs that need to be tested at run-time. */
......@@ -321,18 +325,20 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"versioning for alias required: "
"bad dist vector for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Add to list of ddrs that need to be tested at run-time. */
return !vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
......@@ -345,17 +351,18 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.", dist);
"dependence distance = %d.\n", dist);
if (dist == 0)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance == 0 between ");
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance == 0 between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* When we perform grouped accesses and perform implicit CSE
......@@ -383,7 +390,8 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"READ_WRITE dependence in interleaving.");
"READ_WRITE dependence in interleaving."
"\n");
return true;
}
}
......@@ -398,7 +406,7 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
distance is negative. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"dependence distance negative.");
"dependence distance negative.\n");
continue;
}
......@@ -410,8 +418,8 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
*max_vf = abs (dist);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"adjusting maximal vectorization factor to %i",
*max_vf);
"adjusting maximal vectorization factor to %i\n",
*max_vf);
}
if (abs (dist) >= *max_vf)
......@@ -420,18 +428,19 @@ vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
vectorization is concerned, in this case. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance >= VF.");
"dependence distance >= VF.\n");
continue;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized, possible dependence "
"between data-refs ");
"not vectorized, possible dependence "
"between data-refs ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
return true;
......@@ -454,7 +463,7 @@ vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo, int *max_vf)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_dependences ===");
"=== vect_analyze_data_ref_dependences ===\n");
if (!compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
&LOOP_VINFO_DDRS (loop_vinfo),
......@@ -515,6 +524,7 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* We do not vectorize basic blocks with write-write dependencies. */
......@@ -536,6 +546,7 @@ vect_slp_analyze_data_ref_dependence (struct data_dependence_relation *ddr)
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* Do not vectorize basic blocks with write-write dependences. */
......@@ -600,7 +611,7 @@ vect_slp_analyze_data_ref_dependences (bb_vec_info bb_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_slp_analyze_data_ref_dependences ===");
"=== vect_slp_analyze_data_ref_dependences ===\n");
if (!compute_all_dependences (BB_VINFO_DATAREFS (bb_vinfo),
&BB_VINFO_DDRS (bb_vinfo),
......@@ -643,7 +654,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_compute_data_ref_alignment:");
"vect_compute_data_ref_alignment:\n");
if (loop_vinfo)
loop = LOOP_VINFO_LOOP (loop_vinfo);
......@@ -676,7 +687,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner step divides the vector-size.");
"inner step divides the vector-size.\n");
misalign = STMT_VINFO_DR_INIT (stmt_info);
aligned_to = STMT_VINFO_DR_ALIGNED_TO (stmt_info);
base_addr = STMT_VINFO_DR_BASE_ADDRESS (stmt_info);
......@@ -685,7 +696,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner step doesn't divide the vector-size.");
"inner step doesn't divide the vector-size.\n");
misalign = NULL_TREE;
}
}
......@@ -703,8 +714,8 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (dr_step % GET_MODE_SIZE (TYPE_MODE (vectype)) != 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP: step doesn't divide the vector-size.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP: step doesn't divide the vector-size.\n");
misalign = NULL_TREE;
}
}
......@@ -718,8 +729,9 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown alignment for access: ");
"Unknown alignment for access: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, base);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
}
......@@ -748,8 +760,9 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"can't force alignment of ref: ");
"can't force alignment of ref: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
dump_printf (MSG_NOTE, "\n");
}
return true;
}
......@@ -761,6 +774,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
{
dump_printf_loc (MSG_NOTE, vect_location, "force alignment of ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, ref);
dump_printf (MSG_NOTE, "\n");
}
((dataref_aux *)dr->aux)->base_decl = base;
......@@ -788,7 +802,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
/* Negative or overflowed misalignment value. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected misalign value");
"unexpected misalign value\n");
return false;
}
......@@ -799,6 +813,7 @@ vect_compute_data_ref_alignment (struct data_reference *dr)
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"misalign = %d bytes of ref ", DR_MISALIGNMENT (dr));
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, ref);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return true;
......@@ -895,7 +910,7 @@ vect_update_misalignment_for_peel (struct data_reference *dr,
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.");
dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment to -1.\n");
SET_DR_MISALIGNMENT (dr, -1);
}
......@@ -953,12 +968,13 @@ vect_verify_datarefs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dr));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (supportable_dr_alignment != dr_aligned && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Vectorizing an unaligned access.");
"Vectorizing an unaligned access.\n");
}
return true;
}
......@@ -1015,16 +1031,16 @@ vector_alignment_reachable_p (struct data_reference *dr)
int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
dump_printf (MSG_NOTE,
". misalignment = %d. ", DR_MISALIGNMENT (dr));
dump_printf_loc (MSG_NOTE, vect_location,
"data size =" HOST_WIDE_INT_PRINT_DEC, elmsize);
dump_printf (MSG_NOTE,
". misalignment = %d.\n", DR_MISALIGNMENT (dr));
}
if (DR_MISALIGNMENT (dr) % elmsize)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"data size does not divide the misalignment.\n");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"data size does not divide the misalignment.\n");
return false;
}
}
......@@ -1034,8 +1050,8 @@ vector_alignment_reachable_p (struct data_reference *dr)
tree type = TREE_TYPE (DR_REF (dr));
bool is_packed = not_size_aligned (DR_REF (dr));
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown misalignment, is_packed = %d",is_packed);
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown misalignment, is_packed = %d\n",is_packed);
if ((TYPE_USER_ALIGN (type) && !is_packed)
|| targetm.vectorize.vector_alignment_reachable (type, is_packed))
return true;
......@@ -1071,7 +1087,7 @@ vect_get_data_access_cost (struct data_reference *dr,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_data_access_cost: inside_cost = %d, "
"outside_cost = %d.", *inside_cost, *outside_cost);
"outside_cost = %d.\n", *inside_cost, *outside_cost);
}
......@@ -1346,7 +1362,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_enhance_data_refs_alignment ===");
"=== vect_enhance_data_refs_alignment ===\n");
/* While cost model enhancements are expected in the future, the high level
view of the code at this time is as follows:
......@@ -1515,8 +1531,8 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
if (!aligned_access_p (dr))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector alignment may not be reachable");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vector alignment may not be reachable\n");
break;
}
}
......@@ -1652,7 +1668,7 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Try peeling by %d", npeel);
"Try peeling by %d\n", npeel);
}
/* Ensure that all data refs can be vectorized after the peel. */
......@@ -1725,9 +1741,9 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using peeling.");
"Alignment of access forced using peeling.\n");
dump_printf_loc (MSG_NOTE, vect_location,
"Peeling for alignment will be applied.");
"Peeling for alignment will be applied.\n");
}
/* We've delayed passing the inside-loop peeling costs to the
target cost model until we were sure peeling would happen.
......@@ -1847,13 +1863,13 @@ vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
dr = STMT_VINFO_DATA_REF (stmt_info);
SET_DR_MISALIGNMENT (dr, 0);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using versioning.");
dump_printf_loc (MSG_NOTE, vect_location,
"Alignment of access forced using versioning.\n");
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Versioning for alignment will be applied.");
dump_printf_loc (MSG_NOTE, vect_location,
"Versioning for alignment will be applied.\n");
/* Peeling and versioning can't be done together at this time. */
gcc_assert (! (do_peeling && do_versioning));
......@@ -1919,7 +1935,7 @@ vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"dependence distance = %d.", dist);
"dependence distance = %d.\n", dist);
/* Same loop iteration. */
if (dist == 0
......@@ -1930,13 +1946,14 @@ vect_find_same_alignment_drs (struct data_dependence_relation *ddr,
STMT_VINFO_SAME_ALIGN_REFS (stmtinfo_b).safe_push (dra);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"accesses have the same alignment.");
dump_printf_loc (MSG_NOTE, vect_location,
"accesses have the same alignment.\n");
dump_printf (MSG_NOTE,
"dependence distance modulo vf == 0 between ");
"dependence distance modulo vf == 0 between ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
}
}
......@@ -1954,7 +1971,7 @@ vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_refs_alignment ===");
"=== vect_analyze_data_refs_alignment ===\n");
/* Mark groups of data references with same alignment using
data dependence information. */
......@@ -1971,9 +1988,9 @@ vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo,
if (!vect_compute_data_refs_alignment (loop_vinfo, bb_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't calculate alignment "
"for data ref.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't calculate alignment "
"for data ref.\n");
return false;
}
......@@ -2025,25 +2042,26 @@ vect_analyze_group_access (struct data_reference *dr)
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Detected single element interleaving ");
dump_printf_loc (MSG_NOTE, vect_location,
"Detected single element interleaving ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr));
dump_printf (MSG_NOTE, " step ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step);
dump_printf (MSG_NOTE, "\n");
}
if (loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Data access with gaps requires scalar "
"epilogue loop");
"Data access with gaps requires scalar "
"epilogue loop\n");
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not"
" supported");
" supported\n");
return false;
}
......@@ -2056,8 +2074,9 @@ vect_analyze_group_access (struct data_reference *dr)
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not consecutive access ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
"not consecutive access ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -2094,8 +2113,8 @@ vect_analyze_group_access (struct data_reference *dr)
if (DR_IS_WRITE (data_ref))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two store stmts share the same dr.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Two store stmts share the same dr.\n");
return false;
}
......@@ -2124,8 +2143,8 @@ vect_analyze_group_access (struct data_reference *dr)
if (DR_IS_WRITE (data_ref))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
}
......@@ -2155,9 +2174,11 @@ vect_analyze_group_access (struct data_reference *dr)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaving size is greater than step for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, DR_REF (dr));
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
DR_REF (dr));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -2178,8 +2199,8 @@ vect_analyze_group_access (struct data_reference *dr)
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"interleaved store with gaps\n");
return false;
}
}
......@@ -2196,6 +2217,7 @@ vect_analyze_group_access (struct data_reference *dr)
dump_printf (MSG_MISSED_OPTIMIZATION, " size ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
TYPE_SIZE_UNIT (scalar_type));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -2205,8 +2227,8 @@ vect_analyze_group_access (struct data_reference *dr)
GROUP_SIZE (vinfo_for_stmt (stmt)) = groupsize;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving of size %d", (int)groupsize);
dump_printf_loc (MSG_NOTE, vect_location,
"Detected interleaving of size %d\n", (int)groupsize);
/* SLP: create an SLP data structure for every interleaving group of
stores for further analysis in vect_analyse_slp. */
......@@ -2223,13 +2245,13 @@ vect_analyze_group_access (struct data_reference *dr)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Data access with gaps requires scalar "
"epilogue loop");
"Data access with gaps requires scalar "
"epilogue loop\n");
if (loop->inner)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not supported");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Peeling for outer loop is not supported\n");
return false;
}
......@@ -2261,8 +2283,8 @@ vect_analyze_data_ref_access (struct data_reference *dr)
if (loop_vinfo && !step)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data-ref access in loop");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data-ref access in loop\n");
return false;
}
......@@ -2274,7 +2296,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in inner loop of nest");
"zero step in inner loop of nest\n");
return false;
}
return DR_IS_READ (dr);
......@@ -2292,7 +2314,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"zero step in outer loop.");
"zero step in outer loop.\n");
if (DR_IS_READ (dr))
return true;
else
......@@ -2318,7 +2340,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"grouped access in outer loop.");
"grouped access in outer loop.\n");
return false;
}
......@@ -2482,7 +2504,7 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_data_ref_accesses ===");
"=== vect_analyze_data_ref_accesses ===\n");
if (loop_vinfo)
datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
......@@ -2567,6 +2589,7 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dra));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (drb));
dump_printf (MSG_NOTE, "\n");
}
/* Link the found element into the group list. */
......@@ -2586,8 +2609,8 @@ vect_analyze_data_ref_accesses (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
&& !vect_analyze_data_ref_access (dr))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated access pattern.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated access pattern.\n");
if (bb_vinfo)
{
......@@ -2617,7 +2640,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_prune_runtime_alias_test_list ===");
"=== vect_prune_runtime_alias_test_list ===\n");
for (i = 0; i < ddrs.length (); )
{
......@@ -2636,14 +2659,19 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"found equal ranges ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr_i)));
"found equal ranges ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (DDR_A (ddr_i)));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr_i)));
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (DDR_B (ddr_i)));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_A (ddr_j)));
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (DDR_A (ddr_j)));
dump_printf (MSG_NOTE, ", ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (DDR_B (ddr_j)));
dump_generic_expr (MSG_NOTE, TDF_SLIM,
DR_REF (DDR_B (ddr_j)));
dump_printf (MSG_NOTE, "\n");
}
found = true;
break;
......@@ -2663,9 +2691,9 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"disable versioning for alias - max number of "
"generated checks exceeded.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"disable versioning for alias - max number of "
"generated checks exceeded.\n");
}
LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).truncate (0);
......@@ -2908,9 +2936,9 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
(loop, &LOOP_VINFO_DATAREFS (loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function calls"
" or data references that cannot be analyzed");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function calls"
" or data references that cannot be analyzed\n");
return false;
}
......@@ -2957,7 +2985,7 @@ again:
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unhandled data-ref ");
"not vectorized: unhandled data-ref\n");
return false;
}
......@@ -3060,10 +3088,11 @@ again:
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: data ref analysis "
"failed ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -3078,7 +3107,7 @@ again:
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: base addr of dr is a "
"constant");
"constant\n");
if (bb_vinfo)
break;
......@@ -3095,6 +3124,7 @@ again:
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: volatile type ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -3111,6 +3141,7 @@ again:
"not vectorized: statement can throw an "
"exception ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -3130,6 +3161,7 @@ again:
"not vectorized: statement is bitfield "
"access ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -3149,8 +3181,9 @@ again:
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: dr in a call ");
"not vectorized: dr in a call ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -3190,6 +3223,7 @@ again:
dump_printf_loc (MSG_NOTE, vect_location,
"analyze in outer-loop: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, inner_base);
dump_printf (MSG_NOTE, "\n");
}
outer_base = get_inner_reference (inner_base, &pbitsize, &pbitpos,
......@@ -3209,7 +3243,7 @@ again:
&base_iv, false))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"failed: evolution of base is not affine.\n");
return false;
}
......@@ -3232,7 +3266,7 @@ again:
&offset_iv, false))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"evolution of offset is not affine.\n");
return false;
}
......@@ -3275,6 +3309,7 @@ again:
dump_printf (MSG_NOTE, "\n\touter aligned to: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_DR_ALIGNED_TO (stmt_info));
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -3286,6 +3321,7 @@ again:
"not vectorized: more than one data ref "
"in stmt: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -3311,12 +3347,13 @@ again:
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: no vectype for stmt: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
if (bb_vinfo)
......@@ -3338,6 +3375,7 @@ again:
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_generic_expr (MSG_NOTE, TDF_SLIM,
STMT_VINFO_VECTYPE (stmt_info));
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -3365,6 +3403,7 @@ again:
"not vectorized: not suitable for gather "
"load ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -3384,6 +3423,7 @@ again:
"not vectorized: not suitable for strided "
"load ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -3569,6 +3609,7 @@ vect_create_addr_base_for_vector_ref (gimple stmt,
{
dump_printf_loc (MSG_NOTE, vect_location, "created ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, addr_base);
dump_printf (MSG_NOTE, "\n");
}
return addr_base;
......@@ -3700,6 +3741,7 @@ vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop,
else
dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_BASE_OBJECT (dr));
dump_printf (MSG_NOTE, "\n");
}
/* (1) Create the new aggregate-pointer variable.
......@@ -4005,7 +4047,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2");
" is not a power of 2\n");
return false;
}
......@@ -4030,7 +4072,7 @@ vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
"interleave op not supported by target.");
"interleave op not supported by target.\n");
return false;
}
......@@ -4452,7 +4494,7 @@ vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"the size of the group of accesses"
" is not a power of 2");
" is not a power of 2\n");
return false;
}
......@@ -4475,7 +4517,7 @@ vect_grouped_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"extract even/odd not supported by target");
"extract even/odd not supported by target\n");
return false;
}
......
......@@ -683,6 +683,7 @@ slpeel_make_loop_iterate_ntimes (struct loop *loop, tree niters)
dump_printf (MSG_NOTE, "\nloop at %s:%d: ", LOC_FILE (loop_loc),
LOC_LINE (loop_loc));
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, cond_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
loop->nb_iterations = niters;
}
......@@ -1552,7 +1553,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
/* Analyze phi functions of the loop header. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:");
dump_printf_loc (MSG_NOTE, vect_location, "vect_can_advance_ivs_p:\n");
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
tree evolution_part;
......@@ -1562,6 +1563,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Skip virtual phi's. The data dependences that are associated with
......@@ -1571,7 +1573,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"virtual phi. skip.");
"virtual phi. skip.\n");
continue;
}
......@@ -1581,7 +1583,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc phi. skip.");
"reduc phi. skip.\n");
continue;
}
......@@ -1593,7 +1595,7 @@ vect_can_advance_ivs_p (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf (MSG_MISSED_OPTIMIZATION,
"No access function or evolution.");
"No access function or evolution.\n");
return false;
}
......@@ -1682,6 +1684,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
dump_printf_loc (MSG_NOTE, vect_location,
"vect_update_ivs_after_vectorizer: phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Skip virtual phi's. */
......@@ -1689,7 +1692,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"virtual phi. skip.");
"virtual phi. skip.\n");
continue;
}
......@@ -1699,7 +1702,7 @@ vect_update_ivs_after_vectorizer (loop_vec_info loop_vinfo, tree niters,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc phi. skip.");
"reduc phi. skip.\n");
continue;
}
......@@ -1762,7 +1765,7 @@ vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_do_peeling_for_loop_bound ===");
"=== vect_do_peeling_for_loop_bound ===\n");
initialize_original_copy_tables ();
......@@ -1881,7 +1884,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters, int
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"known peeling = %d.", npeel);
"known peeling = %d.\n", npeel);
iters = build_int_cst (niters_type, npeel);
*bound = LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo);
......@@ -1938,6 +1941,7 @@ vect_gen_niters_for_prolog_loop (loop_vec_info loop_vinfo, tree loop_niters, int
dump_printf_loc (MSG_NOTE, vect_location,
"niters for prolog loop: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, iters);
dump_printf (MSG_NOTE, "\n");
}
var = create_tmp_var (niters_type, "prolog_loop_niters");
......@@ -1993,7 +1997,7 @@ vect_update_inits_of_drs (loop_vec_info loop_vinfo, tree niters)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_update_inits_of_dr ===");
"=== vect_update_inits_of_dr ===\n");
FOR_EACH_VEC_ELT (datarefs, i, dr)
vect_update_init_of_dr (dr, niters);
......@@ -2338,6 +2342,7 @@ vect_create_cond_for_alias_checks (loop_vec_info loop_vinfo, tree * cond_expr)
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_a));
dump_printf (MSG_NOTE, " and ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, DR_REF (dr_b));
dump_printf (MSG_NOTE, "\n");
}
seg_a_min = addr_base_a;
......
......@@ -188,7 +188,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_determine_vectorization_factor ===");
"=== vect_determine_vectorization_factor ===\n");
for (i = 0; i < nbbs; i++)
{
......@@ -202,6 +202,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
gcc_assert (stmt_info);
......@@ -216,6 +217,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vectype = get_vectype_for_scalar_type (scalar_type);
......@@ -228,6 +230,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
"data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -237,11 +240,13 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
nunits = TYPE_VECTOR_SUBPARTS (vectype);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d", nunits);
dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n",
nunits);
if (!vectorization_factor
|| (nunits > vectorization_factor))
......@@ -265,6 +270,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
gcc_assert (stmt_info);
......@@ -286,12 +292,13 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "skip.");
dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
gsi_next (&si);
continue;
}
......@@ -336,6 +343,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
"==> examining pattern def stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
pattern_def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
stmt = pattern_def_stmt;
......@@ -359,6 +367,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
"not vectorized: irregular stmt.");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -370,6 +379,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector stmt in loop:");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -394,6 +404,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
......@@ -405,6 +416,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
"data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -415,6 +427,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -428,6 +441,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vf_vectype = get_vectype_for_scalar_type (scalar_type);
if (!vf_vectype)
......@@ -438,6 +452,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
"not vectorized: unsupported data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -455,6 +470,7 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vf_vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -463,11 +479,12 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
dump_printf (MSG_NOTE, "\n");
}
nunits = TYPE_VECTOR_SUBPARTS (vf_vectype);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d", nunits);
dump_printf_loc (MSG_NOTE, vect_location, "nunits = %d\n", nunits);
if (!vectorization_factor
|| (nunits > vectorization_factor))
vectorization_factor = nunits;
......@@ -482,13 +499,13 @@ vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
/* TODO: Analyze cost. Decide if worth while to vectorize. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d",
dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = %d\n",
vectorization_factor);
if (vectorization_factor <= 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type");
"not vectorized: unsupported data-type\n");
return false;
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
......@@ -530,6 +547,7 @@ vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
dump_printf (MSG_NOTE, ", init: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
dump_printf (MSG_NOTE, "\n");
}
*init = init_expr;
......@@ -547,7 +565,7 @@ vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step unknown.");
"step unknown.\n");
return false;
}
......@@ -573,7 +591,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_scalar_cycles ===");
"=== vect_analyze_scalar_cycles ===\n");
/* First - identify all inductions. Reduction detection assumes that all the
inductions have been identified, therefore, this order must not be
......@@ -589,6 +607,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Skip virtual phi's. The data dependences that are associated with
......@@ -608,6 +627,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
dump_printf_loc (MSG_NOTE, vect_location,
"Access function of PHI: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
dump_printf (MSG_NOTE, "\n");
}
STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
= evolution_part_in_loop_num (access_fn, loop->num);
......@@ -625,7 +645,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.");
dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
}
......@@ -643,6 +663,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
gcc_assert (!virtual_operand_p (def)
......@@ -657,7 +678,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected double reduction.");
"Detected double reduction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
......@@ -669,7 +690,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected vectorizable nested cycle.");
"Detected vectorizable nested cycle.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
......@@ -679,7 +700,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected reduction.");
"Detected reduction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
......@@ -693,7 +714,7 @@ vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
else
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown def-use cycle pattern.");
"Unknown def-use cycle pattern.\n");
}
worklist.release ();
......@@ -755,7 +776,7 @@ vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== get_loop_niters ===");
"=== get_loop_niters ===\n");
niters = number_of_exit_cond_executions (loop);
if (niters != NULL_TREE
......@@ -767,6 +788,7 @@ vect_get_loop_niters (struct loop *loop, tree *number_of_iterations)
{
dump_printf_loc (MSG_NOTE, vect_location, "==> get_loop_niters:");
dump_generic_expr (MSG_NOTE, TDF_SLIM, *number_of_iterations);
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -996,7 +1018,7 @@ vect_analyze_loop_1 (struct loop *loop)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"===== analyze_loop_nest_1 =====");
"===== analyze_loop_nest_1 =====\n");
/* Check the CFG characteristics of the loop (nesting, entry/exit, etc. */
......@@ -1005,7 +1027,7 @@ vect_analyze_loop_1 (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad inner-loop form.");
"bad inner-loop form.\n");
return NULL;
}
......@@ -1031,7 +1053,7 @@ vect_analyze_loop_form (struct loop *loop)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_form ===");
"=== vect_analyze_loop_form ===\n");
/* Different restrictions apply when we are considering an inner-most loop,
vs. an outer (nested) loop.
......@@ -1055,7 +1077,7 @@ vect_analyze_loop_form (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.");
"not vectorized: control flow in loop.\n");
return NULL;
}
......@@ -1063,7 +1085,7 @@ vect_analyze_loop_form (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: empty loop.");
"not vectorized: empty loop.\n");
return NULL;
}
}
......@@ -1093,7 +1115,7 @@ vect_analyze_loop_form (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple nested loops.");
"not vectorized: multiple nested loops.\n");
return NULL;
}
......@@ -1103,7 +1125,7 @@ vect_analyze_loop_form (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: Bad inner loop.");
"not vectorized: Bad inner loop.\n");
return NULL;
}
......@@ -1111,8 +1133,9 @@ vect_analyze_loop_form (struct loop *loop)
LOOP_VINFO_NITERS (inner_loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: inner-loop count not invariant.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: inner-loop count not"
" invariant.\n");
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
}
......@@ -1121,7 +1144,7 @@ vect_analyze_loop_form (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.");
"not vectorized: control flow in loop.\n");
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
}
......@@ -1136,15 +1159,15 @@ vect_analyze_loop_form (struct loop *loop)
|| single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported outerloop form.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported outerloop form.\n");
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Considering outer-loop vectorization.");
"Considering outer-loop vectorization.\n");
}
if (!single_exit (loop)
......@@ -1154,10 +1177,10 @@ vect_analyze_loop_form (struct loop *loop)
{
if (!single_exit (loop))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple exits.");
"not vectorized: multiple exits.\n");
else if (EDGE_COUNT (loop->header->preds) != 2)
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: too many incoming edges.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: too many incoming edges.\n");
}
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
......@@ -1173,7 +1196,7 @@ vect_analyze_loop_form (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: latch block not empty.");
"not vectorized: latch block not empty.\n");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
......@@ -1187,13 +1210,13 @@ vect_analyze_loop_form (struct loop *loop)
{
split_loop_exit_edge (e);
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "split exit edge.");
dump_printf (MSG_NOTE, "split exit edge.\n");
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: abnormal loop exit edge.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: abnormal loop exit edge.\n");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
......@@ -1204,8 +1227,8 @@ vect_analyze_loop_form (struct loop *loop)
if (!loop_cond)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated exit condition.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated exit condition.\n");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
......@@ -1214,9 +1237,9 @@ vect_analyze_loop_form (struct loop *loop)
if (!number_of_iterations)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations cannot be "
"computed.");
"computed.\n");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
......@@ -1226,7 +1249,7 @@ vect_analyze_loop_form (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Infinite number of iterations.");
"Infinite number of iterations.\n");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
......@@ -1239,13 +1262,14 @@ vect_analyze_loop_form (struct loop *loop)
dump_printf_loc (MSG_NOTE, vect_location,
"Symbolic number of iterations is ");
dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
dump_printf (MSG_NOTE, "\n");
}
}
else if (TREE_INT_CST_LOW (number_of_iterations) == 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations = 0.");
"not vectorized: number of iterations = 0.\n");
if (inner_loop_vinfo)
destroy_loop_vec_info (inner_loop_vinfo, true);
return NULL;
......@@ -1293,7 +1317,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_operations ===");
"=== vect_analyze_loop_operations ===\n");
gcc_assert (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
......@@ -1329,7 +1353,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Updating vectorization factor to %d ",
"Updating vectorization factor to %d\n",
vectorization_factor);
}
......@@ -1347,6 +1371,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Inner-loop loop-closed exit phi in outer-loop vectorization
......@@ -1363,9 +1388,9 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
!= vect_double_reduction_def)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported loop-closed phi in "
"outer-loop.");
"outer-loop.\n");
return false;
}
......@@ -1406,7 +1431,7 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
/* FORNOW: not yet supported. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: value used after loop.");
"not vectorized: value used after loop.\n");
return false;
}
......@@ -1415,8 +1440,8 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
/* A scalar-dependence cycle that we don't support. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: scalar dependence cycle.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: scalar dependence cycle.\n");
return false;
}
......@@ -1431,10 +1456,11 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant phi not "
"supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -1458,18 +1484,18 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"All the computation can be taken out of the loop.");
"All the computation can be taken out of the loop.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: redundant loop. no profit to "
"vectorize.");
"vectorize.\n");
return false;
}
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vectorization_factor = %d, niters = "
HOST_WIDE_INT_PRINT_DEC, vectorization_factor,
HOST_WIDE_INT_PRINT_DEC "\n", vectorization_factor,
LOOP_VINFO_INT_NITERS (loop_vinfo));
if ((LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
......@@ -1479,11 +1505,11 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: iteration count too small.");
"not vectorized: iteration count too small.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: iteration count smaller than "
"vectorization factor.");
"vectorization factor.\n");
return false;
}
......@@ -1501,11 +1527,11 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.");
"not vectorized: vectorization not profitable.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector version will never be "
"profitable.");
"profitable.\n");
return false;
}
......@@ -1527,12 +1553,12 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.");
"not vectorized: vectorization not profitable.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: iteration count smaller than user "
"specified loop bound parameter or minimum profitable "
"iterations (whichever is more conservative).");
"iterations (whichever is more conservative).\n");
return false;
}
......@@ -1543,13 +1569,13 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: estimated iteration count too "
"small.");
"small.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: estimated iteration count smaller "
"than specified loop bound parameter or minimum "
"profitable iterations (whichever is more "
"conservative).");
"conservative).\n");
return false;
}
......@@ -1558,19 +1584,19 @@ vect_analyze_loop_operations (loop_vec_info loop_vinfo, bool slp)
|| LOOP_PEELING_FOR_ALIGNMENT (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required.");
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required.\n");
if (!vect_can_advance_ivs_p (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create epilog loop 1.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create epilog loop 1.\n");
return false;
}
if (!slpeel_can_duplicate_loop_p (loop, single_exit (loop)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create epilog loop 2.");
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create epilog loop 2.\n");
return false;
}
}
......@@ -1603,7 +1629,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data references.");
"bad data references.\n");
return false;
}
......@@ -1615,7 +1641,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data access.");
"bad data access.\n");
return false;
}
......@@ -1633,7 +1659,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected pattern.");
"unexpected pattern.\n");
return false;
}
......@@ -1648,7 +1674,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.");
"bad data dependence.\n");
return false;
}
......@@ -1657,14 +1683,14 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine vectorization factor.");
"can't determine vectorization factor.\n");
return false;
}
if (max_vf < LOOP_VINFO_VECT_FACTOR (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.");
"bad data dependence.\n");
return false;
}
......@@ -1676,7 +1702,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.");
"bad data alignment.\n");
return false;
}
......@@ -1689,7 +1715,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"too long list of versioning for alias "
"run-time tests.");
"run-time tests.\n");
return false;
}
......@@ -1701,7 +1727,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.");
"bad data alignment.\n");
return false;
}
......@@ -1726,7 +1752,7 @@ vect_analyze_loop_2 (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad operation or unsupported loop bound.");
"bad operation or unsupported loop bound.\n");
return false;
}
......@@ -1750,7 +1776,7 @@ vect_analyze_loop (struct loop *loop)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"===== analyze_loop_nest =====");
"===== analyze_loop_nest =====\n");
if (loop_outer (loop)
&& loop_vec_info_for_loop (loop_outer (loop))
......@@ -1758,7 +1784,7 @@ vect_analyze_loop (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop already vectorized.");
"outer-loop already vectorized.\n");
return NULL;
}
......@@ -1770,7 +1796,7 @@ vect_analyze_loop (struct loop *loop)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad loop form.");
"bad loop form.\n");
return NULL;
}
......@@ -1851,6 +1877,7 @@ report_vect_op (int msg_type, gimple stmt, const char *msg)
{
dump_printf_loc (msg_type, vect_location, "%s", msg);
dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
dump_printf (msg_type, "\n");
}
......@@ -2026,6 +2053,7 @@ vect_is_slp_reduction (loop_vec_info loop_info, gimple phi, gimple first_stmt)
{
dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
swap_tree_operands (next_stmt,
......@@ -2126,7 +2154,7 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"intermediate value used outside loop.");
"intermediate value used outside loop.\n");
return NULL;
}
......@@ -2138,7 +2166,7 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction used in loop.");
"reduction used in loop.\n");
return NULL;
}
}
......@@ -2150,6 +2178,7 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: not ssa_name: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return NULL;
}
......@@ -2159,14 +2188,17 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: no def_stmt.");
"reduction: no def_stmt.\n");
return NULL;
}
if (!is_gimple_assign (def_stmt) && gimple_code (def_stmt) != GIMPLE_PHI)
{
if (dump_enabled_p ())
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
{
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
return NULL;
}
......@@ -2195,7 +2227,7 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction used in loop.");
"reduction used in loop.\n");
return NULL;
}
}
......@@ -2211,7 +2243,7 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported phi node definition.");
"unsupported phi node definition.\n");
return NULL;
}
......@@ -2334,6 +2366,7 @@ vect_is_simple_reduction_1 (loop_vec_info loop_info, gimple phi,
dump_generic_expr (MSG_NOTE, TDF_SLIM,
TREE_TYPE (op4));
}
dump_printf (MSG_NOTE, "\n");
}
return NULL;
......@@ -2591,7 +2624,7 @@ vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"cost model: epilogue peel iters set to vf/2 "
"because loop iterations are unknown .");
"because loop iterations are unknown .\n");
/* If peeled iterations are known but number of scalar loop
iterations are unknown, count a taken branch per peeled loop. */
......@@ -2649,7 +2682,7 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
/* Cost model disabled. */
if (!flag_vect_cost_model)
{
dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.");
dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
*ret_min_profitable_niters = 0;
*ret_min_profitable_estimate = 0;
return;
......@@ -2707,14 +2740,14 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
{
peel_iters_prologue = vf/2;
dump_printf (MSG_NOTE, "cost model: "
"prologue peel iters set to vf/2.");
"prologue peel iters set to vf/2.\n");
/* If peeling for alignment is unknown, loop bound of main loop becomes
unknown. */
peel_iters_epilogue = vf/2;
dump_printf (MSG_NOTE, "cost model: "
"epilogue peel iters set to vf/2 because "
"peeling for alignment is unknown.");
"peeling for alignment is unknown.\n");
/* If peeled iterations are unknown, count a taken branch and a not taken
branch per peeled loop. Even if scalar loop iterations are known,
......@@ -2884,7 +2917,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cost model: the vector iteration cost = %d "
"divided by the scalar iteration cost = %d "
"is greater or equal to the vectorization factor = %d.",
"is greater or equal to the vectorization factor = %d"
".\n",
vec_inside_cost, scalar_single_iter_cost, vf);
*ret_min_profitable_niters = -1;
*ret_min_profitable_estimate = -1;
......@@ -2910,9 +2944,10 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
peel_iters_prologue);
dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
peel_iters_epilogue);
dump_printf (MSG_NOTE,
dump_printf (MSG_NOTE,
" Calculated minimum iters for profitability: %d\n",
min_profitable_iters);
dump_printf (MSG_NOTE, "\n");
}
min_profitable_iters =
......@@ -2925,7 +2960,8 @@ vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
" Runtime profitability threshold = %d\n", min_profitable_iters);
" Runtime profitability threshold = %d\n",
min_profitable_iters);
*ret_min_profitable_niters = min_profitable_iters;
......@@ -3014,6 +3050,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
"unsupported data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
TREE_TYPE (reduction_op));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -3082,7 +3119,7 @@ vect_model_reduction_cost (stmt_vec_info stmt_info, enum tree_code reduc_code,
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
"vect_model_reduction_cost: inside_cost = %d, "
"prologue_cost = %d, epilogue_cost = %d .", inside_cost,
"prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
prologue_cost, epilogue_cost);
return true;
......@@ -3111,7 +3148,7 @@ vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_induction_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
"prologue_cost = %d .\n", inside_cost, prologue_cost);
}
......@@ -3258,6 +3295,7 @@ get_initial_def_for_induction (gimple iv_phi)
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, init_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
constant_p = false;
}
......@@ -3426,6 +3464,7 @@ get_initial_def_for_induction (gimple iv_phi)
dump_printf_loc (MSG_NOTE, vect_location,
"vector of inductions after inner-loop:");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
}
......@@ -3439,6 +3478,7 @@ get_initial_def_for_induction (gimple iv_phi)
dump_printf (MSG_NOTE, "\n");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
SSA_NAME_DEF_STMT (vec_def), 0);
dump_printf (MSG_NOTE, "\n");
}
STMT_VINFO_VEC_STMT (phi_info) = induction_phi;
......@@ -3846,6 +3886,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
dump_printf (MSG_NOTE, "\n");
}
phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
......@@ -4042,7 +4083,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using direct vector reduction.");
"Reduce using direct vector reduction.\n");
vec_dest = vect_create_destination_var (scalar_dest, vectype);
tmp = build1 (reduc_code, vectype, new_phi_result);
......@@ -4093,7 +4134,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using vector shifts");
"Reduce using vector shifts\n");
vec_dest = vect_create_destination_var (scalar_dest, vectype);
new_temp = new_phi_result;
......@@ -4134,7 +4175,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using scalar code. ");
"Reduce using scalar code.\n");
vec_size_in_bits = tree_low_cst (TYPE_SIZE (vectype), 1);
FOR_EACH_VEC_ELT (new_phis, i, new_phi)
......@@ -4225,7 +4266,7 @@ vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"extract scalar result");
"extract scalar result\n");
if (BYTES_BIG_ENDIAN)
bitpos = size_binop (MULT_EXPR,
......@@ -4464,6 +4505,7 @@ vect_finalize_reduction:
dump_printf_loc (MSG_NOTE, vect_location,
"created double reduction phi node: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
dump_printf (MSG_NOTE, "\n");
}
vect_phi_res = PHI_RESULT (vect_phi);
......@@ -4819,7 +4861,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported condition in reduction");
"unsupported condition in reduction\n");
return false;
}
......@@ -4835,7 +4877,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
not vectorizable_reduction. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported shift or rotation.");
"unsupported shift or rotation.\n");
return false;
}
......@@ -4845,7 +4887,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
"no optab.\n");
return false;
}
......@@ -4853,7 +4895,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
{
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "op not supported by target.");
dump_printf (MSG_NOTE, "op not supported by target.\n");
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
|| LOOP_VINFO_VECT_FACTOR (loop_vinfo)
......@@ -4861,7 +4903,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
return false;
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "proceeding using word mode.");
dump_printf (MSG_NOTE, "proceeding using word mode.\n");
}
/* Worthwhile without SIMD support? */
......@@ -4871,7 +4913,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
"not worthwhile without SIMD support.\n");
return false;
}
......@@ -4952,7 +4994,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab for reduction.");
"no optab for reduction.\n");
epilog_reduc_code = ERROR_MARK;
}
......@@ -4962,7 +5004,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc op not supported by target.");
"reduc op not supported by target.\n");
epilog_reduc_code = ERROR_MARK;
}
......@@ -4973,7 +5015,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no reduc code for scalar code.");
"no reduc code for scalar code.\n");
return false;
}
......@@ -4983,7 +5025,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in double reduction");
"multiple types in double reduction\n");
return false;
}
......@@ -5002,7 +5044,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"invalid types in dot-prod");
"invalid types in dot-prod\n");
return false;
}
......@@ -5019,7 +5061,7 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
/** Transform. **/
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.");
dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
/* FORNOW: Multiple types are not supported for condition. */
if (code == COND_EXPR)
......@@ -5306,7 +5348,7 @@ vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
"multiple types in nested loop.\n");
return false;
}
......@@ -5329,9 +5371,9 @@ vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner-loop induction only used outside "
"of the outer vectorized loop.");
"of the outer vectorized loop.\n");
return false;
}
}
......@@ -5354,7 +5396,7 @@ vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_induction ===");
"=== vectorizable_induction ===\n");
vect_model_induction_cost (stmt_info, ncopies);
return true;
}
......@@ -5362,7 +5404,7 @@ vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
/** Transform. **/
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.");
dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
vec_def = get_initial_def_for_induction (phi);
*vec_stmt = SSA_NAME_DEF_STMT (vec_def);
......@@ -5462,7 +5504,7 @@ vectorizable_live_operation (gimple stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
......@@ -5501,7 +5543,7 @@ vect_loop_kill_debug_uses (struct loop *loop, gimple stmt)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"killing debug use");
"killing debug use\n");
gimple_debug_bind_reset_value (ustmt);
update_stmt (ustmt);
......@@ -5542,7 +5584,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
gcov_type expected_iterations = expected_loop_iterations_unbounded (loop);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===");
dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
/* If the profile is imprecise, we have a chance to fix it up. */
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
......@@ -5561,7 +5603,8 @@ vect_transform_loop (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Profitability threshold is %d loop iterations.", th);
"Profitability threshold is %d loop iterations.\n",
th);
check_profitability = true;
}
......@@ -5628,6 +5671,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
stmt_info = vinfo_for_stmt (phi);
if (!stmt_info)
......@@ -5643,12 +5687,12 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if ((TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info))
!= (unsigned HOST_WIDE_INT) vectorization_factor)
&& dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.");
dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform phi.");
dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
}
}
......@@ -5678,6 +5722,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
stmt_info = vinfo_for_stmt (stmt);
......@@ -5752,6 +5797,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
"stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
pattern_def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
stmt = pattern_def_stmt;
......@@ -5776,7 +5822,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
/* For SLP VF is set according to unrolling factor, and not to
vector size, hence for SLP this print is not valid. */
dump_printf_loc (MSG_NOTE, vect_location,
"multiple-types.");
"multiple-types.\n");
/* SLP. Schedule all the SLP instances when the first SLP stmt is
reached. */
......@@ -5788,7 +5834,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== scheduling SLP instances ===");
"=== scheduling SLP instances ===\n");
vect_schedule_slp (loop_vinfo, NULL);
}
......@@ -5807,7 +5853,7 @@ vect_transform_loop (loop_vec_info loop_vinfo)
/* -------- vectorize statement ------------ */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform statement.");
dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
grouped_store = false;
is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
......@@ -5870,5 +5916,6 @@ vect_transform_loop (loop_vec_info loop_vinfo)
if (loop->inner)
dump_printf_loc (MSG_NOTE, vect_location,
"OUTER LOOP VECTORIZED\n");
dump_printf (MSG_NOTE, "\n");
}
}
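Every tree-vect-loop.c hunk above follows the same pattern: with dump_loc no longer emitting the newline, each complete diagnostic string now carries its own trailing "\n". A minimal sketch of the resulting call-site idiom, using the real dump API but a hypothetical helper name that is not part of this patch:

/* Illustrative sketch only: emit one self-terminated diagnostic line.
   The wrapper name is hypothetical; dump_enabled_p and dump_printf_loc
   are the actual dumpfile.h interfaces used throughout the patch.  */
static void
note_at_vect_location (const char *msg)
{
  if (dump_enabled_p ())
    /* The format string now ends the line itself.  */
    dump_printf_loc (MSG_NOTE, vect_location, "%s\n", msg);
}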
......@@ -422,6 +422,7 @@ vect_recog_dot_prod_pattern (vec<gimple> *stmts, tree *type_in,
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_dot_prod_pattern: detected: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* We don't allow changing the order of the computation in the inner-loop
......@@ -682,7 +683,7 @@ vect_recog_widen_mult_pattern (vec<gimple> *stmts,
/* Pattern detected. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_widen_mult_pattern: detected: ");
"vect_recog_widen_mult_pattern: detected:\n");
/* Check target support */
vectype = get_vectype_for_scalar_type (half_type0);
......@@ -921,6 +922,7 @@ vect_recog_widen_sum_pattern (vec<gimple> *stmts, tree *type_in,
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_widen_sum_pattern: detected: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* We don't allow changing the order of the computation in the inner-loop
......@@ -1226,6 +1228,7 @@ vect_recog_over_widening_pattern (vec<gimple> *stmts,
dump_printf_loc (MSG_NOTE, vect_location,
"created pattern stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
type = gimple_expr_type (stmt);
......@@ -1294,6 +1297,7 @@ vect_recog_over_widening_pattern (vec<gimple> *stmts,
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_over_widening_pattern: detected: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
return pattern_stmt;
......@@ -1427,7 +1431,7 @@ vect_recog_widen_shift_pattern (vec<gimple> *stmts,
/* Pattern detected. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_widen_shift_pattern: detected: ");
"vect_recog_widen_shift_pattern: detected:\n");
/* Check target support. */
vectype = get_vectype_for_scalar_type (half_type0);
......@@ -1694,7 +1698,7 @@ vect_recog_rotate_pattern (vec<gimple> *stmts, tree *type_in, tree *type_out)
/* Pattern detected. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_rotate_pattern: detected: ");
"vect_recog_rotate_pattern: detected:\n");
/* Pattern supported. Create a stmt to be used to replace the pattern. */
var = vect_recog_temp_ssa_var (type, NULL);
......@@ -1824,7 +1828,7 @@ vect_recog_vector_vector_shift_pattern (vec<gimple> *stmts,
/* Pattern detected. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_vector_vector_shift_pattern: detected: ");
"vect_recog_vector_vector_shift_pattern: detected:\n");
/* Pattern supported. Create a stmt to be used to replace the pattern. */
var = vect_recog_temp_ssa_var (TREE_TYPE (oprnd0), NULL);
......@@ -1942,7 +1946,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts,
/* Pattern detected. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_divmod_pattern: detected: ");
"vect_recog_divmod_pattern: detected:\n");
cond = build2 (LT_EXPR, boolean_type_node, oprnd0,
build_int_cst (itype, 0));
......@@ -2291,6 +2295,7 @@ vect_recog_divmod_pattern (vec<gimple> *stmts,
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_divmod_pattern: detected: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
stmts->safe_push (last_stmt);
......@@ -2456,7 +2461,7 @@ vect_recog_mixed_size_cond_pattern (vec<gimple> *stmts, tree *type_in,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_mixed_size_cond_pattern: detected: ");
"vect_recog_mixed_size_cond_pattern: detected:\n");
return pattern_stmt;
}
......@@ -2849,7 +2854,7 @@ vect_recog_bool_pattern (vec<gimple> *stmts, tree *type_in,
stmts->safe_push (last_stmt);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_bool_pattern: detected: ");
"vect_recog_bool_pattern: detected:\n");
return pattern_stmt;
}
......@@ -2895,7 +2900,7 @@ vect_recog_bool_pattern (vec<gimple> *stmts, tree *type_in,
stmts->safe_push (last_stmt);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_recog_bool_pattern: detected: ");
"vect_recog_bool_pattern: detected:\n");
return pattern_stmt;
}
else
......@@ -3048,6 +3053,7 @@ vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
dump_printf_loc (MSG_NOTE, vect_location,
"pattern recognized: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Mark the stmts that are involved in the pattern. */
......@@ -3074,6 +3080,7 @@ vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
dump_printf_loc (MSG_NOTE, vect_location,
"additional pattern stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
vect_mark_pattern_stmts (stmt, pattern_stmt, NULL_TREE);
......@@ -3173,7 +3180,7 @@ vect_pattern_recog (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_pattern_recog ===");
"=== vect_pattern_recog ===\n");
if (loop_vinfo)
{
......
......@@ -257,6 +257,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: can't find def for ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -283,6 +284,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: some of the stmts"
" are in a pattern, and others are not ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, oprnd);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -295,7 +297,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.");
"Unsupported pattern.\n");
return false;
}
......@@ -312,7 +314,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
default:
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt: ");
"unsupported defining stmt:\n");
return false;
}
}
......@@ -342,7 +344,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: different types ");
"Build SLP failed: different types\n");
return false;
}
......@@ -367,6 +369,7 @@ vect_get_and_check_slp_defs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: illegal type of def ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -415,6 +418,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
dump_printf_loc (MSG_NOTE, vect_location, "Build SLP for ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Fail to vectorize statements marked as unvectorizable. */
......@@ -425,6 +429,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unvectorizable statement ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -440,6 +445,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: not GIMPLE_ASSIGN nor "
"GIMPLE_CALL ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -457,6 +463,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: condition is not "
"comparison ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -473,6 +480,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: unsupported data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -501,6 +509,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported call type ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -538,7 +547,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: no optab.");
"Build SLP failed: no optab.\n");
/* Fatal mismatch. */
matches[0] = false;
return false;
......@@ -549,7 +558,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: "
"op not supported by target.");
"op not supported by target.\n");
/* Fatal mismatch. */
matches[0] = false;
return false;
......@@ -588,6 +597,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: different operation "
"in stmt ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Mismatch. */
continue;
......@@ -602,6 +612,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: different shift "
"arguments in ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Mismatch. */
continue;
......@@ -622,6 +633,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: different calls in ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Mismatch. */
continue;
......@@ -661,6 +673,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"loads have gaps ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -685,6 +698,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"the SLP group size ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -707,6 +721,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"interleaving chains in one node ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Mismatch. */
continue;
......@@ -731,6 +746,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"unaligned load ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -749,6 +765,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: not grouped load ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* FORNOW: Not grouped loads are not supported. */
......@@ -769,6 +786,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
"Build SLP failed: operation");
dump_printf (MSG_MISSED_OPTIMIZATION, " unsupported ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Fatal mismatch. */
matches[0] = false;
......@@ -790,6 +808,7 @@ vect_build_slp_tree_1 (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
" operation");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
/* Mismatch. */
continue;
......@@ -1076,6 +1095,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
else
for (i = 0; i < group_size; ++i)
dump_printf (MSG_NOTE, "%d ", i);
dump_printf (MSG_NOTE, "\n");
}
/* In case of reduction every load permutation is allowed, since the order
......@@ -1187,6 +1207,7 @@ vect_supported_load_permutation_p (slp_instance slp_instn)
"unsupported unaligned load ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
first_load, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -1449,6 +1470,7 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -1465,9 +1487,9 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (unrolling_factor != 1 && !loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unrolling required in basic"
" block SLP");
" block SLP\n");
return false;
}
......@@ -1514,9 +1536,9 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
if (unrolling_factor != 1 && !loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unrolling required in basic"
" block SLP");
" block SLP\n");
vect_free_slp_tree (node);
loads.release ();
return false;
......@@ -1567,10 +1589,11 @@ vect_analyze_slp_instance (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo,
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Build SLP failed: unsupported load "
"permutation ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
vect_free_slp_instance (new_instance);
return false;
......@@ -1618,7 +1641,7 @@ vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
bool ok = false;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===");
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_analyze_slp ===\n");
if (loop_vinfo)
{
......@@ -1638,7 +1661,7 @@ vect_analyze_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Failed to SLP the basic block.");
"Failed to SLP the basic block.\n");
return false;
}
......@@ -1680,7 +1703,8 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
int decided_to_slp = 0;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ===");
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_make_slp_decision ==="
"\n");
FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
......@@ -1699,7 +1723,7 @@ vect_make_slp_decision (loop_vec_info loop_vinfo)
if (decided_to_slp && dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Decided to SLP %d instances. Unrolling factor %d",
"Decided to SLP %d instances. Unrolling factor %d\n",
decided_to_slp, unrolling_factor);
return (decided_to_slp > 0);
......@@ -1763,7 +1787,8 @@ vect_detect_hybrid_slp (loop_vec_info loop_vinfo)
slp_instance instance;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ===");
dump_printf_loc (MSG_NOTE, vect_location, "=== vect_detect_hybrid_slp ==="
"\n");
FOR_EACH_VEC_ELT (slp_instances, i, instance)
vect_detect_hybrid_slp_stmts (SLP_INSTANCE_TREE (instance));
......@@ -2016,7 +2041,7 @@ vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo)
vec_inside_cost);
dump_printf (MSG_NOTE, " Vector prologue cost: %d\n", vec_prologue_cost);
dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n", vec_epilogue_cost);
dump_printf (MSG_NOTE, " Scalar cost of basic block: %d", scalar_cost);
dump_printf (MSG_NOTE, " Scalar cost of basic block: %d\n", scalar_cost);
}
/* Vectorization is profitable if its cost is less than the cost of scalar
......@@ -2135,7 +2160,7 @@ vect_slp_analyze_bb_1 (basic_block bb)
if (!vect_slp_analyze_operations (bb_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: bad operation in basic block.\n");
destroy_bb_vec_info (bb_vinfo);
......@@ -2239,7 +2264,7 @@ vect_update_slp_costs_according_to_vf (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_update_slp_costs_according_to_vf ===");
"=== vect_update_slp_costs_according_to_vf ===\n");
FOR_EACH_VEC_ELT (slp_instances, i, instance)
{
......@@ -2739,9 +2764,10 @@ vect_get_mask_element (gimple stmt, int first_mask_element, int m,
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"permutation requires at least two vectors ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -2761,6 +2787,7 @@ vect_get_mask_element (gimple stmt, int first_mask_element, int m,
"permutation requires at "
"least three vectors ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -2828,6 +2855,7 @@ vect_transform_slp_perm_load (slp_tree node, vec<tree> dr_chain,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vect permute for ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -2998,6 +3026,7 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing SLP node starting from: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Loads should be inserted before the first load. */
......@@ -3104,7 +3133,7 @@ vect_schedule_slp (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
instance, vf);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vectorizing stmts using SLP.");
"vectorizing stmts using SLP.\n");
}
FOR_EACH_VEC_ELT (slp_instances, i, instance)
......@@ -3168,6 +3197,7 @@ vect_slp_transform_bb (basic_block bb)
dump_printf_loc (MSG_NOTE, vect_location,
"------>SLPing statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
stmt_info = vinfo_for_stmt (stmt);
......
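The tree-vect-slp.c hunks above and the tree-vect-stmts.c hunks below repeatedly append an explicit dump_printf (MSG_..., "\n") after dump_gimple_stmt or dump_generic_expr, because those helpers do not terminate the line themselves. A minimal sketch of that multi-part idiom, with a hypothetical wrapper name; the body mirrors report_vect_op earlier in the patch:

/* Illustrative sketch only: a multi-part dump now ends with an
   explicit newline.  The wrapper name is hypothetical.  */
static void
note_stmt_at_vect_location (const char *prefix, gimple stmt)
{
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, vect_location, "%s", prefix);
      dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
      dump_printf (MSG_NOTE, "\n");   /* terminate the dumped line */
    }
}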
......@@ -191,7 +191,7 @@ vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"mark relevant %d, live %d.", relevant, live_p);
"mark relevant %d, live %d.\n", relevant, live_p);
/* If this stmt is an original stmt in a pattern, we might need to mark its
related pattern stmt instead of the original stmt. However, such stmts
......@@ -248,7 +248,7 @@ vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"last stmt in pattern. don't mark"
" relevant/live.");
" relevant/live.\n");
stmt_info = vinfo_for_stmt (pattern_stmt);
gcc_assert (STMT_VINFO_RELATED_STMT (stmt_info) == stmt);
save_relevant = STMT_VINFO_RELEVANT (stmt_info);
......@@ -266,7 +266,7 @@ vect_mark_relevant (vec<gimple> *worklist, gimple stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"already marked relevant/live.");
"already marked relevant/live.\n");
return;
}
......@@ -311,7 +311,7 @@ vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: stmt has vdefs.");
"vec_stmt_relevant_p: stmt has vdefs.\n");
*relevant = vect_used_in_scope;
}
......@@ -325,7 +325,7 @@ vect_stmt_relevant_p (gimple stmt, loop_vec_info loop_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vec_stmt_relevant_p: used out of loop.");
"vec_stmt_relevant_p: used out of loop.\n");
if (is_gimple_debug (USE_STMT (use_p)))
continue;
......@@ -438,7 +438,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported use in stmt.");
"not vectorized: unsupported use in stmt.\n");
return false;
}
......@@ -449,7 +449,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
if (!flow_bb_inside_loop_p (loop, def_bb))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.");
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt is out of loop.\n");
return true;
}
......@@ -468,7 +468,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"reduc-stmt defining reduc-phi in the same nest.");
"reduc-stmt defining reduc-phi in the same nest.\n");
if (STMT_VINFO_IN_PATTERN_P (dstmt_vinfo))
dstmt_vinfo = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (dstmt_vinfo));
gcc_assert (STMT_VINFO_RELEVANT (dstmt_vinfo) < vect_used_by_reduction);
......@@ -488,7 +488,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop def-stmt defining inner-loop stmt.");
"outer-loop def-stmt defining inner-loop stmt.\n");
switch (relevant)
{
......@@ -526,7 +526,7 @@ process_use (gimple stmt, tree use, loop_vec_info loop_vinfo, bool live_p,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"inner-loop def-stmt defining outer-loop stmt.");
"inner-loop def-stmt defining outer-loop stmt.\n");
switch (relevant)
{
......@@ -590,7 +590,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_mark_stmts_to_be_vectorized ===");
"=== vect_mark_stmts_to_be_vectorized ===\n");
worklist.create (64);
......@@ -605,6 +605,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
......@@ -617,6 +618,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
......@@ -635,6 +637,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Examine the USEs of STMT. For each USE, mark the stmt that defines it
......@@ -678,7 +681,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
default:
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of reduction.");
"unsupported use of reduction.\n");
worklist.release ();
return false;
}
......@@ -693,7 +696,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of nested cycle.");
"unsupported use of nested cycle.\n");
worklist.release ();
return false;
......@@ -708,7 +711,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported use of double reduction.");
"unsupported use of double reduction.\n");
worklist.release ();
return false;
......@@ -832,7 +835,7 @@ vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_simple_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
"prologue_cost = %d .\n", inside_cost, prologue_cost);
}
......@@ -878,7 +881,7 @@ vect_model_promotion_demotion_cost (stmt_vec_info stmt_info,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_promotion_demotion_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
"prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function vect_cost_group_size
......@@ -961,7 +964,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: strided group_size = %d .",
"vect_model_store_cost: strided group_size = %d .\n",
group_size);
}
......@@ -971,7 +974,7 @@ vect_model_store_cost (stmt_vec_info stmt_info, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
"prologue_cost = %d .\n", inside_cost, prologue_cost);
}
......@@ -995,7 +998,7 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: aligned.");
"vect_model_store_cost: aligned.\n");
break;
}
......@@ -1008,7 +1011,7 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_store_cost: unaligned supported by "
"hardware.");
"hardware.\n");
break;
}
......@@ -1018,7 +1021,7 @@ vect_get_store_cost (struct data_reference *dr, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_store_cost: unsupported access.");
"vect_model_store_cost: unsupported access.\n");
break;
}
......@@ -1076,8 +1079,8 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
stmt_info, 0, vect_body);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: strided group_size = %d .",
"vect_model_load_cost: strided group_size = %d .\n",
group_size);
}
......@@ -1102,7 +1105,7 @@ vect_model_load_cost (stmt_vec_info stmt_info, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: inside_cost = %d, "
"prologue_cost = %d .", inside_cost, prologue_cost);
"prologue_cost = %d .\n", inside_cost, prologue_cost);
}
......@@ -1128,7 +1131,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: aligned.");
"vect_model_load_cost: aligned.\n");
break;
}
......@@ -1142,7 +1145,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned supported by "
"hardware.");
"hardware.\n");
break;
}
......@@ -1161,17 +1164,17 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
stmt_info, 0, vect_body);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign");
"vect_model_load_cost: explicit realign\n");
break;
}
case dr_explicit_realign_optimized:
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: unaligned software "
"pipelined.");
"pipelined.\n");
/* Unaligned software pipeline has a load of an address, an initial
load, and possibly a mask operation to "prime" the loop. However,
......@@ -1198,7 +1201,8 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_load_cost: explicit realign optimized");
"vect_model_load_cost: explicit realign optimized"
"\n");
break;
}
......@@ -1209,7 +1213,7 @@ vect_get_load_cost (struct data_reference *dr, int ncopies,
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"vect_model_load_cost: unsupported access.");
"vect_model_load_cost: unsupported access.\n");
break;
}
......@@ -1262,6 +1266,7 @@ vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -1344,6 +1349,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
dump_printf_loc (MSG_NOTE, vect_location,
"vect_get_vec_def_for_operand: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, op);
dump_printf (MSG_NOTE, "\n");
}
is_simple_use = vect_is_simple_use (op, stmt, loop_vinfo, NULL,
......@@ -1357,6 +1363,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
dump_printf_loc (MSG_NOTE, vect_location, "def = ");
loc_printed = 1;
dump_generic_expr (MSG_NOTE, TDF_SLIM, def);
dump_printf (MSG_NOTE, "\n");
}
if (def_stmt)
{
......@@ -1365,6 +1372,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
else
dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -1383,7 +1391,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
/* Create 'vect_cst_ = {cst,cst,...,cst}' */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Create vector_cst. nunits = %d", nunits);
"Create vector_cst. nunits = %d\n", nunits);
return vect_init_vector (stmt, op, vector_type, NULL);
}
......@@ -1399,7 +1407,7 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
/* Create 'vec_inv = {inv,inv,..,inv}' */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.");
dump_printf_loc (MSG_NOTE, vect_location, "Create vector_inv.\n");
return vect_init_vector (stmt, def, vector_type, NULL);
}
......@@ -1666,6 +1674,7 @@ vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
{
dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
gimple_set_location (vec_stmt, gimple_location (stmt));
......@@ -1775,7 +1784,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument types differ.");
"argument types differ.\n");
return false;
}
if (!rhs_type)
......@@ -1786,7 +1795,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
......@@ -1797,7 +1806,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"argument vector types differ.");
"argument vector types differ.\n");
return false;
}
}
......@@ -1814,6 +1823,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -1855,7 +1865,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"function is not vectorizable.");
"function is not vectorizable.\n");
return false;
}
}
......@@ -1877,7 +1887,8 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
STMT_VINFO_TYPE (stmt_info) = call_vec_info_type;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ===");
dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_call ==="
"\n");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
}
......@@ -1885,7 +1896,7 @@ vectorizable_call (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
/** Transform. **/
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform call.");
dump_printf_loc (MSG_NOTE, vect_location, "transform call.\n");
/* Handle def. */
scalar_dest = gimple_call_lhs (stmt);
......@@ -2408,7 +2419,8 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision unsupported.");
"type conversion to/from bit-precision unsupported."
"\n");
return false;
}
......@@ -2418,7 +2430,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
if (op_type == binary_op)
......@@ -2440,7 +2452,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
}
......@@ -2458,6 +2470,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, rhs_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -2499,7 +2512,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
unsupported:
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conversion not supported by target.");
"conversion not supported by target.\n");
return false;
case WIDEN:
......@@ -2598,7 +2611,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_conversion ===");
"=== vectorizable_conversion ===\n");
if (code == FIX_TRUNC_EXPR || code == FLOAT_EXPR)
{
STMT_VINFO_TYPE (stmt_info) = type_conversion_vec_info_type;
......@@ -2621,7 +2634,7 @@ vectorizable_conversion (gimple stmt, gimple_stmt_iterator *gsi,
/** Transform. **/
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform conversion. ncopies = %d.", ncopies);
"transform conversion. ncopies = %d.\n", ncopies);
if (op_type == binary_op)
{
......@@ -2967,7 +2980,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
......@@ -2997,7 +3010,7 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"type conversion to/from bit-precision "
"unsupported.");
"unsupported.\n");
return false;
}
......@@ -3006,14 +3019,14 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_assignment ===");
"=== vectorizable_assignment ===\n");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
}
/** Transform. **/
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.");
dump_printf_loc (MSG_NOTE, vect_location, "transform assignment.\n");
/* Handle def. */
vec_dest = vect_create_destination_var (scalar_dest, vectype);
......@@ -3162,7 +3175,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision shifts not supported.");
"bit-precision shifts not supported.\n");
return false;
}
......@@ -3172,7 +3185,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
/* If op0 is an external or constant def use a vector type with
......@@ -3185,7 +3198,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no vectype for scalar type ");
"no vectype for scalar type\n");
return false;
}
......@@ -3200,7 +3213,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
......@@ -3245,7 +3258,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"operand mode requires invariant argument.");
"operand mode requires invariant argument.\n");
return false;
}
......@@ -3255,7 +3268,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
optab = optab_for_tree_code (code, vectype, optab_vector);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.");
"vector/vector shift/rotate found.\n");
if (!op1_vectype)
op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
......@@ -3265,7 +3278,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.");
" vector/vector shift/rotate.\n");
return false;
}
}
......@@ -3279,7 +3292,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/scalar shift/rotate found.");
"vector/scalar shift/rotate found.\n");
}
else
{
......@@ -3292,7 +3305,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vector/vector shift/rotate found.");
"vector/vector shift/rotate found.\n");
/* Unlike the other binary operators, shifts/rotates have
the rhs being int, instead of the same type as the lhs,
......@@ -3310,7 +3323,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unusable type for last operand in"
" vector/vector shift/rotate.");
" vector/vector shift/rotate.\n");
return false;
}
if (vec_stmt && !slp_node)
......@@ -3329,7 +3342,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
"no optab.\n");
return false;
}
vec_mode = TYPE_MODE (vectype);
......@@ -3338,14 +3351,15 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.");
"op not supported by target.\n");
/* Check only during analysis. */
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
|| (vf < vect_min_worthwhile_factor (code)
&& !vec_stmt))
return false;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "proceeding using word mode.");
dump_printf_loc (MSG_NOTE, vect_location,
"proceeding using word mode.\n");
}
/* Worthwhile without SIMD support? Check only during analysis. */
......@@ -3355,7 +3369,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
"not worthwhile without SIMD support.\n");
return false;
}
......@@ -3363,7 +3377,8 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
STMT_VINFO_TYPE (stmt_info) = shift_vec_info_type;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vectorizable_shift ===");
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_shift ===\n");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
}
......@@ -3372,7 +3387,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.");
"transform binary/unary operation.\n");
/* Handle def. */
vec_dest = vect_create_destination_var (scalar_dest, vectype);
......@@ -3394,7 +3409,7 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"operand 1 using scalar mode.");
"operand 1 using scalar mode.\n");
vec_oprnd1 = op1;
vec_oprnds1.create (slp_node ? slp_node->vec_stmts_size : 1);
vec_oprnds1.quick_push (vec_oprnd1);
......@@ -3525,7 +3540,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"num. args = %d (not unary/binary/ternary op).",
"num. args = %d (not unary/binary/ternary op).\n",
op_type);
return false;
}
......@@ -3544,7 +3559,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bit-precision arithmetic not supported.");
"bit-precision arithmetic not supported.\n");
return false;
}
......@@ -3554,7 +3569,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
/* If op0 is an external or constant def use a vector type with
......@@ -3571,6 +3586,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
"no vectype for scalar type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
TREE_TYPE (op0));
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -3589,7 +3605,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
}
......@@ -3601,7 +3617,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
}
......@@ -3643,7 +3659,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.");
"no optab.\n");
return false;
}
icode = (int) optab_handler (optab, vec_mode);
......@@ -3653,13 +3669,14 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"op not supported by target.");
"op not supported by target.\n");
/* Check only during analysis. */
if (GET_MODE_SIZE (vec_mode) != UNITS_PER_WORD
|| (!vec_stmt && vf < vect_min_worthwhile_factor (code)))
return false;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "proceeding using word mode.");
dump_printf_loc (MSG_NOTE, vect_location,
"proceeding using word mode.\n");
}
/* Worthwhile without SIMD support? Check only during analysis. */
......@@ -3669,7 +3686,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.");
"not worthwhile without SIMD support.\n");
return false;
}
......@@ -3678,7 +3695,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
STMT_VINFO_TYPE (stmt_info) = op_vec_info_type;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_operation ===");
"=== vectorizable_operation ===\n");
vect_model_simple_cost (stmt_info, ncopies, dt, NULL, NULL);
return true;
}
......@@ -3687,7 +3704,7 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform binary/unary operation.");
"transform binary/unary operation.\n");
/* Handle def. */
vec_dest = vect_create_destination_var (scalar_dest, vectype);
......@@ -3897,7 +3914,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
"multiple types in nested loop.\n");
return false;
}
......@@ -3932,7 +3949,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
......@@ -3953,7 +3970,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step for store.");
"negative step for store.\n");
return false;
}
......@@ -3984,7 +4001,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"use not simple.");
"use not simple.\n");
return false;
}
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
......@@ -4048,7 +4065,7 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform store. ncopies = %d", ncopies);
"transform store. ncopies = %d\n", ncopies);
dr_chain.create (group_size);
oprnds.create (group_size);
......@@ -4459,7 +4476,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.");
"multiple types in nested loop.\n");
return false;
}
......@@ -4500,7 +4517,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Aligned load, but unsupported type.");
"Aligned load, but unsupported type.\n");
return false;
}
......@@ -4536,7 +4553,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"gather index use not simple.");
"gather index use not simple.\n");
return false;
}
}
......@@ -4552,7 +4569,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types with negative step.");
"multiple types with negative step.\n");
return false;
}
......@@ -4562,7 +4579,8 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step for group load not supported");
"negative step for group load not supported"
"\n");
return false;
}
alignment_support_scheme = vect_supportable_dr_alignment (dr, false);
......@@ -4571,14 +4589,15 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step but alignment required.");
"negative step but alignment required.\n");
return false;
}
if (!perm_mask_for_reverse (vectype))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"negative step and reversing not supported.");
"negative step and reversing not supported."
"\n");
return false;
}
}
......@@ -4593,7 +4612,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"transform load. ncopies = %d", ncopies);
"transform load. ncopies = %d\n", ncopies);
/** Transform. **/
......@@ -5444,7 +5463,7 @@ vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"value used after loop.");
"value used after loop.\n");
return false;
}
......@@ -5654,13 +5673,14 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (gimple_has_volatile_ops (stmt))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: stmt has volatile operands");
"not vectorized: stmt has volatile operands\n");
return false;
}
......@@ -5696,12 +5716,13 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.");
dump_printf_loc (MSG_NOTE, vect_location, "irrelevant.\n");
return true;
}
......@@ -5718,6 +5739,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
......@@ -5742,6 +5764,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (!vect_analyze_stmt (pattern_def_stmt,
......@@ -5781,6 +5804,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vectype = get_vectype_for_scalar_type (scalar_type);
......@@ -5792,6 +5816,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
"not SLPed: unsupported data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
......@@ -5800,6 +5825,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
STMT_VINFO_VECTYPE (stmt_info) = vectype;
......@@ -5846,6 +5872,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
"not vectorized: relevant stmt not ");
dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -5868,6 +5895,7 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
"not vectorized: live stmt not ");
dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -5964,7 +5992,7 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"stmt not supported.");
"stmt not supported.\n");
gcc_unreachable ();
}
}
......@@ -5989,7 +6017,7 @@ vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi,
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Record the vdef for outer-loop vectorization.");
"Record the vdef for outer-loop vectorization.\n");
/* Find the relevant loop-exit phi-node, and record the vec_stmt there
(to be used when vectorizing outer-loop stmts that use the DEF of
......@@ -6303,6 +6331,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
dump_printf_loc (MSG_NOTE, vect_location,
"vect_is_simple_use: operand ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, operand);
dump_printf (MSG_NOTE, "\n");
}
if (CONSTANT_CLASS_P (operand))
......@@ -6321,7 +6350,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
if (TREE_CODE (operand) == PAREN_EXPR)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.");
dump_printf_loc (MSG_NOTE, vect_location, "non-associatable copy.\n");
operand = TREE_OPERAND (operand, 0);
}
......@@ -6329,7 +6358,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not ssa-name.");
"not ssa-name.\n");
return false;
}
......@@ -6338,7 +6367,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no def_stmt.");
"no def_stmt.\n");
return false;
}
......@@ -6346,6 +6375,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
{
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Empty stmt is expected only in case of a function argument.
......@@ -6376,12 +6406,12 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported pattern.");
"Unsupported pattern.\n");
return false;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.", *dt);
dump_printf_loc (MSG_NOTE, vect_location, "type of def: %d.\n", *dt);
switch (gimple_code (*def_stmt))
{
......@@ -6401,7 +6431,7 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
default:
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported defining stmt: ");
"unsupported defining stmt:\n");
return false;
}
......@@ -589,7 +589,7 @@ check_counter (gimple stmt, const char * name,
dump_printf_loc (MSG_MISSED_OPTIMIZATION, locus,
"correcting inconsistent value profile: %s "
"profiler overall count (%d) does not match BB "
"count (%d)", name, (int)*all, (int)bb_count);
"count (%d)\n", name, (int)*all, (int)bb_count);
*all = bb_count;
if (*count > *all)
*count = *all;
......@@ -1275,7 +1275,7 @@ check_ic_target (gimple call_stmt, struct cgraph_node *target)
locus = gimple_location (call_stmt);
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, locus,
"Skipping target %s with mismatching types for icall ",
"Skipping target %s with mismatching types for icall\n",
cgraph_node_name (target));
return false;
}
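The pattern applied throughout the patch: dump_loc no longer appends a newline, so every dump message must terminate itself. A minimal sketch of the resulting idiom, built only from calls that already appear in the hunks above (illustrative, not an excerpt from the patch):

    /* Single-part message: the format string now ends in "\n".  */
    if (dump_enabled_p ())
      dump_printf_loc (MSG_NOTE, vect_location,
                       "transform load. ncopies = %d\n", ncopies);

    /* Multi-part message: dump_printf_loc starts the line, follow-up
       dump_* calls append to it, and a final dump_printf emits the
       newline explicitly.  */
    if (dump_enabled_p ())
      {
        dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
        dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
        dump_printf (MSG_NOTE, "\n");
      }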