Commit 566d377a by Richard Biener, committed by Richard Biener

re PR tree-optimization/64829 ([ARM] ICE at -O3 in vect_get_vec_def_for_stmt_copy)

2015-01-30  Richard Biener  <rguenther@suse.de>

	PR tree-optimization/64829
	* tree-vect-patterns.c (vect_handle_widen_op_by_const): Do
	not add a widening conversion pattern but hand off extra
	widenings to callers.
	(vect_recog_widen_mult_pattern): Handle extra widening produced
	by vect_handle_widen_op_by_const.
	(vect_recog_widen_shift_pattern): Likewise.
	(vect_pattern_recog_1): Remove excess vertical space in dumping.
	* tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): Likewise.
	(vect_init_vector_1): Likewise.
	(vect_get_vec_def_for_operand): Likewise.
	(vect_finish_stmt_generation): Likewise.
	(vectorizable_load): Likewise.
	(vect_analyze_stmt): Likewise.
	(vect_is_simple_use): Likewise.

	* gcc.dg/vect/pr64829.c: New testcase.

From-SVN: r220275
parent 445e5888
2015-01-30 Richard Biener <rguenther@suse.de>
PR tree-optimization/64829
* tree-vect-patterns.c (vect_handle_widen_op_by_const): Do
not add a widening conversion pattern but hand off extra
widenings to callers.
(vect_recog_widen_mult_pattern): Handle extra widening produced
by vect_handle_widen_op_by_const.
(vect_recog_widen_shift_pattern): Likewise.
(vect_pattern_recog_1): Remove excess vertical space in dumping.
* tree-vect-stmts.c (vect_mark_stmts_to_be_vectorized): Likewise.
(vect_init_vector_1): Likewise.
(vect_get_vec_def_for_operand): Likewise.
(vect_finish_stmt_generation): Likewise.
(vectorizable_load): Likewise.
(vect_analyze_stmt): Likewise.
(vect_is_simple_use): Likewise.
2015-01-29 Jeff Law <law@redhat.com>
* combine.c (try_combine): Fix typo in comment.
......
2015-01-30 Richard Biener <rguenther@suse.de>
PR tree-optimization/64829
* gcc.dg/vect/pr64829.c: New testcase.
2015-01-29 Marek Polacek <polacek@redhat.com>
PR c/64709
......
/* { dg-do compile } */
typedef unsigned char Uint8;
typedef int Sint32;
typedef unsigned int Uint32;
typedef union RMColorDataRef
{
Uint8* data8;
} RMColorDataRef;
typedef struct RMColorData
{
Uint32 dataCount;
RMColorDataRef dataRef;
} RMColorData;
typedef struct RMColorTable
{
Uint8 dataCompsOut;
RMColorDataRef dataRef;
} RMColorTable;
int fail ( const RMColorData * pInColor,
RMColorData * pOutColor,
const RMColorTable * pColorTable )
{
Uint32 comp;
Uint8 nCompOut;
Sint32 result;
Uint32 interpFrac1, interpFrac2, interpFrac3;
Sint32 val0, val1, val2, val3;
Uint8 * pOut;
const Uint8 * pClutData;
const Uint8 * pCornerPoint0;
Uint8 lastOut[((8) > (4) ? (8) : (4))];
pOut = pOutColor->dataRef.data8;
pClutData = pColorTable->dataRef.data8;
nCompOut = pColorTable->dataCompsOut;
pCornerPoint0 = pClutData;
for (comp = 0; comp < nCompOut; comp++)
{
val0 = *pCornerPoint0++;
result = val0 << 4;
result += (val1 - val0) * interpFrac1;
result += (val2 - val1) * interpFrac2;
result += (val3 - val2) * interpFrac3;
*pOut++ = lastOut[comp] = (Uint8)(result >> 4);
}
return (0);
}
/* { dg-final { cleanup-tree-dump "vect" } } */
......@@ -721,16 +721,15 @@ vect_recog_sad_pattern (vec<gimple> *stmts, tree *type_in,
HALF_TYPE, and there is an intermediate type (2 times smaller than TYPE)
that satisfies the above restrictions, we can perform a widening operation
from the intermediate type to TYPE and replace a_T = (TYPE) a_t;
with a_it = (interm_type) a_t; */
with a_it = (interm_type) a_t; Store such operation in *WSTMT. */
static bool
vect_handle_widen_op_by_const (gimple stmt, enum tree_code code,
tree const_oprnd, tree *oprnd,
vec<gimple> *stmts, tree type,
gimple *wstmt, tree type,
tree *half_type, gimple def_stmt)
{
tree new_type, new_oprnd;
gimple new_stmt;
if (code != MULT_EXPR && code != LSHIFT_EXPR)
return false;
......@@ -761,29 +760,11 @@ vect_handle_widen_op_by_const (gimple stmt, enum tree_code code,
&& compare_tree_int (const_oprnd, TYPE_PRECISION (new_type)) == 1))
return false;
/* Use NEW_TYPE for widening operation. */
if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)))
{
new_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt));
/* Check if the already created pattern stmt is what we need. */
if (!is_gimple_assign (new_stmt)
|| !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (new_stmt))
|| TREE_TYPE (gimple_assign_lhs (new_stmt)) != new_type)
return false;
stmts->safe_push (def_stmt);
*oprnd = gimple_assign_lhs (new_stmt);
}
else
{
/* Create a_T = (NEW_TYPE) a_t; */
*oprnd = gimple_assign_rhs1 (def_stmt);
new_oprnd = make_ssa_name (new_type);
new_stmt = gimple_build_assign (new_oprnd, NOP_EXPR, *oprnd);
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (def_stmt)) = new_stmt;
stmts->safe_push (def_stmt);
*oprnd = new_oprnd;
}
/* Use NEW_TYPE for widening operation and create a_T = (NEW_TYPE) a_t; */
*oprnd = gimple_assign_rhs1 (def_stmt);
new_oprnd = make_ssa_name (new_type);
*wstmt = gimple_build_assign (new_oprnd, NOP_EXPR, *oprnd);
*oprnd = new_oprnd;
*half_type = new_type;
return true;
......@@ -920,7 +901,7 @@ vect_recog_widen_mult_pattern (vec<gimple> *stmts,
if (TREE_CODE (oprnd1) == INTEGER_CST
&& TREE_CODE (half_type0) == INTEGER_TYPE
&& vect_handle_widen_op_by_const (last_stmt, MULT_EXPR, oprnd1,
&oprnd0, stmts, type,
&oprnd0, &new_stmt, type,
&half_type0, def_stmt0))
{
half_type1 = half_type0;
......@@ -934,6 +915,10 @@ vect_recog_widen_mult_pattern (vec<gimple> *stmts,
the smaller type into the larger type. */
if (TYPE_PRECISION (half_type0) != TYPE_PRECISION (half_type1))
{
/* If we already used up the single-stmt slot give up. */
if (new_stmt)
return NULL;
tree* oprnd = NULL;
gimple def_stmt = NULL;
......@@ -1734,7 +1719,7 @@ vect_recog_widen_shift_pattern (vec<gimple> *stmts,
/* Check operand 0: it has to be defined by a type promotion. */
if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
&promotion)
&promotion)
|| !promotion)
return NULL;
......@@ -1764,8 +1749,9 @@ vect_recog_widen_shift_pattern (vec<gimple> *stmts,
}
/* Check if this a widening operation. */
gimple wstmt = NULL;
if (!vect_handle_widen_op_by_const (last_stmt, LSHIFT_EXPR, oprnd1,
&oprnd0, stmts,
&oprnd0, &wstmt,
type, &half_type0, def_stmt0))
return NULL;
......@@ -1793,6 +1779,17 @@ vect_recog_widen_shift_pattern (vec<gimple> *stmts,
var = vect_recog_temp_ssa_var (type, NULL);
pattern_stmt =
gimple_build_assign (var, WIDEN_LSHIFT_EXPR, oprnd0, oprnd1);
if (wstmt)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
new_pattern_def_seq (stmt_vinfo, wstmt);
stmt_vec_info new_stmt_info
= new_stmt_vec_info (wstmt, loop_vinfo, bb_vinfo);
set_vinfo_for_stmt (wstmt, new_stmt_info);
STMT_VINFO_VECTYPE (new_stmt_info) = vectype;
}
if (dump_enabled_p ())
dump_gimple_stmt_loc (MSG_NOTE, vect_location, TDF_SLIM, pattern_stmt, 0);
......@@ -3414,7 +3411,6 @@ vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
dump_printf_loc (MSG_NOTE, vect_location,
"pattern recognized: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Mark the stmts that are involved in the pattern. */
......@@ -3441,7 +3437,6 @@ vect_pattern_recog_1 (vect_recog_func_ptr vect_recog_func,
dump_printf_loc (MSG_NOTE, vect_location,
"additional pattern stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
vect_mark_pattern_stmts (stmt, pattern_stmt, NULL_TREE);
......
......@@ -676,7 +676,6 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "init: phi relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_printf (MSG_NOTE, "\n");
}
if (vect_stmt_relevant_p (phi, loop_vinfo, &relevant, &live_p))
......@@ -689,7 +688,6 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "init: stmt relevant? ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (vect_stmt_relevant_p (stmt, loop_vinfo, &relevant, &live_p))
......@@ -708,7 +706,6 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
{
dump_printf_loc (MSG_NOTE, vect_location, "worklist: examine stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Examine the USEs of STMT. For each USE, mark the stmt that defines it
......@@ -809,7 +806,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
live_p, relevant, &worklist, false)
|| !process_use (stmt, TREE_OPERAND (op, 1), loop_vinfo,
live_p, relevant, &worklist, false))
return false;
return false;
i = 2;
}
for (; i < gimple_num_ops (stmt); i++)
......@@ -847,7 +844,7 @@ vect_mark_stmts_to_be_vectorized (loop_vec_info loop_vinfo)
gcc_assert (decl);
if (!process_use (stmt, off, loop_vinfo, live_p, relevant,
&worklist, true))
return false;
return false;
}
} /* while worklist */
......@@ -1319,7 +1316,6 @@ vect_init_vector_1 (gimple stmt, gimple new_stmt, gimple_stmt_iterator *gsi)
dump_printf_loc (MSG_NOTE, vect_location,
"created new init_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -1423,7 +1419,6 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
else
dump_printf_loc (MSG_NOTE, vect_location, " def_stmt = ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
......@@ -1720,7 +1715,6 @@ vect_finish_stmt_generation (gimple stmt, gimple vec_stmt,
{
dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vec_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
gimple_set_location (vec_stmt, gimple_location (stmt));
......@@ -6584,7 +6578,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
"hoisting out of the vectorized "
"loop: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
tree tem = copy_ssa_name (scalar_dest);
gsi_insert_on_edge_immediate
......@@ -6999,7 +6992,6 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (gimple_has_volatile_ops (stmt))
......@@ -7042,7 +7034,6 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
}
else
......@@ -7065,7 +7056,6 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (!vect_analyze_stmt (pattern_stmt, need_to_vectorize, node))
......@@ -7090,7 +7080,6 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, pattern_def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
if (!vect_analyze_stmt (pattern_def_stmt,
......@@ -7202,7 +7191,6 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
"not vectorized: relevant stmt not ");
dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -7225,7 +7213,6 @@ vect_analyze_stmt (gimple stmt, bool *need_to_vectorize, slp_tree node)
"not vectorized: live stmt not ");
dump_printf (MSG_MISSED_OPTIMIZATION, "supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
......@@ -7727,7 +7714,6 @@ vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo,
{
dump_printf_loc (MSG_NOTE, vect_location, "def_stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, *def_stmt, 0);
dump_printf (MSG_NOTE, "\n");
}
/* Empty stmt is expected only in case of a function argument.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment