Commit bc4fb355 by Ira Rosen Committed by Ulrich Weigand

tree-vect-patterns.c (widened_name_p): Rename to ...

ChangeLog:

	* tree-vect-patterns.c (widened_name_p): Rename to ...
	(type_conversion_p): ... this.  Add new argument to determine
	if it's a promotion or demotion operation.  Check for
	CONVERT_EXPR_CODE_P instead of NOP_EXPR.
	(vect_recog_dot_prod_pattern): Call type_conversion_p instead
	of widened_name_p.
	(vect_recog_widen_mult_pattern, vect_recog_widen_sum_pattern,
	vect_operation_fits_smaller_type, vect_recog_widen_shift_pattern):
	Likewise.
	(vect_recog_mixed_size_cond_pattern): Likewise and allow
	non-constant then and else clauses.

testsuite/ChangeLog:

	* gcc.dg/vect/slp-cond-3.c: New test.
	* gcc.dg/vect/slp-cond-4.c: New test.

Co-Authored-By: Ulrich Weigand <ulrich.weigand@linaro.org>

From-SVN: r185437
parent f5709183
2012-03-15 Ira Rosen <irar@il.ibm.com>
Ulrich Weigand <ulrich.weigand@linaro.org>
* tree-vect-patterns.c (widened_name_p): Rename to ...
(type_conversion_p): ... this. Add new argument to determine
if it's a promotion or demotion operation. Check for
CONVERT_EXPR_CODE_P instead of NOP_EXPR.
(vect_recog_dot_prod_pattern): Call type_conversion_p instead
of widened_name_p.
(vect_recog_widen_mult_pattern, vect_recog_widen_sum_pattern,
vect_operation_fits_smaller_type, vect_recog_widen_shift_pattern):
Likewise.
(vect_recog_mixed_size_cond_pattern): Likewise and allow
non-constant then and else clauses.
2012-03-15 Ira Rosen <irar@il.ibm.com>
Ulrich Weigand <ulrich.weigand@linaro.org>
* tree-vectorizer.h (vect_pattern_recog): Add new argument.
* tree-vect-loop.c (vect_analyze_loop_2): Update call to
vect_pattern_recog.
......
2012-03-15 Ira Rosen <irar@il.ibm.com>
Ulrich Weigand <ulrich.weigand@linaro.org>
* gcc.dg/vect/slp-cond-3.c: New test.
* gcc.dg/vect/slp-cond-4.c: New test.
2012-03-15 Ira Rosen <irar@il.ibm.com>
Ulrich Weigand <ulrich.weigand@linaro.org>
* gcc.dg/vect/bb-slp-pattern-1.c: New test.
* gcc.dg/vect/bb-slp-pattern-2.c: New test.
......
/* { dg-require-effective-target vect_condition } */
#include "tree-vect.h"
#define N 128
/* Comparison in int, then/else and result in unsigned char. */
/* Select A when X >= Y, otherwise B; the chosen int is narrowed to the
   unsigned char return type.  */
static inline unsigned char
foo (int x, int y, int a, int b)
{
  return x >= y ? a : b;
}
/* Test kernel: for each element, e[k] = (c[k] >= d[k]) ? a[k]*w : b[k]*w.
   The comparison is done in int (foo's x/y parameters) while the data and
   result are unsigned char, exercising the mixed-size COND_EXPR pattern.
   The body is manually unrolled 16x so the vectorizer can build SLP groups
   from the scalar stores -- the unrolling is essential to what the test's
   scan-tree-dump checks; do not refactor it into a loop.
   noinline/noclone keep this function out of main so the dump is scanned
   on its own body; __restrict__ asserts the five buffers do not alias,
   which makes the vectorization legal.  */
__attribute__((noinline, noclone)) void
bar (unsigned char * __restrict__ a, unsigned char * __restrict__ b,
unsigned char * __restrict__ c, unsigned char * __restrict__ d,
unsigned char * __restrict__ e, int w)
{
int i;
/* N/16 groups of 16 lanes; all five pointers advance by one group.  */
for (i = 0; i < N/16; i++, a += 16, b += 16, c += 16, d += 16, e += 16)
{
e[0] = foo (c[0], d[0], a[0] * w, b[0] * w);
e[1] = foo (c[1], d[1], a[1] * w, b[1] * w);
e[2] = foo (c[2], d[2], a[2] * w, b[2] * w);
e[3] = foo (c[3], d[3], a[3] * w, b[3] * w);
e[4] = foo (c[4], d[4], a[4] * w, b[4] * w);
e[5] = foo (c[5], d[5], a[5] * w, b[5] * w);
e[6] = foo (c[6], d[6], a[6] * w, b[6] * w);
e[7] = foo (c[7], d[7], a[7] * w, b[7] * w);
e[8] = foo (c[8], d[8], a[8] * w, b[8] * w);
e[9] = foo (c[9], d[9], a[9] * w, b[9] * w);
e[10] = foo (c[10], d[10], a[10] * w, b[10] * w);
e[11] = foo (c[11], d[11], a[11] * w, b[11] * w);
e[12] = foo (c[12], d[12], a[12] * w, b[12] * w);
e[13] = foo (c[13], d[13], a[13] * w, b[13] * w);
e[14] = foo (c[14], d[14], a[14] * w, b[14] * w);
e[15] = foo (c[15], d[15], a[15] * w, b[15] * w);
}
}
/* File-scope buffers for bar: inputs a..d, output e (zero-initialized).  */
unsigned char a[N], b[N], c[N], d[N], e[N];
/* Driver: initialize inputs, run the kernel, verify every element.  */
int main ()
{
int i;
check_vect ();
/* Initialize via a switch keyed on i % 9 (with an asm barrier in case 0)
   so the compiler cannot pre-compute c[] and d[] and fold away the
   comparisons in bar.  The branch c[i] >= d[i] is false exactly for
   i % 9 in {0, 3, 6}, i.e. whenever i % 3 == 0.  */
for (i = 0; i < N; i++)
{
a[i] = i;
b[i] = 5;
e[i] = 0;
switch (i % 9)
{
case 0: asm (""); c[i] = i; d[i] = i + 1; break;
case 1: c[i] = 0; d[i] = 0; break;
case 2: c[i] = i + 1; d[i] = i - 1; break;
case 3: c[i] = i; d[i] = i + 7; break;
case 4: c[i] = i; d[i] = i; break;
case 5: c[i] = i + 16; d[i] = i + 3; break;
case 6: c[i] = i - 5; d[i] = i; break;
case 7: c[i] = i; d[i] = i; break;
case 8: c[i] = i; d[i] = i - 7; break;
}
}
bar (a, b, c, d, e, 2);
/* Expect b[i]*2 == 10 where the comparison was false (i % 3 == 0),
   otherwise a[i]*2 == 2*i.  */
for (i = 0; i < N; i++)
if (e[i] != ((i % 3) == 0 ? 10 : 2 * i))
abort ();
return 0;
}
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
/* { dg-require-effective-target vect_condition } */
#include "tree-vect.h"
#define N 128
/* Comparison in short, then/else and result in int. */
/* Select A when X >= Y, otherwise B; the comparison is done in short
   while the selected values and the result are int.  */
static inline int
foo (short x, short y, int a, int b)
{
  if (x < y)
    return b;
  return a;
}
/* Test kernel: for each element, e[k] = (c[k] >= d[k]) ? a[k] : b[k].
   The comparison is done in short while then/else values and the result
   are int, exercising the mixed-size COND_EXPR pattern with non-constant
   clauses.  The body is manually unrolled 16x so the vectorizer can build
   SLP groups from the scalar stores -- the unrolling is essential to what
   the test's scan-tree-dump checks; do not refactor it into a loop.
   noinline/noclone keep the body intact for the dump scan; __restrict__
   asserts the buffers do not alias.  W is accepted for interface parity
   with the companion test but is unused here.  */
__attribute__((noinline, noclone)) void
bar (short * __restrict__ a, short * __restrict__ b,
short * __restrict__ c, short * __restrict__ d,
int * __restrict__ e, int w)
{
int i;
/* A runtime stride (rather than the literal 16) keeps the pointer
   increments from being folded at compile time.  */
int stride = 16;
for (i = 0; i < N/stride; i++, a += stride, b += stride, c += stride,
d += stride, e += stride)
{
e[0] = foo (c[0], d[0], a[0], b[0]);
e[1] = foo (c[1], d[1], a[1], b[1]);
e[2] = foo (c[2], d[2], a[2], b[2]);
e[3] = foo (c[3], d[3], a[3], b[3]);
e[4] = foo (c[4], d[4], a[4], b[4]);
e[5] = foo (c[5], d[5], a[5], b[5]);
e[6] = foo (c[6], d[6], a[6], b[6]);
e[7] = foo (c[7], d[7], a[7], b[7]);
e[8] = foo (c[8], d[8], a[8], b[8]);
e[9] = foo (c[9], d[9], a[9], b[9]);
e[10] = foo (c[10], d[10], a[10], b[10]);
e[11] = foo (c[11], d[11], a[11], b[11]);
e[12] = foo (c[12], d[12], a[12], b[12]);
e[13] = foo (c[13], d[13], a[13], b[13]);
e[14] = foo (c[14], d[14], a[14], b[14]);
e[15] = foo (c[15], d[15], a[15], b[15]);
}
}
/* File-scope buffers for bar: short inputs a..d, int output e
   (zero-initialized).  */
short a[N], b[N], c[N], d[N];
int e[N];
/* Driver: initialize inputs (including negative shorts, so signedness of
   the short comparison is exercised), run the kernel, verify results.  */
int main ()
{
int i;
check_vect ();
/* Initialize via a switch keyed on i % 9 (with an asm barrier in case 0)
   so the compiler cannot pre-compute c[] and d[] and fold away the
   comparisons in bar.  The branch c[i] >= d[i] is false exactly for
   i % 9 in {0, 3, 6}, i.e. whenever i % 3 == 0.  */
for (i = 0; i < N; i++)
{
a[i] = i;
b[i] = 5;
e[i] = 0;
switch (i % 9)
{
case 0: asm (""); c[i] = - i - 1; d[i] = i + 1; break;
case 1: c[i] = 0; d[i] = 0; break;
case 2: c[i] = i + 1; d[i] = - i - 1; break;
case 3: c[i] = i; d[i] = i + 7; break;
case 4: c[i] = i; d[i] = i; break;
case 5: c[i] = i + 16; d[i] = i + 3; break;
case 6: c[i] = - i - 5; d[i] = - i; break;
case 7: c[i] = - i; d[i] = - i; break;
case 8: c[i] = - i; d[i] = - i - 7; break;
}
}
bar (a, b, c, d, e, 2);
/* Expect b[i] == 5 where the comparison was false (i % 3 == 0),
   otherwise a[i] == i.  */
for (i = 0; i < N; i++)
if (e[i] != ((i % 3) == 0 ? 5 : i))
abort ();
return 0;
}
/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 1 "vect" } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
...@@ -84,18 +84,16 @@ new_pattern_def_seq (stmt_vec_info stmt_info, gimple stmt) ...@@ -84,18 +84,16 @@ new_pattern_def_seq (stmt_vec_info stmt_info, gimple stmt)
append_pattern_def_seq (stmt_info, stmt); append_pattern_def_seq (stmt_info, stmt);
} }
/* Function widened_name_p /* Check whether NAME, an ssa-name used in USE_STMT,
is a result of a type promotion or demotion, such that:
Check whether NAME, an ssa-name used in USE_STMT,
is a result of a type-promotion, such that:
DEF_STMT: NAME = NOP (name0) DEF_STMT: NAME = NOP (name0)
where the type of name0 (HALF_TYPE) is smaller than the type of NAME. where the type of name0 (ORIG_TYPE) is smaller/bigger than the type of NAME.
If CHECK_SIGN is TRUE, check that either both types are signed or both are If CHECK_SIGN is TRUE, check that either both types are signed or both are
unsigned. */ unsigned. */
static bool static bool
widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt, type_conversion_p (tree name, gimple use_stmt, bool check_sign,
bool check_sign) tree *orig_type, gimple *def_stmt, bool *promotion)
{ {
tree dummy; tree dummy;
gimple dummy_gimple; gimple dummy_gimple;
...@@ -118,21 +116,27 @@ widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt, ...@@ -118,21 +116,27 @@ widened_name_p (tree name, gimple use_stmt, tree *half_type, gimple *def_stmt,
&& dt != vect_external_def && dt != vect_constant_def) && dt != vect_external_def && dt != vect_constant_def)
return false; return false;
if (! *def_stmt) if (!*def_stmt)
return false; return false;
if (!is_gimple_assign (*def_stmt)) if (!is_gimple_assign (*def_stmt))
return false; return false;
if (gimple_assign_rhs_code (*def_stmt) != NOP_EXPR) if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (*def_stmt)))
return false; return false;
oprnd0 = gimple_assign_rhs1 (*def_stmt); oprnd0 = gimple_assign_rhs1 (*def_stmt);
*half_type = TREE_TYPE (oprnd0); *orig_type = TREE_TYPE (oprnd0);
if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*half_type) if (!INTEGRAL_TYPE_P (type) || !INTEGRAL_TYPE_P (*orig_type)
|| ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*half_type)) && check_sign) || ((TYPE_UNSIGNED (type) != TYPE_UNSIGNED (*orig_type)) && check_sign))
|| (TYPE_PRECISION (type) < (TYPE_PRECISION (*half_type) * 2))) return false;
if (TYPE_PRECISION (type) >= (TYPE_PRECISION (*orig_type) * 2))
*promotion = true;
else if (TYPE_PRECISION (*orig_type) >= (TYPE_PRECISION (type) * 2))
*promotion = false;
else
return false; return false;
if (!vect_is_simple_use (oprnd0, *def_stmt, loop_vinfo, if (!vect_is_simple_use (oprnd0, *def_stmt, loop_vinfo,
...@@ -214,6 +218,7 @@ vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -214,6 +218,7 @@ vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in,
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo); loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
struct loop *loop; struct loop *loop;
tree var; tree var;
bool promotion;
if (!loop_info) if (!loop_info)
return NULL; return NULL;
...@@ -277,7 +282,9 @@ vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -277,7 +282,9 @@ vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in,
return NULL; return NULL;
stmt = last_stmt; stmt = last_stmt;
if (widened_name_p (oprnd0, stmt, &half_type, &def_stmt, true)) if (type_conversion_p (oprnd0, stmt, true, &half_type, &def_stmt,
&promotion)
&& promotion)
{ {
stmt = def_stmt; stmt = def_stmt;
oprnd0 = gimple_assign_rhs1 (stmt); oprnd0 = gimple_assign_rhs1 (stmt);
...@@ -334,10 +341,14 @@ vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -334,10 +341,14 @@ vect_recog_dot_prod_pattern (VEC (gimple, heap) **stmts, tree *type_in,
if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type) if (!types_compatible_p (TREE_TYPE (oprnd0), prod_type)
|| !types_compatible_p (TREE_TYPE (oprnd1), prod_type)) || !types_compatible_p (TREE_TYPE (oprnd1), prod_type))
return NULL; return NULL;
if (!widened_name_p (oprnd0, stmt, &half_type0, &def_stmt, true)) if (!type_conversion_p (oprnd0, stmt, true, &half_type0, &def_stmt,
&promotion)
|| !promotion)
return NULL; return NULL;
oprnd00 = gimple_assign_rhs1 (def_stmt); oprnd00 = gimple_assign_rhs1 (def_stmt);
if (!widened_name_p (oprnd1, stmt, &half_type1, &def_stmt, true)) if (!type_conversion_p (oprnd0, stmt, true, &half_type1, &def_stmt,
&promotion)
|| !promotion)
return NULL; return NULL;
oprnd01 = gimple_assign_rhs1 (def_stmt); oprnd01 = gimple_assign_rhs1 (def_stmt);
if (!types_compatible_p (half_type0, half_type1)) if (!types_compatible_p (half_type0, half_type1))
...@@ -552,6 +563,7 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts, ...@@ -552,6 +563,7 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
int dummy_int; int dummy_int;
VEC (tree, heap) *dummy_vec; VEC (tree, heap) *dummy_vec;
bool op1_ok; bool op1_ok;
bool promotion;
if (!is_gimple_assign (last_stmt)) if (!is_gimple_assign (last_stmt))
return NULL; return NULL;
...@@ -571,12 +583,15 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts, ...@@ -571,12 +583,15 @@ vect_recog_widen_mult_pattern (VEC (gimple, heap) **stmts,
return NULL; return NULL;
/* Check argument 0. */ /* Check argument 0. */
if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0, false)) if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
return NULL; &promotion)
|| !promotion)
return NULL;
/* Check argument 1. */ /* Check argument 1. */
op1_ok = widened_name_p (oprnd1, last_stmt, &half_type1, &def_stmt1, false); op1_ok = type_conversion_p (oprnd1, last_stmt, false, &half_type1,
&def_stmt1, &promotion);
if (op1_ok) if (op1_ok && promotion)
{ {
oprnd0 = gimple_assign_rhs1 (def_stmt0); oprnd0 = gimple_assign_rhs1 (def_stmt0);
oprnd1 = gimple_assign_rhs1 (def_stmt1); oprnd1 = gimple_assign_rhs1 (def_stmt1);
...@@ -816,6 +831,7 @@ vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -816,6 +831,7 @@ vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in,
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo); loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
struct loop *loop; struct loop *loop;
tree var; tree var;
bool promotion;
if (!loop_info) if (!loop_info)
return NULL; return NULL;
...@@ -855,8 +871,10 @@ vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -855,8 +871,10 @@ vect_recog_widen_sum_pattern (VEC (gimple, heap) **stmts, tree *type_in,
Left to check that oprnd0 is defined by a cast from type 'type' to type Left to check that oprnd0 is defined by a cast from type 'type' to type
'TYPE'. */ 'TYPE'. */
if (!widened_name_p (oprnd0, last_stmt, &half_type, &stmt, true)) if (!type_conversion_p (oprnd0, last_stmt, true, &half_type, &stmt,
return NULL; &promotion)
|| !promotion)
return NULL;
oprnd0 = gimple_assign_rhs1 (stmt); oprnd0 = gimple_assign_rhs1 (stmt);
*type_in = half_type; *type_in = half_type;
...@@ -922,6 +940,7 @@ vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type, ...@@ -922,6 +940,7 @@ vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type,
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (vinfo_for_stmt (stmt)); loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (vinfo_for_stmt (stmt));
bb_vec_info bb_info = STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt)); bb_vec_info bb_info = STMT_VINFO_BB_VINFO (vinfo_for_stmt (stmt));
struct loop *loop = NULL; struct loop *loop = NULL;
bool promotion;
if (loop_info) if (loop_info)
loop = LOOP_VINFO_LOOP (loop_info); loop = LOOP_VINFO_LOOP (loop_info);
...@@ -956,7 +975,9 @@ vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type, ...@@ -956,7 +975,9 @@ vect_operation_fits_smaller_type (gimple stmt, tree def, tree *new_type,
else else
{ {
first = true; first = true;
if (!widened_name_p (oprnd, stmt, &half_type, &def_stmt, false) if (!type_conversion_p (oprnd, stmt, false, &half_type, &def_stmt,
&promotion)
|| !promotion
|| !gimple_bb (def_stmt) || !gimple_bb (def_stmt)
|| (loop && !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))) || (loop && !flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
|| (!loop && gimple_bb (def_stmt) != BB_VINFO_BB (bb_info) || (!loop && gimple_bb (def_stmt) != BB_VINFO_BB (bb_info)
...@@ -1350,6 +1371,7 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts, ...@@ -1350,6 +1371,7 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
VEC (tree, heap) * dummy_vec; VEC (tree, heap) * dummy_vec;
gimple use_stmt = NULL; gimple use_stmt = NULL;
bool over_widen = false; bool over_widen = false;
bool promotion;
if (!is_gimple_assign (last_stmt) || !vinfo_for_stmt (last_stmt)) if (!is_gimple_assign (last_stmt) || !vinfo_for_stmt (last_stmt))
return NULL; return NULL;
...@@ -1404,8 +1426,10 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts, ...@@ -1404,8 +1426,10 @@ vect_recog_widen_shift_pattern (VEC (gimple, heap) **stmts,
return NULL; return NULL;
/* Check operand 0: it has to be defined by a type promotion. */ /* Check operand 0: it has to be defined by a type promotion. */
if (!widened_name_p (oprnd0, last_stmt, &half_type0, &def_stmt0, false)) if (!type_conversion_p (oprnd0, last_stmt, false, &half_type0, &def_stmt0,
return NULL; &promotion)
|| !promotion)
return NULL;
/* Check operand 1: has to be positive. We check that it fits the type /* Check operand 1: has to be positive. We check that it fits the type
in vect_handle_widen_op_by_const (). */ in vect_handle_widen_op_by_const (). */
...@@ -1847,9 +1871,9 @@ vect_recog_sdivmod_pow2_pattern (VEC (gimple, heap) **stmts, ...@@ -1847,9 +1871,9 @@ vect_recog_sdivmod_pow2_pattern (VEC (gimple, heap) **stmts,
S1 a_T = x_t CMP y_t ? b_T : c_T; S1 a_T = x_t CMP y_t ? b_T : c_T;
where type 'TYPE' is an integral type which has different size where type 'TYPE' is an integral type which has different size
from 'type'. b_T and c_T are constants and if 'TYPE' is wider from 'type'. b_T and c_T are either constants (and if 'TYPE' is wider
than 'type', the constants need to fit into an integer type than 'type', the constants need to fit into an integer type
with the same width as 'type'. with the same width as 'type') or results of conversion from 'type'.
Input: Input:
...@@ -1874,11 +1898,15 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -1874,11 +1898,15 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in,
gimple last_stmt = VEC_index (gimple, *stmts, 0); gimple last_stmt = VEC_index (gimple, *stmts, 0);
tree cond_expr, then_clause, else_clause; tree cond_expr, then_clause, else_clause;
stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt), def_stmt_info; stmt_vec_info stmt_vinfo = vinfo_for_stmt (last_stmt), def_stmt_info;
tree type, vectype, comp_vectype, itype, vecitype; tree type, vectype, comp_vectype, itype = NULL_TREE, vecitype;
enum machine_mode cmpmode; enum machine_mode cmpmode;
gimple pattern_stmt, def_stmt; gimple pattern_stmt, def_stmt;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo); loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo); bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_vinfo);
tree orig_type0 = NULL_TREE, orig_type1 = NULL_TREE;
gimple def_stmt0 = NULL, def_stmt1 = NULL;
bool promotion;
tree comp_scalar_type;
if (!is_gimple_assign (last_stmt) if (!is_gimple_assign (last_stmt)
|| gimple_assign_rhs_code (last_stmt) != COND_EXPR || gimple_assign_rhs_code (last_stmt) != COND_EXPR
...@@ -1889,19 +1917,50 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -1889,19 +1917,50 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in,
then_clause = gimple_assign_rhs2 (last_stmt); then_clause = gimple_assign_rhs2 (last_stmt);
else_clause = gimple_assign_rhs3 (last_stmt); else_clause = gimple_assign_rhs3 (last_stmt);
if (TREE_CODE (then_clause) != INTEGER_CST
|| TREE_CODE (else_clause) != INTEGER_CST)
return NULL;
if (!COMPARISON_CLASS_P (cond_expr)) if (!COMPARISON_CLASS_P (cond_expr))
return NULL; return NULL;
comp_vectype comp_scalar_type = TREE_TYPE (TREE_OPERAND (cond_expr, 0));
= get_vectype_for_scalar_type (TREE_TYPE (TREE_OPERAND (cond_expr, 0))); comp_vectype = get_vectype_for_scalar_type (comp_scalar_type);
if (comp_vectype == NULL_TREE) if (comp_vectype == NULL_TREE)
return NULL; return NULL;
type = gimple_expr_type (last_stmt); type = gimple_expr_type (last_stmt);
if (types_compatible_p (type, comp_scalar_type)
|| ((TREE_CODE (then_clause) != INTEGER_CST
|| TREE_CODE (else_clause) != INTEGER_CST)
&& !INTEGRAL_TYPE_P (comp_scalar_type))
|| !INTEGRAL_TYPE_P (type))
return NULL;
if ((TREE_CODE (then_clause) != INTEGER_CST
&& !type_conversion_p (then_clause, last_stmt, false, &orig_type0,
&def_stmt0, &promotion))
|| (TREE_CODE (else_clause) != INTEGER_CST
&& !type_conversion_p (else_clause, last_stmt, false, &orig_type1,
&def_stmt1, &promotion)))
return NULL;
if (orig_type0 && orig_type1
&& !types_compatible_p (orig_type0, orig_type1))
return NULL;
if (orig_type0)
{
if (!types_compatible_p (orig_type0, comp_scalar_type))
return NULL;
then_clause = gimple_assign_rhs1 (def_stmt0);
itype = orig_type0;
}
if (orig_type1)
{
if (!types_compatible_p (orig_type1, comp_scalar_type))
return NULL;
else_clause = gimple_assign_rhs1 (def_stmt1);
itype = orig_type1;
}
cmpmode = GET_MODE_INNER (TYPE_MODE (comp_vectype)); cmpmode = GET_MODE_INNER (TYPE_MODE (comp_vectype));
if (GET_MODE_BITSIZE (TYPE_MODE (type)) == GET_MODE_BITSIZE (cmpmode)) if (GET_MODE_BITSIZE (TYPE_MODE (type)) == GET_MODE_BITSIZE (cmpmode))
...@@ -1914,8 +1973,10 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -1914,8 +1973,10 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in,
if (expand_vec_cond_expr_p (vectype, comp_vectype)) if (expand_vec_cond_expr_p (vectype, comp_vectype))
return NULL; return NULL;
itype = build_nonstandard_integer_type (GET_MODE_BITSIZE (cmpmode), if (itype == NULL_TREE)
TYPE_UNSIGNED (type)); itype = build_nonstandard_integer_type (GET_MODE_BITSIZE (cmpmode),
TYPE_UNSIGNED (type));
if (itype == NULL_TREE if (itype == NULL_TREE
|| GET_MODE_BITSIZE (TYPE_MODE (itype)) != GET_MODE_BITSIZE (cmpmode)) || GET_MODE_BITSIZE (TYPE_MODE (itype)) != GET_MODE_BITSIZE (cmpmode))
return NULL; return NULL;
...@@ -1929,8 +1990,10 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in, ...@@ -1929,8 +1990,10 @@ vect_recog_mixed_size_cond_pattern (VEC (gimple, heap) **stmts, tree *type_in,
if (GET_MODE_BITSIZE (TYPE_MODE (type)) > GET_MODE_BITSIZE (cmpmode)) if (GET_MODE_BITSIZE (TYPE_MODE (type)) > GET_MODE_BITSIZE (cmpmode))
{ {
if (!int_fits_type_p (then_clause, itype) if ((TREE_CODE (then_clause) == INTEGER_CST
|| !int_fits_type_p (else_clause, itype)) && !int_fits_type_p (then_clause, itype))
|| (TREE_CODE (else_clause) == INTEGER_CST
&& !int_fits_type_p (else_clause, itype)))
return NULL; return NULL;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment