Commit 7b7b1813 by Richard Guenther, committed by Richard Biener

tree-vect-stmts.c (vect_get_vec_def_for_operand): Convert constants to vector element type.

2011-10-24  Richard Guenther  <rguenther@suse.de>

	* tree-vect-stmts.c (vect_get_vec_def_for_operand): Convert constants
	to vector element type.
	(vectorizable_assignment): Bail out for non-mode-precision operations.
	(vectorizable_shift): Likewise.
	(vectorizable_operation): Likewise.
	(vectorizable_type_demotion): Likewise.
	(vectorizable_type_promotion): Likewise.
	(vectorizable_store): Handle non-mode-precision stores.
	(vectorizable_load): Handle non-mode-precision loads.
	(get_vectype_for_scalar_type_and_size): Return a vector type
	for non-mode-precision integers.
	* tree-vect-loop.c (vectorizable_reduction): Bail out for
	non-mode-precision reductions.

	* gcc.dg/vect/vect-bool-1.c: New testcase.

From-SVN: r180384
parent 695074be
2011-10-24 Richard Guenther <rguenther@suse.de>
* tree-vect-stmts.c (vect_get_vec_def_for_operand): Convert constants
to vector element type.
(vectorizable_assignment): Bail out for non-mode-precision operations.
(vectorizable_shift): Likewise.
(vectorizable_operation): Likewise.
(vectorizable_type_demotion): Likewise.
(vectorizable_type_promotion): Likewise.
(vectorizable_store): Handle non-mode-precision stores.
(vectorizable_load): Handle non-mode-precision loads.
(get_vectype_for_scalar_type_and_size): Return a vector type
for non-mode-precision integers.
* tree-vect-loop.c (vectorizable_reduction): Bail out for
non-mode-precision reductions.
2011-10-24 Julian Brown <julian@codesourcery.com>
* config/m68k/m68k.c (notice_update_cc): Tighten condition for
2011-10-24 Richard Guenther <rguenther@suse.de>
* gcc.dg/vect/vect-bool-1.c: New testcase.
2011-10-24 Richard Guenther <rguenther@suse.de>
PR tree-optimization/50838
* gcc.dg/torture/pr50838.c: New testcase.
......
/* { dg-do compile } */
/* { dg-require-effective-target vect_int } */

/* Testcase for vectorization of bit-precision (_Bool) element types.
   Per the commit this test belongs to (r180384), the vectorizer
   previously refused vector types for integers whose TYPE_PRECISION
   does not match their mode precision (e.g. _Bool); it now builds a
   mode-precision element type instead, so this loop should vectorize.  */

/* Global _Bool arrays: destination and the two bitwise-OR operands.  */
_Bool a[1024];
_Bool b[1024];
_Bool c[1024];

/* Element-wise a[i] = b[i] | c[i] over all 1024 elements.
   Bitwise | (not ||) keeps the loop branch-free and vectorizable.  */
void foo (void)
{
unsigned i;
for (i = 0; i < 1024; ++i)
a[i] = b[i] | c[i];
}

/* Expect exactly one vectorized loop in the vect dump, then clean up.  */
/* { dg-final { scan-tree-dump "vectorized 1 loops" "vect" } } */
/* { dg-final { cleanup-tree-dump "vect" } } */
...@@ -4422,6 +4422,11 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi, ...@@ -4422,6 +4422,11 @@ vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi,
&& !SCALAR_FLOAT_TYPE_P (scalar_type)) && !SCALAR_FLOAT_TYPE_P (scalar_type))
return false; return false;
/* Do not try to vectorize bit-precision reductions. */
if ((TYPE_PRECISION (scalar_type)
!= GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
return false;
/* All uses but the last are expected to be defined in the loop. /* All uses but the last are expected to be defined in the loop.
The last use is the reduction variable. In case of nested cycle this The last use is the reduction variable. In case of nested cycle this
assumption is not true: we use reduc_index to record the index of the assumption is not true: we use reduc_index to record the index of the
......
...@@ -1204,7 +1204,9 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def) ...@@ -1204,7 +1204,9 @@ vect_get_vec_def_for_operand (tree op, gimple stmt, tree *scalar_def)
if (vect_print_dump_info (REPORT_DETAILS)) if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits); fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
vec_cst = build_vector_from_val (vector_type, op); vec_cst = build_vector_from_val (vector_type,
fold_convert (TREE_TYPE (vector_type),
op));
return vect_init_vector (stmt, vec_cst, vector_type, NULL); return vect_init_vector (stmt, vec_cst, vector_type, NULL);
} }
...@@ -2173,6 +2175,25 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi, ...@@ -2173,6 +2175,25 @@ vectorizable_assignment (gimple stmt, gimple_stmt_iterator *gsi,
!= GET_MODE_SIZE (TYPE_MODE (vectype_in))))) != GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
return false; return false;
/* We do not handle bit-precision changes. */
if ((CONVERT_EXPR_CODE_P (code)
|| code == VIEW_CONVERT_EXPR)
&& INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
&& ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
|| ((TYPE_PRECISION (TREE_TYPE (op))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
/* But a conversion that does not change the bit-pattern is ok. */
&& !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
> TYPE_PRECISION (TREE_TYPE (op)))
&& TYPE_UNSIGNED (TREE_TYPE (op))))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "type conversion to/from bit-precision "
"unsupported.");
return false;
}
if (!vec_stmt) /* transformation not required. */ if (!vec_stmt) /* transformation not required. */
{ {
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type; STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
...@@ -2326,6 +2347,13 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi, ...@@ -2326,6 +2347,13 @@ vectorizable_shift (gimple stmt, gimple_stmt_iterator *gsi,
scalar_dest = gimple_assign_lhs (stmt); scalar_dest = gimple_assign_lhs (stmt);
vectype_out = STMT_VINFO_VECTYPE (stmt_info); vectype_out = STMT_VINFO_VECTYPE (stmt_info);
if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "bit-precision shifts not supported.");
return false;
}
op0 = gimple_assign_rhs1 (stmt); op0 = gimple_assign_rhs1 (stmt);
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo, if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
...@@ -2660,6 +2688,20 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi, ...@@ -2660,6 +2688,20 @@ vectorizable_operation (gimple stmt, gimple_stmt_iterator *gsi,
scalar_dest = gimple_assign_lhs (stmt); scalar_dest = gimple_assign_lhs (stmt);
vectype_out = STMT_VINFO_VECTYPE (stmt_info); vectype_out = STMT_VINFO_VECTYPE (stmt_info);
/* Most operations cannot handle bit-precision types without extra
truncations. */
if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
/* Exception are bitwise binary operations. */
&& code != BIT_IOR_EXPR
&& code != BIT_XOR_EXPR
&& code != BIT_AND_EXPR)
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "bit-precision arithmetic not supported.");
return false;
}
op0 = gimple_assign_rhs1 (stmt); op0 = gimple_assign_rhs1 (stmt);
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo, if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype)) &def_stmt, &def, &dt[0], &vectype))
...@@ -3082,9 +3124,20 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi, ...@@ -3082,9 +3124,20 @@ vectorizable_type_demotion (gimple stmt, gimple_stmt_iterator *gsi,
if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest)) if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
&& INTEGRAL_TYPE_P (TREE_TYPE (op0))) && INTEGRAL_TYPE_P (TREE_TYPE (op0)))
|| (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest)) || (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
&& SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0)) && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0)))))
&& CONVERT_EXPR_CODE_P (code))))
return false; return false;
if (INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
&& ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
|| ((TYPE_PRECISION (TREE_TYPE (op0))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op0)))))))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "type demotion to/from bit-precision unsupported.");
return false;
}
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo, if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in)) &def_stmt, &def, &dt[0], &vectype_in))
{ {
...@@ -3365,6 +3418,19 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi, ...@@ -3365,6 +3418,19 @@ vectorizable_type_promotion (gimple stmt, gimple_stmt_iterator *gsi,
&& SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0)) && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
&& CONVERT_EXPR_CODE_P (code)))) && CONVERT_EXPR_CODE_P (code))))
return false; return false;
if (INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
&& ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
|| ((TYPE_PRECISION (TREE_TYPE (op0))
!= GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op0)))))))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "type promotion to/from bit-precision "
"unsupported.");
return false;
}
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo, if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in)) &def_stmt, &def, &dt[0], &vectype_in))
{ {
...@@ -3673,17 +3739,9 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, ...@@ -3673,17 +3739,9 @@ vectorizable_store (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return false; return false;
} }
/* The scalar rhs type needs to be trivially convertible to the vector
component type. This should always be the case. */
elem_type = TREE_TYPE (vectype); elem_type = TREE_TYPE (vectype);
if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
return false;
}
vec_mode = TYPE_MODE (vectype); vec_mode = TYPE_MODE (vectype);
/* FORNOW. In some cases can vectorize even if data-type not supported /* FORNOW. In some cases can vectorize even if data-type not supported
(e.g. - array initialization with 0). */ (e.g. - array initialization with 0). */
if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing) if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
...@@ -4117,7 +4175,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, ...@@ -4117,7 +4175,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
bool strided_load = false; bool strided_load = false;
bool load_lanes_p = false; bool load_lanes_p = false;
gimple first_stmt; gimple first_stmt;
tree scalar_type;
bool inv_p; bool inv_p;
bool negative; bool negative;
bool compute_in_loop = false; bool compute_in_loop = false;
...@@ -4192,7 +4249,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, ...@@ -4192,7 +4249,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return false; return false;
} }
scalar_type = TREE_TYPE (DR_REF (dr)); elem_type = TREE_TYPE (vectype);
mode = TYPE_MODE (vectype); mode = TYPE_MODE (vectype);
/* FORNOW. In some cases can vectorize even if data-type not supported /* FORNOW. In some cases can vectorize even if data-type not supported
...@@ -4204,16 +4261,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, ...@@ -4204,16 +4261,6 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
return false; return false;
} }
/* The vector component type needs to be trivially convertible to the
scalar lhs. This should always be the case. */
elem_type = TREE_TYPE (vectype);
if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "??? operands of different types");
return false;
}
/* Check if the load is a part of an interleaving chain. */ /* Check if the load is a part of an interleaving chain. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info)) if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
{ {
...@@ -4560,7 +4607,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, ...@@ -4560,7 +4607,7 @@ vectorizable_load (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt,
msq = new_temp; msq = new_temp;
bump = size_binop (MULT_EXPR, vs_minus_1, bump = size_binop (MULT_EXPR, vs_minus_1,
TYPE_SIZE_UNIT (scalar_type)); TYPE_SIZE_UNIT (elem_type));
ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump); ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
new_stmt = gimple_build_assign_with_ops new_stmt = gimple_build_assign_with_ops
(BIT_AND_EXPR, NULL_TREE, ptr, (BIT_AND_EXPR, NULL_TREE, ptr,
...@@ -5441,13 +5488,14 @@ get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size) ...@@ -5441,13 +5488,14 @@ get_vectype_for_scalar_type_and_size (tree scalar_type, unsigned size)
if (nbytes < TYPE_ALIGN_UNIT (scalar_type)) if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
return NULL_TREE; return NULL_TREE;
/* If we'd build a vector type of elements whose mode precision doesn't /* For vector types of elements whose mode precision doesn't
match their types precision we'll get mismatched types on vector match their types precision we use a element type of mode
extracts via BIT_FIELD_REFs. This effectively means we disable precision. The vectorization routines will have to make sure
vectorization of bool and/or enum types in some languages. */ they support the proper result truncation/extension. */
if (INTEGRAL_TYPE_P (scalar_type) if (INTEGRAL_TYPE_P (scalar_type)
&& GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type)) && GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
return NULL_TREE; scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
TYPE_UNSIGNED (scalar_type));
if (GET_MODE_CLASS (inner_mode) != MODE_INT if (GET_MODE_CLASS (inner_mode) != MODE_INT
&& GET_MODE_CLASS (inner_mode) != MODE_FLOAT) && GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment