Commit cb2a532e by Aldy Hernandez

simd-1.c: New.


2002-06-16  Aldy Hernandez  <aldyh@redhat.com>

	* gcc.c-torture/execute/simd-1.c: New.

	* gcc.dg/simd-1.c: New.

	* doc/extend.texi (Vector Extensions): Document that we can
	specify simd types not specifically supported by the hardware.
	Document that simd types can be used as function arguments.
	Document that signedness does make a difference in SIMD types.
	Misc cleanups and revisions to the "vector extensions" section.

	* simplify-rtx.c (simplify_subreg): Simplify subregs of vector
	constants.

	* expr.c (vector_mode_valid_p): New.

	* expr.h: Add vector_mode_valid_p.

	* defaults.h (VECTOR_MODE_SUPPORTED_P): Set default.

	* emit-rtl.c (immed_double_const): Do not abort on vectors.

	* c-common.c (type_for_mode): Always build vector nodes regardless
	of VECTOR_MODE_SUPPORTED_P.
	(handle_mode_attribute): Error if we can't emulate a nonexisting
	vector mode.
	(handle_vector_size_attribute): Same.

	* optabs.c (expand_binop): Open-code vector operations.
	(expand_unop): Open-code vector unops.
	(expand_vector_binop): New.
	(expand_vector_unop): New.

	* c-typeck.c (build_binary_op): Allow vectors in binops.
	Allow vectors in conditional operators.
	(build_unary_op): Allow vectors in unary minus.

	* config/rs6000/rs6000.h (ALTIVEC_VECTOR_MODE): Conditionalize on
	TARGET_ALTIVEC.

From-SVN: r54727
parent 147d5f6f
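
As a quick illustration (a minimal sketch, not part of the commit), this is the kind of generic SIMD code the change is meant to accept; the function name and the use of the vector_size attribute here are illustrative only. On a target without a matching vector mode, the arithmetic falls back to the open-coded element-wise expansion added in optabs.c below.

/* Illustrative sketch, not from the commit: generic vector arithmetic
   that should now compile even without matching SIMD hardware.  */
typedef int v4si __attribute__ ((vector_size (16)));   /* four ints */

v4si
add_square (v4si a, v4si b)
{
  /* +, -, *, / and unary minus apply element by element; without
     hardware V4SI support each is expanded as four SImode operations.  */
  return a + b * b;
}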
c-common.c:

@@ -1602,38 +1602,33 @@ c_common_type_for_mode (mode, unsignedp)
   if (mode == TYPE_MODE (build_pointer_type (integer_type_node)))
     return build_pointer_type (integer_type_node);
 
-#ifdef VECTOR_MODE_SUPPORTED_P
-  if (VECTOR_MODE_SUPPORTED_P (mode))
-    {
-      switch (mode)
-	{
-	case V16QImode:
-	  return unsignedp ? unsigned_V16QI_type_node : V16QI_type_node;
-	case V8HImode:
-	  return unsignedp ? unsigned_V8HI_type_node : V8HI_type_node;
-	case V4SImode:
-	  return unsignedp ? unsigned_V4SI_type_node : V4SI_type_node;
-	case V2DImode:
-	  return unsignedp ? unsigned_V2DI_type_node : V2DI_type_node;
-	case V2SImode:
-	  return unsignedp ? unsigned_V2SI_type_node : V2SI_type_node;
-	case V4HImode:
-	  return unsignedp ? unsigned_V4HI_type_node : V4HI_type_node;
-	case V8QImode:
-	  return unsignedp ? unsigned_V8QI_type_node : V8QI_type_node;
-	case V16SFmode:
-	  return V16SF_type_node;
-	case V4SFmode:
-	  return V4SF_type_node;
-	case V2SFmode:
-	  return V2SF_type_node;
-	case V2DFmode:
-	  return V2DF_type_node;
-	default:
-	  break;
-	}
-    }
-#endif
+  switch (mode)
+    {
+    case V16QImode:
+      return unsignedp ? unsigned_V16QI_type_node : V16QI_type_node;
+    case V8HImode:
+      return unsignedp ? unsigned_V8HI_type_node : V8HI_type_node;
+    case V4SImode:
+      return unsignedp ? unsigned_V4SI_type_node : V4SI_type_node;
+    case V2DImode:
+      return unsignedp ? unsigned_V2DI_type_node : V2DI_type_node;
+    case V2SImode:
+      return unsignedp ? unsigned_V2SI_type_node : V2SI_type_node;
+    case V4HImode:
+      return unsignedp ? unsigned_V4HI_type_node : V4HI_type_node;
+    case V8QImode:
+      return unsignedp ? unsigned_V8QI_type_node : V8QI_type_node;
+    case V16SFmode:
+      return V16SF_type_node;
+    case V4SFmode:
+      return V4SF_type_node;
+    case V2SFmode:
+      return V2SF_type_node;
+    case V2DFmode:
+      return V2DF_type_node;
+    default:
+      break;
+    }
 
   return 0;
 }

@@ -5058,8 +5053,20 @@ handle_mode_attribute (node, name, args, flags, no_add_attrs)
						(mode, TREE_UNSIGNED (type))))
 	error ("no data type for mode `%s'", p);
       else
-	*node = typefm;
-	/* No need to layout the type here.  The caller should do this.  */
+	{
+	  /* If this is a vector, make sure we either have hardware
+	     support, or we can emulate it.  */
+	  if ((GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+	       || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+	      && !vector_mode_valid_p (mode))
+	    {
+	      error ("unable to emulate '%s'", GET_MODE_NAME (mode));
+	      return NULL_TREE;
+	    }
+
+	  *node = typefm;
+	  /* No need to layout the type here.  The caller should do this.  */
+	}
     }
 
   return NULL_TREE;

@@ -5604,6 +5611,16 @@ handle_vector_size_attribute (node, name, args, flags, no_add_attrs)
   new_type = build_type_copy (new_type);
 
+  /* If this is a vector, make sure we either have hardware
+     support, or we can emulate it.  */
+  if ((GET_MODE_CLASS (mode) == MODE_VECTOR_INT
+       || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+      && !vector_mode_valid_p (mode))
+    {
+      error ("unable to emulate '%s'", GET_MODE_NAME (mode));
+      return NULL_TREE;
+    }
+
   /* Set the debug information here, because this is the only
      place where we know the underlying type for a vector made
      with vector_size.  For debugging purposes we pretend a vector
c-typeck.c:

@@ -2046,9 +2046,9 @@ build_binary_op (code, orig_op0, orig_op1, convert_p)
 	warning ("division by zero");
 
       if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
-	   || code0 == COMPLEX_TYPE)
+	   || code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE)
 	  && (code1 == INTEGER_TYPE || code1 == REAL_TYPE
-	      || code1 == COMPLEX_TYPE))
+	      || code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE))
 	{
 	  if (!(code0 == INTEGER_TYPE && code1 == INTEGER_TYPE))
 	    resultcode = RDIV_EXPR;

@@ -2197,9 +2197,11 @@ build_binary_op (code, orig_op0, orig_op1, convert_p)
 	 but don't convert the args to int!  */
       build_type = integer_type_node;
       if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
-	   || code0 == COMPLEX_TYPE)
+	   || code0 == COMPLEX_TYPE
+	   || code0 == VECTOR_TYPE)
 	  && (code1 == INTEGER_TYPE || code1 == REAL_TYPE
-	      || code1 == COMPLEX_TYPE))
+	      || code1 == COMPLEX_TYPE
+	      || code1 == VECTOR_TYPE))
 	short_compare = 1;
       else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
 	{

@@ -2342,9 +2344,11 @@ build_binary_op (code, orig_op0, orig_op1, convert_p)
       break;
     }
 
-  if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE)
+  if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE
+       || code0 == VECTOR_TYPE)
      &&
-      (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE))
+      (code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE
+       || code1 == VECTOR_TYPE))
     {
      int none_complex = (code0 != COMPLEX_TYPE && code1 != COMPLEX_TYPE);

@@ -2763,7 +2767,8 @@ build_unary_op (code, xarg, flag)
     case NEGATE_EXPR:
       if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
-	    || typecode == COMPLEX_TYPE))
+	    || typecode == COMPLEX_TYPE
+	    || typecode == VECTOR_TYPE))
 	{
 	  error ("wrong type argument to unary minus");
 	  return error_mark_node;
@@ -4079,7 +4084,7 @@ convert_for_assignment (type, rhs, errtype, fundecl, funname, parmnum)
   else if ((codel == INTEGER_TYPE || codel == REAL_TYPE
	     || codel == ENUMERAL_TYPE || codel == COMPLEX_TYPE
	     || codel == BOOLEAN_TYPE)
	    && (coder == INTEGER_TYPE || coder == REAL_TYPE
	        || coder == ENUMERAL_TYPE || coder == COMPLEX_TYPE
	        || coder == BOOLEAN_TYPE))
     return convert_and_check (type, rhs);
config/rs6000/rs6000.h:

@@ -817,10 +817,11 @@ extern int rs6000_default_long_calls;
    : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
 
 #define ALTIVEC_VECTOR_MODE(MODE)	\
-	((MODE) == V16QImode		\
-	 || (MODE) == V8HImode		\
-	 || (MODE) == V4SFmode		\
-	 || (MODE) == V4SImode)
+	(TARGET_ALTIVEC &&		\
+	 ((MODE) == V16QImode		\
+	  || (MODE) == V8HImode		\
+	  || (MODE) == V4SFmode		\
+	  || (MODE) == V4SImode))
 
 /* Define this macro to be nonzero if the port is prepared to handle
    insns involving vector mode MODE.  At the very least, it must have
defaults.h:

@@ -513,6 +513,10 @@ You Lose!  You must define PREFERRED_DEBUGGING_TYPE!
 #define UNLIKELY_EXECUTED_TEXT_SECTION_NAME "text.unlikely"
 #endif
 
+#ifndef VECTOR_MODE_SUPPORTED_P
+#define VECTOR_MODE_SUPPORTED_P(MODE) 0
+#endif
+
 /* Determine whether __cxa_atexit, rather than atexit, is used to
    register C++ destructors for local statics and global objects.  */
 #ifndef DEFAULT_USE_CXA_ATEXIT
doc/extend.texi:

@@ -4373,28 +4373,52 @@ A floating point value, as wide as a SI mode integer, usually 32 bits.
 A floating point value, as wide as a DI mode integer, usually 64 bits.
 @end table
 
+Not all base types or combinations are always valid; which modes can be used
+is determined by the target machine.  For example, if targetting the i386 MMX
+extensions, only @code{V8QI}, @code{V4HI} and @code{V2SI} are allowed modes.
+
 There are no @code{V1xx} vector modes - they would be identical to the
 corresponding base mode.
 
-There is no distinction between signed and unsigned vector modes.  This
-distinction is made by the operations that perform on the vectors, not
-by the data type.
-
-The types defined in this manner are somewhat special, they cannot be
-used with most normal C operations (i.e., a vector addition can @emph{not}
-be represented by a normal addition of two vector type variables).  You
-can declare only variables and use them in function calls and returns, as
-well as in assignments and some casts.  It is possible to cast from one
-vector type to another, provided they are of the same size (in fact, you
-can also cast vectors to and from other datatypes of the same size).
-
-A port that supports vector operations provides a set of built-in functions
-that can be used to operate on vectors.  For example, a function to add two
-vectors and multiply the result by a third could look like this:
+Specifying a combination that is not valid for the current architecture
+will cause gcc to synthesize the instructions using a narrower mode.
+For example, if you specify a variable of type @code{V4SI} and your
+architecture does not allow for this specific SIMD type, gcc will
+produce code that uses 4 @code{SIs}.
+
+The types defined in this manner can be used with a subset of normal C
+operations.  Currently, gcc will allow using the following operators on
+these types: @code{+, -, *, /, unary minus}@.
+
+The operations behave like C++ @code{valarrays}.  Addition is defined as
+the addition of the corresponding elements of the operands.  For
+example, in the code below, each of the 4 elements in @var{a} will be
+added to the corresponding 4 elements in @var{b} and the resulting
+vector will be stored in @var{c}.
+
+@example
+typedef int v4si __attribute__ ((mode(V4SI)));
+v4si a, b, c;
+c = a + b;
+@end example
+
+Subtraction, multiplication, and division operate in a similar manner.
+Likewise, the result of using the unary minus operator on a vector type
+is a vector whose elements are the negative value of the corresponding
+elements in the operand.
+
+You can declare variables and use them in function calls and returns, as
+well as in assignments and some casts.  You can specify a vector type as
+a return type for a function.  Vector types can also be used as function
+arguments.  It is possible to cast from one vector type to another,
+provided they are of the same size (in fact, you can also cast vectors
+to and from other datatypes of the same size).
+
+You cannot operate between vectors of different lengths or different
+signness without a cast.
+
+A port that supports hardware vector operations, usually provides a set
+of built-in functions that can be used to operate on vectors.  For
+example, a function to add two vectors and multiply the result by a
+third could look like this:
 
 @example
 v4si f (v4si a, v4si b, v4si c)
emit-rtl.c:

@@ -421,7 +421,10 @@ immed_double_const (i0, i1, mode)
 {
   int width;
 
   if (GET_MODE_CLASS (mode) != MODE_INT
-      && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
+      && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT
+      /* We can get a 0 for an error mark.  */
+      && GET_MODE_CLASS (mode) != MODE_VECTOR_INT
+      && GET_MODE_CLASS (mode) != MODE_VECTOR_FLOAT)
     abort ();
 
   /* We clear out all bits that don't belong in MODE, unless they and
expr.c:

@@ -10791,4 +10791,34 @@ try_tablejump (index_type, index_expr, minval, range,
   return 1;
 }
 
+/* Nonzero if the mode is a valid vector mode for this architecture.
+   This returns nonzero even if there is no hardware support for the
+   vector mode, but we can emulate with narrower modes.  */
+
+int
+vector_mode_valid_p (mode)
+     enum machine_mode mode;
+{
+  enum mode_class class = GET_MODE_CLASS (mode);
+  enum machine_mode innermode;
+
+  /* Doh!  What's going on?  */
+  if (class != MODE_VECTOR_INT
+      && class != MODE_VECTOR_FLOAT)
+    return 0;
+
+  /* Hardware support.  Woo hoo!  */
+  if (VECTOR_MODE_SUPPORTED_P (mode))
+    return 1;
+
+  innermode = GET_MODE_INNER (mode);
+
+  /* We should probably return 1 if requesting V4DI and we have no DI,
+     but we have V2DI, but this is probably very unlikely.  */
+
+  /* If we have support for the inner mode, we can safely emulate it.
+     We may not have V2DI, but me can emulate with a pair of DIs.  */
+  return mov_optab->handlers[innermode].insn_code != CODE_FOR_nothing;
+}
+
 #include "gt-expr.h"
expr.h:

@@ -786,3 +786,5 @@ extern void do_jump_by_parts_greater_rtx PARAMS ((enum machine_mode,
 extern void mark_seen_cases PARAMS ((tree, unsigned char *,
				      HOST_WIDE_INT, int));
 #endif
+
+extern int vector_mode_valid_p PARAMS ((enum machine_mode));
optabs.c:

@@ -120,6 +120,11 @@ static void emit_cmp_and_jump_insn_1 PARAMS ((rtx, rtx, enum machine_mode,
					       enum rtx_code, int, rtx));
 static void prepare_float_lib_cmp PARAMS ((rtx *, rtx *, enum rtx_code *,
					    enum machine_mode *, int *));
+static rtx expand_vector_binop PARAMS ((enum machine_mode, optab,
+					rtx, rtx, rtx, int,
+					enum optab_methods));
+static rtx expand_vector_unop PARAMS ((enum machine_mode, optab, rtx, rtx,
+					int));
 
 /* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
    the result of operation CODE applied to OP0 (and OP1 if it is a binary

@@ -1531,6 +1536,12 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
	  delete_insns_since (last);
	}
 
+  /* Open-code the vector operations if we have no hardware support
+     for them.  */
+  if (class == MODE_VECTOR_INT || class == MODE_VECTOR_FLOAT)
+    return expand_vector_binop (mode, binoptab, op0, op1, target,
+				unsignedp, methods);
+
   /* We need to open-code the complex type operations: '+, -, * and /' */
 
   /* At this point we allow operations between two similar complex

@@ -1900,6 +1911,125 @@ expand_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
   delete_insns_since (entry_last);
   return 0;
 }
 
+/* Like expand_binop, but for open-coding vectors binops.  */
+
+static rtx
+expand_vector_binop (mode, binoptab, op0, op1, target, unsignedp, methods)
+     enum machine_mode mode;
+     optab binoptab;
+     rtx op0, op1;
+     rtx target;
+     int unsignedp;
+     enum optab_methods methods;
+{
+  enum machine_mode submode;
+  int elts, i;
+  rtx t, a, b, res, seq;
+  enum mode_class class;
+
+  class = GET_MODE_CLASS (mode);
+
+  submode = GET_MODE_INNER (mode);
+  elts = GET_MODE_NUNITS (mode);
+
+  if (!target)
+    target = gen_reg_rtx (mode);
+
+  start_sequence ();
+
+  /* FIXME: Optimally, we should try to do this in narrower vector
+     modes if available.  E.g. When trying V8SI, try V4SI, else
+     V2SI, else decay into SI.  */
+  switch (binoptab->code)
+    {
+    case PLUS:
+    case MINUS:
+    case MULT:
+    case DIV:
+      for (i = 0; i < elts; ++i)
+	{
+	  t = simplify_gen_subreg (submode, target, mode,
+				   i * UNITS_PER_WORD);
+	  a = simplify_gen_subreg (submode, op0, mode,
+				   i * UNITS_PER_WORD);
+	  b = simplify_gen_subreg (submode, op1, mode,
+				   i * UNITS_PER_WORD);
+
+	  if (binoptab->code == DIV)
+	    {
+	      if (class == MODE_VECTOR_FLOAT)
+		res = expand_binop (submode, binoptab, a, b, t,
+				    unsignedp, methods);
+	      else
+		res = expand_divmod (0, TRUNC_DIV_EXPR, submode,
+				     a, b, t, unsignedp);
+	    }
+	  else
+	    res = expand_binop (submode, binoptab, a, b, t,
+				unsignedp, methods);
+
+	  if (res == 0)
+	    break;
+
+	  emit_move_insn (t, res);
+	}
+      break;
+
+    default:
+      abort ();
+    }
+
+  seq = get_insns ();
+  end_sequence ();
+  emit_insn (seq);
+
+  return target;
+}
+
+/* Like expand_unop but for open-coding vector unops.  */
+
+static rtx
+expand_vector_unop (mode, unoptab, op0, target, unsignedp)
+     enum machine_mode mode;
+     optab unoptab;
+     rtx op0;
+     rtx target;
+     int unsignedp;
+{
+  enum machine_mode submode;
+  int elts, i;
+  rtx t, a, res, seq;
+
+  submode = GET_MODE_INNER (mode);
+  elts = GET_MODE_NUNITS (mode);
+
+  if (!target)
+    target = gen_reg_rtx (mode);
+
+  start_sequence ();
+
+  /* FIXME: Optimally, we should try to do this in narrower vector
+     modes if available.  E.g. When trying V8SI, try V4SI, else
+     V2SI, else decay into SI.  */
+  for (i = 0; i < elts; ++i)
+    {
+      t = simplify_gen_subreg (submode, target, mode, i * UNITS_PER_WORD);
+      a = simplify_gen_subreg (submode, op0, mode, i * UNITS_PER_WORD);
+
+      res = expand_unop (submode, unoptab, a, t, unsignedp);
+
+      emit_move_insn (t, res);
+    }
+
+  seq = get_insns ();
+  end_sequence ();
+  emit_insn (seq);
+
+  return target;
+}
+
 /* Expand a binary operator which has both signed and unsigned forms.
    UOPTAB is the optab for unsigned operations, and SOPTAB is for

@@ -2324,6 +2454,9 @@ expand_unop (mode, unoptab, op0, target, unsignedp)
       return target;
     }
 
+  if (class == MODE_VECTOR_FLOAT || class == MODE_VECTOR_INT)
+    return expand_vector_unop (mode, unoptab, op0, target, unsignedp);
+
   /* It can't be done in this mode.  Can we do it in a wider mode?  */
 
   if (class == MODE_INT || class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT)
simplify-rtx.c:

@@ -2268,6 +2268,24 @@ simplify_subreg (outermode, op, innermode, byte)
   if (outermode == innermode && !byte)
     return op;
 
+  /* Simplify subregs of vector constants.  */
+  if (GET_CODE (op) == CONST_VECTOR)
+    {
+      int offset = byte / UNITS_PER_WORD;
+      rtx elt;
+
+      /* This shouldn't happen, but let's not do anything stupid.  */
+      if (GET_MODE_INNER (innermode) != outermode)
+	return NULL_RTX;
+
+      elt = CONST_VECTOR_ELT (op, offset);
+
+      /* ?? We probably don't need this copy_rtx because constants
+	 can be shared.  ?? */
+
+      return copy_rtx (elt);
+    }
+
   /* Attempt to simplify constant to non-SUBREG expression.  */
   if (CONSTANT_P (op))
     {
gcc.c-torture/execute/simd-1.c (new file):
/* Origin: Aldy Hernandez <aldyh@redhat.com>
Purpose: Test generic SIMD support. This test should work
regardless of if the target has SIMD instructions.
*/
typedef int __attribute__((mode(V4SI))) vecint;
vecint i = { 150, 100, 150, 200 };
vecint j = { 10, 13, 20, 30 };
vecint k;
union {
vecint v;
int i[4];
} res;
/* This should go away once we can use == and != on vector types. */
void
verify (int a1, int a2, int a3, int a4,
int b1, int b2, int b3, int b4)
{
if (a1 != b1
|| a2 != b2
|| a3 != b3
|| a4 != b4)
abort ();
}
int
main ()
{
k = i + j;
res.v = k;
verify (res.i[0], res.i[1], res.i[2], res.i[3], 160, 113, 170, 230);
k = i * j;
res.v = k;
verify (res.i[0], res.i[1], res.i[2], res.i[3], 1500, 1300, 3000, 6000);
k = i / j;
res.v = k;
verify (res.i[0], res.i[1], res.i[2], res.i[3], 15, 7, 7, 6);
k = -i;
res.v = k;
verify (res.i[0], res.i[1], res.i[2], res.i[3],
-150, -100, -150, -200);
exit (0);
}
gcc.dg/simd-1.c (new file):

/* { dg-do compile } */
/* { dg-options "-Wall" } */
/* Origin: Aldy Hernandez <aldyh@redhat.com>. */
/* Purpose: Program to test generic SIMD support. */
typedef int __attribute__((mode(V4SI))) v4si;
typedef int __attribute__((mode(V8HI))) v8hi;
typedef int __attribute__((mode(V2SI))) v2si;
typedef unsigned int __attribute__((mode(V4SI))) uv4si;
v4si a, b;
v2si c, d;
v8hi e;
uv4si f;
int foo __attribute__((mode(DI)));
int foo1 __attribute__((mode(SI)));
int foo2 __attribute__((mode(V4HI)));
void
hanneke ()
{
/* Assignment. */
a = b;
/* Assignment of different types. */
b = c; /* { dg-error "incompatible types in assignment" } */
d = a; /* { dg-error "incompatible types in assignment" } */
/* Casting between SIMDs of the same size. */
e = (typeof (e)) a;
/* Different signed SIMD assignment. */
f = a; /* { dg-error "incompatible types in assignment" } */
/* Casted different signed SIMD assignment. */
f = (uv4si) a;
/* Assignment between scalar and SIMD of different size. */
foo = a; /* { dg-error "incompatible types in assignment" } */
/* Casted assignment between scalar and SIMD of same size. */
foo = (typeof (foo)) foo2;
/* Casted assignment between scalar and SIMD of different size. */
foo1 = (typeof (foo1)) foo2; /* { dg-error "can't convert between vector values of different size" } */
/* Operators on compatible SIMD types. */
a += b + b;
a -= b;
a *= b;
a /= b;
a = -b;
/* Operators on incompatible SIMD types. */
a = b + c; /* { dg-error "can't convert between vector values of different size" } */
a = b - c; /* { dg-error "can't convert between vector values of different size" } */
a = b * c; /* { dg-error "can't convert between vector values of different size" } */
a = b / c; /* { dg-error "can't convert between vector values of different size" } */
}