Commit c9485473 by Michael Meissner Committed by Michael Meissner

Fix PR target/47272

From-SVN: r169780
parent b24d1acf
2011-02-02 Michael Meissner <meissner@linux.vnet.ibm.com>
PR target/47272
* doc/extend.texi (PowerPC AltiVec/VSX Built-in Functions):
Document using vector double with the load/store builtins, and
that the load/store builtins always use Altivec instructions.
* config/rs6000/vector.md (vector_altivec_load_<mode>): New insns
to use altivec memory instructions, even on VSX.
(vector_altivec_store_<mode>): Ditto.
* config/rs6000/rs6000-protos.h (rs6000_address_for_altivec): New
function.
* config/rs6000/rs6000-c.c (altivec_overloaded_builtins): Add
V2DF, V2DI support to load/store overloaded builtins.
* config/rs6000/rs6000-builtin.def (ALTIVEC_BUILTIN_*): Add
altivec load/store builtins for V2DF/V2DI types.
* config/rs6000/rs6000.c (rs6000_option_override_internal): Don't
set avoid indexed addresses on power6 if -maltivec.
(altivec_expand_ld_builtin): Add V2DF, V2DI support, use
vector_altivec_load/vector_altivec_store builtins.
(altivec_expand_st_builtin): Ditto.
(altivec_expand_builtin): Add VSX memory builtins.
(rs6000_init_builtins): Add V2DI types to internal types.
(altivec_init_builtins): Add support for V2DF/V2DI altivec
load/store builtins.
(rs6000_address_for_altivec): Ensure memory address is appropriate
for Altivec.
* config/rs6000/vsx.md (vsx_load_<mode>): New expanders for
vec_vsx_ld and vec_vsx_st.
(vsx_store_<mode>): Ditto.
* config/rs6000/rs6000.h (RS6000_BTI_long_long): New type
variables to hold long long types for VSX vector memory builtins.
(RS6000_BTI_unsigned_long_long): Ditto.
(long_long_integer_type_internal_node): Ditto.
(long_long_unsigned_type_internal_node): Ditto.
* config/rs6000/altivec.md (UNSPEC_LVX): New UNSPEC.
(altivec_lvx_<mode>): Make altivec_lvx use a mode iterator.
(altivec_stvx_<mode>): Make altivec_stvx use a mode iterator.
* config/rs6000/altivec.h (vec_vsx_ld): Define VSX memory builtin
short cuts.
(vec_vsx_st): Ditto.
2011-02-02 Joseph Myers <joseph@codesourcery.com>
* config/pa/pa-hpux10.opt: New.
......
/* PowerPC AltiVec include file.
Copyright (C) 2002, 2003, 2004, 2005, 2008, 2009, 2010
Copyright (C) 2002, 2003, 2004, 2005, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Aldy Hernandez (aldyh@redhat.com).
Rewritten by Paolo Bonzini (bonzini@gnu.org).
......@@ -318,6 +318,8 @@
#define vec_nearbyint __builtin_vec_nearbyint
#define vec_rint __builtin_vec_rint
#define vec_sqrt __builtin_vec_sqrt
#define vec_vsx_ld __builtin_vec_vsx_ld
#define vec_vsx_st __builtin_vec_vsx_st
#endif
/* Predicates.
......
;; AltiVec patterns.
;; Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
;; Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
;; Free Software Foundation, Inc.
;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
......@@ -96,7 +96,7 @@
(UNSPEC_STVE 203)
(UNSPEC_SET_VSCR 213)
(UNSPEC_GET_VRSAVE 214)
;; 215 deleted
(UNSPEC_LVX 215)
(UNSPEC_REDUC_PLUS 217)
(UNSPEC_VECSH 219)
(UNSPEC_EXTEVEN_V4SI 220)
......@@ -1750,17 +1750,19 @@
"lvxl %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_lvx"
[(set (match_operand:V4SI 0 "register_operand" "=v")
(match_operand:V4SI 1 "memory_operand" "Z"))]
(define_insn "altivec_lvx_<mode>"
[(parallel
[(set (match_operand:VM2 0 "register_operand" "=v")
(match_operand:VM2 1 "memory_operand" "Z"))
(unspec [(const_int 0)] UNSPEC_LVX)])]
"TARGET_ALTIVEC"
"lvx %0,%y1"
[(set_attr "type" "vecload")])
(define_insn "altivec_stvx"
(define_insn "altivec_stvx_<mode>"
[(parallel
[(set (match_operand:V4SI 0 "memory_operand" "=Z")
(match_operand:V4SI 1 "register_operand" "v"))
[(set (match_operand:VM2 0 "memory_operand" "=Z")
(match_operand:VM2 1 "register_operand" "v"))
(unspec [(const_int 0)] UNSPEC_STVX)])]
"TARGET_ALTIVEC"
"stvx %1,%y0"
......
/* Builtin functions for rs6000/powerpc.
Copyright (C) 2009, 2010
Copyright (C) 2009, 2010, 2011
Free Software Foundation, Inc.
Contributed by Michael Meissner (meissner@linux.vnet.ibm.com)
......@@ -37,6 +37,10 @@ RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_16qi, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_16qi, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_4sf, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_4sf, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_2df, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_2df, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_ST_INTERNAL_2di, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_LD_INTERNAL_2di, RS6000_BTC_MEM)
RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUBM, RS6000_BTC_CONST)
RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUHM, RS6000_BTC_CONST)
RS6000_BUILTIN(ALTIVEC_BUILTIN_VADDUWM, RS6000_BTC_CONST)
......@@ -778,12 +782,20 @@ RS6000_BUILTIN(PAIRED_BUILTIN_CMPU1, RS6000_BTC_MISC)
/* VSX builtins. */
RS6000_BUILTIN(VSX_BUILTIN_LXSDX, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVD2X, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVD2X_V2DF, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVD2X_V2DI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVDSX, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVW4X, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V4SF, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V4SI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V8HI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_LXVW4X_V16QI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXSDX, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVD2X, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVW4X, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVD2X_V2DF, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVD2X_V2DI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V4SF, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V4SI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V8HI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_STXVW4X_V16QI, RS6000_BTC_MEM)
RS6000_BUILTIN(VSX_BUILTIN_XSABSDP, RS6000_BTC_CONST)
RS6000_BUILTIN(VSX_BUILTIN_XSADDDP, RS6000_BTC_FP_PURE)
RS6000_BUILTIN(VSX_BUILTIN_XSCMPODP, RS6000_BTC_FP_PURE)
......@@ -983,8 +995,10 @@ RS6000_BUILTIN(VSX_BUILTIN_VEC_XXPERMDI, RS6000_BTC_MISC)
RS6000_BUILTIN(VSX_BUILTIN_VEC_XXSLDWI, RS6000_BTC_MISC)
RS6000_BUILTIN(VSX_BUILTIN_VEC_XXSPLTD, RS6000_BTC_MISC)
RS6000_BUILTIN(VSX_BUILTIN_VEC_XXSPLTW, RS6000_BTC_MISC)
RS6000_BUILTIN(VSX_BUILTIN_VEC_LD, RS6000_BTC_MISC)
RS6000_BUILTIN(VSX_BUILTIN_VEC_ST, RS6000_BTC_MISC)
RS6000_BUILTIN_EQUATE(VSX_BUILTIN_OVERLOADED_LAST,
VSX_BUILTIN_VEC_XXSPLTW)
VSX_BUILTIN_VEC_ST)
/* Combined VSX/Altivec builtins. */
RS6000_BUILTIN(VECTOR_BUILTIN_FLOAT_V4SI_V4SF, RS6000_BTC_FP_PURE)
......
/* Definitions of target machine for GNU compiler, for IBM RS/6000.
Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
2010, 2011
Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
......@@ -129,6 +130,7 @@ extern void rs6000_emit_parity (rtx, rtx);
extern rtx rs6000_machopic_legitimize_pic_address (rtx, enum machine_mode,
rtx);
extern rtx rs6000_address_for_fpconvert (rtx);
extern rtx rs6000_address_for_altivec (rtx);
extern rtx rs6000_allocate_stack_temp (enum machine_mode, bool, bool);
extern int rs6000_loop_align (rtx);
#endif /* RTX_CODE */
......
/* Definitions of target machine for GNU compiler, for IBM RS/6000.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
2010
2010, 2011
Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
......@@ -2368,6 +2368,8 @@ enum rs6000_builtin_type_index
RS6000_BTI_pixel_V8HI, /* __vector __pixel */
RS6000_BTI_long, /* long_integer_type_node */
RS6000_BTI_unsigned_long, /* long_unsigned_type_node */
RS6000_BTI_long_long, /* long_long_integer_type_node */
RS6000_BTI_unsigned_long_long, /* long_long_unsigned_type_node */
RS6000_BTI_INTQI, /* intQI_type_node */
RS6000_BTI_UINTQI, /* unsigned_intQI_type_node */
RS6000_BTI_INTHI, /* intHI_type_node */
......@@ -2411,6 +2413,8 @@ enum rs6000_builtin_type_index
#define bool_V2DI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V2DI])
#define pixel_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_pixel_V8HI])
#define long_long_integer_type_internal_node (rs6000_builtin_types[RS6000_BTI_long_long])
#define long_long_unsigned_type_internal_node (rs6000_builtin_types[RS6000_BTI_unsigned_long_long])
#define long_integer_type_internal_node (rs6000_builtin_types[RS6000_BTI_long])
#define long_unsigned_type_internal_node (rs6000_builtin_types[RS6000_BTI_unsigned_long])
#define intQI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTQI])
......
......@@ -3,7 +3,7 @@
;; expander, and the actual vector instructions will be in altivec.md and
;; vsx.md
;; Copyright (C) 2009, 2010
;; Copyright (C) 2009, 2010, 2011
;; Free Software Foundation, Inc.
;; Contributed by Michael Meissner <meissner@linux.vnet.ibm.com>
......@@ -123,6 +123,43 @@
DONE;
})
;; Vector floating point load/store instructions that uses the Altivec
;; instructions even if we are compiling for VSX, since the Altivec
;; instructions silently ignore the bottom 3 bits of the address, and VSX does
;; not.
;; Expander used by the vec_ld style builtins: always load with the
;; Altivec lvx instruction, even on a VSX target, so that the low-order
;; address bits are ignored as the Altivec programming model requires.
(define_expand "vector_altivec_load_<mode>"
[(set (match_operand:VEC_M 0 "vfloat_operand" "")
(match_operand:VEC_M 1 "memory_operand" ""))]
"VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
"
{
gcc_assert (VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode));
if (VECTOR_MEM_VSX_P (<MODE>mode))
{
/* On VSX the normal move patterns would use the VSX loads, which do
not ignore the bottom address bits; rewrite the address into an
Altivec-valid form and emit an explicit lvx instead.  */
operands[1] = rs6000_address_for_altivec (operands[1]);
emit_insn (gen_altivec_lvx_<mode> (operands[0], operands[1]));
DONE;
}
}")
;; Expander used by the vec_st style builtins: always store with the
;; Altivec stvx instruction, even on a VSX target, so that the low-order
;; address bits are ignored as the Altivec programming model requires.
(define_expand "vector_altivec_store_<mode>"
[(set (match_operand:VEC_M 0 "memory_operand" "")
(match_operand:VEC_M 1 "vfloat_operand" ""))]
"VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
"
{
gcc_assert (VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode));
if (VECTOR_MEM_VSX_P (<MODE>mode))
{
/* On VSX the normal move patterns would use the VSX stores, which do
not ignore the bottom address bits; rewrite the address into an
Altivec-valid form and emit an explicit stvx instead.  */
operands[0] = rs6000_address_for_altivec (operands[0]);
emit_insn (gen_altivec_stvx_<mode> (operands[0], operands[1]));
DONE;
}
}")
;; Reload patterns for vector operations. We may need an additional base
;; register to convert the reg+offset addressing to reg+reg for vector
......
......@@ -308,6 +308,19 @@
}
[(set_attr "type" "vecstore,vecload,vecsimple,*,*,*,vecsimple,*,vecstore,vecload")])
;; Explicit load/store expanders for the builtin functions
;; Explicit load expander backing the vec_vsx_ld builtin; the plain
;; vector set produced here is matched by the normal VSX move patterns
;; (lxvd2x/lxvw4x), which do not mask the low address bits.
(define_expand "vsx_load_<mode>"
[(set (match_operand:VSX_M 0 "vsx_register_operand" "")
(match_operand:VSX_M 1 "memory_operand" ""))]
"VECTOR_MEM_VSX_P (<MODE>mode)"
"")
;; Explicit store expander backing the vec_vsx_st builtin; the plain
;; vector set produced here is matched by the normal VSX move patterns
;; (stxvd2x/stxvw4x), which do not mask the low address bits.
;; Use the VSX_M mode iterator to mirror vsx_load_<mode> above: the
;; original used VEC_M here, which generated store expanders for modes
;; that have no matching vsx_load expander and that the VSX builtin
;; machinery never requests.
(define_expand "vsx_store_<mode>"
[(set (match_operand:VSX_M 0 "memory_operand" "")
(match_operand:VSX_M 1 "vsx_register_operand" ""))]
"VECTOR_MEM_VSX_P (<MODE>mode)"
"")
;; VSX scalar and vector floating point arithmetic instructions
(define_insn "*vsx_add<mode>3"
......
......@@ -12359,6 +12359,12 @@ vector bool long vec_cmplt (vector double, vector double);
vector float vec_div (vector float, vector float);
vector double vec_div (vector double, vector double);
vector double vec_floor (vector double);
vector double vec_ld (int, const vector double *);
vector double vec_ld (int, const double *);
vector double vec_ldl (int, const vector double *);
vector double vec_ldl (int, const double *);
vector unsigned char vec_lvsl (int, const volatile double *);
vector unsigned char vec_lvsr (int, const volatile double *);
vector double vec_madd (vector double, vector double, vector double);
vector double vec_max (vector double, vector double);
vector double vec_min (vector double, vector double);
......@@ -12387,6 +12393,8 @@ vector double vec_sel (vector double, vector double, vector unsigned long);
vector double vec_sub (vector double, vector double);
vector float vec_sqrt (vector float);
vector double vec_sqrt (vector double);
void vec_st (vector double, int, vector double *);
void vec_st (vector double, int, double *);
vector double vec_trunc (vector double);
vector double vec_xor (vector double, vector double);
vector double vec_xor (vector double, vector bool long);
......@@ -12415,7 +12423,65 @@ int vec_any_ngt (vector double, vector double);
int vec_any_nle (vector double, vector double);
int vec_any_nlt (vector double, vector double);
int vec_any_numeric (vector double);
@end smallexample
vector double vec_vsx_ld (int, const vector double *);
vector double vec_vsx_ld (int, const double *);
vector float vec_vsx_ld (int, const vector float *);
vector float vec_vsx_ld (int, const float *);
vector bool int vec_vsx_ld (int, const vector bool int *);
vector signed int vec_vsx_ld (int, const vector signed int *);
vector signed int vec_vsx_ld (int, const int *);
vector signed int vec_vsx_ld (int, const long *);
vector unsigned int vec_vsx_ld (int, const vector unsigned int *);
vector unsigned int vec_vsx_ld (int, const unsigned int *);
vector unsigned int vec_vsx_ld (int, const unsigned long *);
vector bool short vec_vsx_ld (int, const vector bool short *);
vector pixel vec_vsx_ld (int, const vector pixel *);
vector signed short vec_vsx_ld (int, const vector signed short *);
vector signed short vec_vsx_ld (int, const short *);
vector unsigned short vec_vsx_ld (int, const vector unsigned short *);
vector unsigned short vec_vsx_ld (int, const unsigned short *);
vector bool char vec_vsx_ld (int, const vector bool char *);
vector signed char vec_vsx_ld (int, const vector signed char *);
vector signed char vec_vsx_ld (int, const signed char *);
vector unsigned char vec_vsx_ld (int, const vector unsigned char *);
vector unsigned char vec_vsx_ld (int, const unsigned char *);
void vec_vsx_st (vector double, int, vector double *);
void vec_vsx_st (vector double, int, double *);
void vec_vsx_st (vector float, int, vector float *);
void vec_vsx_st (vector float, int, float *);
void vec_vsx_st (vector signed int, int, vector signed int *);
void vec_vsx_st (vector signed int, int, int *);
void vec_vsx_st (vector unsigned int, int, vector unsigned int *);
void vec_vsx_st (vector unsigned int, int, unsigned int *);
void vec_vsx_st (vector bool int, int, vector bool int *);
void vec_vsx_st (vector bool int, int, unsigned int *);
void vec_vsx_st (vector bool int, int, int *);
void vec_vsx_st (vector signed short, int, vector signed short *);
void vec_vsx_st (vector signed short, int, short *);
void vec_vsx_st (vector unsigned short, int, vector unsigned short *);
void vec_vsx_st (vector unsigned short, int, unsigned short *);
void vec_vsx_st (vector bool short, int, vector bool short *);
void vec_vsx_st (vector bool short, int, unsigned short *);
void vec_vsx_st (vector pixel, int, vector pixel *);
void vec_vsx_st (vector pixel, int, unsigned short *);
void vec_vsx_st (vector pixel, int, short *);
void vec_vsx_st (vector bool short, int, short *);
void vec_vsx_st (vector signed char, int, vector signed char *);
void vec_vsx_st (vector signed char, int, signed char *);
void vec_vsx_st (vector unsigned char, int, vector unsigned char *);
void vec_vsx_st (vector unsigned char, int, unsigned char *);
void vec_vsx_st (vector bool char, int, vector bool char *);
void vec_vsx_st (vector bool char, int, unsigned char *);
void vec_vsx_st (vector bool char, int, signed char *);
@end smallexample
Note that the @samp{vec_ld} and @samp{vec_st} builtins will always
generate the Altivec @samp{LVX} and @samp{STVX} instructions even
if the VSX instruction set is available. The @samp{vec_vsx_ld} and
@samp{vec_vsx_st} builtins will always generate the VSX @samp{LXVD2X},
@samp{LXVW4X}, @samp{STXVD2X}, and @samp{STXVW4X} instructions.
GCC provides a few other builtins on PowerPC to access certain instructions:
@smallexample
......
2011-02-02 Michael Meissner <meissner@linux.vnet.ibm.com>
PR target/47272
* gcc.target/powerpc/vsx-builtin-8.c: New file, test vec_vsx_ld
and vec_vsx_st.
* gcc.target/powerpc/avoid-indexed-addresses.c: Disable altivec
and vsx so a default --with-cpu=power7 doesn't give an error
when -mavoid-indexed-addresses is used.
* gcc.target/powerpc/ppc32-abi-dfp-1.c: Rewrite to use an asm
wrapper function to save the arguments and then jump to the real
function, rather than depending on the compiler not to move stuff
before an asm.
* gcc.target/powerpc/ppc64-abi-dfp-2.c: Ditto.
2011-02-02 Janus Weil <janus@gcc.gnu.org>
Paul Thomas <pault@gcc.gnu.org>
......
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-options "-O2 -mavoid-indexed-addresses" } */
/* { dg-options "-O2 -mavoid-indexed-addresses -mno-altivec -mno-vsx" } */
/* { dg-final { scan-assembler-not "lbzx" } }
......
/* { dg-do compile { target { powerpc*-*-* } } } */
/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* { dg-options "-O3 -mcpu=power7" } */
/* Test the various load/store variants. */
#include <altivec.h>
/* Generate two copy functions for each vector TYPE: one copying through
   a native vector assignment, and one through the vec_ld/vec_st
   builtins (which always emit the Altivec lvx/stvx instructions).  */
#define TEST_COPY(NAME, TYPE) \
void NAME ## _copy_native (vector TYPE *a, vector TYPE *b) \
{ \
*a = *b; \
} \
\
void NAME ## _copy_vec (vector TYPE *a, vector TYPE *b) \
{ \
vector TYPE x = vec_ld (0, b); \
vec_st (x, 0, a); \
} \
/* Copy via the vec_ldl/vec_stl (lvxl/stvxl) last-use-hint builtins.  */
#define TEST_COPYL(NAME, TYPE) \
void NAME ## _lvxl (vector TYPE *a, vector TYPE *b) \
{ \
vector TYPE x = vec_ldl (0, b); \
vec_stl (x, 0, a); \
} \
/* Copy via the new vec_vsx_ld/vec_vsx_st builtins, which always emit
   the VSX load/store instructions.  */
#define TEST_VSX_COPY(NAME, TYPE) \
void NAME ## _copy_vsx (vector TYPE *a, vector TYPE *b) \
{ \
vector TYPE x = vec_vsx_ld (0, b); \
vec_vsx_st (x, 0, a); \
} \
/* Exercise the vec_lvsl/vec_lvsr permute-control builtins for TYPE.  */
#define TEST_ALIGN(NAME, TYPE) \
void NAME ## _align (vector unsigned char *a, TYPE *b) \
{ \
vector unsigned char x = vec_lvsl (0, b); \
vector unsigned char y = vec_lvsr (0, b); \
vec_st (x, 0, a); \
vec_st (y, 8, a); \
}
/* Instantiate the copy tests for every supported vector element type;
   each group can be disabled with its NO_* macro for debugging.  */
#ifndef NO_COPY
TEST_COPY(uchar, unsigned char)
TEST_COPY(schar, signed char)
TEST_COPY(bchar, bool char)
TEST_COPY(ushort, unsigned short)
TEST_COPY(sshort, signed short)
TEST_COPY(bshort, bool short)
TEST_COPY(uint, unsigned int)
TEST_COPY(sint, signed int)
TEST_COPY(bint, bool int)
TEST_COPY(float, float)
TEST_COPY(double, double)
#endif /* NO_COPY */
#ifndef NO_COPYL
TEST_COPYL(uchar, unsigned char)
TEST_COPYL(schar, signed char)
TEST_COPYL(bchar, bool char)
TEST_COPYL(ushort, unsigned short)
TEST_COPYL(sshort, signed short)
TEST_COPYL(bshort, bool short)
TEST_COPYL(uint, unsigned int)
TEST_COPYL(sint, signed int)
TEST_COPYL(bint, bool int)
TEST_COPYL(float, float)
TEST_COPYL(double, double)
#endif /* NO_COPYL */
/* Alignment tests take a scalar pointer, so no bool/pixel variants.  */
#ifndef NO_ALIGN
TEST_ALIGN(uchar, unsigned char)
TEST_ALIGN(schar, signed char)
TEST_ALIGN(ushort, unsigned short)
TEST_ALIGN(sshort, signed short)
TEST_ALIGN(uint, unsigned int)
TEST_ALIGN(sint, signed int)
TEST_ALIGN(float, float)
TEST_ALIGN(double, double)
#endif /* NO_ALIGN */
#ifndef NO_VSX_COPY
TEST_VSX_COPY(uchar, unsigned char)
TEST_VSX_COPY(schar, signed char)
TEST_VSX_COPY(bchar, bool char)
TEST_VSX_COPY(ushort, unsigned short)
TEST_VSX_COPY(sshort, signed short)
TEST_VSX_COPY(bshort, bool short)
TEST_VSX_COPY(uint, unsigned int)
TEST_VSX_COPY(sint, signed int)
TEST_VSX_COPY(bint, bool int)
TEST_VSX_COPY(float, float)
TEST_VSX_COPY(double, double)
#endif /* NO_VSX_COPY */
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment