Commit 68d3bacf by Bill Schmidt, committed by William Schmidt

rs6000.c (altivec_expand_vec_perm_const): Use CODE_FOR_altivec_vmrg*_direct
rather than CODE_FOR_altivec_vmrg*.

gcc:

2014-01-29  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* config/rs6000/rs6000.c (altivec_expand_vec_perm_const):  Use
	CODE_FOR_altivec_vmrg*_direct rather than CODE_FOR_altivec_vmrg*.
	* config/rs6000/vsx.md (vsx_mergel_<mode>): Adjust for
	-maltivec=be with LE targets.
	(vsx_mergeh_<mode>): Likewise.
	* config/rs6000/altivec.md (UNSPEC_VMRG[HL]_DIRECT): New
	unspecs.
	(mulv8hi3): Use gen_altivec_vmrg[hl]w_direct.
	(altivec_vmrghb): Replace with define_expand and new
	*altivec_vmrghb_internal insn; adjust for -maltivec=be with LE
	targets.
	(altivec_vmrghb_direct): New define_insn.
	(altivec_vmrghh): Replace with define_expand and new
	*altivec_vmrghh_internal insn; adjust for -maltivec=be with LE
	targets.
	(altivec_vmrghh_direct): New define_insn.
	(altivec_vmrghw): Replace with define_expand and new
	*altivec_vmrghw_internal insn; adjust for -maltivec=be with LE
	targets.
	(altivec_vmrghw_direct): New define_insn.
	(*altivec_vmrghsf): Adjust for endianness.
	(altivec_vmrglb): Replace with define_expand and new
	*altivec_vmrglb_internal insn; adjust for -maltivec=be with LE
	targets.
	(altivec_vmrglb_direct): New define_insn.
	(altivec_vmrglh): Replace with define_expand and new
	*altivec_vmrglh_internal insn; adjust for -maltivec=be with LE
	targets.
	(altivec_vmrglh_direct): New define_insn.
	(altivec_vmrglw): Replace with define_expand and new
	*altivec_vmrglw_internal insn; adjust for -maltivec=be with LE
	targets.
	(altivec_vmrglw_direct): New define_insn.
	(*altivec_vmrglsf): Adjust for endianness.
	(vec_widen_umult_hi_v16qi): Use gen_altivec_vmrghh_direct.
	(vec_widen_umult_lo_v16qi): Use gen_altivec_vmrglh_direct.
	(vec_widen_smult_hi_v16qi): Use gen_altivec_vmrghh_direct.
	(vec_widen_smult_lo_v16qi): Use gen_altivec_vmrglh_direct.
	(vec_widen_umult_hi_v8hi): Use gen_altivec_vmrghw_direct.
	(vec_widen_umult_lo_v8hi): Use gen_altivec_vmrglw_direct.
	(vec_widen_smult_hi_v8hi): Use gen_altivec_vmrghw_direct.
	(vec_widen_smult_lo_v8hi): Use gen_altivec_vmrglw_direct.

gcc/testsuite:

2014-01-29  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>

	* gcc.dg/vmx/merge-be-order.c: New.
	* gcc.dg/vmx/merge.c: New.
	* gcc.dg/vmx/merge-vsx-be-order.c: New.
	* gcc.dg/vmx/merge-vsx.c: New.

From-SVN: r207262
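
Background for readers unfamiliar with the AltiVec merge instructions: vmrgh*/vmrgl* interleave, element by element, the high (respectively low) halves of two vectors in big-endian element order. On a little-endian target the same hardware instruction therefore computes the opposite merge, which is why the permute expander must select vmrgl* where big-endian code would use vmrgh*, and why this commit adds *_direct patterns that emit the raw instruction with no endian adjustment. A minimal scalar model of the merge semantics, in plain C (merge_high/merge_low are illustrative names, not GCC identifiers):

/* Scalar model of AltiVec merge-high/merge-low in big-endian element
   order; a sketch only, not GCC code.  */
#include <stdio.h>

#define N 16   /* elements per vector for the char case */

static void
merge_high (const unsigned char *a, const unsigned char *b,
            unsigned char *out)
{
  /* Interleave elements 0 .. N/2-1 of a and b.  */
  for (int i = 0; i < N / 2; i++)
    {
      out[2 * i] = a[i];
      out[2 * i + 1] = b[i];
    }
}

static void
merge_low (const unsigned char *a, const unsigned char *b,
           unsigned char *out)
{
  /* Interleave elements N/2 .. N-1 of a and b.  */
  for (int i = 0; i < N / 2; i++)
    {
      out[2 * i] = a[N / 2 + i];
      out[2 * i + 1] = b[N / 2 + i];
    }
}

int
main (void)
{
  unsigned char a[N], b[N], hi[N], lo[N];
  for (int i = 0; i < N; i++)
    {
      a[i] = i;        /* like vuca in the tests below */
      b[i] = 16 + i;   /* like vucb */
    }
  merge_high (a, b, hi);   /* 0,16,1,17,...,7,23  */
  merge_low (a, b, lo);    /* 8,24,9,25,...,15,31 */
  for (int i = 0; i < N; i++)
    printf ("%d%c", hi[i], i == N - 1 ? '\n' : ',');
  for (int i = 0; i < N; i++)
    printf ("%d%c", lo[i], i == N - 1 ? '\n' : ',');
  return 0;
}

Run on the inputs used by merge.c below, this prints the vucrh/vucrl values that the big-endian branches of the tests expect.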
gcc/config/rs6000/rs6000.c
@@ -29895,22 +29895,28 @@ altivec_expand_vec_perm_const (rtx operands[4])
   { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum,
     { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
   { OPTION_MASK_ALTIVEC,
-    BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb : CODE_FOR_altivec_vmrglb,
+    (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
+     : CODE_FOR_altivec_vmrglb_direct),
     { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
   { OPTION_MASK_ALTIVEC,
-    BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh : CODE_FOR_altivec_vmrglh,
+    (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
+     : CODE_FOR_altivec_vmrglh_direct),
     { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
   { OPTION_MASK_ALTIVEC,
-    BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw : CODE_FOR_altivec_vmrglw,
+    (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
+     : CODE_FOR_altivec_vmrglw_direct),
     { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
   { OPTION_MASK_ALTIVEC,
-    BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb : CODE_FOR_altivec_vmrghb,
+    (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
+     : CODE_FOR_altivec_vmrghb_direct),
     { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
   { OPTION_MASK_ALTIVEC,
-    BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh : CODE_FOR_altivec_vmrghh,
+    (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
+     : CODE_FOR_altivec_vmrghh_direct),
     { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
   { OPTION_MASK_ALTIVEC,
-    BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw : CODE_FOR_altivec_vmrghw,
+    (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
+     : CODE_FOR_altivec_vmrghw_direct),
     { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
   { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
     { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
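
Each table entry above pairs an instruction code with the 16-byte pattern it applies to the 32-byte concatenation of the two input vectors (bytes 0..15 come from the first operand, 16..31 from the second). A sketch of the lookup idea in plain C; the struct and function names here are hypothetical stand-ins, not the types GCC uses:

/* Hypothetical sketch of matching a constant permutation against a
   pattern table, in the spirit of altivec_expand_vec_perm_const.  */
#include <string.h>

struct perm_entry
{
  int insn_code;              /* stand-in for a CODE_FOR_* value */
  unsigned char perm[16];     /* byte selection over {op1,op2}   */
};

/* Return the insn code whose pattern matches PERM exactly, or -1.  */
static int
match_perm (const struct perm_entry *table, int n,
            const unsigned char perm[16])
{
  for (int i = 0; i < n; i++)
    if (memcmp (table[i].perm, perm, 16) == 0)
      return table[i].insn_code;
  return -1;
}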
gcc/config/rs6000/vsx.md
@@ -1678,24 +1678,54 @@
 ;; Expanders for builtins
 (define_expand "vsx_mergel_<mode>"
-  [(set (match_operand:VSX_D 0 "vsx_register_operand" "")
-        (vec_select:VSX_D
-          (vec_concat:<VS_double>
-            (match_operand:VSX_D 1 "vsx_register_operand" "")
-            (match_operand:VSX_D 2 "vsx_register_operand" ""))
-          (parallel [(const_int 1) (const_int 3)])))]
+  [(use (match_operand:VSX_D 0 "vsx_register_operand" ""))
+   (use (match_operand:VSX_D 1 "vsx_register_operand" ""))
+   (use (match_operand:VSX_D 2 "vsx_register_operand" ""))]
   "VECTOR_MEM_VSX_P (<MODE>mode)"
-  "")
+{
+  rtvec v;
+  rtx x;
+
+  /* Special handling for LE with -maltivec=be.  */
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    {
+      v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
+      x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[2], operands[1]);
+    }
+  else
+    {
+      v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
+      x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
+    }
+  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+})
 
 (define_expand "vsx_mergeh_<mode>"
-  [(set (match_operand:VSX_D 0 "vsx_register_operand" "")
-        (vec_select:VSX_D
-          (vec_concat:<VS_double>
-            (match_operand:VSX_D 1 "vsx_register_operand" "")
-            (match_operand:VSX_D 2 "vsx_register_operand" ""))
-          (parallel [(const_int 0) (const_int 2)])))]
+  [(use (match_operand:VSX_D 0 "vsx_register_operand" ""))
+   (use (match_operand:VSX_D 1 "vsx_register_operand" ""))
+   (use (match_operand:VSX_D 2 "vsx_register_operand" ""))]
   "VECTOR_MEM_VSX_P (<MODE>mode)"
-  "")
+{
+  rtvec v;
+  rtx x;
+
+  /* Special handling for LE with -maltivec=be.  */
+  if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
+    {
+      v = gen_rtvec (2, GEN_INT (1), GEN_INT (3));
+      x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[2], operands[1]);
+    }
+  else
+    {
+      v = gen_rtvec (2, GEN_INT (0), GEN_INT (2));
+      x = gen_rtx_VEC_CONCAT (<VS_double>mode, operands[1], operands[2]);
+    }
+  x = gen_rtx_VEC_SELECT (<MODE>mode, x, gen_rtx_PARALLEL (VOIDmode, v));
+  emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
+})
 
 ;; V2DF/V2DI splat
 (define_insn "vsx_splat_<mode>"
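
The index choices in the two expanders can be cross-checked against the V2DI expectations in merge-vsx-be-order.c and merge-vsx.c below. The throwaway program that follows assumes nothing about GCC internals; it just evaluates both branches of the merge-low expander by hand:

/* Throwaway check of the expander's index choices for V2DI merge-low.
   Values match the tests: vla = {-2,-1}, vlb = {0,1}.  */
#include <stdio.h>

int
main (void)
{
  long op1[2] = {-2, -1}, op2[2] = {0, 1};

  /* BE (and LE without -maltivec=be): select {1,3} of concat(op1,op2).  */
  long be_concat[4] = {op1[0], op1[1], op2[0], op2[1]};
  long be[2] = {be_concat[1], be_concat[3]};            /* {-1, 1} */

  /* LE with -maltivec=be: select {0,2} of concat(op2,op1).  */
  long le_concat[4] = {op2[0], op2[1], op1[0], op1[1]};
  long le[2] = {le_concat[0], le_concat[2]};            /* {0, -2} */

  printf ("BE mergel: {%ld,%ld}  LE -maltivec=be mergel: {%ld,%ld}\n",
          be[0], be[1], le[0], le[1]);
  return 0;
}

The first pair matches vlrl in merge-vsx.c ({-1,1}); the second matches vlrl in the little-endian branch of merge-vsx-be-order.c ({0,-2}).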
gcc/testsuite/gcc.dg/vmx/merge-be-order.c (new file):
/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mno-vsx" } */
#include "harness.h"
static void test()
{
/* Input vectors. */
vector unsigned char vuca = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned char vucb
= {16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31};
vector signed char vsca
= {-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1};
vector signed char vscb = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned short vusa = {0,1,2,3,4,5,6,7};
vector unsigned short vusb = {8,9,10,11,12,13,14,15};
vector signed short vssa = {-8,-7,-6,-5,-4,-3,-2,-1};
vector signed short vssb = {0,1,2,3,4,5,6,7};
vector unsigned int vuia = {0,1,2,3};
vector unsigned int vuib = {4,5,6,7};
vector signed int vsia = {-4,-3,-2,-1};
vector signed int vsib = {0,1,2,3};
vector float vfa = {-4.0,-3.0,-2.0,-1.0};
vector float vfb = {0.0,1.0,2.0,3.0};
/* Result vectors. */
vector unsigned char vuch, vucl;
vector signed char vsch, vscl;
vector unsigned short vush, vusl;
vector signed short vssh, vssl;
vector unsigned int vuih, vuil;
vector signed int vsih, vsil;
vector float vfh, vfl;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector unsigned char vucrh = {24,8,25,9,26,10,27,11,28,12,29,13,30,14,31,15};
vector unsigned char vucrl = {16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7};
vector signed char vscrh = {8,-8,9,-7,10,-6,11,-5,12,-4,13,-3,14,-2,15,-1};
vector signed char vscrl = {0,-16,1,-15,2,-14,3,-13,4,-12,5,-11,6,-10,7,-9};
vector unsigned short vusrh = {12,4,13,5,14,6,15,7};
vector unsigned short vusrl = {8,0,9,1,10,2,11,3};
vector signed short vssrh = {4,-4,5,-3,6,-2,7,-1};
vector signed short vssrl = {0,-8,1,-7,2,-6,3,-5};
vector unsigned int vuirh = {6,2,7,3};
vector unsigned int vuirl = {4,0,5,1};
vector signed int vsirh = {2,-2,3,-1};
vector signed int vsirl = {0,-4,1,-3};
vector float vfrh = {2.0,-2.0,3.0,-1.0};
vector float vfrl = {0.0,-4.0,1.0,-3.0};
#else
vector unsigned char vucrh = {0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23};
vector unsigned char vucrl = {8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31};
vector signed char vscrh = {-16,0,-15,1,-14,2,-13,3,-12,4,-11,5,-10,6,-9,7};
vector signed char vscrl = {-8,8,-7,9,-6,10,-5,11,-4,12,-3,13,-2,14,-1,15};
vector unsigned short vusrh = {0,8,1,9,2,10,3,11};
vector unsigned short vusrl = {4,12,5,13,6,14,7,15};
vector signed short vssrh = {-8,0,-7,1,-6,2,-5,3};
vector signed short vssrl = {-4,4,-3,5,-2,6,-1,7};
vector unsigned int vuirh = {0,4,1,5};
vector unsigned int vuirl = {2,6,3,7};
vector signed int vsirh = {-4,0,-3,1};
vector signed int vsirl = {-2,2,-1,3};
vector float vfrh = {-4.0,0.0,-3.0,1.0};
vector float vfrl = {-2.0,2.0,-1.0,3.0};
#endif
vuch = vec_mergeh (vuca, vucb);
vucl = vec_mergel (vuca, vucb);
vsch = vec_mergeh (vsca, vscb);
vscl = vec_mergel (vsca, vscb);
vush = vec_mergeh (vusa, vusb);
vusl = vec_mergel (vusa, vusb);
vssh = vec_mergeh (vssa, vssb);
vssl = vec_mergel (vssa, vssb);
vuih = vec_mergeh (vuia, vuib);
vuil = vec_mergel (vuia, vuib);
vsih = vec_mergeh (vsia, vsib);
vsil = vec_mergel (vsia, vsib);
vfh = vec_mergeh (vfa, vfb);
vfl = vec_mergel (vfa, vfb);
check (vec_all_eq (vuch, vucrh), "vuch");
check (vec_all_eq (vucl, vucrl), "vucl");
check (vec_all_eq (vsch, vscrh), "vsch");
check (vec_all_eq (vscl, vscrl), "vscl");
check (vec_all_eq (vush, vusrh), "vush");
check (vec_all_eq (vusl, vusrl), "vusl");
check (vec_all_eq (vssh, vssrh), "vssh");
check (vec_all_eq (vssl, vssrl), "vssl");
check (vec_all_eq (vuih, vuirh), "vuih");
check (vec_all_eq (vuil, vuirl), "vuil");
check (vec_all_eq (vsih, vsirh), "vsih");
check (vec_all_eq (vsil, vsirl), "vsil");
check (vec_all_eq (vfh, vfrh), "vfh");
check (vec_all_eq (vfl, vfrl), "vfl");
}
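
The little-endian expected values in this test follow mechanically from -maltivec=be semantics: view each input in big-endian element order, merge, then read the result back in little-endian order. A sketch of that derivation for the unsigned char merge-high case (again plain C, not testsuite code):

/* Derivation of the LE expected values in merge-be-order.c for the
   unsigned char case; a sketch, not part of the testsuite.  */
#include <stdio.h>

int
main (void)
{
  unsigned char a[16], b[16], be_a[16], be_b[16], be_h[16], le_h[16];
  for (int i = 0; i < 16; i++)
    {
      a[i] = i;              /* vuca, in LE element order */
      b[i] = 16 + i;         /* vucb */
      be_a[i] = a[15 - i];   /* view the inputs in BE element order */
      be_b[i] = b[15 - i];
    }
  for (int i = 0; i < 8; i++)   /* BE merge-high */
    {
      be_h[2 * i] = be_a[i];
      be_h[2 * i + 1] = be_b[i];
    }
  for (int i = 0; i < 16; i++)  /* read the result back in LE order */
    le_h[i] = be_h[15 - i];
  /* Prints 24,8,25,9,...,31,15: the vucrh value in the LE branch.  */
  for (int i = 0; i < 16; i++)
    printf ("%d%c", le_h[i], i == 15 ? '\n' : ',');
  return 0;
}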
gcc/testsuite/gcc.dg/vmx/merge-vsx-be-order.c (new file):
/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* { dg-options "-maltivec=be -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static int vec_long_eq (vector long x, vector long y)
{
return (x[0] == y[0] && x[1] == y[1]);
}
static void test()
{
/* Input vectors. */
vector long vla = {-2,-1};
vector long vlb = {0,1};
vector double vda = {-2.0,-1.0};
vector double vdb = {0.0,1.0};
/* Result vectors. */
vector long vlh, vll;
vector double vdh, vdl;
/* Expected result vectors. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
vector long vlrh = {1,-1};
vector long vlrl = {0,-2};
vector double vdrh = {1.0,-1.0};
vector double vdrl = {0.0,-2.0};
#else
vector long vlrh = {-2,0};
vector long vlrl = {-1,1};
vector double vdrh = {-2.0,0.0};
vector double vdrl = {-1.0,1.0};
#endif
vlh = vec_mergeh (vla, vlb);
vll = vec_mergel (vla, vlb);
vdh = vec_mergeh (vda, vdb);
vdl = vec_mergel (vda, vdb);
check (vec_long_eq (vlh, vlrh), "vlh");
check (vec_long_eq (vll, vlrl), "vll");
check (vec_all_eq (vdh, vdrh), "vdh");
check (vec_all_eq (vdl, vdrl), "vdl");
}
gcc/testsuite/gcc.dg/vmx/merge-vsx.c (new file):
/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
/* { dg-require-effective-target powerpc_vsx_ok } */
/* { dg-options "-maltivec -mabi=altivec -std=gnu99 -mvsx" } */
#include "harness.h"
static int vec_long_eq (vector long x, vector long y)
{
return (x[0] == y[0] && x[1] == y[1]);
}
static void test()
{
/* Input vectors. */
vector long vla = {-2,-1};
vector long vlb = {0,1};
vector double vda = {-2.0,-1.0};
vector double vdb = {0.0,1.0};
/* Result vectors. */
vector long vlh, vll;
vector double vdh, vdl;
/* Expected result vectors. */
vector long vlrh = {-2,0};
vector long vlrl = {-1,1};
vector double vdrh = {-2.0,0.0};
vector double vdrl = {-1.0,1.0};
vlh = vec_mergeh (vla, vlb);
vll = vec_mergel (vla, vlb);
vdh = vec_mergeh (vda, vdb);
vdl = vec_mergel (vda, vdb);
check (vec_long_eq (vlh, vlrh), "vlh");
check (vec_long_eq (vll, vlrl), "vll");
check (vec_all_eq (vdh, vdrh), "vdh");
check (vec_all_eq (vdl, vdrl), "vdl");
}
gcc/testsuite/gcc.dg/vmx/merge.c (new file):
#include "harness.h"
static void test()
{
/* Input vectors. */
vector unsigned char vuca = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned char vucb
= {16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31};
vector signed char vsca
= {-16,-15,-14,-13,-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1};
vector signed char vscb = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
vector unsigned short vusa = {0,1,2,3,4,5,6,7};
vector unsigned short vusb = {8,9,10,11,12,13,14,15};
vector signed short vssa = {-8,-7,-6,-5,-4,-3,-2,-1};
vector signed short vssb = {0,1,2,3,4,5,6,7};
vector unsigned int vuia = {0,1,2,3};
vector unsigned int vuib = {4,5,6,7};
vector signed int vsia = {-4,-3,-2,-1};
vector signed int vsib = {0,1,2,3};
vector float vfa = {-4.0,-3.0,-2.0,-1.0};
vector float vfb = {0.0,1.0,2.0,3.0};
/* Result vectors. */
vector unsigned char vuch, vucl;
vector signed char vsch, vscl;
vector unsigned short vush, vusl;
vector signed short vssh, vssl;
vector unsigned int vuih, vuil;
vector signed int vsih, vsil;
vector float vfh, vfl;
/* Expected result vectors. */
vector unsigned char vucrh = {0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23};
vector unsigned char vucrl = {8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31};
vector signed char vscrh = {-16,0,-15,1,-14,2,-13,3,-12,4,-11,5,-10,6,-9,7};
vector signed char vscrl = {-8,8,-7,9,-6,10,-5,11,-4,12,-3,13,-2,14,-1,15};
vector unsigned short vusrh = {0,8,1,9,2,10,3,11};
vector unsigned short vusrl = {4,12,5,13,6,14,7,15};
vector signed short vssrh = {-8,0,-7,1,-6,2,-5,3};
vector signed short vssrl = {-4,4,-3,5,-2,6,-1,7};
vector unsigned int vuirh = {0,4,1,5};
vector unsigned int vuirl = {2,6,3,7};
vector signed int vsirh = {-4,0,-3,1};
vector signed int vsirl = {-2,2,-1,3};
vector float vfrh = {-4.0,0.0,-3.0,1.0};
vector float vfrl = {-2.0,2.0,-1.0,3.0};
vuch = vec_mergeh (vuca, vucb);
vucl = vec_mergel (vuca, vucb);
vsch = vec_mergeh (vsca, vscb);
vscl = vec_mergel (vsca, vscb);
vush = vec_mergeh (vusa, vusb);
vusl = vec_mergel (vusa, vusb);
vssh = vec_mergeh (vssa, vssb);
vssl = vec_mergel (vssa, vssb);
vuih = vec_mergeh (vuia, vuib);
vuil = vec_mergel (vuia, vuib);
vsih = vec_mergeh (vsia, vsib);
vsil = vec_mergel (vsia, vsib);
vfh = vec_mergeh (vfa, vfb);
vfl = vec_mergel (vfa, vfb);
check (vec_all_eq (vuch, vucrh), "vuch");
check (vec_all_eq (vucl, vucrl), "vucl");
check (vec_all_eq (vsch, vscrh), "vsch");
check (vec_all_eq (vscl, vscrl), "vscl");
check (vec_all_eq (vush, vusrh), "vush");
check (vec_all_eq (vusl, vusrl), "vusl");
check (vec_all_eq (vssh, vssrh), "vssh");
check (vec_all_eq (vssl, vssrl), "vssl");
check (vec_all_eq (vuih, vuirh), "vuih");
check (vec_all_eq (vuil, vuirl), "vuil");
check (vec_all_eq (vsih, vsirh), "vsih");
check (vec_all_eq (vsil, vsirl), "vsil");
check (vec_all_eq (vfh, vfrh), "vfh");
check (vec_all_eq (vfl, vfrl), "vfl");
}