Commit bc73c4c2 by Richard Sandiford Committed by Richard Sandiford

[AArch64] Add main SVE ACLE tests

Now that the PCS support is applied, this patch adds the main
SVE ACLE tests.  The idea is to test various combinations of operands
for each ACLE function, with each combination using a specific register
allocation and wrapped in its own test function.
We then compare the full assembly output of these test functions against
the expected/preferred sequences.  This provides both optimisation and
correctness testing, since ultimately the ACLE functions are defined in
terms of the underlying SVE instructions.
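
For illustration, each test boils down to something like the following
minimal sketch (not part of the patch -- the real tests below use the
TEST_UNIFORM_Z-style macros from test_sve_acle.h and only check the
bodies when -DCHECK_ASM is passed; the dg directives and option choices
here are illustrative only).  Under the SVE PCS the predicate argument
arrives in p0 and the vector arguments in z0 and z1, with the result
returned in z0, so the tied form of svabd_s16_m should compile down to
a single SABD:

/* Hypothetical standalone example, not taken from the patch.  */
/* { dg-do compile } */
/* { dg-options "-O2 -march=armv8.2-a+sve" } */
/* { dg-final { check-function-bodies "**" "" "" } } */

#include <arm_sve.h>

/*
** abd_s16_m_example:
**	sabd	z0\.h, p0/m, z0\.h, z1\.h
**	ret
*/
svint16_t
abd_s16_m_example (svbool_t pg, svint16_t x, svint16_t y)
{
  /* Merging form: inactive lanes keep the value of the first vector
     argument, which is also the destination register here.  */
  return svabd_s16_m (pg, x, y);
}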

2019-10-29  Richard Sandiford  <richard.sandiford@arm.com>
	    Kugan Vivekanandarajah  <kugan.vivekanandarajah@linaro.org>
	    Prathamesh Kulkarni  <prathamesh.kulkarni@linaro.org>

gcc/testsuite/
	* g++.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp: New file.
	* gcc.target/aarch64/sve/acle/aarch64-sve-acle-asm.exp: New file.
	* gcc.target/aarch64/sve/acle/asm: New test directory.

Co-Authored-By: Kugan Vivekanandarajah <kuganv@linaro.org>
Co-Authored-By: Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>

From-SVN: r277565
# Assembly-based regression-test driver for the SVE ACLE.
# Copyright (C) 2009-2019 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>. */
# GCC testsuite that uses the `dg.exp' driver.

# Exit immediately if this isn't an AArch64 target.
if { ![istarget aarch64*-*-*] } {
    return
}

# Load support procs.
load_lib g++-dg.exp

# Initialize `dg'.
dg-init

# Force SVE if we're not testing it already.
if { [check_effective_target_aarch64_sve] } {
    set sve_flags ""
} else {
    set sve_flags "-march=armv8.2-a+sve"
}

global gcc_runtest_parallelize_limit_minor
if { [info exists gcc_runtest_parallelize_limit_minor] } {
    set old_limit_minor $gcc_runtest_parallelize_limit_minor
    set gcc_runtest_parallelize_limit_minor 1
}

torture-init
set-torture-options {
    "-std=c++98 -O0 -g"
    "-std=c++98 -O1 -g"
    "-std=c++11 -O2 -g"
    "-std=c++14 -O3 -g"
    "-std=c++17 -Og -g"
    "-std=c++2a -Os -g"
    "-std=gnu++98 -O2 -fno-schedule-insns -DCHECK_ASM --save-temps"
    "-std=gnu++11 -Ofast -g"
    "-std=gnu++17 -O3 -g"
    "-std=gnu++2a -O0 -g"
} {
    "-DTEST_FULL"
    "-DTEST_OVERLOADS"
}

# Main loop.
set gcc_subdir [string replace $subdir 0 2 gcc]
set files [glob -nocomplain $srcdir/$gcc_subdir/asm/*.c]
set save-dg-do-what-default ${dg-do-what-default}
if { [check_effective_target_aarch64_asm_sve_ok]
     && [check_effective_target_aarch64_variant_pcs] } {
    set dg-do-what-default assemble
} else {
    set dg-do-what-default compile
}
gcc-dg-runtest [lsort $files] "" "$sve_flags -fno-ipa-icf"
set dg-do-what-default ${save-dg-do-what-default}
torture-finish

if { [info exists gcc_runtest_parallelize_limit_minor] } {
    set gcc_runtest_parallelize_limit_minor $old_limit_minor
}

# All done.
dg-finish
# Assembly-based regression-test driver for the SVE ACLE.
# Copyright (C) 2009-2019 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING3. If not see
# <http://www.gnu.org/licenses/>. */
# GCC testsuite that uses the `dg.exp' driver.

# Exit immediately if this isn't an AArch64 target.
if { ![istarget aarch64*-*-*] } {
    return
}

# Load support procs.
load_lib gcc-dg.exp

# Initialize `dg'.
dg-init

# Force SVE if we're not testing it already.
if { [check_effective_target_aarch64_sve] } {
    set sve_flags ""
} else {
    set sve_flags "-march=armv8.2-a+sve"
}

global gcc_runtest_parallelize_limit_minor
if { [info exists gcc_runtest_parallelize_limit_minor] } {
    set old_limit_minor $gcc_runtest_parallelize_limit_minor
    set gcc_runtest_parallelize_limit_minor 1
}

torture-init
set-torture-options {
    "-std=c90 -O0 -g"
    "-std=c90 -O1 -g"
    "-std=c99 -O2 -g"
    "-std=c11 -O3 -g"
    "-std=gnu90 -O2 -fno-schedule-insns -DCHECK_ASM --save-temps"
    "-std=gnu99 -Ofast -g"
    "-std=gnu11 -Os -g"
} {
    "-DTEST_FULL"
    "-DTEST_OVERLOADS"
}

# Main loop.
set files [glob -nocomplain $srcdir/$subdir/asm/*.c]
set save-dg-do-what-default ${dg-do-what-default}
if { [check_effective_target_aarch64_asm_sve_ok]
     && [check_effective_target_aarch64_variant_pcs] } {
    set dg-do-what-default assemble
} else {
    set dg-do-what-default compile
}
gcc-dg-runtest [lsort $files] "" "$sve_flags -fno-ipa-icf"
set dg-do-what-default ${save-dg-do-what-default}
torture-finish

if { [info exists gcc_runtest_parallelize_limit_minor] } {
    set gcc_runtest_parallelize_limit_minor $old_limit_minor
}

# All done.
dg-finish
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_s16_m_tied1:
** sabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_s16_m_tied1, svint16_t,
z0 = svabd_s16_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_s16_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** sabd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_UNIFORM_Z (abd_s16_m_tied2, svint16_t,
z0 = svabd_s16_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_s16_m_untied:
** movprfx z0, z1
** sabd z0\.h, p0/m, z0\.h, z2\.h
** ret
*/
TEST_UNIFORM_Z (abd_s16_m_untied, svint16_t,
z0 = svabd_s16_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_w0_s16_m_tied1:
** mov (z[0-9]+\.h), w0
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s16_m_tied1, svint16_t, int16_t,
z0 = svabd_n_s16_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_w0_s16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s16_m_untied, svint16_t, int16_t,
z0 = svabd_n_s16_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_s16_m_tied1:
** mov (z[0-9]+\.h), #1
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s16_m_tied1, svint16_t,
z0 = svabd_n_s16_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_s16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s16_m_untied, svint16_t,
z0 = svabd_n_s16_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_s16_z_tied1:
** movprfx z0\.h, p0/z, z0\.h
** sabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_s16_z_tied1, svint16_t,
z0 = svabd_s16_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_s16_z_tied2:
** movprfx z0\.h, p0/z, z0\.h
** sabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_s16_z_tied2, svint16_t,
z0 = svabd_s16_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_s16_z_untied:
** (
** movprfx z0\.h, p0/z, z1\.h
** sabd z0\.h, p0/m, z0\.h, z2\.h
** |
** movprfx z0\.h, p0/z, z2\.h
** sabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (abd_s16_z_untied, svint16_t,
z0 = svabd_s16_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_w0_s16_z_tied1:
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z0\.h
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s16_z_tied1, svint16_t, int16_t,
z0 = svabd_n_s16_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_w0_s16_z_untied:
** mov (z[0-9]+\.h), w0
** (
** movprfx z0\.h, p0/z, z1\.h
** sabd z0\.h, p0/m, z0\.h, \1
** |
** movprfx z0\.h, p0/z, \1
** sabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s16_z_untied, svint16_t, int16_t,
z0 = svabd_n_s16_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_s16_z_tied1:
** mov (z[0-9]+\.h), #1
** movprfx z0\.h, p0/z, z0\.h
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s16_z_tied1, svint16_t,
z0 = svabd_n_s16_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_s16_z_untied:
** mov (z[0-9]+\.h), #1
** (
** movprfx z0\.h, p0/z, z1\.h
** sabd z0\.h, p0/m, z0\.h, \1
** |
** movprfx z0\.h, p0/z, \1
** sabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_s16_z_untied, svint16_t,
z0 = svabd_n_s16_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_s16_x_tied1:
** sabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_s16_x_tied1, svint16_t,
z0 = svabd_s16_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_s16_x_tied2:
** sabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_s16_x_tied2, svint16_t,
z0 = svabd_s16_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_s16_x_untied:
** (
** movprfx z0, z1
** sabd z0\.h, p0/m, z0\.h, z2\.h
** |
** movprfx z0, z2
** sabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (abd_s16_x_untied, svint16_t,
z0 = svabd_s16_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_w0_s16_x_tied1:
** mov (z[0-9]+\.h), w0
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s16_x_tied1, svint16_t, int16_t,
z0 = svabd_n_s16_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_w0_s16_x_untied:
** mov z0\.h, w0
** sabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s16_x_untied, svint16_t, int16_t,
z0 = svabd_n_s16_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_s16_x_tied1:
** mov (z[0-9]+\.h), #1
** sabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s16_x_tied1, svint16_t,
z0 = svabd_n_s16_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_s16_x_untied:
** mov z0\.h, #1
** sabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_1_s16_x_untied, svint16_t,
z0 = svabd_n_s16_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_s32_m_tied1:
** sabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_s32_m_tied1, svint32_t,
z0 = svabd_s32_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_s32_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** sabd z0\.s, p0/m, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (abd_s32_m_tied2, svint32_t,
z0 = svabd_s32_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_s32_m_untied:
** movprfx z0, z1
** sabd z0\.s, p0/m, z0\.s, z2\.s
** ret
*/
TEST_UNIFORM_Z (abd_s32_m_untied, svint32_t,
z0 = svabd_s32_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_w0_s32_m_tied1:
** mov (z[0-9]+\.s), w0
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s32_m_tied1, svint32_t, int32_t,
z0 = svabd_n_s32_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_w0_s32_m_untied:
** mov (z[0-9]+\.s), w0
** movprfx z0, z1
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s32_m_untied, svint32_t, int32_t,
z0 = svabd_n_s32_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_s32_m_tied1:
** mov (z[0-9]+\.s), #1
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s32_m_tied1, svint32_t,
z0 = svabd_n_s32_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_s32_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s32_m_untied, svint32_t,
z0 = svabd_n_s32_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_s32_z_tied1:
** movprfx z0\.s, p0/z, z0\.s
** sabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_s32_z_tied1, svint32_t,
z0 = svabd_s32_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_s32_z_tied2:
** movprfx z0\.s, p0/z, z0\.s
** sabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_s32_z_tied2, svint32_t,
z0 = svabd_s32_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_s32_z_untied:
** (
** movprfx z0\.s, p0/z, z1\.s
** sabd z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0\.s, p0/z, z2\.s
** sabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (abd_s32_z_untied, svint32_t,
z0 = svabd_s32_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_w0_s32_z_tied1:
** mov (z[0-9]+\.s), w0
** movprfx z0\.s, p0/z, z0\.s
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s32_z_tied1, svint32_t, int32_t,
z0 = svabd_n_s32_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_w0_s32_z_untied:
** mov (z[0-9]+\.s), w0
** (
** movprfx z0\.s, p0/z, z1\.s
** sabd z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** sabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s32_z_untied, svint32_t, int32_t,
z0 = svabd_n_s32_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_s32_z_tied1:
** mov (z[0-9]+\.s), #1
** movprfx z0\.s, p0/z, z0\.s
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s32_z_tied1, svint32_t,
z0 = svabd_n_s32_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_s32_z_untied:
** mov (z[0-9]+\.s), #1
** (
** movprfx z0\.s, p0/z, z1\.s
** sabd z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** sabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_s32_z_untied, svint32_t,
z0 = svabd_n_s32_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_s32_x_tied1:
** sabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_s32_x_tied1, svint32_t,
z0 = svabd_s32_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_s32_x_tied2:
** sabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_s32_x_tied2, svint32_t,
z0 = svabd_s32_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_s32_x_untied:
** (
** movprfx z0, z1
** sabd z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0, z2
** sabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (abd_s32_x_untied, svint32_t,
z0 = svabd_s32_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_w0_s32_x_tied1:
** mov (z[0-9]+\.s), w0
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s32_x_tied1, svint32_t, int32_t,
z0 = svabd_n_s32_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_w0_s32_x_untied:
** mov z0\.s, w0
** sabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s32_x_untied, svint32_t, int32_t,
z0 = svabd_n_s32_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_s32_x_tied1:
** mov (z[0-9]+\.s), #1
** sabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s32_x_tied1, svint32_t,
z0 = svabd_n_s32_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_s32_x_untied:
** mov z0\.s, #1
** sabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_1_s32_x_untied, svint32_t,
z0 = svabd_n_s32_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_s64_m_tied1:
** sabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_s64_m_tied1, svint64_t,
z0 = svabd_s64_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_s64_m_tied2:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0, z1
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_s64_m_tied2, svint64_t,
z0 = svabd_s64_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_s64_m_untied:
** movprfx z0, z1
** sabd z0\.d, p0/m, z0\.d, z2\.d
** ret
*/
TEST_UNIFORM_Z (abd_s64_m_untied, svint64_t,
z0 = svabd_s64_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_x0_s64_m_tied1:
** mov (z[0-9]+\.d), x0
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_s64_m_tied1, svint64_t, int64_t,
z0 = svabd_n_s64_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_x0_s64_m_untied:
** mov (z[0-9]+\.d), x0
** movprfx z0, z1
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_s64_m_untied, svint64_t, int64_t,
z0 = svabd_n_s64_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_s64_m_tied1:
** mov (z[0-9]+\.d), #1
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s64_m_tied1, svint64_t,
z0 = svabd_n_s64_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_s64_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s64_m_untied, svint64_t,
z0 = svabd_n_s64_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_s64_z_tied1:
** movprfx z0\.d, p0/z, z0\.d
** sabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_s64_z_tied1, svint64_t,
z0 = svabd_s64_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_s64_z_tied2:
** movprfx z0\.d, p0/z, z0\.d
** sabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_s64_z_tied2, svint64_t,
z0 = svabd_s64_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_s64_z_untied:
** (
** movprfx z0\.d, p0/z, z1\.d
** sabd z0\.d, p0/m, z0\.d, z2\.d
** |
** movprfx z0\.d, p0/z, z2\.d
** sabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (abd_s64_z_untied, svint64_t,
z0 = svabd_s64_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_x0_s64_z_tied1:
** mov (z[0-9]+\.d), x0
** movprfx z0\.d, p0/z, z0\.d
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_s64_z_tied1, svint64_t, int64_t,
z0 = svabd_n_s64_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_x0_s64_z_untied:
** mov (z[0-9]+\.d), x0
** (
** movprfx z0\.d, p0/z, z1\.d
** sabd z0\.d, p0/m, z0\.d, \1
** |
** movprfx z0\.d, p0/z, \1
** sabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_ZX (abd_x0_s64_z_untied, svint64_t, int64_t,
z0 = svabd_n_s64_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_s64_z_tied1:
** mov (z[0-9]+\.d), #1
** movprfx z0\.d, p0/z, z0\.d
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s64_z_tied1, svint64_t,
z0 = svabd_n_s64_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_s64_z_untied:
** mov (z[0-9]+\.d), #1
** (
** movprfx z0\.d, p0/z, z1\.d
** sabd z0\.d, p0/m, z0\.d, \1
** |
** movprfx z0\.d, p0/z, \1
** sabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_s64_z_untied, svint64_t,
z0 = svabd_n_s64_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_s64_x_tied1:
** sabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_s64_x_tied1, svint64_t,
z0 = svabd_s64_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_s64_x_tied2:
** sabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_s64_x_tied2, svint64_t,
z0 = svabd_s64_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_s64_x_untied:
** (
** movprfx z0, z1
** sabd z0\.d, p0/m, z0\.d, z2\.d
** |
** movprfx z0, z2
** sabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (abd_s64_x_untied, svint64_t,
z0 = svabd_s64_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_x0_s64_x_tied1:
** mov (z[0-9]+\.d), x0
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_s64_x_tied1, svint64_t, int64_t,
z0 = svabd_n_s64_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_x0_s64_x_untied:
** mov z0\.d, x0
** sabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_ZX (abd_x0_s64_x_untied, svint64_t, int64_t,
z0 = svabd_n_s64_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_s64_x_tied1:
** mov (z[0-9]+\.d), #1
** sabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s64_x_tied1, svint64_t,
z0 = svabd_n_s64_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_s64_x_untied:
** mov z0\.d, #1
** sabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_1_s64_x_untied, svint64_t,
z0 = svabd_n_s64_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_s8_m_tied1:
** sabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_s8_m_tied1, svint8_t,
z0 = svabd_s8_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_s8_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** sabd z0\.b, p0/m, z0\.b, \1\.b
** ret
*/
TEST_UNIFORM_Z (abd_s8_m_tied2, svint8_t,
z0 = svabd_s8_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_s8_m_untied:
** movprfx z0, z1
** sabd z0\.b, p0/m, z0\.b, z2\.b
** ret
*/
TEST_UNIFORM_Z (abd_s8_m_untied, svint8_t,
z0 = svabd_s8_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_w0_s8_m_tied1:
** mov (z[0-9]+\.b), w0
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s8_m_tied1, svint8_t, int8_t,
z0 = svabd_n_s8_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_w0_s8_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s8_m_untied, svint8_t, int8_t,
z0 = svabd_n_s8_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_s8_m_tied1:
** mov (z[0-9]+\.b), #1
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s8_m_tied1, svint8_t,
z0 = svabd_n_s8_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_s8_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s8_m_untied, svint8_t,
z0 = svabd_n_s8_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_s8_z_tied1:
** movprfx z0\.b, p0/z, z0\.b
** sabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_s8_z_tied1, svint8_t,
z0 = svabd_s8_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_s8_z_tied2:
** movprfx z0\.b, p0/z, z0\.b
** sabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_s8_z_tied2, svint8_t,
z0 = svabd_s8_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_s8_z_untied:
** (
** movprfx z0\.b, p0/z, z1\.b
** sabd z0\.b, p0/m, z0\.b, z2\.b
** |
** movprfx z0\.b, p0/z, z2\.b
** sabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (abd_s8_z_untied, svint8_t,
z0 = svabd_s8_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_w0_s8_z_tied1:
** mov (z[0-9]+\.b), w0
** movprfx z0\.b, p0/z, z0\.b
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s8_z_tied1, svint8_t, int8_t,
z0 = svabd_n_s8_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_w0_s8_z_untied:
** mov (z[0-9]+\.b), w0
** (
** movprfx z0\.b, p0/z, z1\.b
** sabd z0\.b, p0/m, z0\.b, \1
** |
** movprfx z0\.b, p0/z, \1
** sabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s8_z_untied, svint8_t, int8_t,
z0 = svabd_n_s8_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_s8_z_tied1:
** mov (z[0-9]+\.b), #1
** movprfx z0\.b, p0/z, z0\.b
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s8_z_tied1, svint8_t,
z0 = svabd_n_s8_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_s8_z_untied:
** mov (z[0-9]+\.b), #1
** (
** movprfx z0\.b, p0/z, z1\.b
** sabd z0\.b, p0/m, z0\.b, \1
** |
** movprfx z0\.b, p0/z, \1
** sabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_s8_z_untied, svint8_t,
z0 = svabd_n_s8_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_s8_x_tied1:
** sabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_s8_x_tied1, svint8_t,
z0 = svabd_s8_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_s8_x_tied2:
** sabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_s8_x_tied2, svint8_t,
z0 = svabd_s8_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_s8_x_untied:
** (
** movprfx z0, z1
** sabd z0\.b, p0/m, z0\.b, z2\.b
** |
** movprfx z0, z2
** sabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (abd_s8_x_untied, svint8_t,
z0 = svabd_s8_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_w0_s8_x_tied1:
** mov (z[0-9]+\.b), w0
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s8_x_tied1, svint8_t, int8_t,
z0 = svabd_n_s8_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_w0_s8_x_untied:
** mov z0\.b, w0
** sabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_ZX (abd_w0_s8_x_untied, svint8_t, int8_t,
z0 = svabd_n_s8_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_s8_x_tied1:
** mov (z[0-9]+\.b), #1
** sabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_s8_x_tied1, svint8_t,
z0 = svabd_n_s8_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_s8_x_untied:
** mov z0\.b, #1
** sabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_1_s8_x_untied, svint8_t,
z0 = svabd_n_s8_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_u16_m_tied1:
** uabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_u16_m_tied1, svuint16_t,
z0 = svabd_u16_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_u16_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** uabd z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_UNIFORM_Z (abd_u16_m_tied2, svuint16_t,
z0 = svabd_u16_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_u16_m_untied:
** movprfx z0, z1
** uabd z0\.h, p0/m, z0\.h, z2\.h
** ret
*/
TEST_UNIFORM_Z (abd_u16_m_untied, svuint16_t,
z0 = svabd_u16_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_w0_u16_m_tied1:
** mov (z[0-9]+\.h), w0
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u16_m_tied1, svuint16_t, uint16_t,
z0 = svabd_n_u16_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_w0_u16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u16_m_untied, svuint16_t, uint16_t,
z0 = svabd_n_u16_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_u16_m_tied1:
** mov (z[0-9]+\.h), #1
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u16_m_tied1, svuint16_t,
z0 = svabd_n_u16_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_u16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u16_m_untied, svuint16_t,
z0 = svabd_n_u16_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_u16_z_tied1:
** movprfx z0\.h, p0/z, z0\.h
** uabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_u16_z_tied1, svuint16_t,
z0 = svabd_u16_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_u16_z_tied2:
** movprfx z0\.h, p0/z, z0\.h
** uabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_u16_z_tied2, svuint16_t,
z0 = svabd_u16_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_u16_z_untied:
** (
** movprfx z0\.h, p0/z, z1\.h
** uabd z0\.h, p0/m, z0\.h, z2\.h
** |
** movprfx z0\.h, p0/z, z2\.h
** uabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (abd_u16_z_untied, svuint16_t,
z0 = svabd_u16_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_w0_u16_z_tied1:
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z0\.h
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u16_z_tied1, svuint16_t, uint16_t,
z0 = svabd_n_u16_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_w0_u16_z_untied:
** mov (z[0-9]+\.h), w0
** (
** movprfx z0\.h, p0/z, z1\.h
** uabd z0\.h, p0/m, z0\.h, \1
** |
** movprfx z0\.h, p0/z, \1
** uabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u16_z_untied, svuint16_t, uint16_t,
z0 = svabd_n_u16_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_u16_z_tied1:
** mov (z[0-9]+\.h), #1
** movprfx z0\.h, p0/z, z0\.h
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u16_z_tied1, svuint16_t,
z0 = svabd_n_u16_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_u16_z_untied:
** mov (z[0-9]+\.h), #1
** (
** movprfx z0\.h, p0/z, z1\.h
** uabd z0\.h, p0/m, z0\.h, \1
** |
** movprfx z0\.h, p0/z, \1
** uabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_u16_z_untied, svuint16_t,
z0 = svabd_n_u16_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_u16_x_tied1:
** uabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_u16_x_tied1, svuint16_t,
z0 = svabd_u16_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_u16_x_tied2:
** uabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_u16_x_tied2, svuint16_t,
z0 = svabd_u16_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_u16_x_untied:
** (
** movprfx z0, z1
** uabd z0\.h, p0/m, z0\.h, z2\.h
** |
** movprfx z0, z2
** uabd z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (abd_u16_x_untied, svuint16_t,
z0 = svabd_u16_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_w0_u16_x_tied1:
** mov (z[0-9]+\.h), w0
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u16_x_tied1, svuint16_t, uint16_t,
z0 = svabd_n_u16_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_w0_u16_x_untied:
** mov z0\.h, w0
** uabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u16_x_untied, svuint16_t, uint16_t,
z0 = svabd_n_u16_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_u16_x_tied1:
** mov (z[0-9]+\.h), #1
** uabd z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u16_x_tied1, svuint16_t,
z0 = svabd_n_u16_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_u16_x_untied:
** mov z0\.h, #1
** uabd z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (abd_1_u16_x_untied, svuint16_t,
z0 = svabd_n_u16_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_u32_m_tied1:
** uabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_u32_m_tied1, svuint32_t,
z0 = svabd_u32_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_u32_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** uabd z0\.s, p0/m, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (abd_u32_m_tied2, svuint32_t,
z0 = svabd_u32_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_u32_m_untied:
** movprfx z0, z1
** uabd z0\.s, p0/m, z0\.s, z2\.s
** ret
*/
TEST_UNIFORM_Z (abd_u32_m_untied, svuint32_t,
z0 = svabd_u32_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_w0_u32_m_tied1:
** mov (z[0-9]+\.s), w0
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u32_m_tied1, svuint32_t, uint32_t,
z0 = svabd_n_u32_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_w0_u32_m_untied:
** mov (z[0-9]+\.s), w0
** movprfx z0, z1
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u32_m_untied, svuint32_t, uint32_t,
z0 = svabd_n_u32_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_u32_m_tied1:
** mov (z[0-9]+\.s), #1
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u32_m_tied1, svuint32_t,
z0 = svabd_n_u32_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_u32_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u32_m_untied, svuint32_t,
z0 = svabd_n_u32_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_u32_z_tied1:
** movprfx z0\.s, p0/z, z0\.s
** uabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_u32_z_tied1, svuint32_t,
z0 = svabd_u32_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_u32_z_tied2:
** movprfx z0\.s, p0/z, z0\.s
** uabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_u32_z_tied2, svuint32_t,
z0 = svabd_u32_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_u32_z_untied:
** (
** movprfx z0\.s, p0/z, z1\.s
** uabd z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0\.s, p0/z, z2\.s
** uabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (abd_u32_z_untied, svuint32_t,
z0 = svabd_u32_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_w0_u32_z_tied1:
** mov (z[0-9]+\.s), w0
** movprfx z0\.s, p0/z, z0\.s
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u32_z_tied1, svuint32_t, uint32_t,
z0 = svabd_n_u32_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_w0_u32_z_untied:
** mov (z[0-9]+\.s), w0
** (
** movprfx z0\.s, p0/z, z1\.s
** uabd z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** uabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u32_z_untied, svuint32_t, uint32_t,
z0 = svabd_n_u32_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_u32_z_tied1:
** mov (z[0-9]+\.s), #1
** movprfx z0\.s, p0/z, z0\.s
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u32_z_tied1, svuint32_t,
z0 = svabd_n_u32_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_u32_z_untied:
** mov (z[0-9]+\.s), #1
** (
** movprfx z0\.s, p0/z, z1\.s
** uabd z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** uabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_u32_z_untied, svuint32_t,
z0 = svabd_n_u32_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_u32_x_tied1:
** uabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_u32_x_tied1, svuint32_t,
z0 = svabd_u32_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_u32_x_tied2:
** uabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_u32_x_tied2, svuint32_t,
z0 = svabd_u32_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_u32_x_untied:
** (
** movprfx z0, z1
** uabd z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0, z2
** uabd z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (abd_u32_x_untied, svuint32_t,
z0 = svabd_u32_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_w0_u32_x_tied1:
** mov (z[0-9]+\.s), w0
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u32_x_tied1, svuint32_t, uint32_t,
z0 = svabd_n_u32_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_w0_u32_x_untied:
** mov z0\.s, w0
** uabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u32_x_untied, svuint32_t, uint32_t,
z0 = svabd_n_u32_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_u32_x_tied1:
** mov (z[0-9]+\.s), #1
** uabd z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u32_x_tied1, svuint32_t,
z0 = svabd_n_u32_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_u32_x_untied:
** mov z0\.s, #1
** uabd z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (abd_1_u32_x_untied, svuint32_t,
z0 = svabd_n_u32_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_u64_m_tied1:
** uabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_u64_m_tied1, svuint64_t,
z0 = svabd_u64_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_u64_m_tied2:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0, z1
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_u64_m_tied2, svuint64_t,
z0 = svabd_u64_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_u64_m_untied:
** movprfx z0, z1
** uabd z0\.d, p0/m, z0\.d, z2\.d
** ret
*/
TEST_UNIFORM_Z (abd_u64_m_untied, svuint64_t,
z0 = svabd_u64_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_x0_u64_m_tied1:
** mov (z[0-9]+\.d), x0
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_u64_m_tied1, svuint64_t, uint64_t,
z0 = svabd_n_u64_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_x0_u64_m_untied:
** mov (z[0-9]+\.d), x0
** movprfx z0, z1
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_u64_m_untied, svuint64_t, uint64_t,
z0 = svabd_n_u64_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_u64_m_tied1:
** mov (z[0-9]+\.d), #1
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u64_m_tied1, svuint64_t,
z0 = svabd_n_u64_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_u64_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u64_m_untied, svuint64_t,
z0 = svabd_n_u64_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_u64_z_tied1:
** movprfx z0\.d, p0/z, z0\.d
** uabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_u64_z_tied1, svuint64_t,
z0 = svabd_u64_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_u64_z_tied2:
** movprfx z0\.d, p0/z, z0\.d
** uabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_u64_z_tied2, svuint64_t,
z0 = svabd_u64_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_u64_z_untied:
** (
** movprfx z0\.d, p0/z, z1\.d
** uabd z0\.d, p0/m, z0\.d, z2\.d
** |
** movprfx z0\.d, p0/z, z2\.d
** uabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (abd_u64_z_untied, svuint64_t,
z0 = svabd_u64_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_x0_u64_z_tied1:
** mov (z[0-9]+\.d), x0
** movprfx z0\.d, p0/z, z0\.d
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_u64_z_tied1, svuint64_t, uint64_t,
z0 = svabd_n_u64_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_x0_u64_z_untied:
** mov (z[0-9]+\.d), x0
** (
** movprfx z0\.d, p0/z, z1\.d
** uabd z0\.d, p0/m, z0\.d, \1
** |
** movprfx z0\.d, p0/z, \1
** uabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_ZX (abd_x0_u64_z_untied, svuint64_t, uint64_t,
z0 = svabd_n_u64_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_u64_z_tied1:
** mov (z[0-9]+\.d), #1
** movprfx z0\.d, p0/z, z0\.d
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u64_z_tied1, svuint64_t,
z0 = svabd_n_u64_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_u64_z_untied:
** mov (z[0-9]+\.d), #1
** (
** movprfx z0\.d, p0/z, z1\.d
** uabd z0\.d, p0/m, z0\.d, \1
** |
** movprfx z0\.d, p0/z, \1
** uabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_u64_z_untied, svuint64_t,
z0 = svabd_n_u64_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_u64_x_tied1:
** uabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_u64_x_tied1, svuint64_t,
z0 = svabd_u64_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_u64_x_tied2:
** uabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_u64_x_tied2, svuint64_t,
z0 = svabd_u64_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_u64_x_untied:
** (
** movprfx z0, z1
** uabd z0\.d, p0/m, z0\.d, z2\.d
** |
** movprfx z0, z2
** uabd z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (abd_u64_x_untied, svuint64_t,
z0 = svabd_u64_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_x0_u64_x_tied1:
** mov (z[0-9]+\.d), x0
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (abd_x0_u64_x_tied1, svuint64_t, uint64_t,
z0 = svabd_n_u64_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_x0_u64_x_untied:
** mov z0\.d, x0
** uabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_ZX (abd_x0_u64_x_untied, svuint64_t, uint64_t,
z0 = svabd_n_u64_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_u64_x_tied1:
** mov (z[0-9]+\.d), #1
** uabd z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u64_x_tied1, svuint64_t,
z0 = svabd_n_u64_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_u64_x_untied:
** mov z0\.d, #1
** uabd z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (abd_1_u64_x_untied, svuint64_t,
z0 = svabd_n_u64_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abd_u8_m_tied1:
** uabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_u8_m_tied1, svuint8_t,
z0 = svabd_u8_m (p0, z0, z1),
z0 = svabd_m (p0, z0, z1))
/*
** abd_u8_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** uabd z0\.b, p0/m, z0\.b, \1\.b
** ret
*/
TEST_UNIFORM_Z (abd_u8_m_tied2, svuint8_t,
z0 = svabd_u8_m (p0, z1, z0),
z0 = svabd_m (p0, z1, z0))
/*
** abd_u8_m_untied:
** movprfx z0, z1
** uabd z0\.b, p0/m, z0\.b, z2\.b
** ret
*/
TEST_UNIFORM_Z (abd_u8_m_untied, svuint8_t,
z0 = svabd_u8_m (p0, z1, z2),
z0 = svabd_m (p0, z1, z2))
/*
** abd_w0_u8_m_tied1:
** mov (z[0-9]+\.b), w0
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u8_m_tied1, svuint8_t, uint8_t,
z0 = svabd_n_u8_m (p0, z0, x0),
z0 = svabd_m (p0, z0, x0))
/*
** abd_w0_u8_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u8_m_untied, svuint8_t, uint8_t,
z0 = svabd_n_u8_m (p0, z1, x0),
z0 = svabd_m (p0, z1, x0))
/*
** abd_1_u8_m_tied1:
** mov (z[0-9]+\.b), #1
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u8_m_tied1, svuint8_t,
z0 = svabd_n_u8_m (p0, z0, 1),
z0 = svabd_m (p0, z0, 1))
/*
** abd_1_u8_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u8_m_untied, svuint8_t,
z0 = svabd_n_u8_m (p0, z1, 1),
z0 = svabd_m (p0, z1, 1))
/*
** abd_u8_z_tied1:
** movprfx z0\.b, p0/z, z0\.b
** uabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_u8_z_tied1, svuint8_t,
z0 = svabd_u8_z (p0, z0, z1),
z0 = svabd_z (p0, z0, z1))
/*
** abd_u8_z_tied2:
** movprfx z0\.b, p0/z, z0\.b
** uabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_u8_z_tied2, svuint8_t,
z0 = svabd_u8_z (p0, z1, z0),
z0 = svabd_z (p0, z1, z0))
/*
** abd_u8_z_untied:
** (
** movprfx z0\.b, p0/z, z1\.b
** uabd z0\.b, p0/m, z0\.b, z2\.b
** |
** movprfx z0\.b, p0/z, z2\.b
** uabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (abd_u8_z_untied, svuint8_t,
z0 = svabd_u8_z (p0, z1, z2),
z0 = svabd_z (p0, z1, z2))
/*
** abd_w0_u8_z_tied1:
** mov (z[0-9]+\.b), w0
** movprfx z0\.b, p0/z, z0\.b
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u8_z_tied1, svuint8_t, uint8_t,
z0 = svabd_n_u8_z (p0, z0, x0),
z0 = svabd_z (p0, z0, x0))
/*
** abd_w0_u8_z_untied:
** mov (z[0-9]+\.b), w0
** (
** movprfx z0\.b, p0/z, z1\.b
** uabd z0\.b, p0/m, z0\.b, \1
** |
** movprfx z0\.b, p0/z, \1
** uabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u8_z_untied, svuint8_t, uint8_t,
z0 = svabd_n_u8_z (p0, z1, x0),
z0 = svabd_z (p0, z1, x0))
/*
** abd_1_u8_z_tied1:
** mov (z[0-9]+\.b), #1
** movprfx z0\.b, p0/z, z0\.b
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u8_z_tied1, svuint8_t,
z0 = svabd_n_u8_z (p0, z0, 1),
z0 = svabd_z (p0, z0, 1))
/*
** abd_1_u8_z_untied:
** mov (z[0-9]+\.b), #1
** (
** movprfx z0\.b, p0/z, z1\.b
** uabd z0\.b, p0/m, z0\.b, \1
** |
** movprfx z0\.b, p0/z, \1
** uabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (abd_1_u8_z_untied, svuint8_t,
z0 = svabd_n_u8_z (p0, z1, 1),
z0 = svabd_z (p0, z1, 1))
/*
** abd_u8_x_tied1:
** uabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_u8_x_tied1, svuint8_t,
z0 = svabd_u8_x (p0, z0, z1),
z0 = svabd_x (p0, z0, z1))
/*
** abd_u8_x_tied2:
** uabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_u8_x_tied2, svuint8_t,
z0 = svabd_u8_x (p0, z1, z0),
z0 = svabd_x (p0, z1, z0))
/*
** abd_u8_x_untied:
** (
** movprfx z0, z1
** uabd z0\.b, p0/m, z0\.b, z2\.b
** |
** movprfx z0, z2
** uabd z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (abd_u8_x_untied, svuint8_t,
z0 = svabd_u8_x (p0, z1, z2),
z0 = svabd_x (p0, z1, z2))
/*
** abd_w0_u8_x_tied1:
** mov (z[0-9]+\.b), w0
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u8_x_tied1, svuint8_t, uint8_t,
z0 = svabd_n_u8_x (p0, z0, x0),
z0 = svabd_x (p0, z0, x0))
/*
** abd_w0_u8_x_untied:
** mov z0\.b, w0
** uabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_ZX (abd_w0_u8_x_untied, svuint8_t, uint8_t,
z0 = svabd_n_u8_x (p0, z1, x0),
z0 = svabd_x (p0, z1, x0))
/*
** abd_1_u8_x_tied1:
** mov (z[0-9]+\.b), #1
** uabd z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (abd_1_u8_x_tied1, svuint8_t,
z0 = svabd_n_u8_x (p0, z0, 1),
z0 = svabd_x (p0, z0, 1))
/*
** abd_1_u8_x_untied:
** mov z0\.b, #1
** uabd z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (abd_1_u8_x_untied, svuint8_t,
z0 = svabd_n_u8_x (p0, z1, 1),
z0 = svabd_x (p0, z1, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abs_f16_m_tied12:
** fabs z0\.h, p0/m, z0\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_m_tied12, svfloat16_t,
z0 = svabs_f16_m (z0, p0, z0),
z0 = svabs_m (z0, p0, z0))
/*
** abs_f16_m_tied1:
** fabs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_m_tied1, svfloat16_t,
z0 = svabs_f16_m (z0, p0, z1),
z0 = svabs_m (z0, p0, z1))
/*
** abs_f16_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** fabs z0\.h, p0/m, \1\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_m_tied2, svfloat16_t,
z0 = svabs_f16_m (z1, p0, z0),
z0 = svabs_m (z1, p0, z0))
/*
** abs_f16_m_untied:
** movprfx z0, z2
** fabs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_m_untied, svfloat16_t,
z0 = svabs_f16_m (z2, p0, z1),
z0 = svabs_m (z2, p0, z1))
/*
** abs_f16_z_tied1:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0\.h, p0/z, \1\.h
** fabs z0\.h, p0/m, \1\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_z_tied1, svfloat16_t,
z0 = svabs_f16_z (p0, z0),
z0 = svabs_z (p0, z0))
/*
** abs_f16_z_untied:
** movprfx z0\.h, p0/z, z1\.h
** fabs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_z_untied, svfloat16_t,
z0 = svabs_f16_z (p0, z1),
z0 = svabs_z (p0, z1))
/*
** abs_f16_x_tied1:
** fabs z0\.h, p0/m, z0\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_x_tied1, svfloat16_t,
z0 = svabs_f16_x (p0, z0),
z0 = svabs_x (p0, z0))
/*
** abs_f16_x_untied:
** fabs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_f16_x_untied, svfloat16_t,
z0 = svabs_f16_x (p0, z1),
z0 = svabs_x (p0, z1))
/*
** ptrue_abs_f16_x_tied1:
** ...
** ptrue p[0-9]+\.b[^\n]*
** ...
** ret
*/
TEST_UNIFORM_Z (ptrue_abs_f16_x_tied1, svfloat16_t,
z0 = svabs_f16_x (svptrue_b16 (), z0),
z0 = svabs_x (svptrue_b16 (), z0))
/*
** ptrue_abs_f16_x_untied:
** ...
** ptrue p[0-9]+\.b[^\n]*
** ...
** ret
*/
TEST_UNIFORM_Z (ptrue_abs_f16_x_untied, svfloat16_t,
z0 = svabs_f16_x (svptrue_b16 (), z1),
z0 = svabs_x (svptrue_b16 (), z1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abs_f32_m_tied12:
** fabs z0\.s, p0/m, z0\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_m_tied12, svfloat32_t,
z0 = svabs_f32_m (z0, p0, z0),
z0 = svabs_m (z0, p0, z0))
/*
** abs_f32_m_tied1:
** fabs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_m_tied1, svfloat32_t,
z0 = svabs_f32_m (z0, p0, z1),
z0 = svabs_m (z0, p0, z1))
/*
** abs_f32_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** fabs z0\.s, p0/m, \1\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_m_tied2, svfloat32_t,
z0 = svabs_f32_m (z1, p0, z0),
z0 = svabs_m (z1, p0, z0))
/*
** abs_f32_m_untied:
** movprfx z0, z2
** fabs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_m_untied, svfloat32_t,
z0 = svabs_f32_m (z2, p0, z1),
z0 = svabs_m (z2, p0, z1))
/*
** abs_f32_z_tied1:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0\.s, p0/z, \1\.s
** fabs z0\.s, p0/m, \1\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_z_tied1, svfloat32_t,
z0 = svabs_f32_z (p0, z0),
z0 = svabs_z (p0, z0))
/*
** abs_f32_z_untied:
** movprfx z0\.s, p0/z, z1\.s
** fabs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_z_untied, svfloat32_t,
z0 = svabs_f32_z (p0, z1),
z0 = svabs_z (p0, z1))
/*
** abs_f32_x_tied1:
** fabs z0\.s, p0/m, z0\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_x_tied1, svfloat32_t,
z0 = svabs_f32_x (p0, z0),
z0 = svabs_x (p0, z0))
/*
** abs_f32_x_untied:
** fabs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_f32_x_untied, svfloat32_t,
z0 = svabs_f32_x (p0, z1),
z0 = svabs_x (p0, z1))
/*
** ptrue_abs_f32_x_tied1:
** ...
** ptrue p[0-9]+\.b[^\n]*
** ...
** ret
*/
TEST_UNIFORM_Z (ptrue_abs_f32_x_tied1, svfloat32_t,
z0 = svabs_f32_x (svptrue_b32 (), z0),
z0 = svabs_x (svptrue_b32 (), z0))
/*
** ptrue_abs_f32_x_untied:
** ...
** ptrue p[0-9]+\.b[^\n]*
** ...
** ret
*/
TEST_UNIFORM_Z (ptrue_abs_f32_x_untied, svfloat32_t,
z0 = svabs_f32_x (svptrue_b32 (), z1),
z0 = svabs_x (svptrue_b32 (), z1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abs_f64_m_tied12:
** fabs z0\.d, p0/m, z0\.d
** ret
*/
TEST_UNIFORM_Z (abs_f64_m_tied12, svfloat64_t,
z0 = svabs_f64_m (z0, p0, z0),
z0 = svabs_m (z0, p0, z0))
/*
** abs_f64_m_tied1:
** fabs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_f64_m_tied1, svfloat64_t,
z0 = svabs_f64_m (z0, p0, z1),
z0 = svabs_m (z0, p0, z1))
/*
** abs_f64_m_tied2:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0, z1
** fabs z0\.d, p0/m, \1
** ret
*/
TEST_UNIFORM_Z (abs_f64_m_tied2, svfloat64_t,
z0 = svabs_f64_m (z1, p0, z0),
z0 = svabs_m (z1, p0, z0))
/*
** abs_f64_m_untied:
** movprfx z0, z2
** fabs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_f64_m_untied, svfloat64_t,
z0 = svabs_f64_m (z2, p0, z1),
z0 = svabs_m (z2, p0, z1))
/*
** abs_f64_z_tied1:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0\.d, p0/z, \1
** fabs z0\.d, p0/m, \1
** ret
*/
TEST_UNIFORM_Z (abs_f64_z_tied1, svfloat64_t,
z0 = svabs_f64_z (p0, z0),
z0 = svabs_z (p0, z0))
/*
** abs_f64_z_untied:
** movprfx z0\.d, p0/z, z1\.d
** fabs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_f64_z_untied, svfloat64_t,
z0 = svabs_f64_z (p0, z1),
z0 = svabs_z (p0, z1))
/*
** abs_f64_x_tied1:
** fabs z0\.d, p0/m, z0\.d
** ret
*/
TEST_UNIFORM_Z (abs_f64_x_tied1, svfloat64_t,
z0 = svabs_f64_x (p0, z0),
z0 = svabs_x (p0, z0))
/*
** abs_f64_x_untied:
** fabs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_f64_x_untied, svfloat64_t,
z0 = svabs_f64_x (p0, z1),
z0 = svabs_x (p0, z1))
/*
** ptrue_abs_f64_x_tied1:
** ...
** ptrue p[0-9]+\.b[^\n]*
** ...
** ret
*/
TEST_UNIFORM_Z (ptrue_abs_f64_x_tied1, svfloat64_t,
z0 = svabs_f64_x (svptrue_b64 (), z0),
z0 = svabs_x (svptrue_b64 (), z0))
/*
** ptrue_abs_f64_x_untied:
** ...
** ptrue p[0-9]+\.b[^\n]*
** ...
** ret
*/
TEST_UNIFORM_Z (ptrue_abs_f64_x_untied, svfloat64_t,
z0 = svabs_f64_x (svptrue_b64 (), z1),
z0 = svabs_x (svptrue_b64 (), z1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abs_s16_m_tied12:
** abs z0\.h, p0/m, z0\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_m_tied12, svint16_t,
z0 = svabs_s16_m (z0, p0, z0),
z0 = svabs_m (z0, p0, z0))
/*
** abs_s16_m_tied1:
** abs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_m_tied1, svint16_t,
z0 = svabs_s16_m (z0, p0, z1),
z0 = svabs_m (z0, p0, z1))
/*
** abs_s16_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** abs z0\.h, p0/m, \1\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_m_tied2, svint16_t,
z0 = svabs_s16_m (z1, p0, z0),
z0 = svabs_m (z1, p0, z0))
/*
** abs_s16_m_untied:
** movprfx z0, z2
** abs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_m_untied, svint16_t,
z0 = svabs_s16_m (z2, p0, z1),
z0 = svabs_m (z2, p0, z1))
/*
** abs_s16_z_tied1:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0\.h, p0/z, \1\.h
** abs z0\.h, p0/m, \1\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_z_tied1, svint16_t,
z0 = svabs_s16_z (p0, z0),
z0 = svabs_z (p0, z0))
/*
** abs_s16_z_untied:
** movprfx z0\.h, p0/z, z1\.h
** abs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_z_untied, svint16_t,
z0 = svabs_s16_z (p0, z1),
z0 = svabs_z (p0, z1))
/*
** abs_s16_x_tied1:
** abs z0\.h, p0/m, z0\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_x_tied1, svint16_t,
z0 = svabs_s16_x (p0, z0),
z0 = svabs_x (p0, z0))
/*
** abs_s16_x_untied:
** abs z0\.h, p0/m, z1\.h
** ret
*/
TEST_UNIFORM_Z (abs_s16_x_untied, svint16_t,
z0 = svabs_s16_x (p0, z1),
z0 = svabs_x (p0, z1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abs_s32_m_tied12:
** abs z0\.s, p0/m, z0\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_m_tied12, svint32_t,
z0 = svabs_s32_m (z0, p0, z0),
z0 = svabs_m (z0, p0, z0))
/*
** abs_s32_m_tied1:
** abs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_m_tied1, svint32_t,
z0 = svabs_s32_m (z0, p0, z1),
z0 = svabs_m (z0, p0, z1))
/*
** abs_s32_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** abs z0\.s, p0/m, \1\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_m_tied2, svint32_t,
z0 = svabs_s32_m (z1, p0, z0),
z0 = svabs_m (z1, p0, z0))
/*
** abs_s32_m_untied:
** movprfx z0, z2
** abs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_m_untied, svint32_t,
z0 = svabs_s32_m (z2, p0, z1),
z0 = svabs_m (z2, p0, z1))
/*
** abs_s32_z_tied1:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0\.s, p0/z, \1\.s
** abs z0\.s, p0/m, \1\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_z_tied1, svint32_t,
z0 = svabs_s32_z (p0, z0),
z0 = svabs_z (p0, z0))
/*
** abs_s32_z_untied:
** movprfx z0\.s, p0/z, z1\.s
** abs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_z_untied, svint32_t,
z0 = svabs_s32_z (p0, z1),
z0 = svabs_z (p0, z1))
/*
** abs_s32_x_tied1:
** abs z0\.s, p0/m, z0\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_x_tied1, svint32_t,
z0 = svabs_s32_x (p0, z0),
z0 = svabs_x (p0, z0))
/*
** abs_s32_x_untied:
** abs z0\.s, p0/m, z1\.s
** ret
*/
TEST_UNIFORM_Z (abs_s32_x_untied, svint32_t,
z0 = svabs_s32_x (p0, z1),
z0 = svabs_x (p0, z1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abs_s64_m_tied12:
** abs z0\.d, p0/m, z0\.d
** ret
*/
TEST_UNIFORM_Z (abs_s64_m_tied12, svint64_t,
z0 = svabs_s64_m (z0, p0, z0),
z0 = svabs_m (z0, p0, z0))
/*
** abs_s64_m_tied1:
** abs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_s64_m_tied1, svint64_t,
z0 = svabs_s64_m (z0, p0, z1),
z0 = svabs_m (z0, p0, z1))
/*
** abs_s64_m_tied2:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0, z1
** abs z0\.d, p0/m, \1
** ret
*/
TEST_UNIFORM_Z (abs_s64_m_tied2, svint64_t,
z0 = svabs_s64_m (z1, p0, z0),
z0 = svabs_m (z1, p0, z0))
/*
** abs_s64_m_untied:
** movprfx z0, z2
** abs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_s64_m_untied, svint64_t,
z0 = svabs_s64_m (z2, p0, z1),
z0 = svabs_m (z2, p0, z1))
/*
** abs_s64_z_tied1:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0\.d, p0/z, \1
** abs z0\.d, p0/m, \1
** ret
*/
TEST_UNIFORM_Z (abs_s64_z_tied1, svint64_t,
z0 = svabs_s64_z (p0, z0),
z0 = svabs_z (p0, z0))
/*
** abs_s64_z_untied:
** movprfx z0\.d, p0/z, z1\.d
** abs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_s64_z_untied, svint64_t,
z0 = svabs_s64_z (p0, z1),
z0 = svabs_z (p0, z1))
/*
** abs_s64_x_tied1:
** abs z0\.d, p0/m, z0\.d
** ret
*/
TEST_UNIFORM_Z (abs_s64_x_tied1, svint64_t,
z0 = svabs_s64_x (p0, z0),
z0 = svabs_x (p0, z0))
/*
** abs_s64_x_untied:
** abs z0\.d, p0/m, z1\.d
** ret
*/
TEST_UNIFORM_Z (abs_s64_x_untied, svint64_t,
z0 = svabs_s64_x (p0, z1),
z0 = svabs_x (p0, z1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** abs_s8_m_tied12:
** abs z0\.b, p0/m, z0\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_m_tied12, svint8_t,
z0 = svabs_s8_m (z0, p0, z0),
z0 = svabs_m (z0, p0, z0))
/*
** abs_s8_m_tied1:
** abs z0\.b, p0/m, z1\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_m_tied1, svint8_t,
z0 = svabs_s8_m (z0, p0, z1),
z0 = svabs_m (z0, p0, z1))
/*
** abs_s8_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** abs z0\.b, p0/m, \1\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_m_tied2, svint8_t,
z0 = svabs_s8_m (z1, p0, z0),
z0 = svabs_m (z1, p0, z0))
/*
** abs_s8_m_untied:
** movprfx z0, z2
** abs z0\.b, p0/m, z1\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_m_untied, svint8_t,
z0 = svabs_s8_m (z2, p0, z1),
z0 = svabs_m (z2, p0, z1))
/*
** abs_s8_z_tied1:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0\.b, p0/z, \1\.b
** abs z0\.b, p0/m, \1\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_z_tied1, svint8_t,
z0 = svabs_s8_z (p0, z0),
z0 = svabs_z (p0, z0))
/*
** abs_s8_z_untied:
** movprfx z0\.b, p0/z, z1\.b
** abs z0\.b, p0/m, z1\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_z_untied, svint8_t,
z0 = svabs_s8_z (p0, z1),
z0 = svabs_z (p0, z1))
/*
** abs_s8_x_tied1:
** abs z0\.b, p0/m, z0\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_x_tied1, svint8_t,
z0 = svabs_s8_x (p0, z0),
z0 = svabs_x (p0, z0))
/*
** abs_s8_x_untied:
** abs z0\.b, p0/m, z1\.b
** ret
*/
TEST_UNIFORM_Z (abs_s8_x_untied, svint8_t,
z0 = svabs_s8_x (p0, z1),
z0 = svabs_x (p0, z1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acge_f16_tied:
** (
** facge p0\.h, p0/z, z0\.h, z1\.h
** |
** facle p0\.h, p0/z, z1\.h, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acge_f16_tied, svfloat16_t,
p0 = svacge_f16 (p0, z0, z1),
p0 = svacge (p0, z0, z1))
/*
** acge_f16_untied:
** (
** facge p0\.h, p1/z, z0\.h, z1\.h
** |
** facle p0\.h, p1/z, z1\.h, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acge_f16_untied, svfloat16_t,
p0 = svacge_f16 (p1, z0, z1),
p0 = svacge (p1, z0, z1))
/*
** acge_h4_f16:
** mov (z[0-9]+\.h), h4
** (
** facge p0\.h, p1/z, z0\.h, \1
** |
** facle p0\.h, p1/z, \1, z0\.h
** )
** ret
*/
TEST_COMPARE_ZD (acge_h4_f16, svfloat16_t, float16_t,
p0 = svacge_n_f16 (p1, z0, d4),
p0 = svacge (p1, z0, d4))
/*
** acge_0_f16:
** mov (z[0-9]+\.h), #0
** (
** facge p0\.h, p1/z, z0\.h, \1
** |
** facle p0\.h, p1/z, \1, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acge_0_f16, svfloat16_t,
p0 = svacge_n_f16 (p1, z0, 0),
p0 = svacge (p1, z0, 0))
/*
** acge_1_f16:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** (
** facge p0\.h, p1/z, z0\.h, \1
** |
** facle p0\.h, p1/z, \1, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acge_1_f16, svfloat16_t,
p0 = svacge_n_f16 (p1, z0, 1),
p0 = svacge (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acge_f32_tied:
** (
** facge p0\.s, p0/z, z0\.s, z1\.s
** |
** facle p0\.s, p0/z, z1\.s, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acge_f32_tied, svfloat32_t,
p0 = svacge_f32 (p0, z0, z1),
p0 = svacge (p0, z0, z1))
/*
** acge_f32_untied:
** (
** facge p0\.s, p1/z, z0\.s, z1\.s
** |
** facle p0\.s, p1/z, z1\.s, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acge_f32_untied, svfloat32_t,
p0 = svacge_f32 (p1, z0, z1),
p0 = svacge (p1, z0, z1))
/*
** acge_s4_f32:
** mov (z[0-9]+\.s), s4
** (
** facge p0\.s, p1/z, z0\.s, \1
** |
** facle p0\.s, p1/z, \1, z0\.s
** )
** ret
*/
TEST_COMPARE_ZD (acge_s4_f32, svfloat32_t, float32_t,
p0 = svacge_n_f32 (p1, z0, d4),
p0 = svacge (p1, z0, d4))
/*
** acge_0_f32:
** mov (z[0-9]+\.s), #0
** (
** facge p0\.s, p1/z, z0\.s, \1
** |
** facle p0\.s, p1/z, \1, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acge_0_f32, svfloat32_t,
p0 = svacge_n_f32 (p1, z0, 0),
p0 = svacge (p1, z0, 0))
/*
** acge_1_f32:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** (
** facge p0\.s, p1/z, z0\.s, \1
** |
** facle p0\.s, p1/z, \1, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acge_1_f32, svfloat32_t,
p0 = svacge_n_f32 (p1, z0, 1),
p0 = svacge (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acge_f64_tied:
** (
** facge p0\.d, p0/z, z0\.d, z1\.d
** |
** facle p0\.d, p0/z, z1\.d, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acge_f64_tied, svfloat64_t,
p0 = svacge_f64 (p0, z0, z1),
p0 = svacge (p0, z0, z1))
/*
** acge_f64_untied:
** (
** facge p0\.d, p1/z, z0\.d, z1\.d
** |
** facle p0\.d, p1/z, z1\.d, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acge_f64_untied, svfloat64_t,
p0 = svacge_f64 (p1, z0, z1),
p0 = svacge (p1, z0, z1))
/*
** acge_d4_f64:
** mov (z[0-9]+\.d), d4
** (
** facge p0\.d, p1/z, z0\.d, \1
** |
** facle p0\.d, p1/z, \1, z0\.d
** )
** ret
*/
TEST_COMPARE_ZD (acge_d4_f64, svfloat64_t, float64_t,
p0 = svacge_n_f64 (p1, z0, d4),
p0 = svacge (p1, z0, d4))
/*
** acge_0_f64:
** mov (z[0-9]+\.d), #0
** (
** facge p0\.d, p1/z, z0\.d, \1
** |
** facle p0\.d, p1/z, \1, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acge_0_f64, svfloat64_t,
p0 = svacge_n_f64 (p1, z0, 0),
p0 = svacge (p1, z0, 0))
/*
** acge_1_f64:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** (
** facge p0\.d, p1/z, z0\.d, \1
** |
** facle p0\.d, p1/z, \1, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acge_1_f64, svfloat64_t,
p0 = svacge_n_f64 (p1, z0, 1),
p0 = svacge (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acgt_f16_tied:
** (
** facgt p0\.h, p0/z, z0\.h, z1\.h
** |
** faclt p0\.h, p0/z, z1\.h, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acgt_f16_tied, svfloat16_t,
p0 = svacgt_f16 (p0, z0, z1),
p0 = svacgt (p0, z0, z1))
/*
** acgt_f16_untied:
** (
** facgt p0\.h, p1/z, z0\.h, z1\.h
** |
** faclt p0\.h, p1/z, z1\.h, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acgt_f16_untied, svfloat16_t,
p0 = svacgt_f16 (p1, z0, z1),
p0 = svacgt (p1, z0, z1))
/*
** acgt_h4_f16:
** mov (z[0-9]+\.h), h4
** (
** facgt p0\.h, p1/z, z0\.h, \1
** |
** faclt p0\.h, p1/z, \1, z0\.h
** )
** ret
*/
TEST_COMPARE_ZD (acgt_h4_f16, svfloat16_t, float16_t,
p0 = svacgt_n_f16 (p1, z0, d4),
p0 = svacgt (p1, z0, d4))
/*
** acgt_0_f16:
** mov (z[0-9]+\.h), #0
** (
** facgt p0\.h, p1/z, z0\.h, \1
** |
** faclt p0\.h, p1/z, \1, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acgt_0_f16, svfloat16_t,
p0 = svacgt_n_f16 (p1, z0, 0),
p0 = svacgt (p1, z0, 0))
/*
** acgt_1_f16:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** (
** facgt p0\.h, p1/z, z0\.h, \1
** |
** faclt p0\.h, p1/z, \1, z0\.h
** )
** ret
*/
TEST_COMPARE_Z (acgt_1_f16, svfloat16_t,
p0 = svacgt_n_f16 (p1, z0, 1),
p0 = svacgt (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acgt_f32_tied:
** (
** facgt p0\.s, p0/z, z0\.s, z1\.s
** |
** faclt p0\.s, p0/z, z1\.s, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acgt_f32_tied, svfloat32_t,
p0 = svacgt_f32 (p0, z0, z1),
p0 = svacgt (p0, z0, z1))
/*
** acgt_f32_untied:
** (
** facgt p0\.s, p1/z, z0\.s, z1\.s
** |
** faclt p0\.s, p1/z, z1\.s, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acgt_f32_untied, svfloat32_t,
p0 = svacgt_f32 (p1, z0, z1),
p0 = svacgt (p1, z0, z1))
/*
** acgt_s4_f32:
** mov (z[0-9]+\.s), s4
** (
** facgt p0\.s, p1/z, z0\.s, \1
** |
** faclt p0\.s, p1/z, \1, z0\.s
** )
** ret
*/
TEST_COMPARE_ZD (acgt_s4_f32, svfloat32_t, float32_t,
p0 = svacgt_n_f32 (p1, z0, d4),
p0 = svacgt (p1, z0, d4))
/*
** acgt_0_f32:
** mov (z[0-9]+\.s), #0
** (
** facgt p0\.s, p1/z, z0\.s, \1
** |
** faclt p0\.s, p1/z, \1, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acgt_0_f32, svfloat32_t,
p0 = svacgt_n_f32 (p1, z0, 0),
p0 = svacgt (p1, z0, 0))
/*
** acgt_1_f32:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** (
** facgt p0\.s, p1/z, z0\.s, \1
** |
** faclt p0\.s, p1/z, \1, z0\.s
** )
** ret
*/
TEST_COMPARE_Z (acgt_1_f32, svfloat32_t,
p0 = svacgt_n_f32 (p1, z0, 1),
p0 = svacgt (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acgt_f64_tied:
** (
** facgt p0\.d, p0/z, z0\.d, z1\.d
** |
** faclt p0\.d, p0/z, z1\.d, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acgt_f64_tied, svfloat64_t,
p0 = svacgt_f64 (p0, z0, z1),
p0 = svacgt (p0, z0, z1))
/*
** acgt_f64_untied:
** (
** facgt p0\.d, p1/z, z0\.d, z1\.d
** |
** faclt p0\.d, p1/z, z1\.d, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acgt_f64_untied, svfloat64_t,
p0 = svacgt_f64 (p1, z0, z1),
p0 = svacgt (p1, z0, z1))
/*
** acgt_d4_f64:
** mov (z[0-9]+\.d), d4
** (
** facgt p0\.d, p1/z, z0\.d, \1
** |
** faclt p0\.d, p1/z, \1, z0\.d
** )
** ret
*/
TEST_COMPARE_ZD (acgt_d4_f64, svfloat64_t, float64_t,
p0 = svacgt_n_f64 (p1, z0, d4),
p0 = svacgt (p1, z0, d4))
/*
** acgt_0_f64:
** mov (z[0-9]+\.d), #0
** (
** facgt p0\.d, p1/z, z0\.d, \1
** |
** faclt p0\.d, p1/z, \1, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acgt_0_f64, svfloat64_t,
p0 = svacgt_n_f64 (p1, z0, 0),
p0 = svacgt (p1, z0, 0))
/*
** acgt_1_f64:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** (
** facgt p0\.d, p1/z, z0\.d, \1
** |
** faclt p0\.d, p1/z, \1, z0\.d
** )
** ret
*/
TEST_COMPARE_Z (acgt_1_f64, svfloat64_t,
p0 = svacgt_n_f64 (p1, z0, 1),
p0 = svacgt (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acle_f16_tied:
** (
** facge p0\.h, p0/z, z1\.h, z0\.h
** |
** facle p0\.h, p0/z, z0\.h, z1\.h
** )
** ret
*/
TEST_COMPARE_Z (acle_f16_tied, svfloat16_t,
p0 = svacle_f16 (p0, z0, z1),
p0 = svacle (p0, z0, z1))
/*
** acle_f16_untied:
** (
** facge p0\.h, p1/z, z1\.h, z0\.h
** |
** facle p0\.h, p1/z, z0\.h, z1\.h
** )
** ret
*/
TEST_COMPARE_Z (acle_f16_untied, svfloat16_t,
p0 = svacle_f16 (p1, z0, z1),
p0 = svacle (p1, z0, z1))
/*
** acle_h4_f16:
** mov (z[0-9]+\.h), h4
** (
** facge p0\.h, p1/z, \1, z0\.h
** |
** facle p0\.h, p1/z, z0\.h, \1
** )
** ret
*/
TEST_COMPARE_ZD (acle_h4_f16, svfloat16_t, float16_t,
p0 = svacle_n_f16 (p1, z0, d4),
p0 = svacle (p1, z0, d4))
/*
** acle_0_f16:
** mov (z[0-9]+\.h), #0
** (
** facge p0\.h, p1/z, \1, z0\.h
** |
** facle p0\.h, p1/z, z0\.h, \1
** )
** ret
*/
TEST_COMPARE_Z (acle_0_f16, svfloat16_t,
p0 = svacle_n_f16 (p1, z0, 0),
p0 = svacle (p1, z0, 0))
/*
** acle_1_f16:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** (
** facge p0\.h, p1/z, \1, z0\.h
** |
** facle p0\.h, p1/z, z0\.h, \1
** )
** ret
*/
TEST_COMPARE_Z (acle_1_f16, svfloat16_t,
p0 = svacle_n_f16 (p1, z0, 1),
p0 = svacle (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acle_f32_tied:
** (
** facge p0\.s, p0/z, z1\.s, z0\.s
** |
** facle p0\.s, p0/z, z0\.s, z1\.s
** )
** ret
*/
TEST_COMPARE_Z (acle_f32_tied, svfloat32_t,
p0 = svacle_f32 (p0, z0, z1),
p0 = svacle (p0, z0, z1))
/*
** acle_f32_untied:
** (
** facge p0\.s, p1/z, z1\.s, z0\.s
** |
** facle p0\.s, p1/z, z0\.s, z1\.s
** )
** ret
*/
TEST_COMPARE_Z (acle_f32_untied, svfloat32_t,
p0 = svacle_f32 (p1, z0, z1),
p0 = svacle (p1, z0, z1))
/*
** acle_s4_f32:
** mov (z[0-9]+\.s), s4
** (
** facge p0\.s, p1/z, \1, z0\.s
** |
** facle p0\.s, p1/z, z0\.s, \1
** )
** ret
*/
TEST_COMPARE_ZD (acle_s4_f32, svfloat32_t, float32_t,
p0 = svacle_n_f32 (p1, z0, d4),
p0 = svacle (p1, z0, d4))
/*
** acle_0_f32:
** mov (z[0-9]+\.s), #0
** (
** facge p0\.s, p1/z, \1, z0\.s
** |
** facle p0\.s, p1/z, z0\.s, \1
** )
** ret
*/
TEST_COMPARE_Z (acle_0_f32, svfloat32_t,
p0 = svacle_n_f32 (p1, z0, 0),
p0 = svacle (p1, z0, 0))
/*
** acle_1_f32:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** (
** facge p0\.s, p1/z, \1, z0\.s
** |
** facle p0\.s, p1/z, z0\.s, \1
** )
** ret
*/
TEST_COMPARE_Z (acle_1_f32, svfloat32_t,
p0 = svacle_n_f32 (p1, z0, 1),
p0 = svacle (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** acle_f64_tied:
** (
** facge p0\.d, p0/z, z1\.d, z0\.d
** |
** facle p0\.d, p0/z, z0\.d, z1\.d
** )
** ret
*/
TEST_COMPARE_Z (acle_f64_tied, svfloat64_t,
p0 = svacle_f64 (p0, z0, z1),
p0 = svacle (p0, z0, z1))
/*
** acle_f64_untied:
** (
** facge p0\.d, p1/z, z1\.d, z0\.d
** |
** facle p0\.d, p1/z, z0\.d, z1\.d
** )
** ret
*/
TEST_COMPARE_Z (acle_f64_untied, svfloat64_t,
p0 = svacle_f64 (p1, z0, z1),
p0 = svacle (p1, z0, z1))
/*
** acle_d4_f64:
** mov (z[0-9]+\.d), d4
** (
** facge p0\.d, p1/z, \1, z0\.d
** |
** facle p0\.d, p1/z, z0\.d, \1
** )
** ret
*/
TEST_COMPARE_ZD (acle_d4_f64, svfloat64_t, float64_t,
p0 = svacle_n_f64 (p1, z0, d4),
p0 = svacle (p1, z0, d4))
/*
** acle_0_f64:
** mov (z[0-9]+\.d), #0
** (
** facge p0\.d, p1/z, \1, z0\.d
** |
** facle p0\.d, p1/z, z0\.d, \1
** )
** ret
*/
TEST_COMPARE_Z (acle_0_f64, svfloat64_t,
p0 = svacle_n_f64 (p1, z0, 0),
p0 = svacle (p1, z0, 0))
/*
** acle_1_f64:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** (
** facge p0\.d, p1/z, \1, z0\.d
** |
** facle p0\.d, p1/z, z0\.d, \1
** )
** ret
*/
TEST_COMPARE_Z (acle_1_f64, svfloat64_t,
p0 = svacle_n_f64 (p1, z0, 1),
p0 = svacle (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** aclt_f16_tied:
** (
** facgt p0\.h, p0/z, z1\.h, z0\.h
** |
** faclt p0\.h, p0/z, z0\.h, z1\.h
** )
** ret
*/
TEST_COMPARE_Z (aclt_f16_tied, svfloat16_t,
p0 = svaclt_f16 (p0, z0, z1),
p0 = svaclt (p0, z0, z1))
/*
** aclt_f16_untied:
** (
** facgt p0\.h, p1/z, z1\.h, z0\.h
** |
** faclt p0\.h, p1/z, z0\.h, z1\.h
** )
** ret
*/
TEST_COMPARE_Z (aclt_f16_untied, svfloat16_t,
p0 = svaclt_f16 (p1, z0, z1),
p0 = svaclt (p1, z0, z1))
/*
** aclt_h4_f16:
** mov (z[0-9]+\.h), h4
** (
** facgt p0\.h, p1/z, \1, z0\.h
** |
** faclt p0\.h, p1/z, z0\.h, \1
** )
** ret
*/
TEST_COMPARE_ZD (aclt_h4_f16, svfloat16_t, float16_t,
p0 = svaclt_n_f16 (p1, z0, d4),
p0 = svaclt (p1, z0, d4))
/*
** aclt_0_f16:
** mov (z[0-9]+\.h), #0
** (
** facgt p0\.h, p1/z, \1, z0\.h
** |
** faclt p0\.h, p1/z, z0\.h, \1
** )
** ret
*/
TEST_COMPARE_Z (aclt_0_f16, svfloat16_t,
p0 = svaclt_n_f16 (p1, z0, 0),
p0 = svaclt (p1, z0, 0))
/*
** aclt_1_f16:
** fmov (z[0-9]+\.h), #1\.0(?:e\+0)?
** (
** facgt p0\.h, p1/z, \1, z0\.h
** |
** faclt p0\.h, p1/z, z0\.h, \1
** )
** ret
*/
TEST_COMPARE_Z (aclt_1_f16, svfloat16_t,
p0 = svaclt_n_f16 (p1, z0, 1),
p0 = svaclt (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** aclt_f32_tied:
** (
** facgt p0\.s, p0/z, z1\.s, z0\.s
** |
** faclt p0\.s, p0/z, z0\.s, z1\.s
** )
** ret
*/
TEST_COMPARE_Z (aclt_f32_tied, svfloat32_t,
p0 = svaclt_f32 (p0, z0, z1),
p0 = svaclt (p0, z0, z1))
/*
** aclt_f32_untied:
** (
** facgt p0\.s, p1/z, z1\.s, z0\.s
** |
** faclt p0\.s, p1/z, z0\.s, z1\.s
** )
** ret
*/
TEST_COMPARE_Z (aclt_f32_untied, svfloat32_t,
p0 = svaclt_f32 (p1, z0, z1),
p0 = svaclt (p1, z0, z1))
/*
** aclt_s4_f32:
** mov (z[0-9]+\.s), s4
** (
** facgt p0\.s, p1/z, \1, z0\.s
** |
** faclt p0\.s, p1/z, z0\.s, \1
** )
** ret
*/
TEST_COMPARE_ZD (aclt_s4_f32, svfloat32_t, float32_t,
p0 = svaclt_n_f32 (p1, z0, d4),
p0 = svaclt (p1, z0, d4))
/*
** aclt_0_f32:
** mov (z[0-9]+\.s), #0
** (
** facgt p0\.s, p1/z, \1, z0\.s
** |
** faclt p0\.s, p1/z, z0\.s, \1
** )
** ret
*/
TEST_COMPARE_Z (aclt_0_f32, svfloat32_t,
p0 = svaclt_n_f32 (p1, z0, 0),
p0 = svaclt (p1, z0, 0))
/*
** aclt_1_f32:
** fmov (z[0-9]+\.s), #1\.0(?:e\+0)?
** (
** facgt p0\.s, p1/z, \1, z0\.s
** |
** faclt p0\.s, p1/z, z0\.s, \1
** )
** ret
*/
TEST_COMPARE_Z (aclt_1_f32, svfloat32_t,
p0 = svaclt_n_f32 (p1, z0, 1),
p0 = svaclt (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** aclt_f64_tied:
** (
** facgt p0\.d, p0/z, z1\.d, z0\.d
** |
** faclt p0\.d, p0/z, z0\.d, z1\.d
** )
** ret
*/
TEST_COMPARE_Z (aclt_f64_tied, svfloat64_t,
p0 = svaclt_f64 (p0, z0, z1),
p0 = svaclt (p0, z0, z1))
/*
** aclt_f64_untied:
** (
** facgt p0\.d, p1/z, z1\.d, z0\.d
** |
** faclt p0\.d, p1/z, z0\.d, z1\.d
** )
** ret
*/
TEST_COMPARE_Z (aclt_f64_untied, svfloat64_t,
p0 = svaclt_f64 (p1, z0, z1),
p0 = svaclt (p1, z0, z1))
/*
** aclt_d4_f64:
** mov (z[0-9]+\.d), d4
** (
** facgt p0\.d, p1/z, \1, z0\.d
** |
** faclt p0\.d, p1/z, z0\.d, \1
** )
** ret
*/
TEST_COMPARE_ZD (aclt_d4_f64, svfloat64_t, float64_t,
p0 = svaclt_n_f64 (p1, z0, d4),
p0 = svaclt (p1, z0, d4))
/*
** aclt_0_f64:
** mov (z[0-9]+\.d), #0
** (
** facgt p0\.d, p1/z, \1, z0\.d
** |
** faclt p0\.d, p1/z, z0\.d, \1
** )
** ret
*/
TEST_COMPARE_Z (aclt_0_f64, svfloat64_t,
p0 = svaclt_n_f64 (p1, z0, 0),
p0 = svaclt (p1, z0, 0))
/*
** aclt_1_f64:
** fmov (z[0-9]+\.d), #1\.0(?:e\+0)?
** (
** facgt p0\.d, p1/z, \1, z0\.d
** |
** faclt p0\.d, p1/z, z0\.d, \1
** )
** ret
*/
TEST_COMPARE_Z (aclt_1_f64, svfloat64_t,
p0 = svaclt_n_f64 (p1, z0, 1),
p0 = svaclt (p1, z0, 1))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
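/* The suffixes used throughout these tests follow the ACLE predication
   convention: _m keeps inactive elements from the first data argument
   (merging), _z zeroes them, and _x leaves them unspecified.  "tied"
   tests reuse an input register as the result, while "untied" tests force
   a fresh destination, which is why several of the expected sequences
   need a MOVPRFX in front of the destructive SVE instruction.  A small
   illustrative example (the function name is hypothetical):  */
#include <arm_sve.h>

svint32_t
predication_forms_sketch (svbool_t pg, svint32_t a, svint32_t b)
{
  svint32_t m = svadd_s32_m (pg, a, b);	/* inactive lanes keep a */
  svint32_t z = svadd_s32_z (pg, a, b);	/* inactive lanes become 0 */
  svint32_t x = svadd_s32_x (pg, a, b);	/* inactive lanes unspecified */
  return svadd_x (svptrue_b32 (), m, svadd_x (svptrue_b32 (), z, x));
}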
#include "test_sve_acle.h"
/*
** add_s16_m_tied1:
** add z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (add_s16_m_tied1, svint16_t,
z0 = svadd_s16_m (p0, z0, z1),
z0 = svadd_m (p0, z0, z1))
/*
** add_s16_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1\.h
** ret
*/
TEST_UNIFORM_Z (add_s16_m_tied2, svint16_t,
z0 = svadd_s16_m (p0, z1, z0),
z0 = svadd_m (p0, z1, z0))
/*
** add_s16_m_untied:
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, z2\.h
** ret
*/
TEST_UNIFORM_Z (add_s16_m_untied, svint16_t,
z0 = svadd_s16_m (p0, z1, z2),
z0 = svadd_m (p0, z1, z2))
/*
** add_w0_s16_m_tied1:
** mov (z[0-9]+\.h), w0
** add z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s16_m_tied1, svint16_t, int16_t,
z0 = svadd_n_s16_m (p0, z0, x0),
z0 = svadd_m (p0, z0, x0))
/*
** add_w0_s16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), w0
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s16_m_untied, svint16_t, int16_t,
z0 = svadd_n_s16_m (p0, z1, x0),
z0 = svadd_m (p0, z1, x0))
/*
** add_1_s16_m_tied1:
** mov (z[0-9]+\.h), #1
** add z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s16_m_tied1, svint16_t,
z0 = svadd_n_s16_m (p0, z0, 1),
z0 = svadd_m (p0, z0, 1))
/*
** add_1_s16_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.h), #1
** movprfx z0, z1
** add z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s16_m_untied, svint16_t,
z0 = svadd_n_s16_m (p0, z1, 1),
z0 = svadd_m (p0, z1, 1))
/*
** add_m2_s16_m:
** mov (z[0-9]+\.h), #-2
** add z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (add_m2_s16_m, svint16_t,
z0 = svadd_n_s16_m (p0, z0, -2),
z0 = svadd_m (p0, z0, -2))
/*
** add_s16_z_tied1:
** movprfx z0\.h, p0/z, z0\.h
** add z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (add_s16_z_tied1, svint16_t,
z0 = svadd_s16_z (p0, z0, z1),
z0 = svadd_z (p0, z0, z1))
/*
** add_s16_z_tied2:
** movprfx z0\.h, p0/z, z0\.h
** add z0\.h, p0/m, z0\.h, z1\.h
** ret
*/
TEST_UNIFORM_Z (add_s16_z_tied2, svint16_t,
z0 = svadd_s16_z (p0, z1, z0),
z0 = svadd_z (p0, z1, z0))
/*
** add_s16_z_untied:
** (
** movprfx z0\.h, p0/z, z1\.h
** add z0\.h, p0/m, z0\.h, z2\.h
** |
** movprfx z0\.h, p0/z, z2\.h
** add z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (add_s16_z_untied, svint16_t,
z0 = svadd_s16_z (p0, z1, z2),
z0 = svadd_z (p0, z1, z2))
/*
** add_w0_s16_z_tied1:
** mov (z[0-9]+\.h), w0
** movprfx z0\.h, p0/z, z0\.h
** add z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s16_z_tied1, svint16_t, int16_t,
z0 = svadd_n_s16_z (p0, z0, x0),
z0 = svadd_z (p0, z0, x0))
/*
** add_w0_s16_z_untied:
** mov (z[0-9]+\.h), w0
** (
** movprfx z0\.h, p0/z, z1\.h
** add z0\.h, p0/m, z0\.h, \1
** |
** movprfx z0\.h, p0/z, \1
** add z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_ZX (add_w0_s16_z_untied, svint16_t, int16_t,
z0 = svadd_n_s16_z (p0, z1, x0),
z0 = svadd_z (p0, z1, x0))
/*
** add_1_s16_z_tied1:
** mov (z[0-9]+\.h), #1
** movprfx z0\.h, p0/z, z0\.h
** add z0\.h, p0/m, z0\.h, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s16_z_tied1, svint16_t,
z0 = svadd_n_s16_z (p0, z0, 1),
z0 = svadd_z (p0, z0, 1))
/*
** add_1_s16_z_untied:
** mov (z[0-9]+\.h), #1
** (
** movprfx z0\.h, p0/z, z1\.h
** add z0\.h, p0/m, z0\.h, \1
** |
** movprfx z0\.h, p0/z, \1
** add z0\.h, p0/m, z0\.h, z1\.h
** )
** ret
*/
TEST_UNIFORM_Z (add_1_s16_z_untied, svint16_t,
z0 = svadd_n_s16_z (p0, z1, 1),
z0 = svadd_z (p0, z1, 1))
/*
** add_s16_x_tied1:
** add z0\.h, (z0\.h, z1\.h|z1\.h, z0\.h)
** ret
*/
TEST_UNIFORM_Z (add_s16_x_tied1, svint16_t,
z0 = svadd_s16_x (p0, z0, z1),
z0 = svadd_x (p0, z0, z1))
/*
** add_s16_x_tied2:
** add z0\.h, (z0\.h, z1\.h|z1\.h, z0\.h)
** ret
*/
TEST_UNIFORM_Z (add_s16_x_tied2, svint16_t,
z0 = svadd_s16_x (p0, z1, z0),
z0 = svadd_x (p0, z1, z0))
/*
** add_s16_x_untied:
** add z0\.h, (z1\.h, z2\.h|z2\.h, z1\.h)
** ret
*/
TEST_UNIFORM_Z (add_s16_x_untied, svint16_t,
z0 = svadd_s16_x (p0, z1, z2),
z0 = svadd_x (p0, z1, z2))
/*
** add_w0_s16_x_tied1:
** mov (z[0-9]+\.h), w0
** add z0\.h, (z0\.h, \1|\1, z0\.h)
** ret
*/
TEST_UNIFORM_ZX (add_w0_s16_x_tied1, svint16_t, int16_t,
z0 = svadd_n_s16_x (p0, z0, x0),
z0 = svadd_x (p0, z0, x0))
/*
** add_w0_s16_x_untied:
** mov (z[0-9]+\.h), w0
** add z0\.h, (z1\.h, \1|\1, z1\.h)
** ret
*/
TEST_UNIFORM_ZX (add_w0_s16_x_untied, svint16_t, int16_t,
z0 = svadd_n_s16_x (p0, z1, x0),
z0 = svadd_x (p0, z1, x0))
/*
** add_1_s16_x_tied1:
** add z0\.h, z0\.h, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s16_x_tied1, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 1),
z0 = svadd_x (p0, z0, 1))
/*
** add_1_s16_x_untied:
** movprfx z0, z1
** add z0\.h, z0\.h, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s16_x_untied, svint16_t,
z0 = svadd_n_s16_x (p0, z1, 1),
z0 = svadd_x (p0, z1, 1))
/*
** add_127_s16_x:
** add z0\.h, z0\.h, #127
** ret
*/
TEST_UNIFORM_Z (add_127_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 127),
z0 = svadd_x (p0, z0, 127))
/*
** add_128_s16_x:
** add z0\.h, z0\.h, #128
** ret
*/
TEST_UNIFORM_Z (add_128_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 128),
z0 = svadd_x (p0, z0, 128))
/*
** add_255_s16_x:
** add z0\.h, z0\.h, #255
** ret
*/
TEST_UNIFORM_Z (add_255_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 255),
z0 = svadd_x (p0, z0, 255))
/*
** add_256_s16_x:
** add z0\.h, z0\.h, #256
** ret
*/
TEST_UNIFORM_Z (add_256_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 256),
z0 = svadd_x (p0, z0, 256))
/*
** add_257_s16_x:
** mov (z[0-9]+)\.b, #1
** add z0\.h, (z0\.h, \1\.h|\1\.h, z0\.h)
** ret
*/
TEST_UNIFORM_Z (add_257_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 257),
z0 = svadd_x (p0, z0, 257))
/*
** add_512_s16_x:
** add z0\.h, z0\.h, #512
** ret
*/
TEST_UNIFORM_Z (add_512_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 512),
z0 = svadd_x (p0, z0, 512))
/*
** add_65280_s16_x:
** add z0\.h, z0\.h, #65280
** ret
*/
TEST_UNIFORM_Z (add_65280_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, 0xff00),
z0 = svadd_x (p0, z0, 0xff00))
/*
** add_m1_s16_x:
** sub z0\.h, z0\.h, #1
** ret
*/
TEST_UNIFORM_Z (add_m1_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -1),
z0 = svadd_x (p0, z0, -1))
/*
** add_m127_s16_x:
** sub z0\.h, z0\.h, #127
** ret
*/
TEST_UNIFORM_Z (add_m127_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -127),
z0 = svadd_x (p0, z0, -127))
/*
** add_m128_s16_x:
** sub z0\.h, z0\.h, #128
** ret
*/
TEST_UNIFORM_Z (add_m128_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -128),
z0 = svadd_x (p0, z0, -128))
/*
** add_m255_s16_x:
** sub z0\.h, z0\.h, #255
** ret
*/
TEST_UNIFORM_Z (add_m255_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -255),
z0 = svadd_x (p0, z0, -255))
/*
** add_m256_s16_x:
** add z0\.h, z0\.h, #65280
** ret
*/
TEST_UNIFORM_Z (add_m256_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -256),
z0 = svadd_x (p0, z0, -256))
/*
** add_m257_s16_x:
** mov (z[0-9]+\.h), #-257
** add z0\.h, (z0\.h, \1|\1, z0\.h)
** ret
*/
TEST_UNIFORM_Z (add_m257_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -257),
z0 = svadd_x (p0, z0, -257))
/*
** add_m512_s16_x:
** add z0\.h, z0\.h, #65024
** ret
*/
TEST_UNIFORM_Z (add_m512_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -512),
z0 = svadd_x (p0, z0, -512))
/*
** add_m32768_s16_x:
** add z0\.h, z0\.h, #32768
** ret
*/
TEST_UNIFORM_Z (add_m32768_s16_x, svint16_t,
z0 = svadd_n_s16_x (p0, z0, -0x8000),
z0 = svadd_x (p0, z0, -0x8000))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
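/* Which constants reach the unpredicated ADD/SUB (immediate) forms above
   is an encoding question: SVE ADD (immediate) takes an unsigned 8-bit
   value optionally shifted left by 8, so 1, 127, 255, 256, 512 and 0xff00
   fold into the instruction, negative constants are rewritten as SUB (or,
   for .h elements, as the wrapped unsigned equivalent, e.g. -256 becomes
   #65280), and values such as 257 or -257 need a separate constant move.
   A rough sketch of the folding predicate (illustrative only):  */
static int
fits_sve_add_immediate (unsigned int val)
{
  /* imm8, LSL #0  or  imm8, LSL #8.  */
  return val <= 0xff || ((val & 0xff) == 0 && val <= 0xff00);
}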
#include "test_sve_acle.h"
/*
** add_s32_m_tied1:
** add z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (add_s32_m_tied1, svint32_t,
z0 = svadd_s32_m (p0, z0, z1),
z0 = svadd_m (p0, z0, z1))
/*
** add_s32_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, \1\.s
** ret
*/
TEST_UNIFORM_Z (add_s32_m_tied2, svint32_t,
z0 = svadd_s32_m (p0, z1, z0),
z0 = svadd_m (p0, z1, z0))
/*
** add_s32_m_untied:
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, z2\.s
** ret
*/
TEST_UNIFORM_Z (add_s32_m_untied, svint32_t,
z0 = svadd_s32_m (p0, z1, z2),
z0 = svadd_m (p0, z1, z2))
/*
** add_w0_s32_m_tied1:
** mov (z[0-9]+\.s), w0
** add z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s32_m_tied1, svint32_t, int32_t,
z0 = svadd_n_s32_m (p0, z0, x0),
z0 = svadd_m (p0, z0, x0))
/*
** add_w0_s32_m_untied:
** mov (z[0-9]+\.s), w0
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s32_m_untied, svint32_t, int32_t,
z0 = svadd_n_s32_m (p0, z1, x0),
z0 = svadd_m (p0, z1, x0))
/*
** add_1_s32_m_tied1:
** mov (z[0-9]+\.s), #1
** add z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s32_m_tied1, svint32_t,
z0 = svadd_n_s32_m (p0, z0, 1),
z0 = svadd_m (p0, z0, 1))
/*
** add_1_s32_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.s), #1
** movprfx z0, z1
** add z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s32_m_untied, svint32_t,
z0 = svadd_n_s32_m (p0, z1, 1),
z0 = svadd_m (p0, z1, 1))
/*
** add_m2_s32_m:
** mov (z[0-9]+\.s), #-2
** add z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (add_m2_s32_m, svint32_t,
z0 = svadd_n_s32_m (p0, z0, -2),
z0 = svadd_m (p0, z0, -2))
/*
** add_s32_z_tied1:
** movprfx z0\.s, p0/z, z0\.s
** add z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (add_s32_z_tied1, svint32_t,
z0 = svadd_s32_z (p0, z0, z1),
z0 = svadd_z (p0, z0, z1))
/*
** add_s32_z_tied2:
** movprfx z0\.s, p0/z, z0\.s
** add z0\.s, p0/m, z0\.s, z1\.s
** ret
*/
TEST_UNIFORM_Z (add_s32_z_tied2, svint32_t,
z0 = svadd_s32_z (p0, z1, z0),
z0 = svadd_z (p0, z1, z0))
/*
** add_s32_z_untied:
** (
** movprfx z0\.s, p0/z, z1\.s
** add z0\.s, p0/m, z0\.s, z2\.s
** |
** movprfx z0\.s, p0/z, z2\.s
** add z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (add_s32_z_untied, svint32_t,
z0 = svadd_s32_z (p0, z1, z2),
z0 = svadd_z (p0, z1, z2))
/*
** add_w0_s32_z_tied1:
** mov (z[0-9]+\.s), w0
** movprfx z0\.s, p0/z, z0\.s
** add z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s32_z_tied1, svint32_t, int32_t,
z0 = svadd_n_s32_z (p0, z0, x0),
z0 = svadd_z (p0, z0, x0))
/*
** add_w0_s32_z_untied:
** mov (z[0-9]+\.s), w0
** (
** movprfx z0\.s, p0/z, z1\.s
** add z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** add z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_ZX (add_w0_s32_z_untied, svint32_t, int32_t,
z0 = svadd_n_s32_z (p0, z1, x0),
z0 = svadd_z (p0, z1, x0))
/*
** add_1_s32_z_tied1:
** mov (z[0-9]+\.s), #1
** movprfx z0\.s, p0/z, z0\.s
** add z0\.s, p0/m, z0\.s, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s32_z_tied1, svint32_t,
z0 = svadd_n_s32_z (p0, z0, 1),
z0 = svadd_z (p0, z0, 1))
/*
** add_1_s32_z_untied:
** mov (z[0-9]+\.s), #1
** (
** movprfx z0\.s, p0/z, z1\.s
** add z0\.s, p0/m, z0\.s, \1
** |
** movprfx z0\.s, p0/z, \1
** add z0\.s, p0/m, z0\.s, z1\.s
** )
** ret
*/
TEST_UNIFORM_Z (add_1_s32_z_untied, svint32_t,
z0 = svadd_n_s32_z (p0, z1, 1),
z0 = svadd_z (p0, z1, 1))
/*
** add_s32_x_tied1:
** add z0\.s, (z0\.s, z1\.s|z1\.s, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_s32_x_tied1, svint32_t,
z0 = svadd_s32_x (p0, z0, z1),
z0 = svadd_x (p0, z0, z1))
/*
** add_s32_x_tied2:
** add z0\.s, (z0\.s, z1\.s|z1\.s, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_s32_x_tied2, svint32_t,
z0 = svadd_s32_x (p0, z1, z0),
z0 = svadd_x (p0, z1, z0))
/*
** add_s32_x_untied:
** add z0\.s, (z1\.s, z2\.s|z2\.s, z1\.s)
** ret
*/
TEST_UNIFORM_Z (add_s32_x_untied, svint32_t,
z0 = svadd_s32_x (p0, z1, z2),
z0 = svadd_x (p0, z1, z2))
/*
** add_w0_s32_x_tied1:
** mov (z[0-9]+\.s), w0
** add z0\.s, (z0\.s, \1|\1, z0\.s)
** ret
*/
TEST_UNIFORM_ZX (add_w0_s32_x_tied1, svint32_t, int32_t,
z0 = svadd_n_s32_x (p0, z0, x0),
z0 = svadd_x (p0, z0, x0))
/*
** add_w0_s32_x_untied:
** mov (z[0-9]+\.s), w0
** add z0\.s, (z1\.s, \1|\1, z1\.s)
** ret
*/
TEST_UNIFORM_ZX (add_w0_s32_x_untied, svint32_t, int32_t,
z0 = svadd_n_s32_x (p0, z1, x0),
z0 = svadd_x (p0, z1, x0))
/*
** add_1_s32_x_tied1:
** add z0\.s, z0\.s, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s32_x_tied1, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 1),
z0 = svadd_x (p0, z0, 1))
/*
** add_1_s32_x_untied:
** movprfx z0, z1
** add z0\.s, z0\.s, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s32_x_untied, svint32_t,
z0 = svadd_n_s32_x (p0, z1, 1),
z0 = svadd_x (p0, z1, 1))
/*
** add_127_s32_x:
** add z0\.s, z0\.s, #127
** ret
*/
TEST_UNIFORM_Z (add_127_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 127),
z0 = svadd_x (p0, z0, 127))
/*
** add_128_s32_x:
** add z0\.s, z0\.s, #128
** ret
*/
TEST_UNIFORM_Z (add_128_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 128),
z0 = svadd_x (p0, z0, 128))
/*
** add_255_s32_x:
** add z0\.s, z0\.s, #255
** ret
*/
TEST_UNIFORM_Z (add_255_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 255),
z0 = svadd_x (p0, z0, 255))
/*
** add_256_s32_x:
** add z0\.s, z0\.s, #256
** ret
*/
TEST_UNIFORM_Z (add_256_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 256),
z0 = svadd_x (p0, z0, 256))
/*
** add_511_s32_x:
** mov (z[0-9]+\.s), #511
** add z0\.s, (z0\.s, \1|\1, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_511_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 511),
z0 = svadd_x (p0, z0, 511))
/*
** add_512_s32_x:
** add z0\.s, z0\.s, #512
** ret
*/
TEST_UNIFORM_Z (add_512_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 512),
z0 = svadd_x (p0, z0, 512))
/*
** add_65280_s32_x:
** add z0\.s, z0\.s, #65280
** ret
*/
TEST_UNIFORM_Z (add_65280_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 0xff00),
z0 = svadd_x (p0, z0, 0xff00))
/*
** add_65535_s32_x:
** mov (z[0-9]+\.s), #65535
** add z0\.s, (z0\.s, \1|\1, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_65535_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 65535),
z0 = svadd_x (p0, z0, 65535))
/*
** add_65536_s32_x:
** mov (z[0-9]+\.s), #65536
** add z0\.s, (z0\.s, \1|\1, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_65536_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, 65536),
z0 = svadd_x (p0, z0, 65536))
/*
** add_m1_s32_x:
** sub z0\.s, z0\.s, #1
** ret
*/
TEST_UNIFORM_Z (add_m1_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -1),
z0 = svadd_x (p0, z0, -1))
/*
** add_m127_s32_x:
** sub z0\.s, z0\.s, #127
** ret
*/
TEST_UNIFORM_Z (add_m127_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -127),
z0 = svadd_x (p0, z0, -127))
/*
** add_m128_s32_x:
** sub z0\.s, z0\.s, #128
** ret
*/
TEST_UNIFORM_Z (add_m128_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -128),
z0 = svadd_x (p0, z0, -128))
/*
** add_m255_s32_x:
** sub z0\.s, z0\.s, #255
** ret
*/
TEST_UNIFORM_Z (add_m255_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -255),
z0 = svadd_x (p0, z0, -255))
/*
** add_m256_s32_x:
** sub z0\.s, z0\.s, #256
** ret
*/
TEST_UNIFORM_Z (add_m256_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -256),
z0 = svadd_x (p0, z0, -256))
/*
** add_m511_s32_x:
** mov (z[0-9]+\.s), #-511
** add z0\.s, (z0\.s, \1|\1, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_m511_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -511),
z0 = svadd_x (p0, z0, -511))
/*
** add_m512_s32_x:
** sub z0\.s, z0\.s, #512
** ret
*/
TEST_UNIFORM_Z (add_m512_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -512),
z0 = svadd_x (p0, z0, -512))
/*
** add_m32768_s32_x:
** sub z0\.s, z0\.s, #32768
** ret
*/
TEST_UNIFORM_Z (add_m32768_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -0x8000),
z0 = svadd_x (p0, z0, -0x8000))
/*
** add_m65280_s32_x:
** sub z0\.s, z0\.s, #65280
** ret
*/
TEST_UNIFORM_Z (add_m65280_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -0xff00),
z0 = svadd_x (p0, z0, -0xff00))
/*
** add_m65535_s32_x:
** mov (z[0-9]+\.s), #-65535
** add z0\.s, (z0\.s, \1|\1, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_m65535_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -65535),
z0 = svadd_x (p0, z0, -65535))
/*
** add_m65536_s32_x:
** mov (z[0-9]+\.s), #-65536
** add z0\.s, (z0\.s, \1|\1, z0\.s)
** ret
*/
TEST_UNIFORM_Z (add_m65536_s32_x, svint32_t,
z0 = svadd_n_s32_x (p0, z0, -65536),
z0 = svadd_x (p0, z0, -65536))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** add_s64_m_tied1:
** add z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (add_s64_m_tied1, svint64_t,
z0 = svadd_s64_m (p0, z0, z1),
z0 = svadd_m (p0, z0, z1))
/*
** add_s64_m_tied2:
** mov (z[0-9]+\.d), z0\.d
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (add_s64_m_tied2, svint64_t,
z0 = svadd_s64_m (p0, z1, z0),
z0 = svadd_m (p0, z1, z0))
/*
** add_s64_m_untied:
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, z2\.d
** ret
*/
TEST_UNIFORM_Z (add_s64_m_untied, svint64_t,
z0 = svadd_s64_m (p0, z1, z2),
z0 = svadd_m (p0, z1, z2))
/*
** add_x0_s64_m_tied1:
** mov (z[0-9]+\.d), x0
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (add_x0_s64_m_tied1, svint64_t, int64_t,
z0 = svadd_n_s64_m (p0, z0, x0),
z0 = svadd_m (p0, z0, x0))
/*
** add_x0_s64_m_untied:
** mov (z[0-9]+\.d), x0
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (add_x0_s64_m_untied, svint64_t, int64_t,
z0 = svadd_n_s64_m (p0, z1, x0),
z0 = svadd_m (p0, z1, x0))
/*
** add_1_s64_m_tied1:
** mov (z[0-9]+\.d), #1
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s64_m_tied1, svint64_t,
z0 = svadd_n_s64_m (p0, z0, 1),
z0 = svadd_m (p0, z0, 1))
/*
** add_1_s64_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.d), #1
** movprfx z0, z1
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s64_m_untied, svint64_t,
z0 = svadd_n_s64_m (p0, z1, 1),
z0 = svadd_m (p0, z1, 1))
/*
** add_m2_s64_m:
** mov (z[0-9]+\.d), #-2
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (add_m2_s64_m, svint64_t,
z0 = svadd_n_s64_m (p0, z0, -2),
z0 = svadd_m (p0, z0, -2))
/*
** add_s64_z_tied1:
** movprfx z0\.d, p0/z, z0\.d
** add z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (add_s64_z_tied1, svint64_t,
z0 = svadd_s64_z (p0, z0, z1),
z0 = svadd_z (p0, z0, z1))
/*
** add_s64_z_tied2:
** movprfx z0\.d, p0/z, z0\.d
** add z0\.d, p0/m, z0\.d, z1\.d
** ret
*/
TEST_UNIFORM_Z (add_s64_z_tied2, svint64_t,
z0 = svadd_s64_z (p0, z1, z0),
z0 = svadd_z (p0, z1, z0))
/*
** add_s64_z_untied:
** (
** movprfx z0\.d, p0/z, z1\.d
** add z0\.d, p0/m, z0\.d, z2\.d
** |
** movprfx z0\.d, p0/z, z2\.d
** add z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (add_s64_z_untied, svint64_t,
z0 = svadd_s64_z (p0, z1, z2),
z0 = svadd_z (p0, z1, z2))
/*
** add_x0_s64_z_tied1:
** mov (z[0-9]+\.d), x0
** movprfx z0\.d, p0/z, z0\.d
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_ZX (add_x0_s64_z_tied1, svint64_t, int64_t,
z0 = svadd_n_s64_z (p0, z0, x0),
z0 = svadd_z (p0, z0, x0))
/*
** add_x0_s64_z_untied:
** mov (z[0-9]+\.d), x0
** (
** movprfx z0\.d, p0/z, z1\.d
** add z0\.d, p0/m, z0\.d, \1
** |
** movprfx z0\.d, p0/z, \1
** add z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_ZX (add_x0_s64_z_untied, svint64_t, int64_t,
z0 = svadd_n_s64_z (p0, z1, x0),
z0 = svadd_z (p0, z1, x0))
/*
** add_1_s64_z_tied1:
** mov (z[0-9]+\.d), #1
** movprfx z0\.d, p0/z, z0\.d
** add z0\.d, p0/m, z0\.d, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s64_z_tied1, svint64_t,
z0 = svadd_n_s64_z (p0, z0, 1),
z0 = svadd_z (p0, z0, 1))
/*
** add_1_s64_z_untied:
** mov (z[0-9]+\.d), #1
** (
** movprfx z0\.d, p0/z, z1\.d
** add z0\.d, p0/m, z0\.d, \1
** |
** movprfx z0\.d, p0/z, \1
** add z0\.d, p0/m, z0\.d, z1\.d
** )
** ret
*/
TEST_UNIFORM_Z (add_1_s64_z_untied, svint64_t,
z0 = svadd_n_s64_z (p0, z1, 1),
z0 = svadd_z (p0, z1, 1))
/*
** add_s64_x_tied1:
** add z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_s64_x_tied1, svint64_t,
z0 = svadd_s64_x (p0, z0, z1),
z0 = svadd_x (p0, z0, z1))
/*
** add_s64_x_tied2:
** add z0\.d, (z0\.d, z1\.d|z1\.d, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_s64_x_tied2, svint64_t,
z0 = svadd_s64_x (p0, z1, z0),
z0 = svadd_x (p0, z1, z0))
/*
** add_s64_x_untied:
** add z0\.d, (z1\.d, z2\.d|z2\.d, z1\.d)
** ret
*/
TEST_UNIFORM_Z (add_s64_x_untied, svint64_t,
z0 = svadd_s64_x (p0, z1, z2),
z0 = svadd_x (p0, z1, z2))
/*
** add_x0_s64_x_tied1:
** mov (z[0-9]+\.d), x0
** add z0\.d, (z0\.d, \1|\1, z0\.d)
** ret
*/
TEST_UNIFORM_ZX (add_x0_s64_x_tied1, svint64_t, int64_t,
z0 = svadd_n_s64_x (p0, z0, x0),
z0 = svadd_x (p0, z0, x0))
/*
** add_x0_s64_x_untied:
** mov (z[0-9]+\.d), x0
** add z0\.d, (z1\.d, \1|\1, z1\.d)
** ret
*/
TEST_UNIFORM_ZX (add_x0_s64_x_untied, svint64_t, int64_t,
z0 = svadd_n_s64_x (p0, z1, x0),
z0 = svadd_x (p0, z1, x0))
/*
** add_1_s64_x_tied1:
** add z0\.d, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s64_x_tied1, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 1),
z0 = svadd_x (p0, z0, 1))
/*
** add_1_s64_x_untied:
** movprfx z0, z1
** add z0\.d, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s64_x_untied, svint64_t,
z0 = svadd_n_s64_x (p0, z1, 1),
z0 = svadd_x (p0, z1, 1))
/*
** add_127_s64_x:
** add z0\.d, z0\.d, #127
** ret
*/
TEST_UNIFORM_Z (add_127_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 127),
z0 = svadd_x (p0, z0, 127))
/*
** add_128_s64_x:
** add z0\.d, z0\.d, #128
** ret
*/
TEST_UNIFORM_Z (add_128_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 128),
z0 = svadd_x (p0, z0, 128))
/*
** add_255_s64_x:
** add z0\.d, z0\.d, #255
** ret
*/
TEST_UNIFORM_Z (add_255_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 255),
z0 = svadd_x (p0, z0, 255))
/*
** add_256_s64_x:
** add z0\.d, z0\.d, #256
** ret
*/
TEST_UNIFORM_Z (add_256_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 256),
z0 = svadd_x (p0, z0, 256))
/*
** add_511_s64_x:
** mov (z[0-9]+\.d), #511
** add z0\.d, (z0\.d, \1|\1, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_511_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 511),
z0 = svadd_x (p0, z0, 511))
/*
** add_512_s64_x:
** add z0\.d, z0\.d, #512
** ret
*/
TEST_UNIFORM_Z (add_512_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 512),
z0 = svadd_x (p0, z0, 512))
/*
** add_65280_s64_x:
** add z0\.d, z0\.d, #65280
** ret
*/
TEST_UNIFORM_Z (add_65280_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 0xff00),
z0 = svadd_x (p0, z0, 0xff00))
/*
** add_65535_s64_x:
** mov (z[0-9]+\.d), #65535
** add z0\.d, (z0\.d, \1|\1, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_65535_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 65535),
z0 = svadd_x (p0, z0, 65535))
/*
** add_65536_s64_x:
** mov (z[0-9]+\.d), #65536
** add z0\.d, (z0\.d, \1|\1, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_65536_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, 65536),
z0 = svadd_x (p0, z0, 65536))
/*
** add_m1_s64_x:
** sub z0\.d, z0\.d, #1
** ret
*/
TEST_UNIFORM_Z (add_m1_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -1),
z0 = svadd_x (p0, z0, -1))
/*
** add_m127_s64_x:
** sub z0\.d, z0\.d, #127
** ret
*/
TEST_UNIFORM_Z (add_m127_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -127),
z0 = svadd_x (p0, z0, -127))
/*
** add_m128_s64_x:
** sub z0\.d, z0\.d, #128
** ret
*/
TEST_UNIFORM_Z (add_m128_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -128),
z0 = svadd_x (p0, z0, -128))
/*
** add_m255_s64_x:
** sub z0\.d, z0\.d, #255
** ret
*/
TEST_UNIFORM_Z (add_m255_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -255),
z0 = svadd_x (p0, z0, -255))
/*
** add_m256_s64_x:
** sub z0\.d, z0\.d, #256
** ret
*/
TEST_UNIFORM_Z (add_m256_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -256),
z0 = svadd_x (p0, z0, -256))
/*
** add_m511_s64_x:
** mov (z[0-9]+\.d), #-511
** add z0\.d, (z0\.d, \1|\1, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_m511_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -511),
z0 = svadd_x (p0, z0, -511))
/*
** add_m512_s64_x:
** sub z0\.d, z0\.d, #512
** ret
*/
TEST_UNIFORM_Z (add_m512_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -512),
z0 = svadd_x (p0, z0, -512))
/*
** add_m32768_s64_x:
** sub z0\.d, z0\.d, #32768
** ret
*/
TEST_UNIFORM_Z (add_m32768_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -0x8000),
z0 = svadd_x (p0, z0, -0x8000))
/*
** add_m65280_s64_x:
** sub z0\.d, z0\.d, #65280
** ret
*/
TEST_UNIFORM_Z (add_m65280_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -0xff00),
z0 = svadd_x (p0, z0, -0xff00))
/*
** add_m65535_s64_x:
** mov (z[0-9]+\.d), #-65535
** add z0\.d, (z0\.d, \1|\1, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_m65535_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -65535),
z0 = svadd_x (p0, z0, -65535))
/*
** add_m65536_s64_x:
** mov (z[0-9]+\.d), #-65536
** add z0\.d, (z0\.d, \1|\1, z0\.d)
** ret
*/
TEST_UNIFORM_Z (add_m65536_s64_x, svint64_t,
z0 = svadd_n_s64_x (p0, z0, -65536),
z0 = svadd_x (p0, z0, -65536))
/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
#include "test_sve_acle.h"
/*
** add_s8_m_tied1:
** add z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (add_s8_m_tied1, svint8_t,
z0 = svadd_s8_m (p0, z0, z1),
z0 = svadd_m (p0, z0, z1))
/*
** add_s8_m_tied2:
** mov (z[0-9]+)\.d, z0\.d
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1\.b
** ret
*/
TEST_UNIFORM_Z (add_s8_m_tied2, svint8_t,
z0 = svadd_s8_m (p0, z1, z0),
z0 = svadd_m (p0, z1, z0))
/*
** add_s8_m_untied:
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, z2\.b
** ret
*/
TEST_UNIFORM_Z (add_s8_m_untied, svint8_t,
z0 = svadd_s8_m (p0, z1, z2),
z0 = svadd_m (p0, z1, z2))
/*
** add_w0_s8_m_tied1:
** mov (z[0-9]+\.b), w0
** add z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s8_m_tied1, svint8_t, int8_t,
z0 = svadd_n_s8_m (p0, z0, x0),
z0 = svadd_m (p0, z0, x0))
/*
** add_w0_s8_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.b), w0
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s8_m_untied, svint8_t, int8_t,
z0 = svadd_n_s8_m (p0, z1, x0),
z0 = svadd_m (p0, z1, x0))
/*
** add_1_s8_m_tied1:
** mov (z[0-9]+\.b), #1
** add z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s8_m_tied1, svint8_t,
z0 = svadd_n_s8_m (p0, z0, 1),
z0 = svadd_m (p0, z0, 1))
/*
** add_1_s8_m_untied: { xfail *-*-* }
** mov (z[0-9]+\.b), #1
** movprfx z0, z1
** add z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s8_m_untied, svint8_t,
z0 = svadd_n_s8_m (p0, z1, 1),
z0 = svadd_m (p0, z1, 1))
/*
** add_m1_s8_m:
** mov (z[0-9]+\.b), #-1
** add z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (add_m1_s8_m, svint8_t,
z0 = svadd_n_s8_m (p0, z0, -1),
z0 = svadd_m (p0, z0, -1))
/*
** add_s8_z_tied1:
** movprfx z0\.b, p0/z, z0\.b
** add z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (add_s8_z_tied1, svint8_t,
z0 = svadd_s8_z (p0, z0, z1),
z0 = svadd_z (p0, z0, z1))
/*
** add_s8_z_tied2:
** movprfx z0\.b, p0/z, z0\.b
** add z0\.b, p0/m, z0\.b, z1\.b
** ret
*/
TEST_UNIFORM_Z (add_s8_z_tied2, svint8_t,
z0 = svadd_s8_z (p0, z1, z0),
z0 = svadd_z (p0, z1, z0))
/*
** add_s8_z_untied:
** (
** movprfx z0\.b, p0/z, z1\.b
** add z0\.b, p0/m, z0\.b, z2\.b
** |
** movprfx z0\.b, p0/z, z2\.b
** add z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (add_s8_z_untied, svint8_t,
z0 = svadd_s8_z (p0, z1, z2),
z0 = svadd_z (p0, z1, z2))
/*
** add_w0_s8_z_tied1:
** mov (z[0-9]+\.b), w0
** movprfx z0\.b, p0/z, z0\.b
** add z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_ZX (add_w0_s8_z_tied1, svint8_t, int8_t,
z0 = svadd_n_s8_z (p0, z0, x0),
z0 = svadd_z (p0, z0, x0))
/*
** add_w0_s8_z_untied:
** mov (z[0-9]+\.b), w0
** (
** movprfx z0\.b, p0/z, z1\.b
** add z0\.b, p0/m, z0\.b, \1
** |
** movprfx z0\.b, p0/z, \1
** add z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_ZX (add_w0_s8_z_untied, svint8_t, int8_t,
z0 = svadd_n_s8_z (p0, z1, x0),
z0 = svadd_z (p0, z1, x0))
/*
** add_1_s8_z_tied1:
** mov (z[0-9]+\.b), #1
** movprfx z0\.b, p0/z, z0\.b
** add z0\.b, p0/m, z0\.b, \1
** ret
*/
TEST_UNIFORM_Z (add_1_s8_z_tied1, svint8_t,
z0 = svadd_n_s8_z (p0, z0, 1),
z0 = svadd_z (p0, z0, 1))
/*
** add_1_s8_z_untied:
** mov (z[0-9]+\.b), #1
** (
** movprfx z0\.b, p0/z, z1\.b
** add z0\.b, p0/m, z0\.b, \1
** |
** movprfx z0\.b, p0/z, \1
** add z0\.b, p0/m, z0\.b, z1\.b
** )
** ret
*/
TEST_UNIFORM_Z (add_1_s8_z_untied, svint8_t,
z0 = svadd_n_s8_z (p0, z1, 1),
z0 = svadd_z (p0, z1, 1))
/*
** add_s8_x_tied1:
** add z0\.b, (z0\.b, z1\.b|z1\.b, z0\.b)
** ret
*/
TEST_UNIFORM_Z (add_s8_x_tied1, svint8_t,
z0 = svadd_s8_x (p0, z0, z1),
z0 = svadd_x (p0, z0, z1))
/*
** add_s8_x_tied2:
** add z0\.b, (z0\.b, z1\.b|z1\.b, z0\.b)
** ret
*/
TEST_UNIFORM_Z (add_s8_x_tied2, svint8_t,
z0 = svadd_s8_x (p0, z1, z0),
z0 = svadd_x (p0, z1, z0))
/*
** add_s8_x_untied:
** add z0\.b, (z1\.b, z2\.b|z2\.b, z1\.b)
** ret
*/
TEST_UNIFORM_Z (add_s8_x_untied, svint8_t,
z0 = svadd_s8_x (p0, z1, z2),
z0 = svadd_x (p0, z1, z2))
/*
** add_w0_s8_x_tied1:
** mov (z[0-9]+\.b), w0
** add z0\.b, (z0\.b, \1|\1, z0\.b)
** ret
*/
TEST_UNIFORM_ZX (add_w0_s8_x_tied1, svint8_t, int8_t,
z0 = svadd_n_s8_x (p0, z0, x0),
z0 = svadd_x (p0, z0, x0))
/*
** add_w0_s8_x_untied:
** mov (z[0-9]+\.b), w0
** add z0\.b, (z1\.b, \1|\1, z1\.b)
** ret
*/
TEST_UNIFORM_ZX (add_w0_s8_x_untied, svint8_t, int8_t,
z0 = svadd_n_s8_x (p0, z1, x0),
z0 = svadd_x (p0, z1, x0))
/*
** add_1_s8_x_tied1:
** add z0\.b, z0\.b, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s8_x_tied1, svint8_t,
z0 = svadd_n_s8_x (p0, z0, 1),
z0 = svadd_x (p0, z0, 1))
/*
** add_1_s8_x_untied:
** movprfx z0, z1
** add z0\.b, z0\.b, #1
** ret
*/
TEST_UNIFORM_Z (add_1_s8_x_untied, svint8_t,
z0 = svadd_n_s8_x (p0, z1, 1),
z0 = svadd_x (p0, z1, 1))
/*
** add_127_s8_x:
** add z0\.b, z0\.b, #127
** ret
*/
TEST_UNIFORM_Z (add_127_s8_x, svint8_t,
z0 = svadd_n_s8_x (p0, z0, 127),
z0 = svadd_x (p0, z0, 127))
/*
** add_128_s8_x:
** add z0\.b, z0\.b, #128
** ret
*/
TEST_UNIFORM_Z (add_128_s8_x, svint8_t,
z0 = svadd_n_s8_x (p0, z0, 128),
z0 = svadd_x (p0, z0, 128))
/*
** add_255_s8_x:
** add z0\.b, z0\.b, #255
** ret
*/
TEST_UNIFORM_Z (add_255_s8_x, svint8_t,
z0 = svadd_n_s8_x (p0, z0, 255),
z0 = svadd_x (p0, z0, 255))
/*
** add_m1_s8_x:
** add z0\.b, z0\.b, #255
** ret
*/
TEST_UNIFORM_Z (add_m1_s8_x, svint8_t,
z0 = svadd_n_s8_x (p0, z0, -1),
z0 = svadd_x (p0, z0, -1))
/*
** add_m127_s8_x:
** add z0\.b, z0\.b, #129
** ret
*/
TEST_UNIFORM_Z (add_m127_s8_x, svint8_t,
z0 = svadd_n_s8_x (p0, z0, -127),
z0 = svadd_x (p0, z0, -127))
/*
** add_m128_s8_x:
** add z0\.b, z0\.b, #128
** ret
*/
TEST_UNIFORM_Z (add_m128_s8_x, svint8_t,
z0 = svadd_n_s8_x (p0, z0, -128),
z0 = svadd_x (p0, z0, -128))