Commit 6c2e8d1c by Bob Wilson

lib1funcs.asm: Avoid use of .Lfe* in .size directives.

	* config/xtensa/lib1funcs.asm: Avoid use of .Lfe* in .size directives.
	(do_abs, do_addx2, do_addx4, do_addx8): New assembler macros.
	(__mulsi3): Use do_addx* instead of ADDX* instructions.  Formatting.
	(nsau): Rename to do_nsau.  Provide alternate version for use when
	the NSAU instruction is available.
	(__udivsi3, __divsi3, __umodsi3, __modsi3): Use do_nsau macro.
	(__divsi3, __modsi3): Use do_abs macro instead of ABS instruction.
	* config/xtensa/xtensa-config.h: Update comments to match binutils.
	(XCHAL_HAVE_ABS, XCHAL_HAVE_ADDX): Define.
	* config/xtensa/xtensa.h (MASK_ABS, MASK_ADDX): Define.
	(TARGET_ABS, TARGET_ADDX): Define.
	(TARGET_DEFAULT): Conditionally add MASK_ABS and MASK_ADDX.
	(TARGET_SWITCHES): Add "abs", "no-abs", "addx", and "no-addx".
	* config/xtensa/xtensa.md (*addx2, *addx4, *addx8, *subx2, *subx4,
	*subx8): Set predicate condition to TARGET_ADDX.
	(abssi2): Set predicate condition to TARGET_ABS.
	* doc/invoke.texi (Option Summary): Document new "-mabs", "-mno-abs",
	"-maddx", and "-mno-addx" options.
	(Xtensa Options): Likewise.  Also tag some opcode names with @code.

From-SVN: r67044
config/xtensa/lib1funcs.asm:

 /* Assembly functions for the Xtensa version of libgcc1.
-   Copyright (C) 2001,2002 Free Software Foundation, Inc.
+   Copyright (C) 2001,2002,2003 Free Software Foundation, Inc.
    Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.

    This file is part of GCC.

@@ -30,6 +30,46 @@ Software Foundation, 59 Temple Place - Suite 330, Boston, MA

 #include "xtensa/xtensa-config.h"

+# Define macros for the ABS and ADDX* instructions to handle cases
+# where they are not included in the Xtensa processor configuration.
+
+	.macro	do_abs dst, src, tmp
+#if XCHAL_HAVE_ABS
+	abs	\dst, \src
+#else
+	neg	\tmp, \src
+	movgez	\tmp, \src, \src
+	mov	\dst, \tmp
+#endif
+	.endm
+
+	.macro	do_addx2 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+	addx2	\dst, \as, \at
+#else
+	slli	\tmp, \as, 1
+	add	\dst, \tmp, \at
+#endif
+	.endm
+
+	.macro	do_addx4 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+	addx4	\dst, \as, \at
+#else
+	slli	\tmp, \as, 2
+	add	\dst, \tmp, \at
+#endif
+	.endm
+
+	.macro	do_addx8 dst, as, at, tmp
+#if XCHAL_HAVE_ADDX
+	addx8	\dst, \as, \at
+#else
+	slli	\tmp, \as, 3
+	add	\dst, \tmp, \at
+#endif
+	.endm
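The new macros fall back to two or three core instructions when the optional opcodes are absent. For readers unfamiliar with the Xtensa opcodes, here is a rough C model of what the macros compute, assuming wrapping 32-bit arithmetic; the model_* names are illustrative only and are not part of the patch:

#include <stdint.h>

/* do_abs fallback: negate, then conditionally copy the original value
   back when it is non-negative (mirrors "neg" + "movgez" + "mov").  */
static int32_t model_do_abs(int32_t src)
{
    int32_t tmp = (int32_t)(0u - (uint32_t)src);   /* neg */
    if (src >= 0)                                  /* movgez */
        tmp = src;
    return tmp;                                    /* mov */
}

/* do_addx2/4/8 compute (as << n) + at for n = 1, 2, 3; the fallback
   is "slli" + "add".  */
static int32_t model_do_addx(int32_t as, int32_t at, unsigned n)
{
    return (int32_t)(((uint32_t)as << n) + (uint32_t)at);
}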
 #ifdef L_mulsi3
 	.align	4
 	.global	__mulsi3

@@ -64,88 +104,85 @@ __mulsi3:

 #else /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */

 	# Multiply one bit at a time, but unroll the loop 4x to better
-	# exploit the addx instructions.
+	# exploit the addx instructions and avoid overhead.
+	# Peel the first iteration to save a cycle on init.

-	# Peel the first iteration to save a cycle on init
-
-	# avoid negative numbers
+	# Avoid negative numbers.
 	xor	a5, a2, a3	# top bit is 1 iff one of the inputs is negative
-	abs	a3, a3
-	abs	a2, a2
+	do_abs	a3, a3, a6
+	do_abs	a2, a2, a6

-	# swap so that second argument is smaller
+	# Swap so the second argument is smaller.
 	sub	a7, a2, a3
 	mov	a4, a3
 	movgez	a4, a2, a7	# a4 = max(a2, a3)
 	movltz	a3, a2, a7	# a3 = min(a2, a3)

 	movi	a2, 0
 	extui	a6, a3, 0, 1
 	movnez	a2, a4, a6
-	addx2	a7, a4, a2
+	do_addx2 a7, a4, a2, a7
 	extui	a6, a3, 1, 1
 	movnez	a2, a7, a6
-	addx4	a7, a4, a2
+	do_addx4 a7, a4, a2, a7
 	extui	a6, a3, 2, 1
 	movnez	a2, a7, a6
-	addx8	a7, a4, a2
+	do_addx8 a7, a4, a2, a7
 	extui	a6, a3, 3, 1
 	movnez	a2, a7, a6
 	bgeui	a3, 16, .Lmult_main_loop
 	neg	a3, a2
 	movltz	a2, a3, a5
 	retw

 	.align	4
 .Lmult_main_loop:
 	srli	a3, a3, 4
 	slli	a4, a4, 4

 	add	a7, a4, a2
 	extui	a6, a3, 0, 1
 	movnez	a2, a7, a6
-	addx2	a7, a4, a2
+	do_addx2 a7, a4, a2, a7
 	extui	a6, a3, 1, 1
 	movnez	a2, a7, a6
-	addx4	a7, a4, a2
+	do_addx4 a7, a4, a2, a7
 	extui	a6, a3, 2, 1
 	movnez	a2, a7, a6
-	addx8	a7, a4, a2
+	do_addx8 a7, a4, a2, a7
 	extui	a6, a3, 3, 1
 	movnez	a2, a7, a6
 	bgeui	a3, 16, .Lmult_main_loop

 	neg	a3, a2
 	movltz	a2, a3, a5

 #endif /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */

 	retw
-.Lfe0:
-	.size	__mulsi3,.Lfe0-__mulsi3
+	.size	__mulsi3,.-__mulsi3

 #endif /* L_mulsi3 */
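The shift-add multiply consumes four bits of the smaller operand per pass, using the do_addx* macros for the scaled adds. An illustrative C model of the algorithm, not code from the patch (the peeled first iteration is folded into the loop for brevity):

#include <stdint.h>

static int32_t model_mulsi3(int32_t x, int32_t y)
{
    int32_t sign = x ^ y;                 /* top bit set iff result negative */
    uint32_t a = x < 0 ? 0u - (uint32_t)x : (uint32_t)x;
    uint32_t b = y < 0 ? 0u - (uint32_t)y : (uint32_t)y;
    if (a < b) { uint32_t t = a; a = b; b = t; }   /* keep the smaller in b */

    uint32_t acc = 0;
    while (b != 0) {                      /* four bits of b per pass */
        if (b & 1) acc += a;              /* plain add */
        if (b & 2) acc += a << 1;         /* do_addx2 */
        if (b & 4) acc += a << 2;         /* do_addx4 */
        if (b & 8) acc += a << 3;         /* do_addx8 */
        b >>= 4;
        a <<= 4;
    }
    if (sign < 0)
        acc = 0u - acc;                   /* final neg + movltz */
    return (int32_t)acc;
}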
-# Some Xtensa configurations include the NSAU (unsigned
-# normalize shift amount) instruction which computes the number
-# of leading zero bits.  For other configurations, the "nsau"
-# operation is implemented as a macro.
+# Define a macro for the NSAU (unsigned normalize shift amount)
+# instruction, which computes the number of leading zero bits,
+# to handle cases where it is not included in the Xtensa processor
+# configuration.

-#if !XCHAL_HAVE_NSA
-	.macro	nsau cnt, val, tmp, a
+	.macro	do_nsau cnt, val, tmp, a
+#if XCHAL_HAVE_NSA
+	nsau	\cnt, \val
+#else
 	mov	\a, \val
 	movi	\cnt, 0
 	extui	\tmp, \a, 16, 16

@@ -163,8 +200,8 @@

 	add	\tmp, \tmp, \a
 	l8ui	\tmp, \tmp, 0
 	add	\cnt, \cnt, \tmp
-	.endm
 #endif /* !XCHAL_HAVE_NSA */
+	.endm
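do_nsau counts leading zero bits (32 for zero). The fallback narrows the value to its top 16 and then top 8 bits, finishing with a lookup in the __nsau_data byte table. A C sketch of the same computation; since the middle of the macro body is elided in the hunk above, a loop stands in for the table lookup here:

#include <stdint.h>

static unsigned model_do_nsau(uint32_t val)
{
    unsigned cnt = 0;
    if ((val >> 16) == 0) { cnt += 16; val <<= 16; }  /* narrow to top 16 bits */
    if ((val >> 24) == 0) { cnt += 8;  val <<= 8;  }  /* narrow to top 8 bits */

    /* The asm indexes __nsau_data with the top byte; an equivalent
       counting loop replaces the table in this sketch.  */
    uint32_t byte = val >> 24;
    unsigned lz = 8;
    while (byte) { lz--; byte >>= 1; }
    return cnt + lz;                                  /* 32 when val was 0 */
}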
 #ifdef L_nsau
 	.section .rodata

@@ -190,8 +227,7 @@ __nsau_data:

 	.byte	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 	.byte	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 #endif /* !XCHAL_HAVE_NSA */
-.Lfe1:
-	.size	__nsau_data,.Lfe1-__nsau_data
+	.size	__nsau_data,.-__nsau_data
 	.hidden	__nsau_data

 #endif /* L_nsau */
@@ -205,13 +241,8 @@ __udivsi3:

 	bltui	a3, 2, .Lle_one	# check if the divisor <= 1

 	mov	a6, a2		# keep dividend in a6
-#if XCHAL_HAVE_NSA
-	nsau	a5, a6		# dividend_shift = nsau(dividend)
-	nsau	a4, a3		# divisor_shift = nsau(divisor)
-#else /* !XCHAL_HAVE_NSA */
-	nsau	a5, a6, a2, a7	# dividend_shift = nsau(dividend)
-	nsau	a4, a3, a2, a7	# divisor_shift = nsau(divisor)
-#endif /* !XCHAL_HAVE_NSA */
+	do_nsau	a5, a6, a2, a7	# dividend_shift = nsau(dividend)
+	do_nsau	a4, a3, a2, a7	# divisor_shift = nsau(divisor)
 	bgeu	a5, a4, .Lspecial

 	sub	a4, a4, a5	# count = divisor_shift - dividend_shift

@@ -255,8 +286,7 @@ __udivsi3:

 .Lerror:
 	movi	a2, 0		# just return 0; could throw an exception
 	retw
-.Lfe2:
-	.size	__udivsi3,.Lfe2-__udivsi3
+	.size	__udivsi3,.-__udivsi3

 #endif /* L_udivsi3 */
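All four division and modulo routines share one structure: use do_nsau to align the divisor's most significant bit under the dividend's, then run a restoring shift-subtract loop. A C model of the __udivsi3 algorithm, with a portable helper standing in for nsau; register-level details of the asm are omitted and the names are illustrative:

#include <stdint.h>

static unsigned nsau32(uint32_t v)       /* leading zeros; 32 for v == 0 */
{
    unsigned n = 0;
    while (n < 32 && !(v & 0x80000000u)) { n++; v <<= 1; }
    return n;
}

static uint32_t model_udivsi3(uint32_t dividend, uint32_t divisor)
{
    if (divisor <= 1)
        return divisor ? dividend : 0;   /* the asm returns 0 on divide by 0 */

    unsigned dividend_shift = nsau32(dividend);
    unsigned divisor_shift  = nsau32(divisor);
    if (dividend_shift >= divisor_shift) /* .Lspecial: quotient is 0 or 1 */
        return dividend >= divisor ? 1 : 0;

    unsigned count = divisor_shift - dividend_shift;
    divisor <<= count;                   /* align most-significant bits */

    uint32_t quotient = 0;
    for (unsigned i = 0; i <= count; i++) {
        quotient <<= 1;
        if (dividend >= divisor) {       /* restoring shift-subtract step */
            dividend -= divisor;
            quotient |= 1;
        }
        divisor >>= 1;
    }
    return quotient;                     /* "dividend" now holds the remainder,
                                            which is what __umodsi3 returns */
}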
@@ -268,16 +298,11 @@

 __divsi3:
 	entry	sp, 16
 	xor	a7, a2, a3	# sign = dividend ^ divisor
-	abs	a6, a2		# udividend = abs(dividend)
-	abs	a3, a3		# udivisor = abs(divisor)
+	do_abs	a6, a2, a4	# udividend = abs(dividend)
+	do_abs	a3, a3, a4	# udivisor = abs(divisor)
 	bltui	a3, 2, .Lle_one	# check if udivisor <= 1
-#if XCHAL_HAVE_NSA
-	nsau	a5, a6		# udividend_shift = nsau(udividend)
-	nsau	a4, a3		# udivisor_shift = nsau(udivisor)
-#else /* !XCHAL_HAVE_NSA */
-	nsau	a5, a6, a2, a8	# udividend_shift = nsau(udividend)
-	nsau	a4, a3, a2, a8	# udivisor_shift = nsau(udivisor)
-#endif /* !XCHAL_HAVE_NSA */
+	do_nsau	a5, a6, a2, a8	# udividend_shift = nsau(udividend)
+	do_nsau	a4, a3, a2, a8	# udivisor_shift = nsau(udivisor)
 	bgeu	a5, a4, .Lspecial

 	sub	a4, a4, a5	# count = udivisor_shift - udividend_shift

@@ -326,8 +351,7 @@ __divsi3:

 .Lerror:
 	movi	a2, 0		# just return 0; could throw an exception
 	retw
-.Lfe3:
-	.size	__divsi3,.Lfe3-__divsi3
+	.size	__divsi3,.-__divsi3

 #endif /* L_divsi3 */

@@ -340,13 +364,8 @@ __umodsi3:

 	entry	sp, 16
 	bltui	a3, 2, .Lle_one	# check if the divisor is <= 1

-#if XCHAL_HAVE_NSA
-	nsau	a5, a2		# dividend_shift = nsau(dividend)
-	nsau	a4, a3		# divisor_shift = nsau(divisor)
-#else /* !XCHAL_HAVE_NSA */
-	nsau	a5, a2, a6, a7	# dividend_shift = nsau(dividend)
-	nsau	a4, a3, a6, a7	# divisor_shift = nsau(divisor)
-#endif /* !XCHAL_HAVE_NSA */
+	do_nsau	a5, a2, a6, a7	# dividend_shift = nsau(dividend)
+	do_nsau	a4, a3, a6, a7	# divisor_shift = nsau(divisor)
 	bgeu	a5, a4, .Lspecial

 	sub	a4, a4, a5	# count = divisor_shift - dividend_shift

@@ -384,8 +403,7 @@ __umodsi3:

 	# someday we may want to throw an exception if the divisor is 0.
 	movi	a2, 0
 	retw
-.Lfe4:
-	.size	__umodsi3,.Lfe4-__umodsi3
+	.size	__umodsi3,.-__umodsi3

 #endif /* L_umodsi3 */

@@ -397,16 +415,11 @@ __umodsi3:

 __modsi3:
 	entry	sp, 16
 	mov	a7, a2		# save original (signed) dividend
-	abs	a2, a2		# udividend = abs(dividend)
-	abs	a3, a3		# udivisor = abs(divisor)
+	do_abs	a2, a2, a4	# udividend = abs(dividend)
+	do_abs	a3, a3, a4	# udivisor = abs(divisor)
 	bltui	a3, 2, .Lle_one	# check if udivisor <= 1
-#if XCHAL_HAVE_NSA
-	nsau	a5, a2		# udividend_shift = nsau(udividend)
-	nsau	a4, a3		# udivisor_shift = nsau(udivisor)
-#else /* !XCHAL_HAVE_NSA */
-	nsau	a5, a2, a6, a8	# udividend_shift = nsau(udividend)
-	nsau	a4, a3, a6, a8	# udivisor_shift = nsau(udivisor)
-#endif /* !XCHAL_HAVE_NSA */
+	do_nsau	a5, a2, a6, a8	# udividend_shift = nsau(udividend)
+	do_nsau	a4, a3, a6, a8	# udivisor_shift = nsau(udivisor)
 	bgeu	a5, a4, .Lspecial

 	sub	a4, a4, a5	# count = udivisor_shift - udividend_shift

@@ -450,7 +463,6 @@ __modsi3:

 	# someday we may want to throw an exception if udivisor is 0.
 	movi	a2, 0
 	retw
-.Lfe5:
-	.size	__modsi3,.Lfe5-__modsi3
+	.size	__modsi3,.-__modsi3

 #endif /* L_modsi3 */
config/xtensa/xtensa-config.h:

@@ -2,32 +2,33 @@

    Copyright (C) 2001,2002,2003 Free Software Foundation, Inc.
    Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.

-** NOTE: This file was automatically generated by the Xtensa Processor
-** Generator.  Changes made here will be lost when this file is
-** updated or replaced with the settings for a different Xtensa
-** processor configuration.  DO NOT EDIT!
-
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2, or (at your option)
    any later version.

    This program is distributed in the hope that it will be useful, but
    WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
-   Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-*/
+   Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

 #ifndef XTENSA_CONFIG_H
 #define XTENSA_CONFIG_H

+/* The macros defined here match those with the same names in the Xtensa
+   compile-time HAL (Hardware Abstraction Layer).  Please refer to the
+   Xtensa System Software Reference Manual for documentation of these
+   macros.  */
+
 #define XCHAL_HAVE_BE		1
 #define XCHAL_HAVE_DENSITY	1
 #define XCHAL_HAVE_CONST16	0
+#define XCHAL_HAVE_ABS		1
+#define XCHAL_HAVE_ADDX		1
 #define XCHAL_HAVE_L32R		1
 #define XCHAL_HAVE_MAC16	0
 #define XCHAL_HAVE_MUL16	0
config/xtensa/xtensa.h:

@@ -62,6 +62,8 @@ extern unsigned xtensa_current_frame_size;

 #define MASK_NO_FUSED_MADD	0x00008000	/* avoid f-p mul/add */
 #define MASK_SERIALIZE_VOLATILE 0x00010000	/* serialize volatile refs */
 #define MASK_CONST16		0x00020000	/* use CONST16 instruction */
+#define MASK_ABS		0x00040000	/* use ABS instruction */
+#define MASK_ADDX		0x00080000	/* use ADDX* and SUBX* */

 /* Macros used in the machine description to test the flags.  */

@@ -83,6 +85,8 @@ extern unsigned xtensa_current_frame_size;

 #define TARGET_NO_FUSED_MADD	(target_flags & MASK_NO_FUSED_MADD)
 #define TARGET_SERIALIZE_VOLATILE (target_flags & MASK_SERIALIZE_VOLATILE)
 #define TARGET_CONST16		(target_flags & MASK_CONST16)
+#define TARGET_ABS		(target_flags & MASK_ABS)
+#define TARGET_ADDX		(target_flags & MASK_ADDX)

 /* Default target_flags if no switches are specified  */

@@ -90,6 +94,8 @@ extern unsigned xtensa_current_frame_size;

   (XCHAL_HAVE_BE ? MASK_BIG_ENDIAN : 0) |	\
   (XCHAL_HAVE_DENSITY ? MASK_DENSITY : 0) |	\
   (XCHAL_HAVE_L32R ? 0 : MASK_CONST16) |	\
+  (XCHAL_HAVE_ABS ? MASK_ABS : 0) |		\
+  (XCHAL_HAVE_ADDX ? MASK_ADDX : 0) |		\
   (XCHAL_HAVE_MAC16 ? MASK_MAC16 : 0) |	\
   (XCHAL_HAVE_MUL16 ? MASK_MUL16 : 0) |	\
   (XCHAL_HAVE_MUL32 ? MASK_MUL32 : 0) |	\

@@ -121,6 +127,14 @@ extern unsigned xtensa_current_frame_size;

     N_("Use CONST16 instruction to load constants")},		\
   {"no-const16", -MASK_CONST16,					\
     N_("Use PC-relative L32R instruction to load constants")},	\
+  {"abs", MASK_ABS,						\
+    N_("Use the Xtensa ABS instruction")},			\
+  {"no-abs", -MASK_ABS,						\
+    N_("Do not use the Xtensa ABS instruction")},		\
+  {"addx", MASK_ADDX,						\
+    N_("Use the Xtensa ADDX and SUBX instructions")},		\
+  {"no-addx", -MASK_ADDX,					\
+    N_("Do not use the Xtensa ADDX and SUBX instructions")},	\
   {"mac16", MASK_MAC16,						\
     N_("Use the Xtensa MAC16 option")},				\
   {"no-mac16", -MASK_MAC16,					\
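Together with the XCHAL_HAVE_ABS and XCHAL_HAVE_ADDX definitions above, TARGET_DEFAULT now sets the new masks only when the processor configuration has the instructions, and the new switches override that. A small self-contained C sketch of the flag arithmetic; the mask values are from the patch, everything else is illustrative:

#include <stdio.h>

#define MASK_ABS        0x00040000
#define MASK_ADDX       0x00080000
#define XCHAL_HAVE_ABS  1
#define XCHAL_HAVE_ADDX 1

/* Mirrors the conditional TARGET_DEFAULT composition (other masks
   omitted for brevity).  */
#define TARGET_DEFAULT \
  ((XCHAL_HAVE_ABS ? MASK_ABS : 0) | (XCHAL_HAVE_ADDX ? MASK_ADDX : 0))

int main(void)
{
    unsigned target_flags = TARGET_DEFAULT;
    target_flags &= ~MASK_ADDX;            /* what the "no-addx" switch does */
    printf("TARGET_ABS=%d TARGET_ADDX=%d\n",
           !!(target_flags & MASK_ABS),    /* 1: abssi2 stays enabled */
           !!(target_flags & MASK_ADDX));  /* 0: addx/subx patterns disabled */
    return 0;
}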
config/xtensa/xtensa.md:

@@ -163,7 +163,7 @@

 	(plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
 			  (const_int 2))
 		 (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "addx2\\t%0, %1, %2"
   [(set_attr "type"	"arith")
    (set_attr "mode"	"SI")

@@ -174,7 +174,7 @@

 	(plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
 			  (const_int 4))
 		 (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "addx4\\t%0, %1, %2"
   [(set_attr "type"	"arith")
    (set_attr "mode"	"SI")

@@ -185,7 +185,7 @@

 	(plus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
 			  (const_int 8))
 		 (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "addx8\\t%0, %1, %2"
   [(set_attr "type"	"arith")
    (set_attr "mode"	"SI")

@@ -257,7 +257,7 @@

 	(minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
 			   (const_int 2))
 		  (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "subx2\\t%0, %1, %2"
   [(set_attr "type"	"arith")
    (set_attr "mode"	"SI")

@@ -268,7 +268,7 @@

 	(minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
 			   (const_int 4))
 		  (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "subx4\\t%0, %1, %2"
   [(set_attr "type"	"arith")
    (set_attr "mode"	"SI")

@@ -279,7 +279,7 @@

 	(minus:SI (mult:SI (match_operand:SI 1 "register_operand" "r")
 			   (const_int 8))
 		  (match_operand:SI 2 "register_operand" "r")))]
-  ""
+  "TARGET_ADDX"
   "subx8\\t%0, %1, %2"
   [(set_attr "type"	"arith")
    (set_attr "mode"	"SI")

@@ -518,7 +518,7 @@

 (define_insn "abssi2"
   [(set (match_operand:SI 0 "register_operand" "=a")
 	(abs:SI (match_operand:SI 1 "register_operand" "r")))]
-  ""
+  "TARGET_ABS"
   "abs\\t%0, %1"
   [(set_attr "type"	"arith")
    (set_attr "mode"	"SI")
doc/invoke.texi:

@@ -634,6 +634,8 @@ in the following sections.

 @gccoptlist{-mbig-endian -mlittle-endian @gol
 -mdensity -mno-density @gol
 -mconst16 -mno-const16 @gol
+-mabs -mno-abs @gol
+-maddx -mno-addx @gol
 -mmac16 -mno-mac16 @gol
 -mmul16 -mno-mul16 @gol
 -mmul32 -mno-mul32 @gol

@@ -10649,11 +10651,26 @@ Enable or disable use of the optional Xtensa code density instructions.

 @itemx -mno-const16
 @opindex mconst16
 @opindex mno-const16
-Enable or disable use of CONST16 instructions for loading constant values.
-The CONST16 instruction is currently not a standard option from Tensilica.
-When enabled, CONST16 instructions are always used in place of the standard
-L32R instructions.  The use of CONST16 is enabled by default only if the
-L32R instruction is not available.
+Enable or disable use of @code{CONST16} instructions for loading
+constant values.  The @code{CONST16} instruction is currently not a
+standard option from Tensilica.  When enabled, @code{CONST16}
+instructions are always used in place of the standard @code{L32R}
+instructions.  The use of @code{CONST16} is enabled by default only if
+the @code{L32R} instruction is not available.
+
+@item -mabs
+@itemx -mno-abs
+@opindex mabs
+@opindex mno-abs
+Enable or disable use of the Xtensa @code{ABS} instruction for absolute
+value operations.
+
+@item -maddx
+@itemx -mno-addx
+@opindex maddx
+@opindex mno-addx
+Enable or disable use of the Xtensa @code{ADDX} and @code{SUBX}
+instructions.
+
 @item -mmac16
 @itemx -mno-mac16
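For illustration only (this example is not part of the patch): with the new options at their defaults on a configuration that has the instructions, C functions like the following are candidates for the abssi2 and *addx4 patterns guarded above, so they can compile to single ABS and ADDX4 instructions; with -mno-abs and -mno-addx the predicates reject the patterns and GCC emits the multi-instruction equivalents instead.

/* Candidate for the abssi2 pattern (ABS instruction).  */
int iabs(int x)
{
    return x < 0 ? -x : x;
}

/* Candidate for the *addx4 pattern (ADDX4: (i << 2) + base).  */
int scaled_index(int base, int i)
{
    return i * 4 + base;
}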