Commit b7974b3a by Bob Wilson

lib1funcs.asm: Rename abi_entry/abi_return macros to leaf_entry/leaf_return.

	* config/xtensa/lib1funcs.asm: Rename abi_entry/abi_return macros
	to leaf_entry/leaf_return.  Change leaf_entry to add 16 bytes to
	the frame size.  Update to use the new macros.
	* config/xtensa/ieee754-sf.S: Use new leaf_entry/leaf_return macros.
	* config/xtensa/ieee754-df.S: Likewise.

From-SVN: r112604
parent 56e84019
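As a quick orientation before the diff (an illustrative excerpt taken from the __negdf2 hunk below, not part of the commit itself): leaf routines that used to reserve a fixed 32-byte frame through abi_entry now request only the space they actually use through leaf_entry, which adds the 16-byte "Extra Save Area" on its own.

    # Before (windowed ABI): a fixed 32-byte frame was requested explicitly.
    __negdf2:
            abi_entry sp, 32
            movi    a4, 0x80000000
            xor     xh, xh, a4
            abi_return

    # After: the call site asks for 16 bytes and leaf_entry adds the
    # 16-byte Extra Save Area itself (see the lib1funcs.asm hunk below).
    __negdf2:
            leaf_entry sp, 16
            movi    a4, 0x80000000
            xor     xh, xh, a4
            leaf_return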
ChangeLog

2006-03-31  Bob Wilson  <bob.wilson@acm.org>

        * config/xtensa/lib1funcs.asm: Rename abi_entry/abi_return macros
        to leaf_entry/leaf_return.  Change leaf_entry to add 16 bytes to
        the frame size.  Update to use the new macros.
        * config/xtensa/ieee754-sf.S: Use new leaf_entry/leaf_return macros.
        * config/xtensa/ieee754-df.S: Likewise.

2006-03-31  Richard Henderson  <rth@redhat.com>

        * tree-cfg.c (make_ctrl_stmt_edges, make_exit_edges): Merge into...
        (make_edges): ... here.  Control fallthru creation with a local
        ...

config/xtensa/ieee754-df.S

@@ -51,10 +51,10 @@
.global __negdf2 .global __negdf2
.type __negdf2, @function .type __negdf2, @function
__negdf2: __negdf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a4, 0x80000000 movi a4, 0x80000000
xor xh, xh, a4 xor xh, xh, a4
abi_return leaf_return
#endif /* L_negdf2 */ #endif /* L_negdf2 */
...@@ -74,13 +74,13 @@ __adddf3_aux: ...@@ -74,13 +74,13 @@ __adddf3_aux:
slli a7, xh, 12 slli a7, xh, 12
or a7, a7, xl or a7, a7, xl
beqz a7, .Ladd_ynan_or_inf beqz a7, .Ladd_ynan_or_inf
1: abi_return 1: leaf_return
.Ladd_ynan_or_inf: .Ladd_ynan_or_inf:
/* Return y. */ /* Return y. */
mov xh, yh mov xh, yh
mov xl, yl mov xl, yl
abi_return leaf_return
.Ladd_opposite_signs: .Ladd_opposite_signs:
/* Operand signs differ. Do a subtraction. */ /* Operand signs differ. Do a subtraction. */
...@@ -92,7 +92,7 @@ __adddf3_aux: ...@@ -92,7 +92,7 @@ __adddf3_aux:
.global __adddf3 .global __adddf3
.type __adddf3, @function .type __adddf3, @function
__adddf3: __adddf3:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
/* Check if the two operands have the same sign. */ /* Check if the two operands have the same sign. */
...@@ -166,7 +166,7 @@ __adddf3: ...@@ -166,7 +166,7 @@ __adddf3:
add xh, xh, yh add xh, xh, yh
bgeu xl, yl, 1f bgeu xl, yl, 1f
addi xh, xh, 1 addi xh, xh, 1
1: abi_return 1: leaf_return
.Ladd_bigshifty: .Ladd_bigshifty:
/* Exponent difference > 64 -- just return the bigger value. */ /* Exponent difference > 64 -- just return the bigger value. */
...@@ -229,7 +229,7 @@ __adddf3: ...@@ -229,7 +229,7 @@ __adddf3:
/* Check if the leftover fraction is exactly 1/2. */ /* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1 slli a9, a9, 1
beqz a9, .Ladd_exactlyhalf beqz a9, .Ladd_exactlyhalf
1: abi_return 1: leaf_return
.Ladd_bigshiftx: .Ladd_bigshiftx:
/* Mostly the same thing as "bigshifty".... */ /* Mostly the same thing as "bigshifty".... */
...@@ -247,7 +247,7 @@ __adddf3: ...@@ -247,7 +247,7 @@ __adddf3:
.Ladd_returny: .Ladd_returny:
mov xh, yh mov xh, yh
mov xl, yl mov xl, yl
abi_return leaf_return
.Ladd_carry: .Ladd_carry:
/* The addition has overflowed into the exponent field, so the /* The addition has overflowed into the exponent field, so the
...@@ -288,7 +288,7 @@ __adddf3: ...@@ -288,7 +288,7 @@ __adddf3:
addi xl, xl, 1 addi xl, xl, 1
beqz xl, .Ladd_roundcarry beqz xl, .Ladd_roundcarry
beqz a9, .Ladd_exactlyhalf beqz a9, .Ladd_exactlyhalf
1: abi_return 1: leaf_return
.Ladd_infinity: .Ladd_infinity:
/* Clear the mantissa. */ /* Clear the mantissa. */
...@@ -299,20 +299,20 @@ __adddf3: ...@@ -299,20 +299,20 @@ __adddf3:
/* The sign bit may have been lost in a carry-out. Put it back. */ /* The sign bit may have been lost in a carry-out. Put it back. */
slli a8, a8, 1 slli a8, a8, 1
or xh, xh, a8 or xh, xh, a8
abi_return leaf_return
.Ladd_exactlyhalf: .Ladd_exactlyhalf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli xl, xl, 1 srli xl, xl, 1
slli xl, xl, 1 slli xl, xl, 1
abi_return leaf_return
.Ladd_roundcarry: .Ladd_roundcarry:
/* xl is always zero when the rounding increment overflows, so /* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */ there's no need to round it to an even value. */
addi xh, xh, 1 addi xh, xh, 1
/* Overflow to the exponent is OK. */ /* Overflow to the exponent is OK. */
abi_return leaf_return
/* Subtraction */ /* Subtraction */
...@@ -328,14 +328,14 @@ __subdf3_aux: ...@@ -328,14 +328,14 @@ __subdf3_aux:
/* Both x and y are either NaN or Inf, so the result is NaN. */ /* Both x and y are either NaN or Inf, so the result is NaN. */
movi a4, 0x80000 /* make it a quiet NaN */ movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4 or xh, xh, a4
1: abi_return 1: leaf_return
.Lsub_ynan_or_inf: .Lsub_ynan_or_inf:
/* Negate y and return it. */ /* Negate y and return it. */
slli a7, a6, 11 slli a7, a6, 11
xor xh, yh, a7 xor xh, yh, a7
mov xl, yl mov xl, yl
abi_return leaf_return
.Lsub_opposite_signs: .Lsub_opposite_signs:
/* Operand signs differ. Do an addition. */ /* Operand signs differ. Do an addition. */
...@@ -347,7 +347,7 @@ __subdf3_aux: ...@@ -347,7 +347,7 @@ __subdf3_aux:
.global __subdf3 .global __subdf3
.type __subdf3, @function .type __subdf3, @function
__subdf3: __subdf3:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
/* Check if the two operands have the same sign. */ /* Check if the two operands have the same sign. */
...@@ -415,7 +415,7 @@ __subdf3: ...@@ -415,7 +415,7 @@ __subdf3:
/* The operands are equal. Return 0.0. */ /* The operands are equal. Return 0.0. */
movi xh, 0 movi xh, 0
movi xl, 0 movi xl, 0
1: abi_return 1: leaf_return
.Lsub_yexpzero: .Lsub_yexpzero:
/* y is a subnormal value. Replace its sign/exponent with zero, /* y is a subnormal value. Replace its sign/exponent with zero,
...@@ -493,7 +493,7 @@ __subdf3: ...@@ -493,7 +493,7 @@ __subdf3:
/* Check if the leftover fraction is exactly 1/2. */ /* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1 slli a9, a9, 1
beqz a9, .Lsub_exactlyhalf beqz a9, .Lsub_exactlyhalf
1: abi_return 1: leaf_return
.Lsub_xexpzero: .Lsub_xexpzero:
/* Same as "yexpzero". */ /* Same as "yexpzero". */
...@@ -523,7 +523,7 @@ __subdf3: ...@@ -523,7 +523,7 @@ __subdf3:
slli a7, a6, 11 slli a7, a6, 11
xor xh, yh, a7 xor xh, yh, a7
mov xl, yl mov xl, yl
abi_return leaf_return
.Lsub_borrow: .Lsub_borrow:
/* The subtraction has underflowed into the exponent field, so the /* The subtraction has underflowed into the exponent field, so the
...@@ -560,14 +560,14 @@ __subdf3: ...@@ -560,14 +560,14 @@ __subdf3:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli xl, xl, 1 srli xl, xl, 1
slli xl, xl, 1 slli xl, xl, 1
abi_return leaf_return
.Lsub_roundcarry: .Lsub_roundcarry:
/* xl is always zero when the rounding increment overflows, so /* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */ there's no need to round it to an even value. */
addi xh, xh, 1 addi xh, xh, 1
/* Overflow to the exponent is OK. */ /* Overflow to the exponent is OK. */
abi_return leaf_return
.Lsub_xhzero: .Lsub_xhzero:
/* When normalizing the result, all the mantissa bits in the high /* When normalizing the result, all the mantissa bits in the high
...@@ -728,7 +728,7 @@ __muldf3_aux: ...@@ -728,7 +728,7 @@ __muldf3_aux:
.global __muldf3 .global __muldf3
.type __muldf3, @function .type __muldf3, @function
__muldf3: __muldf3:
abi_entry sp, 48 leaf_entry sp, 32
#if __XTENSA_CALL0_ABI__ #if __XTENSA_CALL0_ABI__
addi sp, sp, -32 addi sp, sp, -32
s32i a12, sp, 16 s32i a12, sp, 16
...@@ -1093,7 +1093,7 @@ __muldf3: ...@@ -1093,7 +1093,7 @@ __muldf3:
l32i a15, sp, 28 l32i a15, sp, 28
addi sp, sp, 32 addi sp, sp, 32
#endif #endif
abi_return leaf_return
.Lmul_exactlyhalf: .Lmul_exactlyhalf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
...@@ -1246,7 +1246,7 @@ __divdf3_aux: ...@@ -1246,7 +1246,7 @@ __divdf3_aux:
movi a4, 0x80000 /* make it a quiet NaN */ movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4 or xh, xh, a4
1: movi xl, 0 1: movi xl, 0
abi_return leaf_return
.Ldiv_xexpzero: .Ldiv_xexpzero:
/* Clear the sign bit of x. */ /* Clear the sign bit of x. */
...@@ -1287,7 +1287,7 @@ __divdf3_aux: ...@@ -1287,7 +1287,7 @@ __divdf3_aux:
srli xh, a7, 31 srli xh, a7, 31
slli xh, xh, 31 slli xh, xh, 31
movi xl, 0 movi xl, 0
abi_return leaf_return
.Ldiv_xnan_or_inf: .Ldiv_xnan_or_inf:
/* Set the sign bit of the result. */ /* Set the sign bit of the result. */
...@@ -1298,7 +1298,7 @@ __divdf3_aux: ...@@ -1298,7 +1298,7 @@ __divdf3_aux:
bnall yh, a6, 1f bnall yh, a6, 1f
movi a4, 0x80000 /* make it a quiet NaN */ movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4 or xh, xh, a4
1: abi_return 1: leaf_return
.Ldiv_ynan_or_inf: .Ldiv_ynan_or_inf:
/* If y is Infinity, return zero. */ /* If y is Infinity, return zero. */
...@@ -1308,7 +1308,7 @@ __divdf3_aux: ...@@ -1308,7 +1308,7 @@ __divdf3_aux:
/* y is NaN; return it. */ /* y is NaN; return it. */
mov xh, yh mov xh, yh
mov xl, yl mov xl, yl
abi_return leaf_return
.Ldiv_highequal1: .Ldiv_highequal1:
bltu xl, yl, 2f bltu xl, yl, 2f
...@@ -1318,7 +1318,7 @@ __divdf3_aux: ...@@ -1318,7 +1318,7 @@ __divdf3_aux:
.global __divdf3 .global __divdf3
.type __divdf3, @function .type __divdf3, @function
__divdf3: __divdf3:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
/* Get the sign of the result. */ /* Get the sign of the result. */
...@@ -1431,7 +1431,7 @@ __divdf3: ...@@ -1431,7 +1431,7 @@ __divdf3:
srli a7, a7, 31 srli a7, a7, 31
slli a7, a7, 31 slli a7, a7, 31
or xh, xh, a7 or xh, xh, a7
abi_return leaf_return
.Ldiv_highequal2: .Ldiv_highequal2:
bgeu xl, yl, 2b bgeu xl, yl, 2b
...@@ -1510,7 +1510,7 @@ __divdf3: ...@@ -1510,7 +1510,7 @@ __divdf3:
srli xh, a7, 31 srli xh, a7, 31
slli xh, xh, 31 slli xh, xh, 31
movi xl, 0 movi xl, 0
abi_return leaf_return
#endif /* L_divdf3 */ #endif /* L_divdf3 */
...@@ -1524,7 +1524,7 @@ __divdf3: ...@@ -1524,7 +1524,7 @@ __divdf3:
.set __nedf2, __eqdf2 .set __nedf2, __eqdf2
.type __eqdf2, @function .type __eqdf2, @function
__eqdf2: __eqdf2:
abi_entry sp, 32 leaf_entry sp, 16
bne xl, yl, 2f bne xl, yl, 2f
bne xh, yh, 4f bne xh, yh, 4f
...@@ -1534,11 +1534,11 @@ __eqdf2: ...@@ -1534,11 +1534,11 @@ __eqdf2:
/* Equal. */ /* Equal. */
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Not equal. */ /* Not equal. */
2: movi a2, 1 2: movi a2, 1
abi_return leaf_return
/* Check if the mantissas are nonzero. */ /* Check if the mantissas are nonzero. */
3: slli a7, xh, 12 3: slli a7, xh, 12
...@@ -1555,7 +1555,7 @@ __eqdf2: ...@@ -1555,7 +1555,7 @@ __eqdf2:
5: movi a2, 0 5: movi a2, 0
movi a3, 1 movi a3, 1
movnez a2, a3, a7 movnez a2, a3, a7
abi_return leaf_return
/* Greater Than */ /* Greater Than */
...@@ -1564,7 +1564,7 @@ __eqdf2: ...@@ -1564,7 +1564,7 @@ __eqdf2:
.global __gtdf2 .global __gtdf2
.type __gtdf2, @function .type __gtdf2, @function
__gtdf2: __gtdf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
ball xh, a6, 2f ball xh, a6, 2f
1: bnall yh, a6, .Lle_cmp 1: bnall yh, a6, .Lle_cmp
...@@ -1574,14 +1574,14 @@ __gtdf2: ...@@ -1574,14 +1574,14 @@ __gtdf2:
or a7, a7, yl or a7, a7, yl
beqz a7, .Lle_cmp beqz a7, .Lle_cmp
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, xh, 12 2: slli a7, xh, 12
or a7, a7, xl or a7, a7, xl
beqz a7, 1b beqz a7, 1b
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Less Than or Equal */ /* Less Than or Equal */
...@@ -1590,7 +1590,7 @@ __gtdf2: ...@@ -1590,7 +1590,7 @@ __gtdf2:
.global __ledf2 .global __ledf2
.type __ledf2, @function .type __ledf2, @function
__ledf2: __ledf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
ball xh, a6, 2f ball xh, a6, 2f
1: bnall yh, a6, .Lle_cmp 1: bnall yh, a6, .Lle_cmp
...@@ -1600,14 +1600,14 @@ __ledf2: ...@@ -1600,14 +1600,14 @@ __ledf2:
or a7, a7, yl or a7, a7, yl
beqz a7, .Lle_cmp beqz a7, .Lle_cmp
movi a2, 1 movi a2, 1
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, xh, 12 2: slli a7, xh, 12
or a7, a7, xl or a7, a7, xl
beqz a7, 1b beqz a7, 1b
movi a2, 1 movi a2, 1
abi_return leaf_return
.Lle_cmp: .Lle_cmp:
/* Check if x and y have different signs. */ /* Check if x and y have different signs. */
...@@ -1622,7 +1622,7 @@ __ledf2: ...@@ -1622,7 +1622,7 @@ __ledf2:
bne xh, yh, 5f bne xh, yh, 5f
bltu yl, xl, 5f bltu yl, xl, 5f
4: movi a2, 0 4: movi a2, 0
abi_return leaf_return
.Lle_xneg: .Lle_xneg:
/* Check if y <= x. */ /* Check if y <= x. */
...@@ -1630,7 +1630,7 @@ __ledf2: ...@@ -1630,7 +1630,7 @@ __ledf2:
bne yh, xh, 5f bne yh, xh, 5f
bgeu xl, yl, 4b bgeu xl, yl, 4b
5: movi a2, 1 5: movi a2, 1
abi_return leaf_return
.Lle_diff_signs: .Lle_diff_signs:
bltz xh, 4b bltz xh, 4b
...@@ -1643,7 +1643,7 @@ __ledf2: ...@@ -1643,7 +1643,7 @@ __ledf2:
movi a2, 1 movi a2, 1
movi a3, 0 movi a3, 0
moveqz a2, a3, a7 moveqz a2, a3, a7
abi_return leaf_return
/* Greater Than or Equal */ /* Greater Than or Equal */
...@@ -1652,7 +1652,7 @@ __ledf2: ...@@ -1652,7 +1652,7 @@ __ledf2:
.global __gedf2 .global __gedf2
.type __gedf2, @function .type __gedf2, @function
__gedf2: __gedf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
ball xh, a6, 2f ball xh, a6, 2f
1: bnall yh, a6, .Llt_cmp 1: bnall yh, a6, .Llt_cmp
...@@ -1662,14 +1662,14 @@ __gedf2: ...@@ -1662,14 +1662,14 @@ __gedf2:
or a7, a7, yl or a7, a7, yl
beqz a7, .Llt_cmp beqz a7, .Llt_cmp
movi a2, -1 movi a2, -1
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, xh, 12 2: slli a7, xh, 12
or a7, a7, xl or a7, a7, xl
beqz a7, 1b beqz a7, 1b
movi a2, -1 movi a2, -1
abi_return leaf_return
/* Less Than */ /* Less Than */
...@@ -1678,7 +1678,7 @@ __gedf2: ...@@ -1678,7 +1678,7 @@ __gedf2:
.global __ltdf2 .global __ltdf2
.type __ltdf2, @function .type __ltdf2, @function
__ltdf2: __ltdf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
ball xh, a6, 2f ball xh, a6, 2f
1: bnall yh, a6, .Llt_cmp 1: bnall yh, a6, .Llt_cmp
...@@ -1688,14 +1688,14 @@ __ltdf2: ...@@ -1688,14 +1688,14 @@ __ltdf2:
or a7, a7, yl or a7, a7, yl
beqz a7, .Llt_cmp beqz a7, .Llt_cmp
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, xh, 12 2: slli a7, xh, 12
or a7, a7, xl or a7, a7, xl
beqz a7, 1b beqz a7, 1b
movi a2, 0 movi a2, 0
abi_return leaf_return
.Llt_cmp: .Llt_cmp:
/* Check if x and y have different signs. */ /* Check if x and y have different signs. */
...@@ -1710,7 +1710,7 @@ __ltdf2: ...@@ -1710,7 +1710,7 @@ __ltdf2:
bne xh, yh, 5f bne xh, yh, 5f
bgeu xl, yl, 5f bgeu xl, yl, 5f
4: movi a2, -1 4: movi a2, -1
abi_return leaf_return
.Llt_xneg: .Llt_xneg:
/* Check if y < x. */ /* Check if y < x. */
...@@ -1718,7 +1718,7 @@ __ltdf2: ...@@ -1718,7 +1718,7 @@ __ltdf2:
bne yh, xh, 5f bne yh, xh, 5f
bltu yl, xl, 4b bltu yl, xl, 4b
5: movi a2, 0 5: movi a2, 0
abi_return leaf_return
.Llt_diff_signs: .Llt_diff_signs:
bgez xh, 5b bgez xh, 5b
...@@ -1731,7 +1731,7 @@ __ltdf2: ...@@ -1731,7 +1731,7 @@ __ltdf2:
movi a2, 0 movi a2, 0
movi a3, -1 movi a3, -1
movnez a2, a3, a7 movnez a2, a3, a7
abi_return leaf_return
/* Unordered */ /* Unordered */
...@@ -1740,24 +1740,24 @@ __ltdf2: ...@@ -1740,24 +1740,24 @@ __ltdf2:
.global __unorddf2 .global __unorddf2
.type __unorddf2, @function .type __unorddf2, @function
__unorddf2: __unorddf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7ff00000 movi a6, 0x7ff00000
ball xh, a6, 3f ball xh, a6, 3f
1: ball yh, a6, 4f 1: ball yh, a6, 4f
2: movi a2, 0 2: movi a2, 0
abi_return leaf_return
3: slli a7, xh, 12 3: slli a7, xh, 12
or a7, a7, xl or a7, a7, xl
beqz a7, 1b beqz a7, 1b
movi a2, 1 movi a2, 1
abi_return leaf_return
4: slli a7, yh, 12 4: slli a7, yh, 12
or a7, a7, yl or a7, a7, yl
beqz a7, 2b beqz a7, 2b
movi a2, 1 movi a2, 1
abi_return leaf_return
#endif /* L_cmpdf2 */ #endif /* L_cmpdf2 */
...@@ -1767,7 +1767,7 @@ __unorddf2: ...@@ -1767,7 +1767,7 @@ __unorddf2:
.global __fixdfsi .global __fixdfsi
.type __fixdfsi, @function .type __fixdfsi, @function
__fixdfsi: __fixdfsi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7ff00000 movi a6, 0x7ff00000
...@@ -1792,7 +1792,7 @@ __fixdfsi: ...@@ -1792,7 +1792,7 @@ __fixdfsi:
/* Negate the result if sign != 0. */ /* Negate the result if sign != 0. */
neg a2, a5 neg a2, a5
movgez a2, a5, a7 movgez a2, a5, a7
abi_return leaf_return
.Lfixdfsi_nan_or_inf: .Lfixdfsi_nan_or_inf:
/* Handle Infinity and NaN. */ /* Handle Infinity and NaN. */
...@@ -1808,11 +1808,11 @@ __fixdfsi: ...@@ -1808,11 +1808,11 @@ __fixdfsi:
addi a5, a4, -1 /* 0x7fffffff */ addi a5, a4, -1 /* 0x7fffffff */
movgez a4, a5, xh movgez a4, a5, xh
mov a2, a4 mov a2, a4
abi_return leaf_return
.Lfixdfsi_zero: .Lfixdfsi_zero:
movi a2, 0 movi a2, 0
abi_return leaf_return
#endif /* L_fixdfsi */ #endif /* L_fixdfsi */
...@@ -1822,7 +1822,7 @@ __fixdfsi: ...@@ -1822,7 +1822,7 @@ __fixdfsi:
.global __fixdfdi .global __fixdfdi
.type __fixdfdi, @function .type __fixdfdi, @function
__fixdfdi: __fixdfdi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7ff00000 movi a6, 0x7ff00000
...@@ -1854,7 +1854,7 @@ __fixdfdi: ...@@ -1854,7 +1854,7 @@ __fixdfdi:
neg xh, xh neg xh, xh
beqz xl, 1f beqz xl, 1f
addi xh, xh, -1 addi xh, xh, -1
1: abi_return 1: leaf_return
.Lfixdfdi_smallshift: .Lfixdfdi_smallshift:
src xl, xh, xl src xl, xh, xl
...@@ -1875,16 +1875,16 @@ __fixdfdi: ...@@ -1875,16 +1875,16 @@ __fixdfdi:
bgez xh, 1f bgez xh, 1f
mov xh, a7 mov xh, a7
movi xl, 0 movi xl, 0
abi_return leaf_return
1: addi xh, a7, -1 /* 0x7fffffff */ 1: addi xh, a7, -1 /* 0x7fffffff */
movi xl, -1 movi xl, -1
abi_return leaf_return
.Lfixdfdi_zero: .Lfixdfdi_zero:
movi xh, 0 movi xh, 0
movi xl, 0 movi xl, 0
abi_return leaf_return
#endif /* L_fixdfdi */ #endif /* L_fixdfdi */
...@@ -1894,7 +1894,7 @@ __fixdfdi: ...@@ -1894,7 +1894,7 @@ __fixdfdi:
.global __fixunsdfsi .global __fixunsdfsi
.type __fixunsdfsi, @function .type __fixunsdfsi, @function
__fixunsdfsi: __fixunsdfsi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7ff00000 movi a6, 0x7ff00000
...@@ -1921,7 +1921,7 @@ __fixunsdfsi: ...@@ -1921,7 +1921,7 @@ __fixunsdfsi:
/* Negate the result if sign != 0. */ /* Negate the result if sign != 0. */
neg a2, a5 neg a2, a5
movgez a2, a5, a7 movgez a2, a5, a7
abi_return leaf_return
.Lfixunsdfsi_nan_or_inf: .Lfixunsdfsi_nan_or_inf:
/* Handle Infinity and NaN. */ /* Handle Infinity and NaN. */
...@@ -1931,28 +1931,28 @@ __fixunsdfsi: ...@@ -1931,28 +1931,28 @@ __fixunsdfsi:
/* Translate NaN to 0xffffffff. */ /* Translate NaN to 0xffffffff. */
movi a2, -1 movi a2, -1
abi_return leaf_return
.Lfixunsdfsi_maxint: .Lfixunsdfsi_maxint:
slli a4, a6, 11 /* 0x80000000 */ slli a4, a6, 11 /* 0x80000000 */
movi a5, -1 /* 0xffffffff */ movi a5, -1 /* 0xffffffff */
movgez a4, a5, xh movgez a4, a5, xh
mov a2, a4 mov a2, a4
abi_return leaf_return
.Lfixunsdfsi_zero: .Lfixunsdfsi_zero:
movi a2, 0 movi a2, 0
abi_return leaf_return
.Lfixunsdfsi_bigexp: .Lfixunsdfsi_bigexp:
/* Handle unsigned maximum exponent case. */ /* Handle unsigned maximum exponent case. */
bltz xh, 1f bltz xh, 1f
mov a2, a5 /* no shift needed */ mov a2, a5 /* no shift needed */
abi_return leaf_return
/* Return 0x80000000 if negative. */ /* Return 0x80000000 if negative. */
1: slli a2, a6, 11 1: slli a2, a6, 11
abi_return leaf_return
#endif /* L_fixunsdfsi */ #endif /* L_fixunsdfsi */
...@@ -1962,7 +1962,7 @@ __fixunsdfsi: ...@@ -1962,7 +1962,7 @@ __fixunsdfsi:
.global __fixunsdfdi .global __fixunsdfdi
.type __fixunsdfdi, @function .type __fixunsdfdi, @function
__fixunsdfdi: __fixunsdfdi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7ff00000 movi a6, 0x7ff00000
...@@ -1996,7 +1996,7 @@ __fixunsdfdi: ...@@ -1996,7 +1996,7 @@ __fixunsdfdi:
neg xh, xh neg xh, xh
beqz xl, 1f beqz xl, 1f
addi xh, xh, -1 addi xh, xh, -1
1: abi_return 1: leaf_return
.Lfixunsdfdi_smallshift: .Lfixunsdfdi_smallshift:
src xl, xh, xl src xl, xh, xl
...@@ -2012,23 +2012,23 @@ __fixunsdfdi: ...@@ -2012,23 +2012,23 @@ __fixunsdfdi:
/* Translate NaN to 0xffffffff.... */ /* Translate NaN to 0xffffffff.... */
1: movi xh, -1 1: movi xh, -1
movi xl, -1 movi xl, -1
abi_return leaf_return
.Lfixunsdfdi_maxint: .Lfixunsdfdi_maxint:
bgez xh, 1b bgez xh, 1b
2: slli xh, a6, 11 /* 0x80000000 */ 2: slli xh, a6, 11 /* 0x80000000 */
movi xl, 0 movi xl, 0
abi_return leaf_return
.Lfixunsdfdi_zero: .Lfixunsdfdi_zero:
movi xh, 0 movi xh, 0
movi xl, 0 movi xl, 0
abi_return leaf_return
.Lfixunsdfdi_bigexp: .Lfixunsdfdi_bigexp:
/* Handle unsigned maximum exponent case. */ /* Handle unsigned maximum exponent case. */
bltz a7, 2b bltz a7, 2b
abi_return /* no shift needed */ leaf_return /* no shift needed */
#endif /* L_fixunsdfdi */ #endif /* L_fixunsdfdi */
...@@ -2038,7 +2038,7 @@ __fixunsdfdi: ...@@ -2038,7 +2038,7 @@ __fixunsdfdi:
.global __floatunsidf .global __floatunsidf
.type __floatunsidf, @function .type __floatunsidf, @function
__floatunsidf: __floatunsidf:
abi_entry sp, 32 leaf_entry sp, 16
beqz a2, .Lfloatsidf_return_zero beqz a2, .Lfloatsidf_return_zero
/* Set the sign to zero and jump to the floatsidf code. */ /* Set the sign to zero and jump to the floatsidf code. */
...@@ -2049,7 +2049,7 @@ __floatunsidf: ...@@ -2049,7 +2049,7 @@ __floatunsidf:
.global __floatsidf .global __floatsidf
.type __floatsidf, @function .type __floatsidf, @function
__floatsidf: __floatsidf:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for zero. */ /* Check for zero. */
beqz a2, .Lfloatsidf_return_zero beqz a2, .Lfloatsidf_return_zero
...@@ -2084,11 +2084,11 @@ __floatsidf: ...@@ -2084,11 +2084,11 @@ __floatsidf:
/* Add the sign and return. */ /* Add the sign and return. */
slli a7, a7, 31 slli a7, a7, 31
or xh, xh, a7 or xh, xh, a7
abi_return leaf_return
.Lfloatsidf_return_zero: .Lfloatsidf_return_zero:
movi a3, 0 movi a3, 0
abi_return leaf_return
#endif /* L_floatsidf */ #endif /* L_floatsidf */
...@@ -2098,7 +2098,7 @@ __floatsidf: ...@@ -2098,7 +2098,7 @@ __floatsidf:
.global __floatundidf .global __floatundidf
.type __floatundidf, @function .type __floatundidf, @function
__floatundidf: __floatundidf:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for zero. */ /* Check for zero. */
or a4, xh, xl or a4, xh, xl
...@@ -2112,7 +2112,7 @@ __floatundidf: ...@@ -2112,7 +2112,7 @@ __floatundidf:
.global __floatdidf .global __floatdidf
.type __floatdidf, @function .type __floatdidf, @function
__floatdidf: __floatdidf:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for zero. */ /* Check for zero. */
or a4, xh, xl or a4, xh, xl
...@@ -2161,7 +2161,7 @@ __floatdidf: ...@@ -2161,7 +2161,7 @@ __floatdidf:
/* Check if the leftover fraction is exactly 1/2. */ /* Check if the leftover fraction is exactly 1/2. */
slli a6, a6, 1 slli a6, a6, 1
beqz a6, .Lfloatdidf_exactlyhalf beqz a6, .Lfloatdidf_exactlyhalf
2: abi_return 2: leaf_return
.Lfloatdidf_bigshift: .Lfloatdidf_bigshift:
/* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */ /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
...@@ -2176,14 +2176,14 @@ __floatdidf: ...@@ -2176,14 +2176,14 @@ __floatdidf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli xl, xl, 1 srli xl, xl, 1
slli xl, xl, 1 slli xl, xl, 1
abi_return leaf_return
.Lfloatdidf_roundcarry: .Lfloatdidf_roundcarry:
/* xl is always zero when the rounding increment overflows, so /* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */ there's no need to round it to an even value. */
addi xh, xh, 1 addi xh, xh, 1
/* Overflow to the exponent is OK. */ /* Overflow to the exponent is OK. */
abi_return leaf_return
#endif /* L_floatdidf */ #endif /* L_floatdidf */
...@@ -2193,7 +2193,7 @@ __floatdidf: ...@@ -2193,7 +2193,7 @@ __floatdidf:
.global __truncdfsf2 .global __truncdfsf2
.type __truncdfsf2, @function .type __truncdfsf2, @function
__truncdfsf2: __truncdfsf2:
abi_entry sp, 32 leaf_entry sp, 16
/* Adjust the exponent bias. */ /* Adjust the exponent bias. */
movi a4, (0x3ff - 0x7f) << 20 movi a4, (0x3ff - 0x7f) << 20
...@@ -2228,13 +2228,13 @@ __truncdfsf2: ...@@ -2228,13 +2228,13 @@ __truncdfsf2:
/* Check if the leftover fraction is exactly 1/2. */ /* Check if the leftover fraction is exactly 1/2. */
slli a4, a4, 1 slli a4, a4, 1
beqz a4, .Ltrunc_exactlyhalf beqz a4, .Ltrunc_exactlyhalf
1: abi_return 1: leaf_return
.Ltrunc_exactlyhalf: .Ltrunc_exactlyhalf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli a2, a2, 1 srli a2, a2, 1
slli a2, a2, 1 slli a2, a2, 1
abi_return leaf_return
.Ltrunc_overflow: .Ltrunc_overflow:
/* Check if exponent == 0x7ff. */ /* Check if exponent == 0x7ff. */
...@@ -2254,7 +2254,7 @@ __truncdfsf2: ...@@ -2254,7 +2254,7 @@ __truncdfsf2:
extui a6, xh, 31, 1 extui a6, xh, 31, 1
ssai 1 ssai 1
src a2, a6, a4 src a2, a6, a4
abi_return leaf_return
.Ltrunc_underflow: .Ltrunc_underflow:
/* Find shift count for a subnormal. Flush to zero if >= 32. */ /* Find shift count for a subnormal. Flush to zero if >= 32. */
...@@ -2287,7 +2287,7 @@ __truncdfsf2: ...@@ -2287,7 +2287,7 @@ __truncdfsf2:
/* Return +/- zero. */ /* Return +/- zero. */
1: extui a2, xh, 31, 1 1: extui a2, xh, 31, 1
slli a2, a2, 31 slli a2, a2, 31
abi_return leaf_return
#endif /* L_truncdfsf2 */ #endif /* L_truncdfsf2 */
...@@ -2297,7 +2297,7 @@ __truncdfsf2: ...@@ -2297,7 +2297,7 @@ __truncdfsf2:
.global __extendsfdf2 .global __extendsfdf2
.type __extendsfdf2, @function .type __extendsfdf2, @function
__extendsfdf2: __extendsfdf2:
abi_entry sp, 32 leaf_entry sp, 16
/* Save the sign bit and then shift it off. */ /* Save the sign bit and then shift it off. */
extui a5, a2, 31, 1 extui a5, a2, 31, 1
...@@ -2320,7 +2320,7 @@ __extendsfdf2: ...@@ -2320,7 +2320,7 @@ __extendsfdf2:
/* Add the sign bit. */ /* Add the sign bit. */
or xh, a4, a5 or xh, a4, a5
abi_return leaf_return
.Lextend_nan_or_inf: .Lextend_nan_or_inf:
movi a4, 0x7ff00000 movi a4, 0x7ff00000
...@@ -2335,7 +2335,7 @@ __extendsfdf2: ...@@ -2335,7 +2335,7 @@ __extendsfdf2:
/* Add the sign and return. */ /* Add the sign and return. */
1: or xh, a4, a5 1: or xh, a4, a5
movi xl, 0 movi xl, 0
abi_return leaf_return
.Lextend_expzero: .Lextend_expzero:
beqz a4, 1b beqz a4, 1b
...@@ -2358,7 +2358,7 @@ __extendsfdf2: ...@@ -2358,7 +2358,7 @@ __extendsfdf2:
/* Add the sign and return. */ /* Add the sign and return. */
or xh, a4, a5 or xh, a4, a5
abi_return leaf_return
#endif /* L_extendsfdf2 */ #endif /* L_extendsfdf2 */
config/xtensa/ieee754-sf.S

@@ -51,10 +51,10 @@
.global __negsf2 .global __negsf2
.type __negsf2, @function .type __negsf2, @function
__negsf2: __negsf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a4, 0x80000000 movi a4, 0x80000000
xor a2, a2, a4 xor a2, a2, a4
abi_return leaf_return
#endif /* L_negsf2 */ #endif /* L_negsf2 */
...@@ -73,12 +73,12 @@ __addsf3_aux: ...@@ -73,12 +73,12 @@ __addsf3_aux:
/* If x is a NaN, return it. Otherwise, return y. */ /* If x is a NaN, return it. Otherwise, return y. */
slli a7, a2, 9 slli a7, a2, 9
beqz a7, .Ladd_ynan_or_inf beqz a7, .Ladd_ynan_or_inf
1: abi_return 1: leaf_return
.Ladd_ynan_or_inf: .Ladd_ynan_or_inf:
/* Return y. */ /* Return y. */
mov a2, a3 mov a2, a3
abi_return leaf_return
.Ladd_opposite_signs: .Ladd_opposite_signs:
/* Operand signs differ. Do a subtraction. */ /* Operand signs differ. Do a subtraction. */
...@@ -90,7 +90,7 @@ __addsf3_aux: ...@@ -90,7 +90,7 @@ __addsf3_aux:
.global __addsf3 .global __addsf3
.type __addsf3, @function .type __addsf3, @function
__addsf3: __addsf3:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
/* Check if the two operands have the same sign. */ /* Check if the two operands have the same sign. */
...@@ -158,7 +158,7 @@ __addsf3: ...@@ -158,7 +158,7 @@ __addsf3:
a carry into the exponent field will not work because it a carry into the exponent field will not work because it
assumes there is an implicit "1.0" that needs to be added. */ assumes there is an implicit "1.0" that needs to be added. */
add a2, a2, a3 add a2, a2, a3
1: abi_return 1: leaf_return
.Ladd_xexpzero: .Ladd_xexpzero:
/* Same as "yexpzero" except skip handling the case when both /* Same as "yexpzero" except skip handling the case when both
...@@ -200,11 +200,11 @@ __addsf3: ...@@ -200,11 +200,11 @@ __addsf3:
/* Check if the leftover fraction is exactly 1/2. */ /* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1 slli a9, a9, 1
beqz a9, .Ladd_exactlyhalf beqz a9, .Ladd_exactlyhalf
1: abi_return 1: leaf_return
.Ladd_returny: .Ladd_returny:
mov a2, a3 mov a2, a3
abi_return leaf_return
.Ladd_carry: .Ladd_carry:
/* The addition has overflowed into the exponent field, so the /* The addition has overflowed into the exponent field, so the
...@@ -242,7 +242,7 @@ __addsf3: ...@@ -242,7 +242,7 @@ __addsf3:
bbci.l a10, 0, 1f bbci.l a10, 0, 1f
addi a2, a2, 1 addi a2, a2, 1
beqz a9, .Ladd_exactlyhalf beqz a9, .Ladd_exactlyhalf
1: abi_return 1: leaf_return
.Ladd_infinity: .Ladd_infinity:
/* Clear the mantissa. */ /* Clear the mantissa. */
...@@ -252,13 +252,13 @@ __addsf3: ...@@ -252,13 +252,13 @@ __addsf3:
/* The sign bit may have been lost in a carry-out. Put it back. */ /* The sign bit may have been lost in a carry-out. Put it back. */
slli a8, a8, 1 slli a8, a8, 1
or a2, a2, a8 or a2, a2, a8
abi_return leaf_return
.Ladd_exactlyhalf: .Ladd_exactlyhalf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli a2, a2, 1 srli a2, a2, 1
slli a2, a2, 1 slli a2, a2, 1
abi_return leaf_return
/* Subtraction */ /* Subtraction */
...@@ -274,13 +274,13 @@ __subsf3_aux: ...@@ -274,13 +274,13 @@ __subsf3_aux:
/* Both x and y are either NaN or Inf, so the result is NaN. */ /* Both x and y are either NaN or Inf, so the result is NaN. */
movi a4, 0x400000 /* make it a quiet NaN */ movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4 or a2, a2, a4
1: abi_return 1: leaf_return
.Lsub_ynan_or_inf: .Lsub_ynan_or_inf:
/* Negate y and return it. */ /* Negate y and return it. */
slli a7, a6, 8 slli a7, a6, 8
xor a2, a3, a7 xor a2, a3, a7
abi_return leaf_return
.Lsub_opposite_signs: .Lsub_opposite_signs:
/* Operand signs differ. Do an addition. */ /* Operand signs differ. Do an addition. */
...@@ -292,7 +292,7 @@ __subsf3_aux: ...@@ -292,7 +292,7 @@ __subsf3_aux:
.global __subsf3 .global __subsf3
.type __subsf3, @function .type __subsf3, @function
__subsf3: __subsf3:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
/* Check if the two operands have the same sign. */ /* Check if the two operands have the same sign. */
...@@ -366,7 +366,7 @@ __subsf3: ...@@ -366,7 +366,7 @@ __subsf3:
/* Negate and return y. */ /* Negate and return y. */
slli a7, a6, 8 slli a7, a6, 8
xor a2, a3, a7 xor a2, a3, a7
1: abi_return 1: leaf_return
.Lsub_xsmaller: .Lsub_xsmaller:
/* Same thing as the "ysmaller" code, but with x and y swapped and /* Same thing as the "ysmaller" code, but with x and y swapped and
...@@ -408,7 +408,7 @@ __subsf3: ...@@ -408,7 +408,7 @@ __subsf3:
/* Check if the leftover fraction is exactly 1/2. */ /* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1 slli a9, a9, 1
beqz a9, .Lsub_exactlyhalf beqz a9, .Lsub_exactlyhalf
1: abi_return 1: leaf_return
.Lsub_xexpzero: .Lsub_xexpzero:
/* Same as "yexpzero". */ /* Same as "yexpzero". */
...@@ -421,7 +421,7 @@ __subsf3: ...@@ -421,7 +421,7 @@ __subsf3:
.Lsub_return_zero: .Lsub_return_zero:
movi a2, 0 movi a2, 0
abi_return leaf_return
.Lsub_borrow: .Lsub_borrow:
/* The subtraction has underflowed into the exponent field, so the /* The subtraction has underflowed into the exponent field, so the
...@@ -457,7 +457,7 @@ __subsf3: ...@@ -457,7 +457,7 @@ __subsf3:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli a2, a2, 1 srli a2, a2, 1
slli a2, a2, 1 slli a2, a2, 1
abi_return leaf_return
.Lsub_xzero: .Lsub_xzero:
/* If there was a borrow from the exponent, and the mantissa and /* If there was a borrow from the exponent, and the mantissa and
...@@ -570,7 +570,7 @@ __mulsf3_aux: ...@@ -570,7 +570,7 @@ __mulsf3_aux:
.global __mulsf3 .global __mulsf3
.type __mulsf3, @function .type __mulsf3, @function
__mulsf3: __mulsf3:
abi_entry sp, 48 leaf_entry sp, 32
#if __XTENSA_CALL0_ABI__ #if __XTENSA_CALL0_ABI__
addi sp, sp, -32 addi sp, sp, -32
s32i a12, sp, 16 s32i a12, sp, 16
...@@ -780,7 +780,7 @@ __mulsf3: ...@@ -780,7 +780,7 @@ __mulsf3:
l32i a15, sp, 28 l32i a15, sp, 28
addi sp, sp, 32 addi sp, sp, 32
#endif #endif
abi_return leaf_return
.Lmul_exactlyhalf: .Lmul_exactlyhalf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
...@@ -895,7 +895,7 @@ __divsf3_aux: ...@@ -895,7 +895,7 @@ __divsf3_aux:
bnez a4, 1f bnez a4, 1f
movi a4, 0x400000 /* make it a quiet NaN */ movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4 or a2, a2, a4
1: abi_return 1: leaf_return
.Ldiv_xexpzero: .Ldiv_xexpzero:
/* Clear the sign bit of x. */ /* Clear the sign bit of x. */
...@@ -918,7 +918,7 @@ __divsf3_aux: ...@@ -918,7 +918,7 @@ __divsf3_aux:
/* Return zero with the appropriate sign bit. */ /* Return zero with the appropriate sign bit. */
srli a2, a7, 31 srli a2, a7, 31
slli a2, a2, 31 slli a2, a2, 31
abi_return leaf_return
.Ldiv_xnan_or_inf: .Ldiv_xnan_or_inf:
/* Set the sign bit of the result. */ /* Set the sign bit of the result. */
...@@ -929,7 +929,7 @@ __divsf3_aux: ...@@ -929,7 +929,7 @@ __divsf3_aux:
bnall a3, a6, 1f bnall a3, a6, 1f
movi a4, 0x400000 /* make it a quiet NaN */ movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4 or a2, a2, a4
1: abi_return 1: leaf_return
.Ldiv_ynan_or_inf: .Ldiv_ynan_or_inf:
/* If y is Infinity, return zero. */ /* If y is Infinity, return zero. */
...@@ -937,13 +937,13 @@ __divsf3_aux: ...@@ -937,13 +937,13 @@ __divsf3_aux:
beqz a8, .Ldiv_return_zero beqz a8, .Ldiv_return_zero
/* y is NaN; return it. */ /* y is NaN; return it. */
mov a2, a3 mov a2, a3
abi_return leaf_return
.align 4 .align 4
.global __divsf3 .global __divsf3
.type __divsf3, @function .type __divsf3, @function
__divsf3: __divsf3:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
/* Get the sign of the result. */ /* Get the sign of the result. */
...@@ -1036,7 +1036,7 @@ __divsf3: ...@@ -1036,7 +1036,7 @@ __divsf3:
srli a7, a7, 31 srli a7, a7, 31
slli a7, a7, 31 slli a7, a7, 31
or a2, a2, a7 or a2, a2, a7
abi_return leaf_return
.Ldiv_overflow: .Ldiv_overflow:
bltz a8, .Ldiv_underflow bltz a8, .Ldiv_underflow
...@@ -1085,7 +1085,7 @@ __divsf3: ...@@ -1085,7 +1085,7 @@ __divsf3:
/* Return zero with the appropriate sign bit. */ /* Return zero with the appropriate sign bit. */
srli a2, a7, 31 srli a2, a7, 31
slli a2, a2, 31 slli a2, a2, 31
abi_return leaf_return
#endif /* L_divsf3 */ #endif /* L_divsf3 */
...@@ -1099,7 +1099,7 @@ __divsf3: ...@@ -1099,7 +1099,7 @@ __divsf3:
.set __nesf2, __eqsf2 .set __nesf2, __eqsf2
.type __eqsf2, @function .type __eqsf2, @function
__eqsf2: __eqsf2:
abi_entry sp, 32 leaf_entry sp, 16
bne a2, a3, 4f bne a2, a3, 4f
/* The values are equal but NaN != NaN. Check the exponent. */ /* The values are equal but NaN != NaN. Check the exponent. */
...@@ -1108,11 +1108,11 @@ __eqsf2: ...@@ -1108,11 +1108,11 @@ __eqsf2:
/* Equal. */ /* Equal. */
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Not equal. */ /* Not equal. */
2: movi a2, 1 2: movi a2, 1
abi_return leaf_return
/* Check if the mantissas are nonzero. */ /* Check if the mantissas are nonzero. */
3: slli a7, a2, 9 3: slli a7, a2, 9
...@@ -1127,7 +1127,7 @@ __eqsf2: ...@@ -1127,7 +1127,7 @@ __eqsf2:
5: movi a2, 0 5: movi a2, 0
movi a3, 1 movi a3, 1
movnez a2, a3, a7 movnez a2, a3, a7
abi_return leaf_return
/* Greater Than */ /* Greater Than */
...@@ -1136,7 +1136,7 @@ __eqsf2: ...@@ -1136,7 +1136,7 @@ __eqsf2:
.global __gtsf2 .global __gtsf2
.type __gtsf2, @function .type __gtsf2, @function
__gtsf2: __gtsf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
ball a2, a6, 2f ball a2, a6, 2f
1: bnall a3, a6, .Lle_cmp 1: bnall a3, a6, .Lle_cmp
...@@ -1145,13 +1145,13 @@ __gtsf2: ...@@ -1145,13 +1145,13 @@ __gtsf2:
slli a7, a3, 9 slli a7, a3, 9
beqz a7, .Lle_cmp beqz a7, .Lle_cmp
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, a2, 9 2: slli a7, a2, 9
beqz a7, 1b beqz a7, 1b
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Less Than or Equal */ /* Less Than or Equal */
...@@ -1160,7 +1160,7 @@ __gtsf2: ...@@ -1160,7 +1160,7 @@ __gtsf2:
.global __lesf2 .global __lesf2
.type __lesf2, @function .type __lesf2, @function
__lesf2: __lesf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
ball a2, a6, 2f ball a2, a6, 2f
1: bnall a3, a6, .Lle_cmp 1: bnall a3, a6, .Lle_cmp
...@@ -1169,13 +1169,13 @@ __lesf2: ...@@ -1169,13 +1169,13 @@ __lesf2:
slli a7, a3, 9 slli a7, a3, 9
beqz a7, .Lle_cmp beqz a7, .Lle_cmp
movi a2, 1 movi a2, 1
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, a2, 9 2: slli a7, a2, 9
beqz a7, 1b beqz a7, 1b
movi a2, 1 movi a2, 1
abi_return leaf_return
.Lle_cmp: .Lle_cmp:
/* Check if x and y have different signs. */ /* Check if x and y have different signs. */
...@@ -1188,13 +1188,13 @@ __lesf2: ...@@ -1188,13 +1188,13 @@ __lesf2:
/* Check if x <= y. */ /* Check if x <= y. */
bltu a3, a2, 5f bltu a3, a2, 5f
4: movi a2, 0 4: movi a2, 0
abi_return leaf_return
.Lle_xneg: .Lle_xneg:
/* Check if y <= x. */ /* Check if y <= x. */
bgeu a2, a3, 4b bgeu a2, a3, 4b
5: movi a2, 1 5: movi a2, 1
abi_return leaf_return
.Lle_diff_signs: .Lle_diff_signs:
bltz a2, 4b bltz a2, 4b
...@@ -1205,7 +1205,7 @@ __lesf2: ...@@ -1205,7 +1205,7 @@ __lesf2:
movi a2, 1 movi a2, 1
movi a3, 0 movi a3, 0
moveqz a2, a3, a7 moveqz a2, a3, a7
abi_return leaf_return
/* Greater Than or Equal */ /* Greater Than or Equal */
...@@ -1214,7 +1214,7 @@ __lesf2: ...@@ -1214,7 +1214,7 @@ __lesf2:
.global __gesf2 .global __gesf2
.type __gesf2, @function .type __gesf2, @function
__gesf2: __gesf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
ball a2, a6, 2f ball a2, a6, 2f
1: bnall a3, a6, .Llt_cmp 1: bnall a3, a6, .Llt_cmp
...@@ -1223,13 +1223,13 @@ __gesf2: ...@@ -1223,13 +1223,13 @@ __gesf2:
slli a7, a3, 9 slli a7, a3, 9
beqz a7, .Llt_cmp beqz a7, .Llt_cmp
movi a2, -1 movi a2, -1
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, a2, 9 2: slli a7, a2, 9
beqz a7, 1b beqz a7, 1b
movi a2, -1 movi a2, -1
abi_return leaf_return
/* Less Than */ /* Less Than */
...@@ -1238,7 +1238,7 @@ __gesf2: ...@@ -1238,7 +1238,7 @@ __gesf2:
.global __ltsf2 .global __ltsf2
.type __ltsf2, @function .type __ltsf2, @function
__ltsf2: __ltsf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
ball a2, a6, 2f ball a2, a6, 2f
1: bnall a3, a6, .Llt_cmp 1: bnall a3, a6, .Llt_cmp
...@@ -1247,13 +1247,13 @@ __ltsf2: ...@@ -1247,13 +1247,13 @@ __ltsf2:
slli a7, a3, 9 slli a7, a3, 9
beqz a7, .Llt_cmp beqz a7, .Llt_cmp
movi a2, 0 movi a2, 0
abi_return leaf_return
/* Check if x is a NaN. */ /* Check if x is a NaN. */
2: slli a7, a2, 9 2: slli a7, a2, 9
beqz a7, 1b beqz a7, 1b
movi a2, 0 movi a2, 0
abi_return leaf_return
.Llt_cmp: .Llt_cmp:
/* Check if x and y have different signs. */ /* Check if x and y have different signs. */
...@@ -1266,13 +1266,13 @@ __ltsf2: ...@@ -1266,13 +1266,13 @@ __ltsf2:
/* Check if x < y. */ /* Check if x < y. */
bgeu a2, a3, 5f bgeu a2, a3, 5f
4: movi a2, -1 4: movi a2, -1
abi_return leaf_return
.Llt_xneg: .Llt_xneg:
/* Check if y < x. */ /* Check if y < x. */
bltu a3, a2, 4b bltu a3, a2, 4b
5: movi a2, 0 5: movi a2, 0
abi_return leaf_return
.Llt_diff_signs: .Llt_diff_signs:
bgez a2, 5b bgez a2, 5b
...@@ -1283,7 +1283,7 @@ __ltsf2: ...@@ -1283,7 +1283,7 @@ __ltsf2:
movi a2, 0 movi a2, 0
movi a3, -1 movi a3, -1
movnez a2, a3, a7 movnez a2, a3, a7
abi_return leaf_return
/* Unordered */ /* Unordered */
...@@ -1292,22 +1292,22 @@ __ltsf2: ...@@ -1292,22 +1292,22 @@ __ltsf2:
.global __unordsf2 .global __unordsf2
.type __unordsf2, @function .type __unordsf2, @function
__unordsf2: __unordsf2:
abi_entry sp, 32 leaf_entry sp, 16
movi a6, 0x7f800000 movi a6, 0x7f800000
ball a2, a6, 3f ball a2, a6, 3f
1: ball a3, a6, 4f 1: ball a3, a6, 4f
2: movi a2, 0 2: movi a2, 0
abi_return leaf_return
3: slli a7, a2, 9 3: slli a7, a2, 9
beqz a7, 1b beqz a7, 1b
movi a2, 1 movi a2, 1
abi_return leaf_return
4: slli a7, a3, 9 4: slli a7, a3, 9
beqz a7, 2b beqz a7, 2b
movi a2, 1 movi a2, 1
abi_return leaf_return
#endif /* L_cmpsf2 */ #endif /* L_cmpsf2 */
...@@ -1317,7 +1317,7 @@ __unordsf2: ...@@ -1317,7 +1317,7 @@ __unordsf2:
.global __fixsfsi .global __fixsfsi
.type __fixsfsi, @function .type __fixsfsi, @function
__fixsfsi: __fixsfsi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7f800000 movi a6, 0x7f800000
...@@ -1340,7 +1340,7 @@ __fixsfsi: ...@@ -1340,7 +1340,7 @@ __fixsfsi:
/* Negate the result if sign != 0. */ /* Negate the result if sign != 0. */
neg a2, a5 neg a2, a5
movgez a2, a5, a7 movgez a2, a5, a7
abi_return leaf_return
.Lfixsfsi_nan_or_inf: .Lfixsfsi_nan_or_inf:
/* Handle Infinity and NaN. */ /* Handle Infinity and NaN. */
...@@ -1355,11 +1355,11 @@ __fixsfsi: ...@@ -1355,11 +1355,11 @@ __fixsfsi:
addi a5, a4, -1 /* 0x7fffffff */ addi a5, a4, -1 /* 0x7fffffff */
movgez a4, a5, a2 movgez a4, a5, a2
mov a2, a4 mov a2, a4
abi_return leaf_return
.Lfixsfsi_zero: .Lfixsfsi_zero:
movi a2, 0 movi a2, 0
abi_return leaf_return
#endif /* L_fixsfsi */ #endif /* L_fixsfsi */
...@@ -1369,7 +1369,7 @@ __fixsfsi: ...@@ -1369,7 +1369,7 @@ __fixsfsi:
.global __fixsfdi .global __fixsfdi
.type __fixsfdi, @function .type __fixsfdi, @function
__fixsfdi: __fixsfdi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7f800000 movi a6, 0x7f800000
...@@ -1398,7 +1398,7 @@ __fixsfdi: ...@@ -1398,7 +1398,7 @@ __fixsfdi:
neg xh, xh neg xh, xh
beqz xl, 1f beqz xl, 1f
addi xh, xh, -1 addi xh, xh, -1
1: abi_return 1: leaf_return
.Lfixsfdi_smallshift: .Lfixsfdi_smallshift:
movi xl, 0 movi xl, 0
...@@ -1419,16 +1419,16 @@ __fixsfdi: ...@@ -1419,16 +1419,16 @@ __fixsfdi:
bgez a2, 1f bgez a2, 1f
mov xh, a7 mov xh, a7
movi xl, 0 movi xl, 0
abi_return leaf_return
1: addi xh, a7, -1 /* 0x7fffffff */ 1: addi xh, a7, -1 /* 0x7fffffff */
movi xl, -1 movi xl, -1
abi_return leaf_return
.Lfixsfdi_zero: .Lfixsfdi_zero:
movi xh, 0 movi xh, 0
movi xl, 0 movi xl, 0
abi_return leaf_return
#endif /* L_fixsfdi */ #endif /* L_fixsfdi */
...@@ -1438,7 +1438,7 @@ __fixsfdi: ...@@ -1438,7 +1438,7 @@ __fixsfdi:
.global __fixunssfsi .global __fixunssfsi
.type __fixunssfsi, @function .type __fixunssfsi, @function
__fixunssfsi: __fixunssfsi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7f800000 movi a6, 0x7f800000
...@@ -1463,7 +1463,7 @@ __fixunssfsi: ...@@ -1463,7 +1463,7 @@ __fixunssfsi:
/* Negate the result if sign != 0. */ /* Negate the result if sign != 0. */
neg a2, a5 neg a2, a5
movgez a2, a5, a7 movgez a2, a5, a7
abi_return leaf_return
.Lfixunssfsi_nan_or_inf: .Lfixunssfsi_nan_or_inf:
/* Handle Infinity and NaN. */ /* Handle Infinity and NaN. */
...@@ -1472,28 +1472,28 @@ __fixunssfsi: ...@@ -1472,28 +1472,28 @@ __fixunssfsi:
/* Translate NaN to 0xffffffff. */ /* Translate NaN to 0xffffffff. */
movi a2, -1 movi a2, -1
abi_return leaf_return
.Lfixunssfsi_maxint: .Lfixunssfsi_maxint:
slli a4, a6, 8 /* 0x80000000 */ slli a4, a6, 8 /* 0x80000000 */
movi a5, -1 /* 0xffffffff */ movi a5, -1 /* 0xffffffff */
movgez a4, a5, a2 movgez a4, a5, a2
mov a2, a4 mov a2, a4
abi_return leaf_return
.Lfixunssfsi_zero: .Lfixunssfsi_zero:
movi a2, 0 movi a2, 0
abi_return leaf_return
.Lfixunssfsi_bigexp: .Lfixunssfsi_bigexp:
/* Handle unsigned maximum exponent case. */ /* Handle unsigned maximum exponent case. */
bltz a2, 1f bltz a2, 1f
mov a2, a5 /* no shift needed */ mov a2, a5 /* no shift needed */
abi_return leaf_return
/* Return 0x80000000 if negative. */ /* Return 0x80000000 if negative. */
1: slli a2, a6, 8 1: slli a2, a6, 8
abi_return leaf_return
#endif /* L_fixunssfsi */ #endif /* L_fixunssfsi */
...@@ -1503,7 +1503,7 @@ __fixunssfsi: ...@@ -1503,7 +1503,7 @@ __fixunssfsi:
.global __fixunssfdi .global __fixunssfdi
.type __fixunssfdi, @function .type __fixunssfdi, @function
__fixunssfdi: __fixunssfdi:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for NaN and Infinity. */ /* Check for NaN and Infinity. */
movi a6, 0x7f800000 movi a6, 0x7f800000
...@@ -1534,7 +1534,7 @@ __fixunssfdi: ...@@ -1534,7 +1534,7 @@ __fixunssfdi:
neg xh, xh neg xh, xh
beqz xl, 1f beqz xl, 1f
addi xh, xh, -1 addi xh, xh, -1
1: abi_return 1: leaf_return
.Lfixunssfdi_smallshift: .Lfixunssfdi_smallshift:
movi xl, 0 movi xl, 0
...@@ -1550,24 +1550,24 @@ __fixunssfdi: ...@@ -1550,24 +1550,24 @@ __fixunssfdi:
/* Translate NaN to 0xffffffff.... */ /* Translate NaN to 0xffffffff.... */
1: movi xh, -1 1: movi xh, -1
movi xl, -1 movi xl, -1
abi_return leaf_return
.Lfixunssfdi_maxint: .Lfixunssfdi_maxint:
bgez a2, 1b bgez a2, 1b
2: slli xh, a6, 8 /* 0x80000000 */ 2: slli xh, a6, 8 /* 0x80000000 */
movi xl, 0 movi xl, 0
abi_return leaf_return
.Lfixunssfdi_zero: .Lfixunssfdi_zero:
movi xh, 0 movi xh, 0
movi xl, 0 movi xl, 0
abi_return leaf_return
.Lfixunssfdi_bigexp: .Lfixunssfdi_bigexp:
/* Handle unsigned maximum exponent case. */ /* Handle unsigned maximum exponent case. */
bltz a7, 2b bltz a7, 2b
movi xl, 0 movi xl, 0
abi_return /* no shift needed */ leaf_return /* no shift needed */
#endif /* L_fixunssfdi */ #endif /* L_fixunssfdi */
...@@ -1577,7 +1577,7 @@ __fixunssfdi: ...@@ -1577,7 +1577,7 @@ __fixunssfdi:
.global __floatunsisf .global __floatunsisf
.type __floatunsisf, @function .type __floatunsisf, @function
__floatunsisf: __floatunsisf:
abi_entry sp, 32 leaf_entry sp, 16
beqz a2, .Lfloatsisf_return beqz a2, .Lfloatsisf_return
/* Set the sign to zero and jump to the floatsisf code. */ /* Set the sign to zero and jump to the floatsisf code. */
...@@ -1588,7 +1588,7 @@ __floatunsisf: ...@@ -1588,7 +1588,7 @@ __floatunsisf:
.global __floatsisf .global __floatsisf
.type __floatsisf, @function .type __floatsisf, @function
__floatsisf: __floatsisf:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for zero. */ /* Check for zero. */
beqz a2, .Lfloatsisf_return beqz a2, .Lfloatsisf_return
...@@ -1633,13 +1633,13 @@ __floatsisf: ...@@ -1633,13 +1633,13 @@ __floatsisf:
beqz a6, .Lfloatsisf_exactlyhalf beqz a6, .Lfloatsisf_exactlyhalf
.Lfloatsisf_return: .Lfloatsisf_return:
abi_return leaf_return
.Lfloatsisf_exactlyhalf: .Lfloatsisf_exactlyhalf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli a2, a2, 1 srli a2, a2, 1
slli a2, a2, 1 slli a2, a2, 1
abi_return leaf_return
#endif /* L_floatsisf */ #endif /* L_floatsisf */
...@@ -1649,7 +1649,7 @@ __floatsisf: ...@@ -1649,7 +1649,7 @@ __floatsisf:
.global __floatundisf .global __floatundisf
.type __floatundisf, @function .type __floatundisf, @function
__floatundisf: __floatundisf:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for zero. */ /* Check for zero. */
or a4, xh, xl or a4, xh, xl
...@@ -1663,7 +1663,7 @@ __floatundisf: ...@@ -1663,7 +1663,7 @@ __floatundisf:
.global __floatdisf .global __floatdisf
.type __floatdisf, @function .type __floatdisf, @function
__floatdisf: __floatdisf:
abi_entry sp, 32 leaf_entry sp, 16
/* Check for zero. */ /* Check for zero. */
or a4, xh, xl or a4, xh, xl
...@@ -1714,7 +1714,7 @@ __floatdisf: ...@@ -1714,7 +1714,7 @@ __floatdisf:
/* Check if the leftover fraction is exactly 1/2. */ /* Check if the leftover fraction is exactly 1/2. */
slli a6, a6, 1 slli a6, a6, 1
beqz a6, .Lfloatdisf_exactlyhalf beqz a6, .Lfloatdisf_exactlyhalf
2: abi_return 2: leaf_return
.Lfloatdisf_bigshift: .Lfloatdisf_bigshift:
/* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */ /* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
...@@ -1729,6 +1729,6 @@ __floatdisf: ...@@ -1729,6 +1729,6 @@ __floatdisf:
/* Round down to the nearest even value. */ /* Round down to the nearest even value. */
srli a2, a2, 1 srli a2, a2, 1
slli a2, a2, 1 slli a2, a2, 1
abi_return leaf_return
#endif /* L_floatdisf */ #endif /* L_floatdisf */
config/xtensa/lib1funcs.asm

@@ -30,10 +30,11 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #include "xtensa-config.h"

-# Note: These functions use a minimum stack frame size of 32.  This is
-# necessary for Xtensa configurations that only support a fixed register
-# window size of 8, where even leaf functions (such as these) need to
-# allocate space for a 4-word "extra save area".
+# Force each stack frame to contain an "Extra Save Area" (ESA) of at least
+# 16 bytes.  This is necessary for non-standard Xtensa configurations that
+# only support a fixed register window size of 8, where even leaf functions
+# (such as these) need the ESA for interrupt handlers.
+#define MIN_ESA 16

 # Define macros for the ABS and ADDX* instructions to handle cases
 # where they are not included in the Xtensa processor configuration.

@@ -75,20 +76,20 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
 #endif
 .endm

-# Define macros for function entry and return, supporting either the
+# Define macros for leaf function entry and return, supporting either the
 # standard register windowed ABI or the non-windowed call0 ABI.  These
 # macros do not allocate any extra stack space, so they only work for
 # leaf functions that do not need to spill anything to the stack.

-.macro abi_entry reg, size
+.macro leaf_entry reg, size
 #if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
-        entry \reg, \size
+        entry \reg, \size + MIN_ESA
 #else
         /* do nothing */
 #endif
 .endm

-.macro abi_return
+.macro leaf_return
 #if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
         retw
 #else

@@ -102,14 +103,14 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
.global __mulsi3 .global __mulsi3
.type __mulsi3,@function .type __mulsi3,@function
__mulsi3: __mulsi3:
abi_entry sp, 32 leaf_entry sp, 16
#if XCHAL_HAVE_MUL16 #if XCHAL_HAVE_MUL16
or a4, a2, a3 or a4, a2, a3
srai a4, a4, 16 srai a4, a4, 16
bnez a4, .LMUL16 bnez a4, .LMUL16
mul16u a2, a2, a3 mul16u a2, a2, a3
abi_return leaf_return
.LMUL16: .LMUL16:
srai a4, a2, 16 srai a4, a2, 16
srai a5, a3, 16 srai a5, a3, 16
...@@ -165,7 +166,7 @@ __mulsi3: ...@@ -165,7 +166,7 @@ __mulsi3:
bgeui a3, 16, .Lmult_main_loop bgeui a3, 16, .Lmult_main_loop
neg a3, a2 neg a3, a2
movltz a2, a3, a5 movltz a2, a3, a5
abi_return leaf_return
.align 4 .align 4
.Lmult_main_loop: .Lmult_main_loop:
...@@ -195,7 +196,7 @@ __mulsi3: ...@@ -195,7 +196,7 @@ __mulsi3:
#endif /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */ #endif /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
abi_return leaf_return
.size __mulsi3,.-__mulsi3 .size __mulsi3,.-__mulsi3
#endif /* L_mulsi3 */ #endif /* L_mulsi3 */
...@@ -264,7 +265,7 @@ __nsau_data: ...@@ -264,7 +265,7 @@ __nsau_data:
.global __udivsi3 .global __udivsi3
.type __udivsi3,@function .type __udivsi3,@function
__udivsi3: __udivsi3:
abi_entry sp, 32 leaf_entry sp, 16
bltui a3, 2, .Lle_one # check if the divisor <= 1 bltui a3, 2, .Lle_one # check if the divisor <= 1
mov a6, a2 # keep dividend in a6 mov a6, a2 # keep dividend in a6
...@@ -297,24 +298,24 @@ __udivsi3: ...@@ -297,24 +298,24 @@ __udivsi3:
bltu a6, a3, .Lreturn bltu a6, a3, .Lreturn
addi a2, a2, 1 # increment quotient if dividend >= divisor addi a2, a2, 1 # increment quotient if dividend >= divisor
.Lreturn: .Lreturn:
abi_return leaf_return
.Lle_one: .Lle_one:
beqz a3, .Lerror # if divisor == 1, return the dividend beqz a3, .Lerror # if divisor == 1, return the dividend
abi_return leaf_return
.Lspecial: .Lspecial:
# return dividend >= divisor # return dividend >= divisor
bltu a6, a3, .Lreturn0 bltu a6, a3, .Lreturn0
movi a2, 1 movi a2, 1
abi_return leaf_return
.Lerror: .Lerror:
# just return 0; could throw an exception # just return 0; could throw an exception
.Lreturn0: .Lreturn0:
movi a2, 0 movi a2, 0
abi_return leaf_return
.size __udivsi3,.-__udivsi3 .size __udivsi3,.-__udivsi3
#endif /* L_udivsi3 */ #endif /* L_udivsi3 */
...@@ -325,7 +326,7 @@ __udivsi3: ...@@ -325,7 +326,7 @@ __udivsi3:
.global __divsi3 .global __divsi3
.type __divsi3,@function .type __divsi3,@function
__divsi3: __divsi3:
abi_entry sp, 32 leaf_entry sp, 16
xor a7, a2, a3 # sign = dividend ^ divisor xor a7, a2, a3 # sign = dividend ^ divisor
do_abs a6, a2, a4 # udividend = abs(dividend) do_abs a6, a2, a4 # udividend = abs(dividend)
do_abs a3, a3, a4 # udivisor = abs(divisor) do_abs a3, a3, a4 # udivisor = abs(divisor)
...@@ -361,27 +362,27 @@ __divsi3: ...@@ -361,27 +362,27 @@ __divsi3:
.Lreturn: .Lreturn:
neg a5, a2 neg a5, a2
movltz a2, a5, a7 # return (sign < 0) ? -quotient : quotient movltz a2, a5, a7 # return (sign < 0) ? -quotient : quotient
abi_return leaf_return
.Lle_one: .Lle_one:
beqz a3, .Lerror beqz a3, .Lerror
neg a2, a6 # if udivisor == 1, then return... neg a2, a6 # if udivisor == 1, then return...
movgez a2, a6, a7 # (sign < 0) ? -udividend : udividend movgez a2, a6, a7 # (sign < 0) ? -udividend : udividend
abi_return leaf_return
.Lspecial: .Lspecial:
bltu a6, a3, .Lreturn0 # if dividend < divisor, return 0 bltu a6, a3, .Lreturn0 # if dividend < divisor, return 0
movi a2, 1 movi a2, 1
movi a4, -1 movi a4, -1
movltz a2, a4, a7 # else return (sign < 0) ? -1 : 1 movltz a2, a4, a7 # else return (sign < 0) ? -1 : 1
abi_return leaf_return
.Lerror: .Lerror:
# just return 0; could throw an exception # just return 0; could throw an exception
.Lreturn0: .Lreturn0:
movi a2, 0 movi a2, 0
abi_return leaf_return
.size __divsi3,.-__divsi3 .size __divsi3,.-__divsi3
#endif /* L_divsi3 */ #endif /* L_divsi3 */
...@@ -392,7 +393,7 @@ __divsi3: ...@@ -392,7 +393,7 @@ __divsi3:
.global __umodsi3 .global __umodsi3
.type __umodsi3,@function .type __umodsi3,@function
__umodsi3: __umodsi3:
abi_entry sp, 32 leaf_entry sp, 16
bltui a3, 2, .Lle_one # check if the divisor is <= 1 bltui a3, 2, .Lle_one # check if the divisor is <= 1
do_nsau a5, a2, a6, a7 # dividend_shift = nsau(dividend) do_nsau a5, a2, a6, a7 # dividend_shift = nsau(dividend)
...@@ -422,13 +423,13 @@ __umodsi3: ...@@ -422,13 +423,13 @@ __umodsi3:
bltu a2, a3, .Lreturn bltu a2, a3, .Lreturn
sub a2, a2, a3 # subtract once more if dividend >= divisor sub a2, a2, a3 # subtract once more if dividend >= divisor
.Lreturn: .Lreturn:
abi_return leaf_return
.Lle_one: .Lle_one:
# the divisor is either 0 or 1, so just return 0. # the divisor is either 0 or 1, so just return 0.
# someday we may want to throw an exception if the divisor is 0. # someday we may want to throw an exception if the divisor is 0.
movi a2, 0 movi a2, 0
abi_return leaf_return
.size __umodsi3,.-__umodsi3 .size __umodsi3,.-__umodsi3
#endif /* L_umodsi3 */ #endif /* L_umodsi3 */
...@@ -439,7 +440,7 @@ __umodsi3: ...@@ -439,7 +440,7 @@ __umodsi3:
.global __modsi3 .global __modsi3
.type __modsi3,@function .type __modsi3,@function
__modsi3: __modsi3:
abi_entry sp, 32 leaf_entry sp, 16
mov a7, a2 # save original (signed) dividend mov a7, a2 # save original (signed) dividend
do_abs a2, a2, a4 # udividend = abs(dividend) do_abs a2, a2, a4 # udividend = abs(dividend)
do_abs a3, a3, a4 # udivisor = abs(divisor) do_abs a3, a3, a4 # udivisor = abs(divisor)
...@@ -474,13 +475,13 @@ __modsi3: ...@@ -474,13 +475,13 @@ __modsi3:
bgez a7, .Lpositive bgez a7, .Lpositive
neg a2, a2 # if (dividend < 0), return -udividend neg a2, a2 # if (dividend < 0), return -udividend
.Lpositive: .Lpositive:
abi_return leaf_return
.Lle_one: .Lle_one:
# udivisor is either 0 or 1, so just return 0. # udivisor is either 0 or 1, so just return 0.
# someday we may want to throw an exception if udivisor is 0. # someday we may want to throw an exception if udivisor is 0.
movi a2, 0 movi a2, 0
abi_return leaf_return
.size __modsi3,.-__modsi3 .size __modsi3,.-__modsi3
#endif /* L_modsi3 */ #endif /* L_modsi3 */