author     bwilson <bwilson@138bc75d-0d04-0410-961f-82ee72b054a4>    2006-04-01 01:29:11 +0000
committer  bwilson <bwilson@138bc75d-0d04-0410-961f-82ee72b054a4>    2006-04-01 01:29:11 +0000
commit     29bf9219c9121cd0c7244c9b954a3137d17ed88d (patch)
tree       e64eea562a933783bbc8a535ae6cf5530641e8eb /gcc/config/xtensa
parent     2a09b1df753c147d51326d4d2b513c46eb3c8784 (diff)
download   gcc-29bf9219c9121cd0c7244c9b954a3137d17ed88d.tar.gz
	* config/xtensa/lib1funcs.asm: Rename abi_entry/abi_return macros
	to leaf_entry/leaf_return.  Change leaf_entry to add 16 bytes to
	the frame size.  Update to use the new macros.
	* config/xtensa/ieee754-sf.S: Use new leaf_entry/leaf_return macros.
	* config/xtensa/ieee754-df.S: Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@112604 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'gcc/config/xtensa')
-rw-r--r--   gcc/config/xtensa/ieee754-df.S    190
-rw-r--r--   gcc/config/xtensa/ieee754-sf.S    166
-rw-r--r--   gcc/config/xtensa/lib1funcs.asm    57
3 files changed, 207 insertions, 206 deletions
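
For context, the sketch below (not part of the patch) mirrors the leaf_entry/leaf_return
definitions that lib1funcs.asm contains after this change. Under the windowed ABI,
leaf_entry adds MIN_ESA (16 bytes) to the requested frame size, which is why every former
"abi_entry sp, 32" becomes "leaf_entry sp, 16" in the hunks below while still reserving a
32-byte frame; under the call0 ABI the entry macro emits nothing, and leaf_return is
assumed to fall back to a plain ret (that branch is not visible in the hunks below). The
example_leaf function is hypothetical, included only to show the macros in use.

# --- illustrative sketch, not part of the commit ---
# MIN_ESA and the two macros mirror the new lib1funcs.asm definitions;
# example_leaf is a hypothetical leaf function added for illustration.

#define MIN_ESA 16

	.macro	leaf_entry reg, size
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	entry	\reg, \size + MIN_ESA	# "leaf_entry sp, 16" reserves 32 bytes
#else
	/* do nothing */
#endif
	.endm

	.macro	leaf_return
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
	retw				# windowed-ABI return
#else
	ret				# assumed call0 return (branch not shown below)
#endif
	.endm

	.align	4
	.global	example_leaf		# hypothetical leaf function
	.type	example_leaf, @function
example_leaf:
	leaf_entry sp, 16		# previously: abi_entry sp, 32
	addi	a2, a2, 1		# trivial body, no stack spills needed
	leaf_return
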
diff --git a/gcc/config/xtensa/ieee754-df.S b/gcc/config/xtensa/ieee754-df.S
index 5981979fc65..711b10c9df3 100644
--- a/gcc/config/xtensa/ieee754-df.S
+++ b/gcc/config/xtensa/ieee754-df.S
@@ -51,10 +51,10 @@
.global __negdf2
.type __negdf2, @function
__negdf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a4, 0x80000000
xor xh, xh, a4
- abi_return
+ leaf_return
#endif /* L_negdf2 */
@@ -74,13 +74,13 @@ __adddf3_aux:
slli a7, xh, 12
or a7, a7, xl
beqz a7, .Ladd_ynan_or_inf
-1: abi_return
+1: leaf_return
.Ladd_ynan_or_inf:
/* Return y. */
mov xh, yh
mov xl, yl
- abi_return
+ leaf_return
.Ladd_opposite_signs:
/* Operand signs differ. Do a subtraction. */
@@ -92,7 +92,7 @@ __adddf3_aux:
.global __adddf3
.type __adddf3, @function
__adddf3:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
/* Check if the two operands have the same sign. */
@@ -166,7 +166,7 @@ __adddf3:
add xh, xh, yh
bgeu xl, yl, 1f
addi xh, xh, 1
-1: abi_return
+1: leaf_return
.Ladd_bigshifty:
/* Exponent difference > 64 -- just return the bigger value. */
@@ -229,7 +229,7 @@ __adddf3:
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Ladd_exactlyhalf
-1: abi_return
+1: leaf_return
.Ladd_bigshiftx:
/* Mostly the same thing as "bigshifty".... */
@@ -247,7 +247,7 @@ __adddf3:
.Ladd_returny:
mov xh, yh
mov xl, yl
- abi_return
+ leaf_return
.Ladd_carry:
/* The addition has overflowed into the exponent field, so the
@@ -288,7 +288,7 @@ __adddf3:
addi xl, xl, 1
beqz xl, .Ladd_roundcarry
beqz a9, .Ladd_exactlyhalf
-1: abi_return
+1: leaf_return
.Ladd_infinity:
/* Clear the mantissa. */
@@ -299,20 +299,20 @@ __adddf3:
/* The sign bit may have been lost in a carry-out. Put it back. */
slli a8, a8, 1
or xh, xh, a8
- abi_return
+ leaf_return
.Ladd_exactlyhalf:
/* Round down to the nearest even value. */
srli xl, xl, 1
slli xl, xl, 1
- abi_return
+ leaf_return
.Ladd_roundcarry:
/* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi xh, xh, 1
/* Overflow to the exponent is OK. */
- abi_return
+ leaf_return
/* Subtraction */
@@ -328,14 +328,14 @@ __subdf3_aux:
/* Both x and y are either NaN or Inf, so the result is NaN. */
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
-1: abi_return
+1: leaf_return
.Lsub_ynan_or_inf:
/* Negate y and return it. */
slli a7, a6, 11
xor xh, yh, a7
mov xl, yl
- abi_return
+ leaf_return
.Lsub_opposite_signs:
/* Operand signs differ. Do an addition. */
@@ -347,7 +347,7 @@ __subdf3_aux:
.global __subdf3
.type __subdf3, @function
__subdf3:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
/* Check if the two operands have the same sign. */
@@ -415,7 +415,7 @@ __subdf3:
/* The operands are equal. Return 0.0. */
movi xh, 0
movi xl, 0
-1: abi_return
+1: leaf_return
.Lsub_yexpzero:
/* y is a subnormal value. Replace its sign/exponent with zero,
@@ -493,7 +493,7 @@ __subdf3:
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Lsub_exactlyhalf
-1: abi_return
+1: leaf_return
.Lsub_xexpzero:
/* Same as "yexpzero". */
@@ -523,7 +523,7 @@ __subdf3:
slli a7, a6, 11
xor xh, yh, a7
mov xl, yl
- abi_return
+ leaf_return
.Lsub_borrow:
/* The subtraction has underflowed into the exponent field, so the
@@ -560,14 +560,14 @@ __subdf3:
/* Round down to the nearest even value. */
srli xl, xl, 1
slli xl, xl, 1
- abi_return
+ leaf_return
.Lsub_roundcarry:
/* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi xh, xh, 1
/* Overflow to the exponent is OK. */
- abi_return
+ leaf_return
.Lsub_xhzero:
/* When normalizing the result, all the mantissa bits in the high
@@ -728,7 +728,7 @@ __muldf3_aux:
.global __muldf3
.type __muldf3, @function
__muldf3:
- abi_entry sp, 48
+ leaf_entry sp, 32
#if __XTENSA_CALL0_ABI__
addi sp, sp, -32
s32i a12, sp, 16
@@ -1093,7 +1093,7 @@ __muldf3:
l32i a15, sp, 28
addi sp, sp, 32
#endif
- abi_return
+ leaf_return
.Lmul_exactlyhalf:
/* Round down to the nearest even value. */
@@ -1246,7 +1246,7 @@ __divdf3_aux:
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
1: movi xl, 0
- abi_return
+ leaf_return
.Ldiv_xexpzero:
/* Clear the sign bit of x. */
@@ -1287,7 +1287,7 @@ __divdf3_aux:
srli xh, a7, 31
slli xh, xh, 31
movi xl, 0
- abi_return
+ leaf_return
.Ldiv_xnan_or_inf:
/* Set the sign bit of the result. */
@@ -1298,7 +1298,7 @@ __divdf3_aux:
bnall yh, a6, 1f
movi a4, 0x80000 /* make it a quiet NaN */
or xh, xh, a4
-1: abi_return
+1: leaf_return
.Ldiv_ynan_or_inf:
/* If y is Infinity, return zero. */
@@ -1308,7 +1308,7 @@ __divdf3_aux:
/* y is NaN; return it. */
mov xh, yh
mov xl, yl
- abi_return
+ leaf_return
.Ldiv_highequal1:
bltu xl, yl, 2f
@@ -1318,7 +1318,7 @@ __divdf3_aux:
.global __divdf3
.type __divdf3, @function
__divdf3:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
/* Get the sign of the result. */
@@ -1431,7 +1431,7 @@ __divdf3:
srli a7, a7, 31
slli a7, a7, 31
or xh, xh, a7
- abi_return
+ leaf_return
.Ldiv_highequal2:
bgeu xl, yl, 2b
@@ -1510,7 +1510,7 @@ __divdf3:
srli xh, a7, 31
slli xh, xh, 31
movi xl, 0
- abi_return
+ leaf_return
#endif /* L_divdf3 */
@@ -1524,7 +1524,7 @@ __divdf3:
.set __nedf2, __eqdf2
.type __eqdf2, @function
__eqdf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
bne xl, yl, 2f
bne xh, yh, 4f
@@ -1534,11 +1534,11 @@ __eqdf2:
/* Equal. */
movi a2, 0
- abi_return
+ leaf_return
/* Not equal. */
2: movi a2, 1
- abi_return
+ leaf_return
/* Check if the mantissas are nonzero. */
3: slli a7, xh, 12
@@ -1555,7 +1555,7 @@ __eqdf2:
5: movi a2, 0
movi a3, 1
movnez a2, a3, a7
- abi_return
+ leaf_return
/* Greater Than */
@@ -1564,7 +1564,7 @@ __eqdf2:
.global __gtdf2
.type __gtdf2, @function
__gtdf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Lle_cmp
@@ -1574,14 +1574,14 @@ __gtdf2:
or a7, a7, yl
beqz a7, .Lle_cmp
movi a2, 0
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 0
- abi_return
+ leaf_return
/* Less Than or Equal */
@@ -1590,7 +1590,7 @@ __gtdf2:
.global __ledf2
.type __ledf2, @function
__ledf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Lle_cmp
@@ -1600,14 +1600,14 @@ __ledf2:
or a7, a7, yl
beqz a7, .Lle_cmp
movi a2, 1
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 1
- abi_return
+ leaf_return
.Lle_cmp:
/* Check if x and y have different signs. */
@@ -1622,7 +1622,7 @@ __ledf2:
bne xh, yh, 5f
bltu yl, xl, 5f
4: movi a2, 0
- abi_return
+ leaf_return
.Lle_xneg:
/* Check if y <= x. */
@@ -1630,7 +1630,7 @@ __ledf2:
bne yh, xh, 5f
bgeu xl, yl, 4b
5: movi a2, 1
- abi_return
+ leaf_return
.Lle_diff_signs:
bltz xh, 4b
@@ -1643,7 +1643,7 @@ __ledf2:
movi a2, 1
movi a3, 0
moveqz a2, a3, a7
- abi_return
+ leaf_return
/* Greater Than or Equal */
@@ -1652,7 +1652,7 @@ __ledf2:
.global __gedf2
.type __gedf2, @function
__gedf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Llt_cmp
@@ -1662,14 +1662,14 @@ __gedf2:
or a7, a7, yl
beqz a7, .Llt_cmp
movi a2, -1
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, -1
- abi_return
+ leaf_return
/* Less Than */
@@ -1678,7 +1678,7 @@ __gedf2:
.global __ltdf2
.type __ltdf2, @function
__ltdf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 2f
1: bnall yh, a6, .Llt_cmp
@@ -1688,14 +1688,14 @@ __ltdf2:
or a7, a7, yl
beqz a7, .Llt_cmp
movi a2, 0
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 0
- abi_return
+ leaf_return
.Llt_cmp:
/* Check if x and y have different signs. */
@@ -1710,7 +1710,7 @@ __ltdf2:
bne xh, yh, 5f
bgeu xl, yl, 5f
4: movi a2, -1
- abi_return
+ leaf_return
.Llt_xneg:
/* Check if y < x. */
@@ -1718,7 +1718,7 @@ __ltdf2:
bne yh, xh, 5f
bltu yl, xl, 4b
5: movi a2, 0
- abi_return
+ leaf_return
.Llt_diff_signs:
bgez xh, 5b
@@ -1731,7 +1731,7 @@ __ltdf2:
movi a2, 0
movi a3, -1
movnez a2, a3, a7
- abi_return
+ leaf_return
/* Unordered */
@@ -1740,24 +1740,24 @@ __ltdf2:
.global __unorddf2
.type __unorddf2, @function
__unorddf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7ff00000
ball xh, a6, 3f
1: ball yh, a6, 4f
2: movi a2, 0
- abi_return
+ leaf_return
3: slli a7, xh, 12
or a7, a7, xl
beqz a7, 1b
movi a2, 1
- abi_return
+ leaf_return
4: slli a7, yh, 12
or a7, a7, yl
beqz a7, 2b
movi a2, 1
- abi_return
+ leaf_return
#endif /* L_cmpdf2 */
@@ -1767,7 +1767,7 @@ __unorddf2:
.global __fixdfsi
.type __fixdfsi, @function
__fixdfsi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
@@ -1792,7 +1792,7 @@ __fixdfsi:
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
- abi_return
+ leaf_return
.Lfixdfsi_nan_or_inf:
/* Handle Infinity and NaN. */
@@ -1808,11 +1808,11 @@ __fixdfsi:
addi a5, a4, -1 /* 0x7fffffff */
movgez a4, a5, xh
mov a2, a4
- abi_return
+ leaf_return
.Lfixdfsi_zero:
movi a2, 0
- abi_return
+ leaf_return
#endif /* L_fixdfsi */
@@ -1822,7 +1822,7 @@ __fixdfsi:
.global __fixdfdi
.type __fixdfdi, @function
__fixdfdi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
@@ -1854,7 +1854,7 @@ __fixdfdi:
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
-1: abi_return
+1: leaf_return
.Lfixdfdi_smallshift:
src xl, xh, xl
@@ -1875,16 +1875,16 @@ __fixdfdi:
bgez xh, 1f
mov xh, a7
movi xl, 0
- abi_return
+ leaf_return
1: addi xh, a7, -1 /* 0x7fffffff */
movi xl, -1
- abi_return
+ leaf_return
.Lfixdfdi_zero:
movi xh, 0
movi xl, 0
- abi_return
+ leaf_return
#endif /* L_fixdfdi */
@@ -1894,7 +1894,7 @@ __fixdfdi:
.global __fixunsdfsi
.type __fixunsdfsi, @function
__fixunsdfsi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
@@ -1921,7 +1921,7 @@ __fixunsdfsi:
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
- abi_return
+ leaf_return
.Lfixunsdfsi_nan_or_inf:
/* Handle Infinity and NaN. */
@@ -1931,28 +1931,28 @@ __fixunsdfsi:
/* Translate NaN to 0xffffffff. */
movi a2, -1
- abi_return
+ leaf_return
.Lfixunsdfsi_maxint:
slli a4, a6, 11 /* 0x80000000 */
movi a5, -1 /* 0xffffffff */
movgez a4, a5, xh
mov a2, a4
- abi_return
+ leaf_return
.Lfixunsdfsi_zero:
movi a2, 0
- abi_return
+ leaf_return
.Lfixunsdfsi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz xh, 1f
mov a2, a5 /* no shift needed */
- abi_return
+ leaf_return
/* Return 0x80000000 if negative. */
1: slli a2, a6, 11
- abi_return
+ leaf_return
#endif /* L_fixunsdfsi */
@@ -1962,7 +1962,7 @@ __fixunsdfsi:
.global __fixunsdfdi
.type __fixunsdfdi, @function
__fixunsdfdi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7ff00000
@@ -1996,7 +1996,7 @@ __fixunsdfdi:
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
-1: abi_return
+1: leaf_return
.Lfixunsdfdi_smallshift:
src xl, xh, xl
@@ -2012,23 +2012,23 @@ __fixunsdfdi:
/* Translate NaN to 0xffffffff.... */
1: movi xh, -1
movi xl, -1
- abi_return
+ leaf_return
.Lfixunsdfdi_maxint:
bgez xh, 1b
2: slli xh, a6, 11 /* 0x80000000 */
movi xl, 0
- abi_return
+ leaf_return
.Lfixunsdfdi_zero:
movi xh, 0
movi xl, 0
- abi_return
+ leaf_return
.Lfixunsdfdi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz a7, 2b
- abi_return /* no shift needed */
+ leaf_return /* no shift needed */
#endif /* L_fixunsdfdi */
@@ -2038,7 +2038,7 @@ __fixunsdfdi:
.global __floatunsidf
.type __floatunsidf, @function
__floatunsidf:
- abi_entry sp, 32
+ leaf_entry sp, 16
beqz a2, .Lfloatsidf_return_zero
/* Set the sign to zero and jump to the floatsidf code. */
@@ -2049,7 +2049,7 @@ __floatunsidf:
.global __floatsidf
.type __floatsidf, @function
__floatsidf:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for zero. */
beqz a2, .Lfloatsidf_return_zero
@@ -2084,11 +2084,11 @@ __floatsidf:
/* Add the sign and return. */
slli a7, a7, 31
or xh, xh, a7
- abi_return
+ leaf_return
.Lfloatsidf_return_zero:
movi a3, 0
- abi_return
+ leaf_return
#endif /* L_floatsidf */
@@ -2098,7 +2098,7 @@ __floatsidf:
.global __floatundidf
.type __floatundidf, @function
__floatundidf:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
@@ -2112,7 +2112,7 @@ __floatundidf:
.global __floatdidf
.type __floatdidf, @function
__floatdidf:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
@@ -2161,7 +2161,7 @@ __floatdidf:
/* Check if the leftover fraction is exactly 1/2. */
slli a6, a6, 1
beqz a6, .Lfloatdidf_exactlyhalf
-2: abi_return
+2: leaf_return
.Lfloatdidf_bigshift:
/* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
@@ -2176,14 +2176,14 @@ __floatdidf:
/* Round down to the nearest even value. */
srli xl, xl, 1
slli xl, xl, 1
- abi_return
+ leaf_return
.Lfloatdidf_roundcarry:
/* xl is always zero when the rounding increment overflows, so
there's no need to round it to an even value. */
addi xh, xh, 1
/* Overflow to the exponent is OK. */
- abi_return
+ leaf_return
#endif /* L_floatdidf */
@@ -2193,7 +2193,7 @@ __floatdidf:
.global __truncdfsf2
.type __truncdfsf2, @function
__truncdfsf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Adjust the exponent bias. */
movi a4, (0x3ff - 0x7f) << 20
@@ -2228,13 +2228,13 @@ __truncdfsf2:
/* Check if the leftover fraction is exactly 1/2. */
slli a4, a4, 1
beqz a4, .Ltrunc_exactlyhalf
-1: abi_return
+1: leaf_return
.Ltrunc_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
- abi_return
+ leaf_return
.Ltrunc_overflow:
/* Check if exponent == 0x7ff. */
@@ -2254,7 +2254,7 @@ __truncdfsf2:
extui a6, xh, 31, 1
ssai 1
src a2, a6, a4
- abi_return
+ leaf_return
.Ltrunc_underflow:
/* Find shift count for a subnormal. Flush to zero if >= 32. */
@@ -2287,7 +2287,7 @@ __truncdfsf2:
/* Return +/- zero. */
1: extui a2, xh, 31, 1
slli a2, a2, 31
- abi_return
+ leaf_return
#endif /* L_truncdfsf2 */
@@ -2297,7 +2297,7 @@ __truncdfsf2:
.global __extendsfdf2
.type __extendsfdf2, @function
__extendsfdf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Save the sign bit and then shift it off. */
extui a5, a2, 31, 1
@@ -2320,7 +2320,7 @@ __extendsfdf2:
/* Add the sign bit. */
or xh, a4, a5
- abi_return
+ leaf_return
.Lextend_nan_or_inf:
movi a4, 0x7ff00000
@@ -2335,7 +2335,7 @@ __extendsfdf2:
/* Add the sign and return. */
1: or xh, a4, a5
movi xl, 0
- abi_return
+ leaf_return
.Lextend_expzero:
beqz a4, 1b
@@ -2358,7 +2358,7 @@ __extendsfdf2:
/* Add the sign and return. */
or xh, a4, a5
- abi_return
+ leaf_return
#endif /* L_extendsfdf2 */
diff --git a/gcc/config/xtensa/ieee754-sf.S b/gcc/config/xtensa/ieee754-sf.S
index f669cc8ec2f..a75e742898b 100644
--- a/gcc/config/xtensa/ieee754-sf.S
+++ b/gcc/config/xtensa/ieee754-sf.S
@@ -51,10 +51,10 @@
.global __negsf2
.type __negsf2, @function
__negsf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a4, 0x80000000
xor a2, a2, a4
- abi_return
+ leaf_return
#endif /* L_negsf2 */
@@ -73,12 +73,12 @@ __addsf3_aux:
/* If x is a NaN, return it. Otherwise, return y. */
slli a7, a2, 9
beqz a7, .Ladd_ynan_or_inf
-1: abi_return
+1: leaf_return
.Ladd_ynan_or_inf:
/* Return y. */
mov a2, a3
- abi_return
+ leaf_return
.Ladd_opposite_signs:
/* Operand signs differ. Do a subtraction. */
@@ -90,7 +90,7 @@ __addsf3_aux:
.global __addsf3
.type __addsf3, @function
__addsf3:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
/* Check if the two operands have the same sign. */
@@ -158,7 +158,7 @@ __addsf3:
a carry into the exponent field will not work because it
assumes there is an implicit "1.0" that needs to be added. */
add a2, a2, a3
-1: abi_return
+1: leaf_return
.Ladd_xexpzero:
/* Same as "yexpzero" except skip handling the case when both
@@ -200,11 +200,11 @@ __addsf3:
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Ladd_exactlyhalf
-1: abi_return
+1: leaf_return
.Ladd_returny:
mov a2, a3
- abi_return
+ leaf_return
.Ladd_carry:
/* The addition has overflowed into the exponent field, so the
@@ -242,7 +242,7 @@ __addsf3:
bbci.l a10, 0, 1f
addi a2, a2, 1
beqz a9, .Ladd_exactlyhalf
-1: abi_return
+1: leaf_return
.Ladd_infinity:
/* Clear the mantissa. */
@@ -252,13 +252,13 @@ __addsf3:
/* The sign bit may have been lost in a carry-out. Put it back. */
slli a8, a8, 1
or a2, a2, a8
- abi_return
+ leaf_return
.Ladd_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
- abi_return
+ leaf_return
/* Subtraction */
@@ -274,13 +274,13 @@ __subsf3_aux:
/* Both x and y are either NaN or Inf, so the result is NaN. */
movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4
-1: abi_return
+1: leaf_return
.Lsub_ynan_or_inf:
/* Negate y and return it. */
slli a7, a6, 8
xor a2, a3, a7
- abi_return
+ leaf_return
.Lsub_opposite_signs:
/* Operand signs differ. Do an addition. */
@@ -292,7 +292,7 @@ __subsf3_aux:
.global __subsf3
.type __subsf3, @function
__subsf3:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
/* Check if the two operands have the same sign. */
@@ -366,7 +366,7 @@ __subsf3:
/* Negate and return y. */
slli a7, a6, 8
xor a2, a3, a7
-1: abi_return
+1: leaf_return
.Lsub_xsmaller:
/* Same thing as the "ysmaller" code, but with x and y swapped and
@@ -408,7 +408,7 @@ __subsf3:
/* Check if the leftover fraction is exactly 1/2. */
slli a9, a9, 1
beqz a9, .Lsub_exactlyhalf
-1: abi_return
+1: leaf_return
.Lsub_xexpzero:
/* Same as "yexpzero". */
@@ -421,7 +421,7 @@ __subsf3:
.Lsub_return_zero:
movi a2, 0
- abi_return
+ leaf_return
.Lsub_borrow:
/* The subtraction has underflowed into the exponent field, so the
@@ -457,7 +457,7 @@ __subsf3:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
- abi_return
+ leaf_return
.Lsub_xzero:
/* If there was a borrow from the exponent, and the mantissa and
@@ -570,7 +570,7 @@ __mulsf3_aux:
.global __mulsf3
.type __mulsf3, @function
__mulsf3:
- abi_entry sp, 48
+ leaf_entry sp, 32
#if __XTENSA_CALL0_ABI__
addi sp, sp, -32
s32i a12, sp, 16
@@ -780,7 +780,7 @@ __mulsf3:
l32i a15, sp, 28
addi sp, sp, 32
#endif
- abi_return
+ leaf_return
.Lmul_exactlyhalf:
/* Round down to the nearest even value. */
@@ -895,7 +895,7 @@ __divsf3_aux:
bnez a4, 1f
movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4
-1: abi_return
+1: leaf_return
.Ldiv_xexpzero:
/* Clear the sign bit of x. */
@@ -918,7 +918,7 @@ __divsf3_aux:
/* Return zero with the appropriate sign bit. */
srli a2, a7, 31
slli a2, a2, 31
- abi_return
+ leaf_return
.Ldiv_xnan_or_inf:
/* Set the sign bit of the result. */
@@ -929,7 +929,7 @@ __divsf3_aux:
bnall a3, a6, 1f
movi a4, 0x400000 /* make it a quiet NaN */
or a2, a2, a4
-1: abi_return
+1: leaf_return
.Ldiv_ynan_or_inf:
/* If y is Infinity, return zero. */
@@ -937,13 +937,13 @@ __divsf3_aux:
beqz a8, .Ldiv_return_zero
/* y is NaN; return it. */
mov a2, a3
- abi_return
+ leaf_return
.align 4
.global __divsf3
.type __divsf3, @function
__divsf3:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
/* Get the sign of the result. */
@@ -1036,7 +1036,7 @@ __divsf3:
srli a7, a7, 31
slli a7, a7, 31
or a2, a2, a7
- abi_return
+ leaf_return
.Ldiv_overflow:
bltz a8, .Ldiv_underflow
@@ -1085,7 +1085,7 @@ __divsf3:
/* Return zero with the appropriate sign bit. */
srli a2, a7, 31
slli a2, a2, 31
- abi_return
+ leaf_return
#endif /* L_divsf3 */
@@ -1099,7 +1099,7 @@ __divsf3:
.set __nesf2, __eqsf2
.type __eqsf2, @function
__eqsf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
bne a2, a3, 4f
/* The values are equal but NaN != NaN. Check the exponent. */
@@ -1108,11 +1108,11 @@ __eqsf2:
/* Equal. */
movi a2, 0
- abi_return
+ leaf_return
/* Not equal. */
2: movi a2, 1
- abi_return
+ leaf_return
/* Check if the mantissas are nonzero. */
3: slli a7, a2, 9
@@ -1127,7 +1127,7 @@ __eqsf2:
5: movi a2, 0
movi a3, 1
movnez a2, a3, a7
- abi_return
+ leaf_return
/* Greater Than */
@@ -1136,7 +1136,7 @@ __eqsf2:
.global __gtsf2
.type __gtsf2, @function
__gtsf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Lle_cmp
@@ -1145,13 +1145,13 @@ __gtsf2:
slli a7, a3, 9
beqz a7, .Lle_cmp
movi a2, 0
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, 0
- abi_return
+ leaf_return
/* Less Than or Equal */
@@ -1160,7 +1160,7 @@ __gtsf2:
.global __lesf2
.type __lesf2, @function
__lesf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Lle_cmp
@@ -1169,13 +1169,13 @@ __lesf2:
slli a7, a3, 9
beqz a7, .Lle_cmp
movi a2, 1
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, 1
- abi_return
+ leaf_return
.Lle_cmp:
/* Check if x and y have different signs. */
@@ -1188,13 +1188,13 @@ __lesf2:
/* Check if x <= y. */
bltu a3, a2, 5f
4: movi a2, 0
- abi_return
+ leaf_return
.Lle_xneg:
/* Check if y <= x. */
bgeu a2, a3, 4b
5: movi a2, 1
- abi_return
+ leaf_return
.Lle_diff_signs:
bltz a2, 4b
@@ -1205,7 +1205,7 @@ __lesf2:
movi a2, 1
movi a3, 0
moveqz a2, a3, a7
- abi_return
+ leaf_return
/* Greater Than or Equal */
@@ -1214,7 +1214,7 @@ __lesf2:
.global __gesf2
.type __gesf2, @function
__gesf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Llt_cmp
@@ -1223,13 +1223,13 @@ __gesf2:
slli a7, a3, 9
beqz a7, .Llt_cmp
movi a2, -1
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, -1
- abi_return
+ leaf_return
/* Less Than */
@@ -1238,7 +1238,7 @@ __gesf2:
.global __ltsf2
.type __ltsf2, @function
__ltsf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 2f
1: bnall a3, a6, .Llt_cmp
@@ -1247,13 +1247,13 @@ __ltsf2:
slli a7, a3, 9
beqz a7, .Llt_cmp
movi a2, 0
- abi_return
+ leaf_return
/* Check if x is a NaN. */
2: slli a7, a2, 9
beqz a7, 1b
movi a2, 0
- abi_return
+ leaf_return
.Llt_cmp:
/* Check if x and y have different signs. */
@@ -1266,13 +1266,13 @@ __ltsf2:
/* Check if x < y. */
bgeu a2, a3, 5f
4: movi a2, -1
- abi_return
+ leaf_return
.Llt_xneg:
/* Check if y < x. */
bltu a3, a2, 4b
5: movi a2, 0
- abi_return
+ leaf_return
.Llt_diff_signs:
bgez a2, 5b
@@ -1283,7 +1283,7 @@ __ltsf2:
movi a2, 0
movi a3, -1
movnez a2, a3, a7
- abi_return
+ leaf_return
/* Unordered */
@@ -1292,22 +1292,22 @@ __ltsf2:
.global __unordsf2
.type __unordsf2, @function
__unordsf2:
- abi_entry sp, 32
+ leaf_entry sp, 16
movi a6, 0x7f800000
ball a2, a6, 3f
1: ball a3, a6, 4f
2: movi a2, 0
- abi_return
+ leaf_return
3: slli a7, a2, 9
beqz a7, 1b
movi a2, 1
- abi_return
+ leaf_return
4: slli a7, a3, 9
beqz a7, 2b
movi a2, 1
- abi_return
+ leaf_return
#endif /* L_cmpsf2 */
@@ -1317,7 +1317,7 @@ __unordsf2:
.global __fixsfsi
.type __fixsfsi, @function
__fixsfsi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
@@ -1340,7 +1340,7 @@ __fixsfsi:
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
- abi_return
+ leaf_return
.Lfixsfsi_nan_or_inf:
/* Handle Infinity and NaN. */
@@ -1355,11 +1355,11 @@ __fixsfsi:
addi a5, a4, -1 /* 0x7fffffff */
movgez a4, a5, a2
mov a2, a4
- abi_return
+ leaf_return
.Lfixsfsi_zero:
movi a2, 0
- abi_return
+ leaf_return
#endif /* L_fixsfsi */
@@ -1369,7 +1369,7 @@ __fixsfsi:
.global __fixsfdi
.type __fixsfdi, @function
__fixsfdi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
@@ -1398,7 +1398,7 @@ __fixsfdi:
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
-1: abi_return
+1: leaf_return
.Lfixsfdi_smallshift:
movi xl, 0
@@ -1419,16 +1419,16 @@ __fixsfdi:
bgez a2, 1f
mov xh, a7
movi xl, 0
- abi_return
+ leaf_return
1: addi xh, a7, -1 /* 0x7fffffff */
movi xl, -1
- abi_return
+ leaf_return
.Lfixsfdi_zero:
movi xh, 0
movi xl, 0
- abi_return
+ leaf_return
#endif /* L_fixsfdi */
@@ -1438,7 +1438,7 @@ __fixsfdi:
.global __fixunssfsi
.type __fixunssfsi, @function
__fixunssfsi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
@@ -1463,7 +1463,7 @@ __fixunssfsi:
/* Negate the result if sign != 0. */
neg a2, a5
movgez a2, a5, a7
- abi_return
+ leaf_return
.Lfixunssfsi_nan_or_inf:
/* Handle Infinity and NaN. */
@@ -1472,28 +1472,28 @@ __fixunssfsi:
/* Translate NaN to 0xffffffff. */
movi a2, -1
- abi_return
+ leaf_return
.Lfixunssfsi_maxint:
slli a4, a6, 8 /* 0x80000000 */
movi a5, -1 /* 0xffffffff */
movgez a4, a5, a2
mov a2, a4
- abi_return
+ leaf_return
.Lfixunssfsi_zero:
movi a2, 0
- abi_return
+ leaf_return
.Lfixunssfsi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz a2, 1f
mov a2, a5 /* no shift needed */
- abi_return
+ leaf_return
/* Return 0x80000000 if negative. */
1: slli a2, a6, 8
- abi_return
+ leaf_return
#endif /* L_fixunssfsi */
@@ -1503,7 +1503,7 @@ __fixunssfsi:
.global __fixunssfdi
.type __fixunssfdi, @function
__fixunssfdi:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for NaN and Infinity. */
movi a6, 0x7f800000
@@ -1534,7 +1534,7 @@ __fixunssfdi:
neg xh, xh
beqz xl, 1f
addi xh, xh, -1
-1: abi_return
+1: leaf_return
.Lfixunssfdi_smallshift:
movi xl, 0
@@ -1550,24 +1550,24 @@ __fixunssfdi:
/* Translate NaN to 0xffffffff.... */
1: movi xh, -1
movi xl, -1
- abi_return
+ leaf_return
.Lfixunssfdi_maxint:
bgez a2, 1b
2: slli xh, a6, 8 /* 0x80000000 */
movi xl, 0
- abi_return
+ leaf_return
.Lfixunssfdi_zero:
movi xh, 0
movi xl, 0
- abi_return
+ leaf_return
.Lfixunssfdi_bigexp:
/* Handle unsigned maximum exponent case. */
bltz a7, 2b
movi xl, 0
- abi_return /* no shift needed */
+ leaf_return /* no shift needed */
#endif /* L_fixunssfdi */
@@ -1577,7 +1577,7 @@ __fixunssfdi:
.global __floatunsisf
.type __floatunsisf, @function
__floatunsisf:
- abi_entry sp, 32
+ leaf_entry sp, 16
beqz a2, .Lfloatsisf_return
/* Set the sign to zero and jump to the floatsisf code. */
@@ -1588,7 +1588,7 @@ __floatunsisf:
.global __floatsisf
.type __floatsisf, @function
__floatsisf:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for zero. */
beqz a2, .Lfloatsisf_return
@@ -1633,13 +1633,13 @@ __floatsisf:
beqz a6, .Lfloatsisf_exactlyhalf
.Lfloatsisf_return:
- abi_return
+ leaf_return
.Lfloatsisf_exactlyhalf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
- abi_return
+ leaf_return
#endif /* L_floatsisf */
@@ -1649,7 +1649,7 @@ __floatsisf:
.global __floatundisf
.type __floatundisf, @function
__floatundisf:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
@@ -1663,7 +1663,7 @@ __floatundisf:
.global __floatdisf
.type __floatdisf, @function
__floatdisf:
- abi_entry sp, 32
+ leaf_entry sp, 16
/* Check for zero. */
or a4, xh, xl
@@ -1714,7 +1714,7 @@ __floatdisf:
/* Check if the leftover fraction is exactly 1/2. */
slli a6, a6, 1
beqz a6, .Lfloatdisf_exactlyhalf
-2: abi_return
+2: leaf_return
.Lfloatdisf_bigshift:
/* xh is zero. Normalize with first 1 bit of xl in the msb of xh. */
@@ -1729,6 +1729,6 @@ __floatdisf:
/* Round down to the nearest even value. */
srli a2, a2, 1
slli a2, a2, 1
- abi_return
+ leaf_return
#endif /* L_floatdisf */
diff --git a/gcc/config/xtensa/lib1funcs.asm b/gcc/config/xtensa/lib1funcs.asm
index ebfd54ddfcd..7d316ede805 100644
--- a/gcc/config/xtensa/lib1funcs.asm
+++ b/gcc/config/xtensa/lib1funcs.asm
@@ -30,10 +30,11 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#include "xtensa-config.h"
-# Note: These functions use a minimum stack frame size of 32. This is
-# necessary for Xtensa configurations that only support a fixed register
-# window size of 8, where even leaf functions (such as these) need to
-# allocate space for a 4-word "extra save area".
+# Force each stack frame to contain an "Extra Save Area" (ESA) of at least
+# 16 bytes. This is necessary for non-standard Xtensa configurations that
+# only support a fixed register window size of 8, where even leaf functions
+# (such as these) need the ESA for interrupt handlers.
+#define MIN_ESA 16
# Define macros for the ABS and ADDX* instructions to handle cases
# where they are not included in the Xtensa processor configuration.
@@ -75,20 +76,20 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
#endif
.endm
-# Define macros for function entry and return, supporting either the
+# Define macros for leaf function entry and return, supporting either the
# standard register windowed ABI or the non-windowed call0 ABI. These
# macros do not allocate any extra stack space, so they only work for
# leaf functions that do not need to spill anything to the stack.
- .macro abi_entry reg, size
+ .macro leaf_entry reg, size
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
- entry \reg, \size
+ entry \reg, \size + MIN_ESA
#else
/* do nothing */
#endif
.endm
- .macro abi_return
+ .macro leaf_return
#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
retw
#else
@@ -102,14 +103,14 @@ Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
.global __mulsi3
.type __mulsi3,@function
__mulsi3:
- abi_entry sp, 32
+ leaf_entry sp, 16
#if XCHAL_HAVE_MUL16
or a4, a2, a3
srai a4, a4, 16
bnez a4, .LMUL16
mul16u a2, a2, a3
- abi_return
+ leaf_return
.LMUL16:
srai a4, a2, 16
srai a5, a3, 16
@@ -165,7 +166,7 @@ __mulsi3:
bgeui a3, 16, .Lmult_main_loop
neg a3, a2
movltz a2, a3, a5
- abi_return
+ leaf_return
.align 4
.Lmult_main_loop:
@@ -195,7 +196,7 @@ __mulsi3:
#endif /* !XCHAL_HAVE_MUL16 && !XCHAL_HAVE_MAC16 */
- abi_return
+ leaf_return
.size __mulsi3,.-__mulsi3
#endif /* L_mulsi3 */
@@ -264,7 +265,7 @@ __nsau_data:
.global __udivsi3
.type __udivsi3,@function
__udivsi3:
- abi_entry sp, 32
+ leaf_entry sp, 16
bltui a3, 2, .Lle_one # check if the divisor <= 1
mov a6, a2 # keep dividend in a6
@@ -297,24 +298,24 @@ __udivsi3:
bltu a6, a3, .Lreturn
addi a2, a2, 1 # increment quotient if dividend >= divisor
.Lreturn:
- abi_return
+ leaf_return
.Lle_one:
beqz a3, .Lerror # if divisor == 1, return the dividend
- abi_return
+ leaf_return
.Lspecial:
# return dividend >= divisor
bltu a6, a3, .Lreturn0
movi a2, 1
- abi_return
+ leaf_return
.Lerror:
# just return 0; could throw an exception
.Lreturn0:
movi a2, 0
- abi_return
+ leaf_return
.size __udivsi3,.-__udivsi3
#endif /* L_udivsi3 */
@@ -325,7 +326,7 @@ __udivsi3:
.global __divsi3
.type __divsi3,@function
__divsi3:
- abi_entry sp, 32
+ leaf_entry sp, 16
xor a7, a2, a3 # sign = dividend ^ divisor
do_abs a6, a2, a4 # udividend = abs(dividend)
do_abs a3, a3, a4 # udivisor = abs(divisor)
@@ -361,27 +362,27 @@ __divsi3:
.Lreturn:
neg a5, a2
movltz a2, a5, a7 # return (sign < 0) ? -quotient : quotient
- abi_return
+ leaf_return
.Lle_one:
beqz a3, .Lerror
neg a2, a6 # if udivisor == 1, then return...
movgez a2, a6, a7 # (sign < 0) ? -udividend : udividend
- abi_return
+ leaf_return
.Lspecial:
bltu a6, a3, .Lreturn0 # if dividend < divisor, return 0
movi a2, 1
movi a4, -1
movltz a2, a4, a7 # else return (sign < 0) ? -1 : 1
- abi_return
+ leaf_return
.Lerror:
# just return 0; could throw an exception
.Lreturn0:
movi a2, 0
- abi_return
+ leaf_return
.size __divsi3,.-__divsi3
#endif /* L_divsi3 */
@@ -392,7 +393,7 @@ __divsi3:
.global __umodsi3
.type __umodsi3,@function
__umodsi3:
- abi_entry sp, 32
+ leaf_entry sp, 16
bltui a3, 2, .Lle_one # check if the divisor is <= 1
do_nsau a5, a2, a6, a7 # dividend_shift = nsau(dividend)
@@ -422,13 +423,13 @@ __umodsi3:
bltu a2, a3, .Lreturn
sub a2, a2, a3 # subtract once more if dividend >= divisor
.Lreturn:
- abi_return
+ leaf_return
.Lle_one:
# the divisor is either 0 or 1, so just return 0.
# someday we may want to throw an exception if the divisor is 0.
movi a2, 0
- abi_return
+ leaf_return
.size __umodsi3,.-__umodsi3
#endif /* L_umodsi3 */
@@ -439,7 +440,7 @@ __umodsi3:
.global __modsi3
.type __modsi3,@function
__modsi3:
- abi_entry sp, 32
+ leaf_entry sp, 16
mov a7, a2 # save original (signed) dividend
do_abs a2, a2, a4 # udividend = abs(dividend)
do_abs a3, a3, a4 # udivisor = abs(divisor)
@@ -474,13 +475,13 @@ __modsi3:
bgez a7, .Lpositive
neg a2, a2 # if (dividend < 0), return -udividend
.Lpositive:
- abi_return
+ leaf_return
.Lle_one:
# udivisor is either 0 or 1, so just return 0.
# someday we may want to throw an exception if udivisor is 0.
movi a2, 0
- abi_return
+ leaf_return
.size __modsi3,.-__modsi3
#endif /* L_modsi3 */