Commit ff935d0c by Martin Galvan, committed by Ramana Radhakrishnan

Add support for CFI directives in fp emulation routines for ARM.


2015-05-15  Martin Galvan  <martin.galvan@tallertechnologies.com>

        * config/arm/lib1funcs.S (CFI_START_FUNCTION, CFI_END_FUNCTION):
        New macros.
        * config/arm/ieee754-df.S: Add CFI directives.
        * config/arm/ieee754-sf.S: Add CFI directives.

From-SVN: r223220
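For orientation, the pattern the patch applies throughout both files is sketched below. This is an illustrative outline only (the routine name is a placeholder, not a routine from the patch): each function is bracketed by the new CFI_START_FUNCTION/CFI_END_FUNCTION macros, and every stack adjustment is mirrored by a .cfi_adjust_cfa_offset plus .cfi_rel_offset notes for the saved registers, so a debugger can unwind and backtrace through the soft-float routines.

ARM_FUNC_START example_routine        @ placeholder name, not from the patch
CFI_START_FUNCTION                    @ expands to .cfi_startproc + .cfi_remember_state
do_push {r4, r5, lr}                  @ sp -= 12
.cfi_adjust_cfa_offset 12             @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0                 @ saved registers live at sp .. sp + 8
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
@ ... body of the routine ...
RETLDM "r4, r5"                       @ pop the saved registers and return
CFI_END_FUNCTION                      @ expands to .cfi_restore_state + .cfi_endproc
FUNC_END example_routine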
parent cf57e993
2015-05-15 Martin Galvan <martin.galvan@tallertechnologies.com>
* config/arm/lib1funcs.S (CFI_START_FUNCTION, CFI_END_FUNCTION):
New macros.
* config/arm/ieee754-df.S: Add CFI directives.
* config/arm/ieee754-sf.S: Add CFI directives.
2015-05-13 Eric Botcazou <ebotcazou@adacore.com>
* configure.ac: Include config/sjlj.m4.
config/arm/ieee754-df.S
@@ -33,8 +33,12 @@
* Only the default rounding mode is intended for best performances.
* Exceptions aren't supported yet, but that can be added quite easily
* if necessary without impacting performances.
*
* In the CFI related comments, 'previousOffset' refers to the previous offset
* from sp used to compute the CFA.
*/
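@ (Illustrative note, not part of the original patch.) Concretely: if the
@ CFA is sp + previousOffset before "do_push {r4, r5, lr}", the push drops
@ sp by 12, so the paired ".cfi_adjust_cfa_offset 12" re-expresses the same
@ CFA as sp + previousOffset + 12; the matching pop (or an explicit
@ ".cfi_adjust_cfa_offset -12") brings the expression back to
@ sp + previousOffset.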
.cfi_sections .debug_frame
#ifndef __ARMEB__
#define xl r0
@@ -53,11 +57,13 @@
ARM_FUNC_START negdf2
ARM_FUNC_ALIAS aeabi_dneg negdf2
CFI_START_FUNCTION
@ flip sign bit
eor xh, xh, #0x80000000
RET
CFI_END_FUNCTION
FUNC_END aeabi_dneg
FUNC_END negdf2
@@ -66,6 +72,7 @@ ARM_FUNC_ALIAS aeabi_dneg negdf2
#ifdef L_arm_addsubdf3
ARM_FUNC_START aeabi_drsub
CFI_START_FUNCTION
eor xh, xh, #0x80000000 @ flip sign bit of first arg
b 1f
@@ -81,7 +88,11 @@ ARM_FUNC_ALIAS aeabi_dsub subdf3
ARM_FUNC_START adddf3
ARM_FUNC_ALIAS aeabi_dadd adddf3
1: do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
@ Look for zeroes, equal values, INF, or NAN.
shift1 lsl, r4, xh, #1
@@ -148,6 +159,11 @@ ARM_FUNC_ALIAS aeabi_dadd adddf3
@ Since this is not common case, rescale them off line.
teq r4, r5
beq LSYM(Lad_d)
@ CFI note: we're lucky that the branches to Lad_* that appear after this function
@ have a CFI state that's exactly the same as the one we're in at this
@ point. Otherwise the CFI would change to a different state after the branch,
@ which would be disastrous for backtracing.
LSYM(Lad_x):
@ Compensate for the exponent overlapping the mantissa MSB added later
@@ -413,6 +429,7 @@ LSYM(Lad_i):
orrne xh, xh, #0x00080000 @ quiet NAN
RETLDM "r4, r5"
CFI_END_FUNCTION
FUNC_END aeabi_dsub
FUNC_END subdf3
FUNC_END aeabi_dadd
@@ -420,12 +437,19 @@ LSYM(Lad_i):
ARM_FUNC_START floatunsidf
ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
CFI_START_FUNCTION
teq r0, #0
do_it eq, t
moveq r1, #0
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r4, #0x400 @ initial exponent
add r4, r4, #(52-1 - 1)
mov r5, #0 @ sign bit is 0
@@ -435,17 +459,25 @@ ARM_FUNC_ALIAS aeabi_ui2d floatunsidf
mov xh, #0
b LSYM(Lad_l)
CFI_END_FUNCTION
FUNC_END aeabi_ui2d
FUNC_END floatunsidf
ARM_FUNC_START floatsidf
ARM_FUNC_ALIAS aeabi_i2d floatsidf
CFI_START_FUNCTION
teq r0, #0
do_it eq, t
moveq r1, #0
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r4, #0x400 @ initial exponent
add r4, r4, #(52-1 - 1)
ands r5, r0, #0x80000000 @ sign bit in r5
@@ -457,11 +489,13 @@ ARM_FUNC_ALIAS aeabi_i2d floatsidf
mov xh, #0
b LSYM(Lad_l)
CFI_END_FUNCTION
FUNC_END aeabi_i2d
FUNC_END floatsidf
ARM_FUNC_START extendsfdf2
ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
CFI_START_FUNCTION
movs r2, r0, lsl #1 @ toss sign bit
mov xh, r2, asr #3 @ stretch exponent
@@ -480,34 +514,54 @@ ARM_FUNC_ALIAS aeabi_f2d extendsfdf2
@ value was denormalized. We can normalize it now.
do_push {r4, r5, lr}
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8.
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r4, #0x380 @ setup corresponding exponent
and r5, xh, #0x80000000 @ move sign bit in r5
bic xh, xh, #0x80000000
b LSYM(Lad_l)
CFI_END_FUNCTION
FUNC_END aeabi_f2d
FUNC_END extendsfdf2
ARM_FUNC_START floatundidf
ARM_FUNC_ALIAS aeabi_ul2d floatundidf
CFI_START_FUNCTION
.cfi_remember_state @ Save the current CFA state.
orrs r2, r0, r1
do_it eq
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp + 0 to sp + 8
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
mov r5, #0
b 2f
ARM_FUNC_START floatdidf
ARM_FUNC_ALIAS aeabi_l2d floatdidf
.cfi_restore_state
@ Restore the CFI state we saved above. If we didn't do this then the
@ following instructions would have the CFI state that was set by the
@ offset adjustments made in floatundidf.
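@ (Illustrative note, not part of the original patch.) The state restored
@ here is the one saved by the ".cfi_remember_state" at the top of
@ floatundidf, i.e. the state before its "do_push {r4, r5, lr}" and
@ ".cfi_adjust_cfa_offset 12", so the entry point below starts from
@ CFA = sp + previousOffset again and then applies its own push and
@ adjustment.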
orrs r2, r0, r1
do_it eq
RETc(eq)
do_push {r4, r5, lr} @ sp -= 12
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 8
.cfi_rel_offset r5, 4
.cfi_rel_offset lr, 8
ands r5, ah, #0x80000000 @ sign bit in r5
bpl 2f
@@ -550,6 +604,7 @@ ARM_FUNC_ALIAS aeabi_l2d floatdidf
add r4, r4, r2
b LSYM(Lad_p)
CFI_END_FUNCTION
FUNC_END floatdidf
FUNC_END aeabi_l2d
FUNC_END floatundidf
@@ -561,7 +616,14 @@ ARM_FUNC_ALIAS aeabi_l2d floatdidf
ARM_FUNC_START muldf3
ARM_FUNC_ALIAS aeabi_dmul muldf3
CFI_START_FUNCTION
do_push {r4, r5, r6, lr} @ sp -= 16
.cfi_adjust_cfa_offset 16 @ CFA is now sp + previousOffset + 16
.cfi_rel_offset r4, 0 @ Registers are saved from sp to sp + 12.
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset lr, 12
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
@@ -596,7 +658,16 @@ ARM_FUNC_ALIAS aeabi_dmul muldf3
and r6, r6, #0x80000000
@ Well, no way to make it shorter without the umull instruction.
stmfd sp!, {r6, r7, r8, r9, sl, fp} @ sp -= 24
.cfi_remember_state @ Save the current CFI state.
.cfi_adjust_cfa_offset 24 @ CFA is now sp + previousOffset + 24.
.cfi_rel_offset r6, 0 @ Registers are saved from sp to sp + 20.
.cfi_rel_offset r7, 4
.cfi_rel_offset r8, 8
.cfi_rel_offset r9, 12
.cfi_rel_offset sl, 16
.cfi_rel_offset fp, 20
mov r7, xl, lsr #16
mov r8, yl, lsr #16
mov r9, xh, lsr #16
@@ -648,8 +719,8 @@ ARM_FUNC_ALIAS aeabi_dmul muldf3
mul fp, xh, yh
adcs r5, r5, fp
adc r6, r6, #0
ldmfd sp!, {yl, r7, r8, r9, sl, fp} @ sp += 24
.cfi_restore_state @ Restore the previous CFI state.
#else
@ Here is the actual multiplication.
@@ -715,7 +786,6 @@ LSYM(Lml_1):
orr xh, xh, #0x00100000
mov lr, #0
subs r4, r4, #1
LSYM(Lml_u):
@ Overflow?
bgt LSYM(Lml_o)
@@ -863,13 +933,20 @@ LSYM(Lml_n):
orr xh, xh, #0x00f80000
RETLDM "r4, r5, r6"
CFI_END_FUNCTION
FUNC_END aeabi_dmul
FUNC_END muldf3
ARM_FUNC_START divdf3
ARM_FUNC_ALIAS aeabi_ddiv divdf3
CFI_START_FUNCTION
do_push {r4, r5, r6, lr}
.cfi_adjust_cfa_offset 16
.cfi_rel_offset r4, 0
.cfi_rel_offset r5, 4
.cfi_rel_offset r6, 8
.cfi_rel_offset lr, 12
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
@@ -1052,6 +1129,7 @@ LSYM(Ldv_s):
bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
b LSYM(Lml_n) @ 0 / 0 -> NAN
CFI_END_FUNCTION
FUNC_END aeabi_ddiv
FUNC_END divdf3
@@ -1063,6 +1141,7 @@ LSYM(Ldv_s):
ARM_FUNC_START gtdf2
ARM_FUNC_ALIAS gedf2 gtdf2
CFI_START_FUNCTION
mov ip, #-1
b 1f
@@ -1077,6 +1156,10 @@ ARM_FUNC_ALIAS eqdf2 cmpdf2
mov ip, #1 @ how should we specify unordered here?
1: str ip, [sp, #-4]!
.cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4.
@ We're not adding CFI for ip as it's pushed into the stack only because
@ it may be popped off later as a return value (i.e. we're not preserving
@ it anyways).
@ Trap any INF/NAN first.
mov ip, xh, lsl #1
@@ -1085,10 +1168,18 @@ ARM_FUNC_ALIAS eqdf2 cmpdf2
do_it ne
COND(mvn,s,ne) ip, ip, asr #21
beq 3f
.cfi_remember_state
@ Save the current CFI state. This is done because the branch is conditional,
@ and if we don't take it we'll issue a .cfi_adjust_cfa_offset and return.
@ If we do take it, however, the .cfi_adjust_cfa_offset from the non-branch
@ code will affect the branch code as well. To avoid this we'll restore
@ the current state before executing the branch code.
@ Test for equality.
@ Note that 0.0 is equal to -0.0.
2: add sp, sp, #4
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
orrs ip, xl, xh, lsl #1 @ if x == 0.0 or -0.0
do_it eq, e
COND(orr,s,eq) ip, yl, yh, lsl #1 @ and y == 0.0 or -0.0
@@ -1117,8 +1208,13 @@ ARM_FUNC_ALIAS eqdf2 cmpdf2
orr r0, r0, #1
RET
3: @ Look for a NAN.
@ Restore the previous CFI state (i.e. keep the CFI state as it was
@ before the branch).
.cfi_restore_state
mov ip, xh, lsl #1
mvns ip, ip, asr #21
bne 4f
orrs ip, xl, xh, lsl #12
@@ -1128,9 +1224,13 @@ ARM_FUNC_ALIAS eqdf2 cmpdf2
bne 2b
orrs ip, yl, yh, lsl #12
beq 2b @ y is not NAN
5: ldr r0, [sp], #4 @ unordered return code
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
RET
CFI_END_FUNCTION
FUNC_END gedf2
FUNC_END gtdf2
FUNC_END ledf2
@@ -1140,6 +1240,7 @@ ARM_FUNC_ALIAS eqdf2 cmpdf2
FUNC_END cmpdf2
ARM_FUNC_START aeabi_cdrcmple
CFI_START_FUNCTION
mov ip, r0
mov r0, r2
@@ -1155,6 +1256,10 @@ ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: do_push {r0, lr}
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8.
.cfi_rel_offset r0, 0 @ Previous r0 is saved at sp.
.cfi_rel_offset lr, 4 @ Previous lr is saved at sp + 4.
ARM_CALL cmpdf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
@@ -1162,59 +1267,86 @@ ARM_FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
@ that the first operand was smaller than the second.
do_it mi
cmnmi r0, #0
RETLDM "r0"
CFI_END_FUNCTION
FUNC_END aeabi_cdcmple
FUNC_END aeabi_cdcmpeq
FUNC_END aeabi_cdrcmple
ARM_FUNC_START aeabi_dcmpeq
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it eq, e
moveq r0, #1 @ Equal to.
movne r0, #0 @ Less than, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmpeq
ARM_FUNC_START aeabi_dcmplt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it cc, e
movcc r0, #1 @ Less than.
movcs r0, #0 @ Equal to, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmplt
ARM_FUNC_START aeabi_dcmple
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdcmple
do_it ls, e
movls r0, #1 @ Less than or equal to.
movhi r0, #0 @ Greater than or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmple
ARM_FUNC_START aeabi_dcmpge
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdrcmple
do_it ls, e
movls r0, #1 @ Operand 2 is less than or equal to operand 1.
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmpge
ARM_FUNC_START aeabi_dcmpgt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cdrcmple
do_it cc, e
movcc r0, #1 @ Operand 2 is less than operand 1.
@@ -1222,6 +1354,7 @@ ARM_FUNC_START aeabi_dcmpgt
@ or they are unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_dcmpgt
#endif /* L_cmpdf2 */
@@ -1230,6 +1363,7 @@ ARM_FUNC_START aeabi_dcmpgt
ARM_FUNC_START unorddf2
ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
.cfi_startproc
mov ip, xh, lsl #1
mvns ip, ip, asr #21
@@ -1247,6 +1381,7 @@ ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
3: mov r0, #1 @ arguments are unordered.
RET
.cfi_endproc
FUNC_END aeabi_dcmpun
FUNC_END unorddf2
@@ -1256,6 +1391,7 @@ ARM_FUNC_ALIAS aeabi_dcmpun unorddf2
ARM_FUNC_START fixdfsi
ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
CFI_START_FUNCTION
@ check exponent range.
mov r2, xh, lsl #1
@@ -1289,6 +1425,7 @@ ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
4: mov r0, #0 @ How should we convert NAN?
RET
CFI_END_FUNCTION
FUNC_END aeabi_d2iz
FUNC_END fixdfsi
@@ -1298,6 +1435,7 @@ ARM_FUNC_ALIAS aeabi_d2iz fixdfsi
ARM_FUNC_START fixunsdfsi
ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
CFI_START_FUNCTION
@ check exponent range.
movs r2, xh, lsl #1
@@ -1327,6 +1465,7 @@ ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
4: mov r0, #0 @ How should we convert NAN?
RET
CFI_END_FUNCTION
FUNC_END aeabi_d2uiz
FUNC_END fixunsdfsi
@@ -1336,6 +1475,7 @@ ARM_FUNC_ALIAS aeabi_d2uiz fixunsdfsi
ARM_FUNC_START truncdfsf2
ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
CFI_START_FUNCTION
@ check exponent range.
mov r2, xh, lsl #1
@@ -1400,6 +1540,7 @@ ARM_FUNC_ALIAS aeabi_d2f truncdfsf2
orr r0, r0, #0x00800000
RET
CFI_END_FUNCTION
FUNC_END aeabi_d2f
FUNC_END truncdfsf2
config/arm/ieee754-sf.S
@@ -31,16 +31,21 @@
* Only the default rounding mode is intended for best performances.
* Exceptions aren't supported yet, but that can be added quite easily
* if necessary without impacting performances.
*
* In the CFI related comments, 'previousOffset' refers to the previous offset
* from sp used to compute the CFA.
*/
#ifdef L_arm_negsf2
ARM_FUNC_START negsf2
ARM_FUNC_ALIAS aeabi_fneg negsf2
CFI_START_FUNCTION
eor r0, r0, #0x80000000 @ flip sign bit
RET
CFI_END_FUNCTION
FUNC_END aeabi_fneg
FUNC_END negsf2
@@ -49,6 +54,7 @@ ARM_FUNC_ALIAS aeabi_fneg negsf2
#ifdef L_arm_addsubsf3
ARM_FUNC_START aeabi_frsub
CFI_START_FUNCTION
eor r0, r0, #0x80000000 @ flip sign bit of first arg
b 1f
@@ -284,6 +290,7 @@ LSYM(Lad_i):
orrne r0, r0, #0x00400000 @ quiet NAN
RET
CFI_END_FUNCTION
FUNC_END aeabi_frsub
FUNC_END aeabi_fadd
FUNC_END addsf3
@@ -292,6 +299,7 @@ LSYM(Lad_i):
ARM_FUNC_START floatunsisf
ARM_FUNC_ALIAS aeabi_ui2f floatunsisf
CFI_START_FUNCTION
mov r3, #0
b 1f
@@ -316,6 +324,7 @@ ARM_FUNC_ALIAS aeabi_i2f floatsisf
mov al, #0
b 2f
CFI_END_FUNCTION
FUNC_END aeabi_i2f
FUNC_END floatsisf
FUNC_END aeabi_ui2f
@@ -323,6 +332,7 @@ ARM_FUNC_ALIAS aeabi_i2f floatsisf
ARM_FUNC_START floatundisf
ARM_FUNC_ALIAS aeabi_ul2f floatundisf
CFI_START_FUNCTION
orrs r2, r0, r1
do_it eq
@@ -409,6 +419,7 @@ ARM_FUNC_ALIAS aeabi_l2f floatdisf
biceq r0, r0, ip, lsr #31
RET
CFI_END_FUNCTION
FUNC_END floatdisf
FUNC_END aeabi_l2f
FUNC_END floatundisf
@@ -420,6 +431,7 @@ ARM_FUNC_ALIAS aeabi_l2f floatdisf
ARM_FUNC_START mulsf3
ARM_FUNC_ALIAS aeabi_fmul mulsf3
CFI_START_FUNCTION
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
@@ -454,7 +466,13 @@ LSYM(Lml_x):
and r3, ip, #0x80000000
@ Well, no way to make it shorter without the umull instruction.
do_push {r3, r4, r5} @ sp -= 12
.cfi_remember_state @ Save the current CFI state
.cfi_adjust_cfa_offset 12 @ CFA is now sp + previousOffset + 12
.cfi_rel_offset r3, 0 @ Registers are saved from sp to sp + 8
.cfi_rel_offset r4, 4
.cfi_rel_offset r5, 8
mov r4, r0, lsr #16
mov r5, r1, lsr #16
bic r0, r0, r4, lsl #16
@@ -465,7 +483,8 @@ LSYM(Lml_x):
mla r0, r4, r1, r0
adds r3, r3, r0, lsl #16
adc r1, ip, r0, lsr #16
do_pop {r0, r4, r5} @ sp += 12
.cfi_restore_state @ Restore the previous CFI state
#else
@@ -618,11 +637,13 @@ LSYM(Lml_n):
orr r0, r0, #0x00c00000
RET
CFI_END_FUNCTION
FUNC_END aeabi_fmul
FUNC_END mulsf3
ARM_FUNC_START divsf3
ARM_FUNC_ALIAS aeabi_fdiv divsf3
CFI_START_FUNCTION
@ Mask out exponents, trap any zero/denormal/INF/NAN.
mov ip, #0xff
@@ -758,6 +779,7 @@ LSYM(Ldv_s):
bne LSYM(Lml_z) @ 0 / <non_zero> -> 0
b LSYM(Lml_n) @ 0 / 0 -> NAN
CFI_END_FUNCTION
FUNC_END aeabi_fdiv
FUNC_END divsf3
@@ -782,6 +804,7 @@ LSYM(Ldv_s):
ARM_FUNC_START gtsf2
ARM_FUNC_ALIAS gesf2 gtsf2
CFI_START_FUNCTION
mov ip, #-1
b 1f
@@ -796,6 +819,10 @@ ARM_FUNC_ALIAS eqsf2 cmpsf2
mov ip, #1 @ how should we specify unordered here?
1: str ip, [sp, #-4]!
.cfi_adjust_cfa_offset 4 @ CFA is now sp + previousOffset + 4.
@ We're not adding CFI for ip as it's pushed into the stack only because
@ it may be popped off later as a return value (i.e. we're not preserving
@ it anyways).
@ Trap any INF/NAN first.
mov r2, r0, lsl #1
@@ -804,10 +831,18 @@ ARM_FUNC_ALIAS eqsf2 cmpsf2
do_it ne
COND(mvn,s,ne) ip, r3, asr #24
beq 3f
.cfi_remember_state
@ Save the current CFI state. This is done because the branch is conditional,
@ and if we don't take it we'll issue a .cfi_adjust_cfa_offset and return.
@ If we do take it, however, the .cfi_adjust_cfa_offset from the non-branch
@ code will affect the branch code as well. To avoid this we'll restore
@ the current state before executing the branch code.
@ Compare values.
@ Note that 0.0 is equal to -0.0.
2: add sp, sp, #4
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
orrs ip, r2, r3, lsr #1 @ test if both are 0, clear C flag
do_it ne
teqne r0, r1 @ if not 0 compare sign
@@ -823,8 +858,13 @@ ARM_FUNC_ALIAS eqsf2 cmpsf2
orrne r0, r0, #1
RET
3: @ Look for a NAN.
@ Restore the previous CFI state (i.e. keep the CFI state as it was
@ before the branch).
.cfi_restore_state
mvns ip, r2, asr #24
bne 4f
movs ip, r0, lsl #9
bne 5f @ r0 is NAN
@@ -832,9 +872,12 @@ ARM_FUNC_ALIAS eqsf2 cmpsf2
bne 2b
movs ip, r1, lsl #9
beq 2b @ r1 is not NAN
5: ldr r0, [sp], #4 @ return unordered code.
.cfi_adjust_cfa_offset -4 @ CFA is now sp + previousOffset.
RET
CFI_END_FUNCTION
FUNC_END gesf2
FUNC_END gtsf2
FUNC_END lesf2
@@ -844,6 +887,7 @@ ARM_FUNC_ALIAS eqsf2 cmpsf2
FUNC_END cmpsf2
ARM_FUNC_START aeabi_cfrcmple
CFI_START_FUNCTION
mov ip, r0
mov r0, r1
@@ -856,6 +900,13 @@ ARM_FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq
@ The status-returning routines are required to preserve all
@ registers except ip, lr, and cpsr.
6: do_push {r0, r1, r2, r3, lr}
.cfi_adjust_cfa_offset 20 @ CFA is at sp + previousOffset + 20
.cfi_rel_offset r0, 0 @ Registers are saved from sp to sp + 16
.cfi_rel_offset r1, 4
.cfi_rel_offset r2, 8
.cfi_rel_offset r3, 12
.cfi_rel_offset lr, 16
ARM_CALL cmpsf2
@ Set the Z flag correctly, and the C flag unconditionally.
cmp r0, #0
@@ -865,57 +916,82 @@ ARM_FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq
cmnmi r0, #0
RETLDM "r0, r1, r2, r3"
CFI_END_FUNCTION
FUNC_END aeabi_cfcmple
FUNC_END aeabi_cfcmpeq
FUNC_END aeabi_cfrcmple
ARM_FUNC_START aeabi_fcmpeq
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it eq, e
moveq r0, #1 @ Equal to.
movne r0, #0 @ Less than, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmpeq
ARM_FUNC_START aeabi_fcmplt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it cc, e
movcc r0, #1 @ Less than.
movcs r0, #0 @ Equal to, greater than, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmplt
ARM_FUNC_START aeabi_fcmple
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfcmple
do_it ls, e
movls r0, #1 @ Less than or equal to.
movhi r0, #0 @ Greater than or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmple
ARM_FUNC_START aeabi_fcmpge
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfrcmple
do_it ls, e
movls r0, #1 @ Operand 2 is less than or equal to operand 1.
movhi r0, #0 @ Operand 2 greater than operand 1, or unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmpge
ARM_FUNC_START aeabi_fcmpgt
CFI_START_FUNCTION
str lr, [sp, #-8]! @ sp -= 8
.cfi_adjust_cfa_offset 8 @ CFA is now sp + previousOffset + 8
.cfi_rel_offset lr, 0 @ lr is at sp
ARM_CALL aeabi_cfrcmple
do_it cc, e
movcc r0, #1 @ Operand 2 is less than operand 1.
@@ -923,6 +999,7 @@ ARM_FUNC_START aeabi_fcmpgt
@ or they are unordered.
RETLDM
CFI_END_FUNCTION
FUNC_END aeabi_fcmpgt
#endif /* L_cmpsf2 */
@@ -931,6 +1008,7 @@ ARM_FUNC_START aeabi_fcmpgt
ARM_FUNC_START unordsf2
ARM_FUNC_ALIAS aeabi_fcmpun unordsf2
CFI_START_FUNCTION
mov r2, r0, lsl #1
mov r3, r1, lsl #1
@@ -947,6 +1025,7 @@ ARM_FUNC_ALIAS aeabi_fcmpun unordsf2
3: mov r0, #1 @ arguments are unordered.
RET
CFI_END_FUNCTION
FUNC_END aeabi_fcmpun
FUNC_END unordsf2
@@ -956,6 +1035,7 @@ ARM_FUNC_ALIAS aeabi_fcmpun unordsf2
ARM_FUNC_START fixsfsi
ARM_FUNC_ALIAS aeabi_f2iz fixsfsi
CFI_START_FUNCTION
@ check exponent range.
mov r2, r0, lsl #1
@@ -989,6 +1069,7 @@ ARM_FUNC_ALIAS aeabi_f2iz fixsfsi
4: mov r0, #0 @ What should we convert NAN to?
RET
CFI_END_FUNCTION
FUNC_END aeabi_f2iz
FUNC_END fixsfsi
@@ -998,6 +1079,7 @@ ARM_FUNC_ALIAS aeabi_f2iz fixsfsi
ARM_FUNC_START fixunssfsi
ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi
CFI_START_FUNCTION
@ check exponent range.
movs r2, r0, lsl #1
@@ -1027,6 +1109,7 @@ ARM_FUNC_ALIAS aeabi_f2uiz fixunssfsi
4: mov r0, #0 @ What should we convert NAN to?
RET
CFI_END_FUNCTION
FUNC_END aeabi_f2uiz
FUNC_END fixunssfsi
config/arm/lib1funcs.S
@@ -1965,6 +1965,16 @@ LSYM(Lchange_\register):
#endif /* Arch supports thumb. */
.macro CFI_START_FUNCTION
.cfi_startproc
.cfi_remember_state
.endm
.macro CFI_END_FUNCTION
.cfi_restore_state
.cfi_endproc
.endm
#ifndef __symbian__
#ifndef __ARM_ARCH_6M__
#include "ieee754-df.S"