Commit 6b9ce2b4 by Richard Earnshaw

libgcc: arm: convert thumb1 code to unified syntax

Unified syntax has been the official syntax for thumb1 assembly for
over 10 years now.  It's time we prepared for it to become the default
in the assembler.  But before we can do that we need to clean up some
laggards from the olden days.  Libgcc's thumb1 support is one such
example.

This patch converts all of the legacy (divided) syntax that I could
find over to unified code.  The identification was done with a trick
version of gas that defaulted to unified mode and therefore faulted
whenever legacy syntax was encountered.  The code produced was then
compared against the old code to check for differences.  One such
difference does exist: in unified syntax 'movs rd, rn' is encoded as
'lsls rd, rn, #0' rather than 'adds rd, rn, #0'.  That is a deliberate
change, introduced because the lsls encoding more closely reflects the
behaviour of 'movs' in arm state (where only some of the condition
flags are modified).
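
As a minimal sketch of what the conversion looks like (illustrative
only, not taken from libgcc; the '.cpu' choice and the 'flip_sign'
label are made up for the example, assuming arm-none-eabi gas):

        .syntax unified          @ assemble what follows as unified syntax
        .cpu    cortex-m0        @ an armv6-m core, i.e. thumb1 only
        .thumb                   @ thumb instruction set

        @ legacy (divided) syntax      unified syntax
        @   mov   r0, #1          -->    movs  r0, #1
        @   lsl   r4, #31         -->    lsls  r4, #31
        @   neg   r0, r0          -->    negs  r0, r0

flip_sign:                       @ made-up routine: flip the sign bit of r0
        movs    r4, #1           @ r4 = 1            (N, Z updated)
        lsls    r4, r4, #31      @ r4 = 0x80000000
        eors    r0, r0, r4       @ r0 ^= 0x80000000
        bx      lr

        @ The difference noted above: the flag-setting register move below
        @ is encoded as 'lsls r0, r1, #0' (N and Z updated, C and V
        @ preserved) rather than 'adds r0, r1, #0', which would also
        @ update C and V.
        movs    r0, r1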

	* config/arm/bpabi-v6m.S (aeabi_lcmp): Convert thumb1 code to unified
	syntax.
	(aeabi_ulcmp, aeabi_ldivmod, aeabi_uldivmod): Likewise.
	(aeabi_frsub, aeabi_cfcmpeq, aeabi_fcmpeq): Likewise.
	(aeabi_fcmp, aeabi_drsub, aeabi_cdrcmple): Likewise.
	(aeabi_cdcmpeq, aeabi_dcmpeq, aeabi_dcmp): Likewise.
	* config/arm/lib1funcs.S (Lend_fde): Convert thumb1 code to unified
	syntax.
	(divsi3, modsi3): Likewise.
	(clzdi2, ctzsi2): Likewise.
	* config/arm/libunwind.S (restore_core_regs): Convert thumb1 code to
	unified syntax.
	(UNWIND_WRAPPER): Likewise.
parent 8e6d0dba
ChangeLog:
+2020-03-03  Richard Earnshaw  <rearnsha@arm.com>
+
+	* config/arm/bpabi-v6m.S (aeabi_lcmp): Convert thumb1 code to unified
+	syntax.
+	(aeabi_ulcmp, aeabi_ldivmod, aeabi_uldivmod): Likewise.
+	(aeabi_frsub, aeabi_cfcmpeq, aeabi_fcmpeq): Likewise.
+	(aeabi_fcmp, aeabi_drsub, aeabi_cdrcmple): Likewise.
+	(aeabi_cdcmpeq, aeabi_dcmpeq, aeabi_dcmp): Likewise.
+	* config/arm/lib1funcs.S (Lend_fde): Convert thumb1 code to unified
+	syntax.
+	(divsi3, modsi3): Likewise.
+	(clzdi2, ctzsi2): Likewise.
+	* config/arm/libunwind.S (restore_core_regs): Convert thumb1 code to
+	unified syntax.
+	(UNWIND_WRAPPER): Likewise.
+
 2020-03-02  Martin Liska  <mliska@suse.cz>

 	* libgcov-interface.c: Remove duplicate
...
config/arm/bpabi-v6m.S:
@@ -39,21 +39,21 @@ FUNC_START aeabi_lcmp
        cmp     xxh, yyh
        beq     1f
        bgt     2f
-       mov     r0, #1
-       neg     r0, r0
+       movs    r0, #1
+       negs    r0, r0
        RET
 2:
-       mov     r0, #1
+       movs    r0, #1
        RET
 1:
-       sub     r0, xxl, yyl
+       subs    r0, xxl, yyl
        beq     1f
        bhi     2f
-       mov     r0, #1
-       neg     r0, r0
+       movs    r0, #1
+       negs    r0, r0
        RET
 2:
-       mov     r0, #1
+       movs    r0, #1
 1:
        RET
 FUNC_END aeabi_lcmp
@@ -65,15 +65,15 @@ FUNC_START aeabi_lcmp
 FUNC_START aeabi_ulcmp
        cmp     xxh, yyh
        bne     1f
-       sub     r0, xxl, yyl
+       subs    r0, xxl, yyl
        beq     2f
 1:
        bcs     1f
-       mov     r0, #1
-       neg     r0, r0
+       movs    r0, #1
+       negs    r0, r0
        RET
 1:
-       mov     r0, #1
+       movs    r0, #1
 2:
        RET
 FUNC_END aeabi_ulcmp
@@ -91,29 +91,29 @@ FUNC_START aeabi_ulcmp
        cmp     xxl, #0
 2:
        beq     3f
-       mov     xxh, #0
-       mvn     xxh, xxh        @ 0xffffffff
-       mov     xxl, xxh
+       movs    xxh, #0
+       mvns    xxh, xxh        @ 0xffffffff
+       movs    xxl, xxh
 3:
 .else
        blt     6f
        bgt     4f
        cmp     xxl, #0
        beq     5f
-4:     mov     xxl, #0
-       mvn     xxl, xxl        @ 0xffffffff
-       lsr     xxh, xxl, #1    @ 0x7fffffff
+4:     movs    xxl, #0
+       mvns    xxl, xxl        @ 0xffffffff
+       lsrs    xxh, xxl, #1    @ 0x7fffffff
        b       5f
-6:     mov     xxh, #0x80
-       lsl     xxh, xxh, #24   @ 0x80000000
-       mov     xxl, #0
+6:     movs    xxh, #0x80
+       lsls    xxh, xxh, #24   @ 0x80000000
+       movs    xxl, #0
 5:
 .endif
        @ tailcalls are tricky on v6-m.
        push    {r0, r1, r2}
        ldr     r0, 1f
        adr     r1, 1f
-       add     r0, r1
+       adds    r0, r1
        str     r0, [sp, #8]
        @ We know we are not on armv4t, so pop pc is safe.
        pop     {r0, r1, pc}
@@ -128,15 +128,15 @@ FUNC_START aeabi_ulcmp
 FUNC_START aeabi_ldivmod
        test_div_by_zero signed
        push    {r0, r1}
        mov     r0, sp
        push    {r0, lr}
        ldr     r0, [sp, #8]
        bl      SYM(__gnu_ldivmod_helper)
        ldr     r3, [sp, #4]
        mov     lr, r3
        add     sp, sp, #8
        pop     {r2, r3}
        RET
 FUNC_END aeabi_ldivmod
@@ -147,15 +147,15 @@ FUNC_START aeabi_ldivmod
 FUNC_START aeabi_uldivmod
        test_div_by_zero unsigned
        push    {r0, r1}
        mov     r0, sp
        push    {r0, lr}
        ldr     r0, [sp, #8]
        bl      SYM(__udivmoddi4)
        ldr     r3, [sp, #4]
        mov     lr, r3
        add     sp, sp, #8
        pop     {r2, r3}
        RET
 FUNC_END aeabi_uldivmod
@@ -166,9 +166,9 @@ FUNC_START aeabi_uldivmod
 FUNC_START aeabi_frsub
        push    {r4, lr}
-       mov     r4, #1
-       lsl     r4, #31
-       eor     r0, r0, r4
+       movs    r4, #1
+       lsls    r4, #31
+       eors    r0, r0, r4
        bl      __aeabi_fadd
        pop     {r4, pc}
@@ -181,7 +181,7 @@ FUNC_START aeabi_frsub
 FUNC_START aeabi_cfrcmple
        mov     ip, r0
-       mov     r0, r1
+       movs    r0, r1
        mov     r1, ip
        b       6f
@@ -196,8 +196,8 @@ FUNC_ALIAS aeabi_cfcmple aeabi_cfcmpeq
        cmp     r0, #0
        @ Clear the C flag if the return value was -1, indicating
        @ that the first operand was smaller than the second.
        bmi     1f
-       mov     r1, #0
+       movs    r1, #0
        cmn     r0, r1
 1:
        pop     {r0, r1, r2, r3, r4, pc}
@@ -210,8 +210,8 @@ FUNC_START aeabi_fcmpeq
        push    {r4, lr}
        bl      __eqsf2
-       neg     r0, r0
-       add     r0, r0, #1
+       negs    r0, r0
+       adds    r0, r0, #1
        pop     {r4, pc}
 FUNC_END aeabi_fcmpeq
@@ -223,10 +223,10 @@ FUNC_START aeabi_fcmp\cond
        bl      __\helper\mode
        cmp     r0, #0
        b\cond  1f
-       mov     r0, #0
+       movs    r0, #0
        pop     {r4, pc}
 1:
-       mov     r0, #1
+       movs    r0, #1
        pop     {r4, pc}
 FUNC_END aeabi_fcmp\cond
@@ -244,9 +244,9 @@ COMPARISON ge, ge
 FUNC_START aeabi_drsub
        push    {r4, lr}
-       mov     r4, #1
-       lsl     r4, #31
-       eor     xxh, xxh, r4
+       movs    r4, #1
+       lsls    r4, #31
+       eors    xxh, xxh, r4
        bl      __aeabi_dadd
        pop     {r4, pc}
@@ -259,10 +259,10 @@ FUNC_START aeabi_drsub
 FUNC_START aeabi_cdrcmple
        mov     ip, r0
-       mov     r0, r2
+       movs    r0, r2
        mov     r2, ip
        mov     ip, r1
-       mov     r1, r3
+       movs    r1, r3
        mov     r3, ip
        b       6f
@@ -277,8 +277,8 @@ FUNC_ALIAS aeabi_cdcmple aeabi_cdcmpeq
        cmp     r0, #0
        @ Clear the C flag if the return value was -1, indicating
        @ that the first operand was smaller than the second.
        bmi     1f
-       mov     r1, #0
+       movs    r1, #0
        cmn     r0, r1
 1:
        pop     {r0, r1, r2, r3, r4, pc}
@@ -291,8 +291,8 @@ FUNC_START aeabi_dcmpeq
        push    {r4, lr}
        bl      __eqdf2
-       neg     r0, r0
-       add     r0, r0, #1
+       negs    r0, r0
+       adds    r0, r0, #1
        pop     {r4, pc}
 FUNC_END aeabi_dcmpeq
@@ -304,10 +304,10 @@ FUNC_START aeabi_dcmp\cond
        bl      __\helper\mode
        cmp     r0, #0
        b\cond  1f
-       mov     r0, #0
+       movs    r0, #0
        pop     {r4, pc}
 1:
-       mov     r0, #1
+       movs    r0, #1
        pop     {r4, pc}
 FUNC_END aeabi_dcmp\cond
...
config/arm/libunwind.S:
@@ -63,28 +63,28 @@
 /* r0 points to a 16-word block. Upload these values to the actual core
    state. */
 FUNC_START restore_core_regs
-       mov     r1, r0
-       add     r1, r1, #52
+       movs    r1, r0
+       adds    r1, r1, #52
        ldmia   r1!, {r3, r4, r5}
-       sub     r3, r3, #4
+       subs    r3, r3, #4
        mov     ip, r3
        str     r5, [r3]
        mov     lr, r4
        /* Restore r8-r11. */
-       mov     r1, r0
-       add     r1, r1, #32
+       movs    r1, r0
+       adds    r1, r1, #32
        ldmia   r1!, {r2, r3, r4, r5}
        mov     r8, r2
        mov     r9, r3
        mov     sl, r4
        mov     fp, r5
-       mov     r1, r0
-       add     r1, r1, #8
+       movs    r1, r0
+       adds    r1, r1, #8
        ldmia   r1!, {r2, r3, r4, r5, r6, r7}
        ldr     r1, [r0, #4]
        ldr     r0, [r0]
        mov     sp, ip
        pop     {pc}
 FUNC_END restore_core_regs
 UNPREFIX restore_core_regs
@@ -132,38 +132,38 @@ FUNC_START gnu_Unwind_Save_WMMXC
 FUNC_START \name
        /* Create a phase2_vrs structure. */
        /* Save r0 in the PC slot so we can use it as a scratch register. */
        push    {r0}
        add     r0, sp, #4
        push    {r0, lr}        /* Push original SP and LR. */
        /* Make space for r8-r12. */
        sub     sp, sp, #20
        /* Save low registers. */
        push    {r0, r1, r2, r3, r4, r5, r6, r7}
        /* Save high registers. */
        add     r0, sp, #32
        mov     r1, r8
        mov     r2, r9
        mov     r3, sl
        mov     r4, fp
        mov     r5, ip
        stmia   r0!, {r1, r2, r3, r4, r5}
        /* Restore original low register values. */
        add     r0, sp, #4
        ldmia   r0!, {r1, r2, r3, r4, r5}
        /* Restore orginial r0. */
        ldr     r0, [sp, #60]
        str     r0, [sp]
        /* Demand-save flags, plus an extra word for alignment. */
-       mov     r3, #0
+       movs    r3, #0
        push    {r2, r3}
        /* Point r1 at the block. Pass r[0..nargs) unchanged. */
        add     r\nargs, sp, #4
        bl      SYM (__gnu\name)
        ldr     r3, [sp, #64]
        add     sp, sp, #72
        bx      r3
 FUNC_END \name
 UNPREFIX \name
...