diff options
author | Aidan MacDonald <amachronic@protonmail.com> | 2023-03-23 18:16:15 +0000 |
---|---|---|
committer | Aidan MacDonald <amachronic@protonmail.com> | 2023-03-23 18:16:33 +0000 |
commit | 58b2e457824dc93916233627b98614409e5f258d (patch) | |
tree | 67c485a7881745574d66ae10889c2da331ba68c0 /firmware/asm/arm | |
parent | 86429dbf1eca8ee0e08176997f508647c3abf6bd (diff) | |
download | rockbox-58b2e457824dc93916233627b98614409e5f258d.tar.gz rockbox-58b2e457824dc93916233627b98614409e5f258d.zip |
Fix unified syntax in ARM inline assembly
GCC 4.9 always emits assembly with divided syntax. Setting unified
syntax in inline assembly causes the assembler to complain about
GCC's generated code, because the directive extends past the scope
of the inline asm. Fix this by switching back to divided mode at the
end of each inline assembly block.
The assembler directives are hidden behind macros because later
versions of GCC won't need this workaround: they can be told to
use the unified syntax with -masm-syntax-unified.
Change-Id: Ic09e729e5bbb6fd44d08dac348daf6f55c75d7d8
Diffstat (limited to 'firmware/asm/arm')
-rw-r--r-- | firmware/asm/arm/corelock.c | 3 | ||||
-rw-r--r-- | firmware/asm/arm/thread.c | 3 |
2 files changed, 4 insertions, 2 deletions
diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c index 07ec77a60e..a60299436f 100644 --- a/firmware/asm/arm/corelock.c +++ b/firmware/asm/arm/corelock.c | |||
@@ -61,7 +61,7 @@ int corelock_try_lock(struct corelock *cl) | |||
61 | 61 | ||
62 | /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */ | 62 | /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */ |
63 | asm volatile ( | 63 | asm volatile ( |
64 | ".syntax unified \n" | 64 | BEGIN_ARM_ASM_SYNTAX_UNIFIED |
65 | "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */ | 65 | "mov r1, %[id] \n" /* r1 = PROCESSOR_ID */ |
66 | "ldrb r1, [r1] \n" | 66 | "ldrb r1, [r1] \n" |
67 | "strb r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */ | 67 | "strb r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */ |
@@ -74,6 +74,7 @@ int corelock_try_lock(struct corelock *cl) | |||
74 | "ands %[rv], %[rv], r1 \n" | 74 | "ands %[rv], %[rv], r1 \n" |
75 | "strbeq %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */ | 75 | "strbeq %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */ |
76 | "1: \n" /* Done */ | 76 | "1: \n" /* Done */ |
77 | END_ARM_ASM_SYNTAX_UNIFIED | ||
77 | : [rv] "=r"(rval) | 78 | : [rv] "=r"(rval) |
78 | : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl) | 79 | : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl) |
79 | : "r1","r2","cc" | 80 | : "r1","r2","cc" |
diff --git a/firmware/asm/arm/thread.c b/firmware/asm/arm/thread.c index bd9f950616..30df56e0d9 100644 --- a/firmware/asm/arm/thread.c +++ b/firmware/asm/arm/thread.c | |||
@@ -73,7 +73,7 @@ static inline void store_context(void* addr) | |||
73 | static inline void load_context(const void* addr) | 73 | static inline void load_context(const void* addr) |
74 | { | 74 | { |
75 | asm volatile( | 75 | asm volatile( |
76 | ".syntax unified \n" | 76 | BEGIN_ARM_ASM_SYNTAX_UNIFIED |
77 | "ldr r0, [%0, #40] \n" /* Load start pointer */ | 77 | "ldr r0, [%0, #40] \n" /* Load start pointer */ |
78 | "cmp r0, #0 \n" /* Check for NULL */ | 78 | "cmp r0, #0 \n" /* Check for NULL */ |
79 | 79 | ||
@@ -86,6 +86,7 @@ static inline void load_context(const void* addr) | |||
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */ | 88 | "ldmia %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */ |
89 | END_ARM_ASM_SYNTAX_UNIFIED | ||
89 | : : "r" (addr) : "r0" /* only! */ | 90 | : : "r" (addr) : "r0" /* only! */ |
90 | ); | 91 | ); |
91 | } | 92 | } |