author     Aidan MacDonald <amachronic@protonmail.com>   2023-03-23 18:16:15 +0000
committer  Aidan MacDonald <amachronic@protonmail.com>   2023-03-23 18:16:33 +0000
commit     58b2e457824dc93916233627b98614409e5f258d (patch)
tree       67c485a7881745574d66ae10889c2da331ba68c0
parent     86429dbf1eca8ee0e08176997f508647c3abf6bd (diff)
Fix unified syntax in ARM inline assembly
GCC 4.9 always emits assembly with divided syntax. Setting unified
syntax in inline assembly causes the assembler to complain about
GCC's generated code, because the directive extends past the scope
of the inline asm. Fix this by setting divided mode at the end of
the inline assembly block.

The assembler directives are hidden behind macros because later
versions of GCC won't need this workaround: they can be told to use
the unified syntax with -masm-syntax-unified.

Change-Id: Ic09e729e5bbb6fd44d08dac348daf6f55c75d7d8
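To make the failure mode concrete, the sketch below shows the pattern the
patch adopts. It is a minimal, hypothetical example (the function and its
body are illustrative, not from the patch); only the two macro names come
from the change to firmware/export/config.h:

    /* Minimal sketch of the workaround (hypothetical example, not part
     * of the patch). Without the END macro, ".syntax unified" would
     * remain in effect for the divided-syntax code GCC 4.9 emits after
     * the asm statement, and the assembler would reject that code. */
    #define BEGIN_ARM_ASM_SYNTAX_UNIFIED ".syntax unified\n"
    #define END_ARM_ASM_SYNTAX_UNIFIED   ".syntax divided\n"

    static inline int first_byte_or_next(const unsigned char *p)
    {
        int x;
        asm volatile (
            BEGIN_ARM_ASM_SYNTAX_UNIFIED    /* gas -> unified syntax    */
            "ldrb   %[x], [%[p], #0]    \n"
            "cmp    %[x], #0            \n"
            "ldrbeq %[x], [%[p], #1]    \n" /* unified spelling; divided
                                             * syntax writes "ldreqb"   */
            END_ARM_ASM_SYNTAX_UNIFIED      /* back to divided syntax so
                                             * GCC's own output after
                                             * this asm still assembles */
            : [x] "=&r"(x)
            : [p] "r"(p)
            : "cc");
        return x;
    }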
-rw-r--r--  apps/plugins/mpegplayer/mpeg_misc.h                       |  6
-rw-r--r--  firmware/asm/arm/corelock.c                               |  3
-rw-r--r--  firmware/asm/arm/thread.c                                 |  3
-rw-r--r--  firmware/export/config.h                                  | 10
-rw-r--r--  firmware/target/arm/pcm-telechips.c                       |  3
-rw-r--r--  firmware/target/arm/pp/pcm-pp.c                           |  3
-rw-r--r--  lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h   |  9
7 files changed, 28 insertions(+), 9 deletions(-)
diff --git a/apps/plugins/mpegplayer/mpeg_misc.h b/apps/plugins/mpegplayer/mpeg_misc.h
index 68ee8cac3c..233b815493 100644
--- a/apps/plugins/mpegplayer/mpeg_misc.h
+++ b/apps/plugins/mpegplayer/mpeg_misc.h
@@ -53,13 +53,14 @@ enum state_enum
 #define CMP_3_CONST(_a, _b) \
     ({ int _x; \
        asm volatile ( \
-           ".syntax unified           \n" \
+           BEGIN_ARM_ASM_SYNTAX_UNIFIED \
            "ldrb   %[x], [%[a], #0]   \n" \
            "eors   %[x], %[x], %[b0]  \n" \
            "ldrbeq %[x], [%[a], #1]   \n" \
            "eorseq %[x], %[x], %[b1]  \n" \
            "ldrbeq %[x], [%[a], #2]   \n" \
            "eorseq %[x], %[x], %[b2]  \n" \
+           END_ARM_ASM_SYNTAX_UNIFIED \
            : [x]"=&r"(_x) \
            : [a]"r"(_a), \
              [b0]"i"(((_b) >> 24) & 0xff), \
@@ -71,7 +72,7 @@ enum state_enum
 #define CMP_4_CONST(_a, _b) \
     ({ int _x; \
        asm volatile ( \
-           ".syntax unified           \n" \
+           BEGIN_ARM_ASM_SYNTAX_UNIFIED \
            "ldrb   %[x], [%[a], #0]   \n" \
            "eors   %[x], %[x], %[b0]  \n" \
            "ldrbeq %[x], [%[a], #1]   \n" \
@@ -80,6 +81,7 @@ enum state_enum
            "eorseq %[x], %[x], %[b2]  \n" \
            "ldrbeq %[x], [%[a], #3]   \n" \
            "eorseq %[x], %[x], %[b3]  \n" \
+           END_ARM_ASM_SYNTAX_UNIFIED \
            : [x]"=&r"(_x) \
            : [a]"r"(_a), \
              [b0]"i"(((_b) >> 24) & 0xff), \
diff --git a/firmware/asm/arm/corelock.c b/firmware/asm/arm/corelock.c
index 07ec77a60e..a60299436f 100644
--- a/firmware/asm/arm/corelock.c
+++ b/firmware/asm/arm/corelock.c
@@ -61,7 +61,7 @@ int corelock_try_lock(struct corelock *cl)
 
     /* Relies on the fact that core IDs are complementary bitmasks (0x55,0xaa) */
     asm volatile (
-        ".syntax unified           \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "mov    r1, %[id]          \n" /* r1 = PROCESSOR_ID */
         "ldrb   r1, [r1]           \n"
         "strb   r1, [%[cl], r1, lsr #7] \n" /* cl->myl[core] = core */
@@ -74,6 +74,7 @@ int corelock_try_lock(struct corelock *cl)
         "ands   %[rv], %[rv], r1   \n"
         "strbeq %[rv], [%[cl], r1, lsr #7] \n" /* if not, cl->myl[core] = 0 */
     "1:                            \n" /* Done */
+        END_ARM_ASM_SYNTAX_UNIFIED
         : [rv] "=r"(rval)
         : [id] "i" (&PROCESSOR_ID), [cl] "r" (cl)
         : "r1","r2","cc"
diff --git a/firmware/asm/arm/thread.c b/firmware/asm/arm/thread.c
index bd9f950616..30df56e0d9 100644
--- a/firmware/asm/arm/thread.c
+++ b/firmware/asm/arm/thread.c
@@ -73,7 +73,7 @@ static inline void store_context(void* addr)
 static inline void load_context(const void* addr)
 {
     asm volatile(
-        ".syntax unified      \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "ldr    r0, [%0, #40] \n" /* Load start pointer */
         "cmp    r0, #0        \n" /* Check for NULL */
 
@@ -86,6 +86,7 @@ static inline void load_context(const void* addr)
 #endif
 
         "ldmia  %0, { r4-r11, sp, lr } \n" /* Load regs r4 to r14 from context */
+        END_ARM_ASM_SYNTAX_UNIFIED
         : : "r" (addr) : "r0" /* only! */
     );
 }
diff --git a/firmware/export/config.h b/firmware/export/config.h
index 19ee03b4c7..49cd6d610c 100644
--- a/firmware/export/config.h
+++ b/firmware/export/config.h
@@ -1005,6 +1005,16 @@ Lyre prototype 1 */
 #define ROCKBOX_STRICT_ALIGN 1
 #endif
 
+/*
+ * These macros are for switching on unified syntax in inline assembly.
+ * Older versions of GCC emit assembly in divided syntax with no option
+ * to enable unified syntax.
+ *
+ * FIXME: This needs to be looked at after the toolchain bump
+ */
+#define BEGIN_ARM_ASM_SYNTAX_UNIFIED ".syntax unified\n"
+#define END_ARM_ASM_SYNTAX_UNIFIED ".syntax divided\n"
+
 #if defined(CPU_ARM) && defined(__ASSEMBLER__)
 .syntax unified
 /* ARMv4T doesn't switch the T bit when popping pc directly, we must use BX */
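The FIXME in the new comment block flags the intended follow-up: as the
commit message notes, newer GCC can be told to emit unified syntax itself
via -masm-syntax-unified, at which point the directives become
unnecessary. A hedged sketch of what the post-toolchain-bump definitions
might look like (the version cutoff is an assumption for illustration,
not part of this commit):

    /* Hypothetical follow-up (not in this patch): with a toolchain that
     * passes -masm-syntax-unified, GCC's own output is already unified,
     * so the macros can collapse to no-ops. The GCC version guard here
     * is an assumption chosen for illustration only. */
    #if defined(__GNUC__) && (__GNUC__ >= 8)
    #define BEGIN_ARM_ASM_SYNTAX_UNIFIED
    #define END_ARM_ASM_SYNTAX_UNIFIED
    #else
    #define BEGIN_ARM_ASM_SYNTAX_UNIFIED ".syntax unified\n"
    #define END_ARM_ASM_SYNTAX_UNIFIED   ".syntax divided\n"
    #endif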
diff --git a/firmware/target/arm/pcm-telechips.c b/firmware/target/arm/pcm-telechips.c
index 747765d8fb..45044bc664 100644
--- a/firmware/target/arm/pcm-telechips.c
+++ b/firmware/target/arm/pcm-telechips.c
@@ -218,7 +218,7 @@ void fiq_handler(void)
      * r0-r3 and r12 is a working register.
      */
     asm volatile (
-        ".syntax unified           \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "sub    lr, lr, #4         \n"
         "stmfd  sp!, { r0-r3, lr } \n" /* stack scratch regs and lr */
         "mov    r14, #0            \n" /* Was the callback called? */
@@ -274,6 +274,7 @@ void fiq_handler(void)
         "bhi    .fill_fifo         \n" /* not stop and enough? refill */
         "ldmfd  sp!, { r0-r3, pc }^ \n" /* exit */
         ".ltorg                    \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : : "i"(PCM_DMAST_OK), "i"(PCM_DMAST_STARTED)
     );
 }
diff --git a/firmware/target/arm/pp/pcm-pp.c b/firmware/target/arm/pp/pcm-pp.c
index fd798f0506..94b1c5ae10 100644
--- a/firmware/target/arm/pp/pcm-pp.c
+++ b/firmware/target/arm/pp/pcm-pp.c
@@ -327,7 +327,7 @@ void fiq_playback(void)
      */
     asm volatile (
         /* No external calls */
-        ".syntax unified            \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
         "sub    lr, lr, #4          \n" /* Prepare return address */
         "stmfd  sp!, { lr }         \n" /* stack lr so we can use it */
         "ldr    r12, =0xcf001040    \n" /* Some magic from iPodLinux ... */
@@ -395,6 +395,7 @@ void fiq_playback(void)
         "bne    3b                  \n" /* no? -> go return */
         "b      2b                  \n" /* yes -> get even more */
         ".ltorg                     \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* These must only be integers! No regs */
         : "i"(PCM_DMAST_OK), "i"(PCM_DMAST_STARTED));
 }
diff --git a/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h b/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
index 1da090efbb..ad5eed60fb 100644
--- a/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
+++ b/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
@@ -45,7 +45,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
     asm volatile (
-        ".syntax unified             \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
 #if ORDER > 32
         "mov     %[res], #0          \n"
 #endif
@@ -186,6 +186,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
         "99:                         \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* outputs */
 #if ORDER > 32
           [cnt]"+r"(cnt),
@@ -215,7 +216,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
     asm volatile (
-        ".syntax unified             \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
 #if ORDER > 32
         "mov     %[res], #0          \n"
 #endif
@@ -356,6 +357,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
 #endif
 
         "99:                         \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* outputs */
 #if ORDER > 32
           [cnt]"+r"(cnt),
@@ -383,7 +385,7 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
 #endif
 
     asm volatile (
-        ".syntax unified             \n"
+        BEGIN_ARM_ASM_SYNTAX_UNIFIED
 #if ORDER > 32
         "mov     %[res], #0          \n"
 #endif
@@ -477,6 +479,7 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
 #endif
 
         "99:                         \n"
+        END_ARM_ASM_SYNTAX_UNIFIED
         : /* outputs */
 #if ORDER > 32
           [cnt]"+r"(cnt),