summary | refs | log | tree | commit | diff
path: root/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
diff options
context:
space:
mode:
author: Aidan MacDonald <amachronic@protonmail.com> 2023-03-23 18:16:15 +0000
committer: Aidan MacDonald <amachronic@protonmail.com> 2023-03-23 18:16:33 +0000
commit: 58b2e457824dc93916233627b98614409e5f258d (patch)
tree: 67c485a7881745574d66ae10889c2da331ba68c0 /lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
parent: 86429dbf1eca8ee0e08176997f508647c3abf6bd (diff)
download: rockbox-58b2e457824dc93916233627b98614409e5f258d.tar.gz
rockbox-58b2e457824dc93916233627b98614409e5f258d.zip
Fix unified syntax in ARM inline assembly
GCC 4.9 always emits assembly with divided syntax. Setting unified syntax in inline assembly causes the assembler to complain about GCC's generated code, because the directive extends past the scope of the inline asm. Fix this by setting divided mode at the end of the inline assembly block. The assembler directives are hidden behind macros because later versions of GCC won't need this workaround: they can be told to use the unified syntax with -masm-syntax-unified. Change-Id: Ic09e729e5bbb6fd44d08dac348daf6f55c75d7d8
Diffstat (limited to 'lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h')
-rw-r--r-- lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h | 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h b/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
index 1da090efbb..ad5eed60fb 100644
--- a/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
+++ b/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
@@ -45,7 +45,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
45#endif 45#endif
46 46
47 asm volatile ( 47 asm volatile (
48 ".syntax unified \n" 48 BEGIN_ARM_ASM_SYNTAX_UNIFIED
49#if ORDER > 32 49#if ORDER > 32
50 "mov %[res], #0 \n" 50 "mov %[res], #0 \n"
51#endif 51#endif
@@ -186,6 +186,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
186#endif 186#endif
187 187
188 "99: \n" 188 "99: \n"
189 END_ARM_ASM_SYNTAX_UNIFIED
189 : /* outputs */ 190 : /* outputs */
190#if ORDER > 32 191#if ORDER > 32
191 [cnt]"+r"(cnt), 192 [cnt]"+r"(cnt),
@@ -215,7 +216,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
215#endif 216#endif
216 217
217 asm volatile ( 218 asm volatile (
218 ".syntax unified \n" 219 BEGIN_ARM_ASM_SYNTAX_UNIFIED
219#if ORDER > 32 220#if ORDER > 32
220 "mov %[res], #0 \n" 221 "mov %[res], #0 \n"
221#endif 222#endif
@@ -356,6 +357,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
356#endif 357#endif
357 358
358 "99: \n" 359 "99: \n"
360 END_ARM_ASM_SYNTAX_UNIFIED
359 : /* outputs */ 361 : /* outputs */
360#if ORDER > 32 362#if ORDER > 32
361 [cnt]"+r"(cnt), 363 [cnt]"+r"(cnt),
@@ -383,7 +385,7 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
383#endif 385#endif
384 386
385 asm volatile ( 387 asm volatile (
386 ".syntax unified \n" 388 BEGIN_ARM_ASM_SYNTAX_UNIFIED
387#if ORDER > 32 389#if ORDER > 32
388 "mov %[res], #0 \n" 390 "mov %[res], #0 \n"
389#endif 391#endif
@@ -477,6 +479,7 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
477#endif 479#endif
478 480
479 "99: \n" 481 "99: \n"
482 END_ARM_ASM_SYNTAX_UNIFIED
480 : /* outputs */ 483 : /* outputs */
481#if ORDER > 32 484#if ORDER > 32
482 [cnt]"+r"(cnt), 485 [cnt]"+r"(cnt),