diff options
Diffstat (limited to 'lib/rbcodec/codecs/demac')
-rw-r--r--   lib/rbcodec/codecs/demac/libdemac/udiv32_arm.S          |  2 +-
-rw-r--r--   lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h | 23 +++++++++++-----------
2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/lib/rbcodec/codecs/demac/libdemac/udiv32_arm.S b/lib/rbcodec/codecs/demac/libdemac/udiv32_arm.S
index 7b851659bd..1d19160a91 100644
--- a/lib/rbcodec/codecs/demac/libdemac/udiv32_arm.S
+++ b/lib/rbcodec/codecs/demac/libdemac/udiv32_arm.S
@@ -225,7 +225,7 @@ udiv32_arm:
225 | mov \inv, \divisor, lsl \bits | 225 | mov \inv, \divisor, lsl \bits |
226 | add \neg, pc, \inv, lsr #25 | 226 | add \neg, pc, \inv, lsr #25 |
227 | cmp \inv, #1<<31 | 227 | cmp \inv, #1<<31 |
228 | ldrhib \inv, [\neg, #.L_udiv_est_table-.-64] | 228 | ldrbhi \inv, [\neg, #.L_udiv_est_table-.-64] |
229 | bls 20f | 229 | bls 20f |
230 | subs \bits, \bits, #7 | 230 | subs \bits, \bits, #7 |
231 | rsb \neg, \divisor, #0 | 231 | rsb \neg, \divisor, #0 |
diff --git a/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h b/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
index 8d27331b62..1da090efbb 100644
--- a/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
+++ b/lib/rbcodec/codecs/demac/libdemac/vector_math16_armv6.h
@@ -45,6 +45,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | asm volatile ( | 47 | asm volatile ( |
48 | ".syntax unified \n" | ||
48 | #if ORDER > 32 | 49 | #if ORDER > 32 |
49 | "mov %[res], #0 \n" | 50 | "mov %[res], #0 \n" |
50 | #endif | 51 | #endif |
@@ -117,7 +118,7 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
117 | "smladx %[res], r1, r2, %[res] \n" | 118 | "smladx %[res], r1, r2, %[res] \n" |
118 | #if ORDER > 32 | 119 | #if ORDER > 32 |
119 | "subs %[cnt], %[cnt], #1 \n" | 120 | "subs %[cnt], %[cnt], #1 \n" |
120 | "ldmneia %[f2]!, {r2,r4} \n" | 121 | "ldmiane %[f2]!, {r2,r4} \n" |
121 | "sadd16 r0, r0, r7 \n" | 122 | "sadd16 r0, r0, r7 \n" |
122 | "sadd16 r1, r1, r5 \n" | 123 | "sadd16 r1, r1, r5 \n" |
123 | "strd r0, [%[v1]], #8 \n" | 124 | "strd r0, [%[v1]], #8 \n" |
@@ -172,8 +173,8 @@ static inline int32_t vector_sp_add(int16_t* v1, int16_t* f2, int16_t* s2)
172 | "smlad %[res], r3, r5, %[res] \n" | 173 | "smlad %[res], r3, r5, %[res] \n" |
173 | #if ORDER > 32 | 174 | #if ORDER > 32 |
174 | "subs %[cnt], %[cnt], #1 \n" | 175 | "subs %[cnt], %[cnt], #1 \n" |
175 | "ldrned r4, [%[f2]], #8 \n" | 176 | "ldrdne r4, [%[f2]], #8 \n" |
176 | "ldrned r0, [%[v1], #8] \n" | 177 | "ldrdne r0, [%[v1], #8] \n" |
177 | "sadd16 r2, r2, r6 \n" | 178 | "sadd16 r2, r2, r6 \n" |
178 | "sadd16 r3, r3, r7 \n" | 179 | "sadd16 r3, r3, r7 \n" |
179 | "strd r2, [%[v1]], #8 \n" | 180 | "strd r2, [%[v1]], #8 \n" |
@@ -214,6 +215,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
214 | #endif | 215 | #endif |
215 | 216 | ||
216 | asm volatile ( | 217 | asm volatile ( |
218 | ".syntax unified \n" | ||
217 | #if ORDER > 32 | 219 | #if ORDER > 32 |
218 | "mov %[res], #0 \n" | 220 | "mov %[res], #0 \n" |
219 | #endif | 221 | #endif |
@@ -286,7 +288,7 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
286 | "smladx %[res], r1, r2, %[res] \n" | 288 | "smladx %[res], r1, r2, %[res] \n" |
287 | #if ORDER > 32 | 289 | #if ORDER > 32 |
288 | "subs %[cnt], %[cnt], #1 \n" | 290 | "subs %[cnt], %[cnt], #1 \n" |
289 | "ldmneia %[f2]!, {r2,r4} \n" | 291 | "ldmiane %[f2]!, {r2,r4} \n" |
290 | "ssub16 r0, r0, r7 \n" | 292 | "ssub16 r0, r0, r7 \n" |
291 | "ssub16 r1, r1, r5 \n" | 293 | "ssub16 r1, r1, r5 \n" |
292 | "strd r0, [%[v1]], #8 \n" | 294 | "strd r0, [%[v1]], #8 \n" |
@@ -341,8 +343,8 @@ static inline int32_t vector_sp_sub(int16_t* v1, int16_t* f2, int16_t* s2)
341 | "smlad %[res], r3, r5, %[res] \n" | 343 | "smlad %[res], r3, r5, %[res] \n" |
342 | #if ORDER > 32 | 344 | #if ORDER > 32 |
343 | "subs %[cnt], %[cnt], #1 \n" | 345 | "subs %[cnt], %[cnt], #1 \n" |
344 | "ldrned r4, [%[f2]], #8 \n" | 346 | "ldrdne r4, [%[f2]], #8 \n" |
345 | "ldrned r0, [%[v1], #8] \n" | 347 | "ldrdne r0, [%[v1], #8] \n" |
346 | "ssub16 r2, r2, r6 \n" | 348 | "ssub16 r2, r2, r6 \n" |
347 | "ssub16 r3, r3, r7 \n" | 349 | "ssub16 r3, r3, r7 \n" |
348 | "strd r2, [%[v1]], #8 \n" | 350 | "strd r2, [%[v1]], #8 \n" |
@@ -381,6 +383,7 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
381 | #endif | 383 | #endif |
382 | 384 | ||
383 | asm volatile ( | 385 | asm volatile ( |
386 | ".syntax unified \n" | ||
384 | #if ORDER > 32 | 387 | #if ORDER > 32 |
385 | "mov %[res], #0 \n" | 388 | "mov %[res], #0 \n" |
386 | #endif | 389 | #endif |
@@ -421,10 +424,10 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
421 | "pkhtb r1, r7, r4 \n" | 424 | "pkhtb r1, r7, r4 \n" |
422 | #if ORDER > 32 | 425 | #if ORDER > 32 |
423 | "subs %[cnt], %[cnt], #1 \n" | 426 | "subs %[cnt], %[cnt], #1 \n" |
424 | "ldrned r6, [%[v2]], #8 \n" | 427 | "ldrdne r6, [%[v2]], #8 \n" |
425 | "smladx %[res], r2, r1, %[res] \n" | 428 | "smladx %[res], r2, r1, %[res] \n" |
426 | "pkhtb r2, r4, r5 \n" | 429 | "pkhtb r2, r4, r5 \n" |
427 | "ldrned r0, [%[v1]], #8 \n" | 430 | "ldrdne r0, [%[v1]], #8 \n" |
428 | "smladx %[res], r3, r2, %[res] \n" | 431 | "smladx %[res], r3, r2, %[res] \n" |
429 | "bne 1b \n" | 432 | "bne 1b \n" |
430 | #else | 433 | #else |
@@ -461,9 +464,9 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2)
461 | "ldrd r4, [%[v2]], #8 \n" | 464 | "ldrd r4, [%[v2]], #8 \n" |
462 | "smlad %[res], r1, r6, %[res] \n" | 465 | "smlad %[res], r1, r6, %[res] \n" |
463 | "subs %[cnt], %[cnt], #1 \n" | 466 | "subs %[cnt], %[cnt], #1 \n" |
464 | "ldrned r0, [%[v1]], #8 \n" | 467 | "ldrdne r0, [%[v1]], #8 \n" |
465 | "smlad %[res], r2, r7, %[res] \n" | 468 | "smlad %[res], r2, r7, %[res] \n" |
466 | "ldrned r6, [%[v2]], #8 \n" | 469 | "ldrdne r6, [%[v2]], #8 \n" |
467 | "smlad %[res], r3, r4, %[res] \n" | 470 | "smlad %[res], r3, r4, %[res] \n" |
468 | "bne 1b \n" | 471 | "bne 1b \n" |
469 | #else | 472 | #else |