From e9dcb0f1457f83b33bd9778e9df4caff44ead6c1 Mon Sep 17 00:00:00 2001
From: Dave Hooper
Date: Sat, 29 Aug 2009 12:07:32 +0000
Subject: Fix condition code clobbers (and one TAB) for inline arm code in lib and libtremor

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@22526 a1c6a512-1295-4272-9138-f99709370657
---
 apps/codecs/lib/asm_arm.h       | 30 +++++++++++-------------------
 apps/codecs/libtremor/asm_arm.h | 28 ++++++++++------------------
 2 files changed, 21 insertions(+), 37 deletions(-)

(limited to 'apps/codecs')

diff --git a/apps/codecs/lib/asm_arm.h b/apps/codecs/lib/asm_arm.h
index 0db868dcb3..89606184da 100644
--- a/apps/codecs/lib/asm_arm.h
+++ b/apps/codecs/lib/asm_arm.h
@@ -23,8 +23,7 @@ static inline int32_t MULT32(int32_t x, int32_t y) {
   int lo,hi;
   asm volatile("smull\t%0, %1, %2, %3"
                : "=&r"(lo),"=&r"(hi)
-               : "%r"(x),"r"(y)
-               : "cc");
+               : "%r"(x),"r"(y) );
   return(hi);
 }
 
@@ -39,23 +38,20 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
                "adc %1, %0, %1, lsl #17\n\t"
                : "=&r"(lo),"=&r"(hi)
                : "%r"(x),"r"(y)
-               : "cc");
+               : "cc" );
   return(hi);
 }
 
-#define MB() asm volatile ("" : : : "memory")
-
 #define XPROD32(a, b, t, v, x, y) \
 { \
   long l; \
   asm( "smull %0, %1, %4, %6\n\t" \
-       "smlal %0, %1, %5, %7\n\t" \
        "rsb %3, %4, #0\n\t" \
+       "smlal %0, %1, %5, %7\n\t" \
        "smull %0, %2, %5, %6\n\t" \
        "smlal %0, %2, %3, %7" \
        : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
-       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) \
-       : "cc" ); \
+       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
 }
 
 static inline void XPROD31(int32_t a, int32_t b,
@@ -64,15 +60,13 @@ static inline void XPROD31(int32_t a, int32_t b,
 {
   int x1, y1, l;
   asm( "smull %0, %1, %4, %6\n\t"
-       "smlal %0, %1, %5, %7\n\t"
        "rsb %3, %4, #0\n\t"
+       "smlal %0, %1, %5, %7\n\t"
        "smull %0, %2, %5, %6\n\t"
        "smlal %0, %2, %3, %7"
        : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
-       : "3" (a), "r" (b), "r" (t), "r" (v)
-       : "cc" );
+       : "3" (a), "r" (b), "r" (t), "r" (v) );
   *x = x1 << 1;
-  MB();
   *y = y1 << 1;
 }
 
@@ -81,16 +75,14 @@ static inline void XNPROD31(int32_t a, int32_t b,
                             int32_t *x, int32_t *y)
 {
   int x1, y1, l;
-  asm( "rsb %2, %4, #0\n\t"
-       "smull %0, %1, %3, %5\n\t"
+  asm( "smull %0, %1, %3, %5\n\t"
+       "rsb %2, %4, #0\n\t"
        "smlal %0, %1, %2, %6\n\t"
        "smull %0, %2, %4, %5\n\t"
        "smlal %0, %2, %3, %6"
        : "=&r" (l), "=&r" (x1), "=&r" (y1)
-       : "r" (a), "r" (b), "r" (t), "r" (v)
-       : "cc" );
+       : "r" (a), "r" (b), "r" (t), "r" (v) );
   *x = x1 << 1;
-  MB();
   *y = y1 << 1;
 }
 
@@ -158,7 +150,7 @@ void vect_mult_fw(int32_t *data, int32_t *window, int n)
             : [d] "+r" (data), [w] "+r" (window)
             :
             : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
-              "memory", "cc");
+              "memory" );
         n -= 4;
     }
     while(n>0) {
@@ -187,7 +179,7 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
             : [d] "+r" (data), [w] "+r" (window)
            :
             : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
-              "memory", "cc");
+              "memory" );
         n -= 4;
     }
     while(n>0) {
diff --git a/apps/codecs/libtremor/asm_arm.h b/apps/codecs/libtremor/asm_arm.h
index 5a8109841f..577dc91566 100644
--- a/apps/codecs/libtremor/asm_arm.h
+++ b/apps/codecs/libtremor/asm_arm.h
@@ -24,8 +24,7 @@ static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
   int lo,hi;
   asm volatile("smull\t%0, %1, %2, %3"
                : "=&r"(lo),"=&r"(hi)
-               : "%r"(x),"r"(y)
-               : "cc");
+               : "%r"(x),"r"(y) );
   return(hi);
 }
 
@@ -44,19 +43,16 @@ static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
   return(hi);
 }
 
-#define MB() asm volatile ("" : : : "memory")
-
 #define XPROD32(a, b, t, v, x, y) \
 { \
   long l; \
   asm( "smull %0, %1, %4, %6\n\t" \
-       "smlal %0, %1, %5, %7\n\t" \
        "rsb %3, %4, #0\n\t" \
+       "smlal %0, %1, %5, %7\n\t" \
        "smull %0, %2, %5, %6\n\t" \
        "smlal %0, %2, %3, %7" \
        : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
-       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) \
-       : "cc" ); \
+       : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
 }
 
 static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
@@ -65,15 +61,13 @@ static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
 {
   int x1, y1, l;
   asm( "smull %0, %1, %4, %6\n\t"
-       "smlal %0, %1, %5, %7\n\t"
       "rsb %3, %4, #0\n\t"
+       "smlal %0, %1, %5, %7\n\t"
        "smull %0, %2, %5, %6\n\t"
        "smlal %0, %2, %3, %7"
        : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
-       : "3" (a), "r" (b), "r" (t), "r" (v)
-       : "cc" );
+       : "3" (a), "r" (b), "r" (t), "r" (v) );
   *x = x1 << 1;
-  MB();
   *y = y1 << 1;
 }
 
@@ -82,16 +76,14 @@ static inline void XNPROD31(ogg_int32_t a, ogg_int32_t b,
                             ogg_int32_t *x, ogg_int32_t *y)
 {
   int x1, y1, l;
-  asm( "rsb %2, %4, #0\n\t"
-       "smull %0, %1, %3, %5\n\t"
+  asm( "smull %0, %1, %3, %5\n\t"
+       "rsb %2, %4, #0\n\t"
        "smlal %0, %1, %2, %6\n\t"
        "smull %0, %2, %4, %5\n\t"
        "smlal %0, %2, %3, %6"
        : "=&r" (l), "=&r" (x1), "=&r" (y1)
-       : "r" (a), "r" (b), "r" (t), "r" (v)
-       : "cc" );
+       : "r" (a), "r" (b), "r" (t), "r" (v) );
   *x = x1 << 1;
-  MB();
   *y = y1 << 1;
 }
 
@@ -184,7 +176,7 @@ void vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
                   : [d] "+r" (data), [w] "+r" (window)
                   :
                   : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8",
-                    "memory", "cc");
+                    "memory" );
     n -= 4;
   } while (n);
 }
@@ -205,7 +197,7 @@ void vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
                   : [d] "+r" (data), [w] "+r" (window)
                   :
                   : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8",
-                    "memory", "cc");
+                    "memory" );
    n -= 4;
  } while (n);
 }
--
cgit v1.2.3