From 35f23267bfc97d070284a03e4adaa2c6b7bb6852 Mon Sep 17 00:00:00 2001 From: Jens Arnold Date: Thu, 25 Oct 2007 18:58:16 +0000 Subject: Further optimised the filter vector math assembly for coldfire, and added assembly filter vector math for ARM. Both make use of the fact that the first argument of the vector functions is longword aligned. * The ARM version is tailored for ARM7TDMI, and would slow down arm9 or higher. Introduced a new CPU_ macro for ARM7TDMI. Speedup for coldfire: -c3000 104%->109%, -c4000 43%->46%, -c5000 1.7%->2.0%. Speedup for PP502x: -c2000 66%->75%, -c3000 37%->48%, -c4000 11%->18%, -c5000 2.5%->3.7% git-svn-id: svn://svn.rockbox.org/rockbox/trunk@15302 a1c6a512-1295-4272-9138-f99709370657 --- apps/codecs/demac/libdemac/vector_math16_cf.h | 230 +++++++++++++++++++++----- 1 file changed, 190 insertions(+), 40 deletions(-) (limited to 'apps/codecs/demac/libdemac/vector_math16_cf.h') diff --git a/apps/codecs/demac/libdemac/vector_math16_cf.h b/apps/codecs/demac/libdemac/vector_math16_cf.h index 937462c293..0c3aaca223 100644 --- a/apps/codecs/demac/libdemac/vector_math16_cf.h +++ b/apps/codecs/demac/libdemac/vector_math16_cf.h @@ -24,20 +24,71 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110, USA */ +/* This version fetches data as 32 bit words, and *recommends* v1 to be + * 32 bit aligned, otherwise performance will suffer. */ static inline void vector_add(int16_t* v1, int16_t* v2) { -#define ADDHALFREGS(s1, sum) /* 's1' can be an A or D reg */ \ - "move.l " #s1 ", %%d4 \n" /* 'sum' must be a D reg */ \ - "add.l " #sum ", " #s1 "\n" /* 's1' and %%d4 are clobbered! */ \ - "clr.w %%d4 \n" \ - "add.l %%d4 , " #sum "\n" \ +#if ORDER > 16 + int cnt = ORDER>>4; +#endif + +#define ADDHALFREGS(s1, sum) /* Add register halves straight. */ \ + "move.l " #s1 ", %%d4 \n" /* 's1' can be an A or D reg. */ \ + "add.l " #sum ", " #s1 "\n" /* 'sum' must be a D reg. */ \ + "clr.w %%d4 \n" /* 's1' and %%d4 are clobbered! */ \ + "add.l %%d4 , " #sum "\n" \ + "move.w " #s1 ", " #sum "\n" + +#define ADDHALFXREGS(s1, s2, sum) /* Add register halves across. */ \ + "clr.w " #sum " \n" /* Needs 'sum' pre-swapped, swaps */ \ + "add.l " #s1 ", " #sum "\n" /* 's2', and clobbers 's1'. */ \ + "swap " #s2 " \n" /* 's1' can be an A or D reg. */ \ + "add.l " #s2 ", " #s1 "\n" /* 'sum' and 's2' must be D regs. 
*/ \ "move.w " #s1 ", " #sum "\n" asm volatile ( -#if ORDER > 16 - "moveq.l %[cnt], %%d5 \n" + "move.l %[v2], %%d0 \n" + "and.l #2, %%d0 \n" + "jeq 20f \n" + + "10: \n" + "move.w (%[v2])+, %%d0 \n" + "swap %%d0 \n" "1: \n" + "movem.l (%[v1]), %%a0-%%a3 \n" + "movem.l (%[v2]), %%d1-%%d4 \n" + ADDHALFXREGS(%%a0, %%d1, %%d0) + "move.l %%d0, (%[v1])+ \n" + ADDHALFXREGS(%%a1, %%d2, %%d1) + "move.l %%d1, (%[v1])+ \n" + ADDHALFXREGS(%%a2, %%d3, %%d2) + "move.l %%d2, (%[v1])+ \n" + ADDHALFXREGS(%%a3, %%d4, %%d3) + "move.l %%d3, (%[v1])+ \n" + "lea.l (16, %[v2]), %[v2] \n" + "move.l %%d4, %%d0 \n" + + "movem.l (%[v1]), %%a0-%%a3 \n" + "movem.l (%[v2]), %%d1-%%d4 \n" + ADDHALFXREGS(%%a0, %%d1, %%d0) + "move.l %%d0, (%[v1])+ \n" + ADDHALFXREGS(%%a1, %%d2, %%d1) + "move.l %%d1, (%[v1])+ \n" + ADDHALFXREGS(%%a2, %%d3, %%d2) + "move.l %%d2, (%[v1])+ \n" + ADDHALFXREGS(%%a3, %%d4, %%d3) + "move.l %%d3, (%[v1])+ \n" +#if ORDER > 16 + "lea.l (16, %[v2]), %[v2] \n" + "move.l %%d4, %%d0 \n" + + "subq.l #1, %[cnt] \n" + "jne 1b \n" #endif + "jra 99f \n" + + "20: \n" + "1: \n" "movem.l (%[v2]), %%a0-%%a3 \n" "movem.l (%[v1]), %%d0-%%d3 \n" ADDHALFREGS(%%a0, %%d0) @@ -48,7 +99,6 @@ static inline void vector_add(int16_t* v1, int16_t* v2) "move.l %%d2, (%[v1])+ \n" ADDHALFREGS(%%a3, %%d3) "move.l %%d3, (%[v1])+ \n" - "lea.l (16, %[v2]), %[v2] \n" "movem.l (%[v2]), %%a0-%%a3 \n" @@ -64,34 +114,89 @@ static inline void vector_add(int16_t* v1, int16_t* v2) #if ORDER > 16 "lea.l (16, %[v2]), %[v2] \n" - "subq.l #1, %%d5 \n" - "bne.w 1b \n" + "subq.l #1, %[cnt] \n" + "jne 1b \n" #endif + "99: \n" : /* outputs */ - [v1]"+a"(v1), - [v2]"+a"(v2) +#if ORDER > 16 + [cnt]"+d"(cnt), +#endif + [v1] "+a"(v1), + [v2] "+a"(v2) : /* inputs */ - [cnt]"n"(ORDER>>4) : /* clobbers */ - "d0", "d1", "d2", "d3", "d4", "d5", + "d0", "d1", "d2", "d3", "d4", "a0", "a1", "a2", "a3", "memory" ); } +/* This version fetches data as 32 bit words, and *recommends* v1 to be + * 32 bit aligned, otherwise performance will suffer. */ static inline void vector_sub(int16_t* v1, int16_t* v2) { -#define SUBHALFREGS(min, sub, dif) /* 'min' can be an A or D reg */ \ - "move.l " #min ", " #dif "\n" /* 'sub' and 'dif' must be D regs */ \ - "sub.l " #sub ", " #min "\n" /* 'min' and 'sub' are clobbered! */ \ - "clr.w " #sub "\n" \ - "sub.l " #sub ", " #dif "\n" \ +#if ORDER > 16 + int cnt = ORDER>>4; +#endif + +#define SUBHALFREGS(min, sub, dif) /* Subtract register halves straight. */ \ + "move.l " #min ", " #dif "\n" /* 'min' can be an A or D reg */ \ + "sub.l " #sub ", " #min "\n" /* 'sub' and 'dif' must be D regs */ \ + "clr.w " #sub "\n" /* 'min' and 'sub' are clobbered! */ \ + "sub.l " #sub ", " #dif "\n" \ "move.w " #min ", " #dif "\n" + +#define SUBHALFXREGS(min, s2, s1d) /* Subtract register halves across. */ \ + "clr.w " #s1d "\n" /* Needs 's1d' pre-swapped, swaps */ \ + "sub.l " #s1d ", " #min "\n" /* 's2' and clobbers 'min'. */ \ + "move.l " #min ", " #s1d "\n" /* 'min' can be an A or D reg, */ \ + "swap " #s2 "\n" /* 's2' and 's1d' must be D regs. 
*/ \ + "sub.l " #s2 ", " #min "\n" \ + "move.w " #min ", " #s1d "\n" asm volatile ( -#if ORDER > 16 - "moveq.l %[cnt], %%d5 \n" + "move.l %[v2], %%d0 \n" + "and.l #2, %%d0 \n" + "jeq 20f \n" + + "10: \n" + "move.w (%[v2])+, %%d0 \n" + "swap %%d0 \n" "1: \n" + "movem.l (%[v2]), %%d1-%%d4 \n" + "movem.l (%[v1]), %%a0-%%a3 \n" + SUBHALFXREGS(%%a0, %%d1, %%d0) + "move.l %%d0, (%[v1])+ \n" + SUBHALFXREGS(%%a1, %%d2, %%d1) + "move.l %%d1, (%[v1])+ \n" + SUBHALFXREGS(%%a2, %%d3, %%d2) + "move.l %%d2, (%[v1])+ \n" + SUBHALFXREGS(%%a3, %%d4, %%d3) + "move.l %%d3, (%[v1])+ \n" + "lea.l (16, %[v2]), %[v2] \n" + "move.l %%d4, %%d0 \n" + + "movem.l (%[v2]), %%d1-%%d4 \n" + "movem.l (%[v1]), %%a0-%%a3 \n" + SUBHALFXREGS(%%a0, %%d1, %%d0) + "move.l %%d0, (%[v1])+ \n" + SUBHALFXREGS(%%a1, %%d2, %%d1) + "move.l %%d1, (%[v1])+ \n" + SUBHALFXREGS(%%a2, %%d3, %%d2) + "move.l %%d2, (%[v1])+ \n" + SUBHALFXREGS(%%a3, %%d4, %%d3) + "move.l %%d3, (%[v1])+ \n" +#if ORDER > 16 + "lea.l (16, %[v2]), %[v2] \n" + "move.l %%d4, %%d0 \n" + + "subq.l #1, %[cnt] \n" + "bne.w 1b \n" #endif + "jra 99f \n" + + "20: \n" + "1: \n" "movem.l (%[v2]), %%d1-%%d4 \n" "movem.l (%[v1]), %%a0-%%a3 \n" SUBHALFREGS(%%a0, %%d1, %%d0) @@ -118,37 +223,79 @@ static inline void vector_sub(int16_t* v1, int16_t* v2) #if ORDER > 16 "lea.l (16, %[v2]), %[v2] \n" - "subq.l #1, %%d5 \n" + "subq.l #1, %[cnt] \n" "bne.w 1b \n" #endif + + "99: \n" : /* outputs */ - [v1]"+a"(v1), - [v2]"+a"(v2) +#if ORDER > 16 + [cnt]"+d"(cnt), +#endif + [v1] "+a"(v1), + [v2] "+a"(v2) : /* inputs */ - [cnt]"n"(ORDER>>4) : /* clobbers */ - "d0", "d1", "d2", "d3", "d4", "d5", + "d0", "d1", "d2", "d3", "d4", "a0", "a1", "a2", "a3", "memory" ); } #define PREPARE_SCALARPRODUCT coldfire_set_macsr(0); /* signed integer mode */ -/* Needs EMAC in signed integer mode! */ +/* This version fetches data as 32 bit words, and *recommends* v1 to be + * 32 bit aligned, otherwise performance will suffer. It also needs EMAC + * in signed integer mode - call above macro before use. 
*/ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2) { int res = 0; +#if ORDER > 32 + int cnt = ORDER>>5; +#endif #define MACBLOCK4 \ "mac.w %%d0u, %%d1u, (%[v1])+, %%d2, %%acc0\n" \ - "mac.w %%d0l, %%d1l, (%[v2])+, %%d3, %%acc0\n" \ - "mac.w %%d2u, %%d3u, (%[v1])+, %%d0, %%acc0\n" \ - "mac.w %%d2l, %%d3l, (%[v2])+, %%d1, %%acc0\n" + "mac.w %%d0l, %%d1l, (%[v2])+, %%d1, %%acc0\n" \ + "mac.w %%d2u, %%d1u, (%[v1])+, %%d0, %%acc0\n" \ + "mac.w %%d2l, %%d1l, (%[v2])+, %%d1, %%acc0\n" + +#define MACBLOCK4_U2 \ + "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n" \ + "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n" \ + "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n" \ + "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n" asm volatile ( + "move.l %[v2], %%d0 \n" + "and.l #2, %%d0 \n" + "jeq 20f \n" + + "10: \n" + "move.l (%[v1])+, %%d0 \n" + "move.w (%[v2])+, %%d1 \n" + "1: \n" +#if ORDER > 16 + MACBLOCK4_U2 + MACBLOCK4_U2 + MACBLOCK4_U2 + MACBLOCK4_U2 +#endif + MACBLOCK4_U2 + MACBLOCK4_U2 + MACBLOCK4_U2 + "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n" + "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n" + "mac.w %%d0u, %%d1l, (%[v2])+, %%d1, %%acc0\n" #if ORDER > 32 - "moveq.l %[cnt], %[res] \n" + "mac.w %%d0l, %%d1u, (%[v1])+, %%d0, %%acc0\n" + "subq.l #1, %[res] \n" + "bne.w 1b \n" +#else + "mac.w %%d0l, %%d1u, %%acc0 \n" #endif + "jra 99f \n" + + "20: \n" "move.l (%[v1])+, %%d0 \n" "move.l (%[v2])+, %%d1 \n" "1: \n" @@ -162,26 +309,29 @@ static inline int32_t scalarproduct(int16_t* v1, int16_t* v2) MACBLOCK4 MACBLOCK4 "mac.w %%d0u, %%d1u, (%[v1])+, %%d2, %%acc0\n" - "mac.w %%d0l, %%d1l, (%[v2])+, %%d3, %%acc0\n" + "mac.w %%d0l, %%d1l, (%[v2])+, %%d1, %%acc0\n" #if ORDER > 32 - "mac.w %%d2u, %%d3u, (%[v1])+, %%d0, %%acc0\n" - "mac.w %%d2l, %%d3l, (%[v2])+, %%d1, %%acc0\n" - + "mac.w %%d2u, %%d1u, (%[v1])+, %%d0, %%acc0\n" + "mac.w %%d2l, %%d1l, (%[v2])+, %%d1, %%acc0\n" "subq.l #1, %[res] \n" "bne.w 1b \n" #else - "mac.w %%d2u, %%d3u, %%acc0 \n" - "mac.w %%d2l, %%d3l, %%acc0 \n" + "mac.w %%d2u, %%d1u, %%acc0 \n" + "mac.w %%d2l, %%d1l, %%acc0 \n" #endif + + "99: \n" "movclr.l %%acc0, %[res] \n" : /* outputs */ [v1]"+a"(v1), [v2]"+a"(v2), - [res]"=&d"(res) + [res]"=d"(res) : /* inputs */ - [cnt]"n"(ORDER>>5) +#if ORDER > 32 + [cnt]"[res]"(cnt) +#endif : /* clobbers */ - "d0", "d1", "d2", "d3" + "d0", "d1", "d2" ); return res; } -- cgit v1.2.3
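For readers who don't speak ColdFire assembly, the three routines touched by this patch implement the usual libdemac filter primitives: element-wise add, element-wise subtract, and a 16-bit dot product over ORDER samples. A minimal portable C sketch of those semantics (not of the optimised code itself) could look like the following; ORDER stands for the filter order defined elsewhere in libdemac, and the value used here is only a placeholder.

    #include <stdint.h>

    #ifndef ORDER
    #define ORDER 64   /* placeholder; the real filter order comes from libdemac */
    #endif

    /* v1[i] += v2[i]; the assembly wants v1 32-bit aligned and relies on
     * ORDER being a multiple of 16 for its unrolled loops. */
    static inline void vector_add_ref(int16_t* v1, int16_t* v2)
    {
        int i;
        for (i = 0; i < ORDER; i++)
            v1[i] += v2[i];
    }

    /* v1[i] -= v2[i], same constraints as above. */
    static inline void vector_sub_ref(int16_t* v1, int16_t* v2)
    {
        int i;
        for (i = 0; i < ORDER; i++)
            v1[i] -= v2[i];
    }

    /* Dot product accumulated in 32 bits; the EMAC version keeps the
     * running sum in %acc0 instead of a C variable. */
    static inline int32_t scalarproduct_ref(int16_t* v1, int16_t* v2)
    {
        int32_t res = 0;
        int i;
        for (i = 0; i < ORDER; i++)
            res += v1[i] * v2[i];
        return res;
    }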
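The central trick added by this commit is the alignment dispatch at the top of each routine: "move.l %[v2], %%d0 / and.l #2, %%d0 / jeq 20f" tests bit 1 of the v2 pointer and jumps to the longword-aligned loop when it is clear. When v2 is only halfword aligned, the new path (label 10) pre-loads one halfword, swaps it into the upper register half, and then recombines each subsequent 32-bit load with the leftover half; that is what the new ADDHALFXREGS/SUBHALFXREGS macros and MACBLOCK4_U2 do. A hedged C sketch of the dispatch, using hypothetical helper names that do not exist in the patch:

    #include <stdint.h>

    /* Hypothetical stand-ins for the two assembly loops in the patch. */
    void vector_add_aligned(int16_t* v1, int16_t* v2);
    void vector_add_halfword_shifted(int16_t* v1, int16_t* v2);

    static inline void vector_add_dispatch(int16_t* v1, int16_t* v2)
    {
        /* Only bit 1 of the address can differ: int16_t pointers are
         * already 2-byte aligned, so "& 2" decides longword alignment. */
        if (((uintptr_t)v2 & 2) == 0)
            vector_add_aligned(v1, v2);           /* label 20 in the asm */
        else
            vector_add_halfword_shifted(v1, v2);  /* label 10 in the asm */
    }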
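The doc comment added above scalarproduct() is also worth underlining: the EMAC unit must be switched to signed integer mode first, which is what PREPARE_SCALARPRODUCT (coldfire_set_macsr(0)) does. A hypothetical usage sketch (the real call sites live in libdemac's filter code; the caller below is only illustrative):

    #include <stdint.h>
    #include "vector_math16_cf.h"  /* PREPARE_SCALARPRODUCT, scalarproduct() */

    /* Illustrative only: set signed integer mode, then take the dot
     * product of filter coefficients and history. */
    static int32_t filter_dot(int16_t* coeffs, int16_t* history)
    {
        PREPARE_SCALARPRODUCT   /* expands to coldfire_set_macsr(0); */
        return scalarproduct(coeffs, history);
    }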