Diffstat (limited to 'apps/codecs/lib/asm_mcf5249.h')
-rw-r--r--  apps/codecs/lib/asm_mcf5249.h | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
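The change drops the Tremor-style _V_WIDE_MATH include guard and instead tags each optimized primitive with its own INCL_OPTIMIZED_* define. Below is a minimal consumer-side sketch of how such per-function defines are typically checked; the fallback pattern and the generic MULT32 body are assumptions for illustration (the generic form follows the canonical Tremor definition, a 32x32->64-bit multiply keeping the high word) and are not part of this diff.

/* Hypothetical fallback pattern (not from this diff): a shared codec-library
 * header can provide a portable C version only when the CPU-specific header
 * did not claim the primitive via its INCL_OPTIMIZED_* define. */
#include <stdint.h>

#ifndef INCL_OPTIMIZED_MULT32
static inline int32_t MULT32(int32_t x, int32_t y)
{
    /* generic equivalent: full 64-bit product, keep the top 32 bits */
    return (int32_t)(((int64_t)x * (int64_t)y) >> 32);
}
#endif
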
diff --git a/apps/codecs/lib/asm_mcf5249.h b/apps/codecs/lib/asm_mcf5249.h
index 5fb3cff94a..88d439631d 100644
--- a/apps/codecs/lib/asm_mcf5249.h
+++ b/apps/codecs/lib/asm_mcf5249.h
@@ -21,9 +21,7 @@
 
 #if defined(CPU_COLDFIRE)
 
-#ifndef _V_WIDE_MATH
-#define _V_WIDE_MATH
-
+#define INCL_OPTIMIZED_MULT32
 static inline int32_t MULT32(int32_t x, int32_t y) {
 
     asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply & shift */
@@ -35,6 +33,7 @@ static inline int32_t MULT32(int32_t x, int32_t y) {
     return x;
 }
 
+#define INCL_OPTIMIZED_MULT31
 static inline int32_t MULT31(int32_t x, int32_t y) {
     asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply */
                   "movclr.l %%acc0, %[x];" /* move and clear */
@@ -44,6 +43,7 @@ static inline int32_t MULT31(int32_t x, int32_t y) {
     return x;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT15
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     int32_t r;
 
@@ -61,6 +61,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
     return r;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT16
 static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     int32_t r;
 
@@ -76,6 +77,7 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     return x;
 }
 
+#define INCL_OPTIMIZED_XPROD31
 static inline
 void XPROD31(int32_t a, int32_t b,
              int32_t t, int32_t v,
@@ -95,6 +97,7 @@ void XPROD31(int32_t a, int32_t b,
                  : "cc", "memory");
 }
 
+#define INCL_OPTIMIZED_XNPROD31
 static inline
 void XNPROD31(int32_t a, int32_t b,
               int32_t t, int32_t v,
@@ -114,15 +117,11 @@ void XNPROD31(int32_t a, int32_t b,
                  : "cc", "memory");
 }
 
-#if 0 /* canonical Tremor definition */
-#define XPROD32(_a, _b, _t, _v, _x, _y) \
-  { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
-    (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
-#endif
 
 /* this could lose the LSB by overflow, but i don't think it'll ever happen.
    if anyone think they can hear a bug caused by this, please try the above
    version. */
+#define INCL_OPTIMIZED_XPROD32
 #define XPROD32(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "mac.l %[b], %[v], %%acc0;" \
@@ -137,6 +136,7 @@ void XNPROD31(int32_t a, int32_t b,
                  [t] "r" (_t), [v] "r" (_v) \
                  : "cc");
 
+#define INCL_OPTIMIZED_XPROD31_R
 #define XPROD31_R(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "mac.l %[b], %[v], %%acc0;" \
@@ -149,6 +149,7 @@ void XNPROD31(int32_t a, int32_t b,
                  [t] "r" (_t), [v] "r" (_v) \
                  : "cc");
 
+#define INCL_OPTIMIZED_XNPROD31_R
 #define XNPROD31_R(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "msac.l %[b], %[v], %%acc0;" \
@@ -336,7 +337,6 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 
 #endif
 
-#endif
 /* not used anymore */
 /*
 #ifndef _V_CLIP_MATH
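
For reference, the #if 0 block removed in the -114,15 hunk carried the canonical Tremor XPROD32, which is what the overflow comment still calls "the above version". A plain C sketch of it is reproduced here (renamed to avoid clashing with the EMAC macro) so that comment stays readable: it adds two already-truncated MULT32 results, whereas the EMAC macro accumulates both full-precision products in acc0 before extracting the high word.

/* Canonical (portable C) XPROD32 from the removed #if 0 block; the
 * "above version" referenced by the overflow comment.  MULT32 is the
 * 32x32 -> 64-bit multiply keeping the high 32 bits. */
#define XPROD32_CANONICAL(_a, _b, _t, _v, _x, _y) \
  { (_x) = MULT32(_a, _t) + MULT32(_b, _v); \
    (_y) = MULT32(_b, _t) - MULT32(_a, _v); }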