| author | Nils Wallménius <nils@rockbox.org> | 2011-06-08 10:35:27 +0000 |
|---|---|---|
| committer | Nils Wallménius <nils@rockbox.org> | 2011-06-08 10:35:27 +0000 |
| commit | d5ceb4ce2b5ee8f81edfab4fb903c10353d788e5 (patch) | |
| tree | 1193ac688f8cc2208ea8f2668b92f7cf581bb457 /apps/codecs/lib | |
| parent | 6672766dfe374e26ee20efe010591b64529b2a0f (diff) | |
| download | rockbox-d5ceb4ce2b5ee8f81edfab4fb903c10353d788e5.tar.gz rockbox-d5ceb4ce2b5ee8f81edfab4fb903c10353d788e5.zip | |
codeclib: make selective inclusion of macros and inline functions from the codeclib_misc.h header easier and clean out some old libtremor stuff.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@29985 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps/codecs/lib')

-rw-r--r--  apps/codecs/lib/asm_arm.h       | 18
-rw-r--r--  apps/codecs/lib/asm_mcf5249.h   | 18
-rw-r--r--  apps/codecs/lib/codeclib_misc.h | 59

3 files changed, 54 insertions(+), 41 deletions(-)
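The idea, in brief: each CPU-specific header now tags every primitive it implements with an INCL_OPTIMIZED_* marker, and codeclib_misc.h compiles its portable fallback for a primitive only when no marker is present. This replaces the old all-or-nothing `_V_WIDE_MATH` guard, under which a platform header had to supply the whole set of primitives or none of them. A minimal sketch of the pattern for one primitive follows; it mirrors the diff but is not the verbatim Rockbox code:

```c
#include <stdint.h>

/* CPU-specific header (cf. asm_arm.h): provide the fast version
   and flag it so the generic header skips its own definition. */
#define INCL_OPTIMIZED_MULT31
static inline int32_t MULT31(int32_t x, int32_t y)
{
    /* stand-in for the real inline-asm implementation */
    return (int32_t)(((int64_t)x * y) >> 32) << 1;
}

/* Generic header (cf. codeclib_misc.h): portable fallback, compiled
   only when no CPU header claimed this primitive. */
#ifndef INCL_OPTIMIZED_MULT31
#define INCL_OPTIMIZED_MULT31
static inline int32_t MULT31(int32_t x, int32_t y)
{
    return (int32_t)(((int64_t)x * y) >> 32) << 1;
}
#endif
```

With per-primitive guards, a platform header can override just the operations it can accelerate and inherit generic C for the rest.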
diff --git a/apps/codecs/lib/asm_arm.h b/apps/codecs/lib/asm_arm.h
index 629e47b3bd..54ce4b0d98 100644
--- a/apps/codecs/lib/asm_arm.h
+++ b/apps/codecs/lib/asm_arm.h
@@ -16,9 +16,7 @@
 ********************************************************************/
 #ifdef CPU_ARM
 
-#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
-#define _V_WIDE_MATH
-
+#define INCL_OPTIMIZED_MULT32
 #if ARM_ARCH >= 6
 static inline int32_t MULT32(int32_t x, int32_t y) {
   int32_t hi;
@@ -37,10 +35,12 @@ static inline int32_t MULT32(int32_t x, int32_t y) {
 }
 #endif
 
+#define INCL_OPTIMIZED_MULT31
 static inline int32_t MULT31(int32_t x, int32_t y) {
   return MULT32(x,y)<<1;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT15
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   int32_t lo,hi;
   asm volatile("smull %0, %1, %2, %3\n\t"
@@ -52,6 +52,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   return(hi);
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT16
 static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
   int32_t lo,hi;
   asm volatile("smull %0, %1, %2, %3\n\t"
@@ -63,6 +64,7 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
   return(hi);
 }
 
+#define INCL_OPTIMIZED_XPROD32
 #define XPROD32(a, b, t, v, x, y) \
 { \
   int32_t l; \
@@ -75,6 +77,8 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
     : "r" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
 }
 
+#define INCL_OPTIMIZED_XPROD31_R
+#define INCL_OPTIMIZED_XNPROD31_R
 #if ARM_ARCH >= 6
 /* These may yield slightly different result from the macros below
    because only the high 32 bits of the multiplications are accumulated while
@@ -134,6 +138,7 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
 }
 #endif
 
+#define INCL_OPTIMIZED_XPROD31
 static inline void XPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
@@ -144,6 +149,7 @@ static inline void XPROD31(int32_t a, int32_t b,
   *y = _y1;
 }
 
+#define INCL_OPTIMIZED_XNPROD31
 static inline void XNPROD31(int32_t a, int32_t b,
                             int32_t t, int32_t v,
                             int32_t *x, int32_t *y)
@@ -261,7 +267,6 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 
 #endif
 
-#endif
 /* not used anymore */
 /*
 #ifndef _V_CLIP_MATH
@@ -282,11 +287,6 @@ static inline int32_t CLIP_TO_15(int32_t x) {
 
 #endif
 */
-#ifndef _V_LSP_MATH_ASM
-#define _V_LSP_MATH_ASM
-
 
-
-#endif
 #endif
 
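For reference, the smull-based sequences above and the union-based generic version in codeclib_misc.h compute the same quantity: the high 32 bits of the signed 64-bit product. A portable restatement of those semantics, using illustrative names that are not part of the tree:

```c
#include <stdint.h>

/* MULT32 semantics: top 32 bits of the 64-bit signed product. */
static inline int32_t mult32_ref(int32_t x, int32_t y)
{
    return (int32_t)(((int64_t)x * (int64_t)y) >> 32);
}

/* MULT31 semantics: a Q31 * Q31 -> Q31 multiply, i.e. MULT32 with
   the bit lost to the sign restored by a final left shift. */
static inline int32_t mult31_ref(int32_t x, int32_t y)
{
    return mult32_ref(x, y) << 1;
}
```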
diff --git a/apps/codecs/lib/asm_mcf5249.h b/apps/codecs/lib/asm_mcf5249.h
index 5fb3cff94a..88d439631d 100644
--- a/apps/codecs/lib/asm_mcf5249.h
+++ b/apps/codecs/lib/asm_mcf5249.h
@@ -21,9 +21,7 @@
 
 #if defined(CPU_COLDFIRE)
 
-#ifndef _V_WIDE_MATH
-#define _V_WIDE_MATH
-
+#define INCL_OPTIMIZED_MULT32
 static inline int32_t MULT32(int32_t x, int32_t y) {
 
   asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply & shift */
@@ -35,6 +33,7 @@ static inline int32_t MULT32(int32_t x, int32_t y) {
   return x;
 }
 
+#define INCL_OPTIMIZED_MULT31
 static inline int32_t MULT31(int32_t x, int32_t y) {
   asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply */
                 "movclr.l %%acc0, %[x];" /* move and clear */
@@ -44,6 +43,7 @@ static inline int32_t MULT31(int32_t x, int32_t y) {
   return x;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT15
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   int32_t r;
 
@@ -61,6 +61,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   return r;
 }
 
+#define INCL_OPTIMIZED_MULT31_SHIFT16
 static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
   int32_t r;
 
@@ -76,6 +77,7 @@ static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
   return x;
 }
 
+#define INCL_OPTIMIZED_XPROD31
 static inline
 void XPROD31(int32_t a, int32_t b,
              int32_t t, int32_t v,
@@ -95,6 +97,7 @@ void XPROD31(int32_t a, int32_t b,
                : "cc", "memory");
 }
 
+#define INCL_OPTIMIZED_XNPROD31
 static inline
 void XNPROD31(int32_t a, int32_t b,
               int32_t t, int32_t v,
@@ -114,15 +117,11 @@ void XNPROD31(int32_t a, int32_t b,
                : "cc", "memory");
 }
 
-#if 0 /* canonical Tremor definition */
-#define XPROD32(_a, _b, _t, _v, _x, _y) \
-{ (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
-  (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
-#endif
 
 /* this could lose the LSB by overflow, but i don't think it'll ever happen.
    if anyone think they can hear a bug caused by this, please try the above
    version. */
+#define INCL_OPTIMIZED_XPROD32
 #define XPROD32(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "mac.l %[b], %[v], %%acc0;" \
@@ -137,6 +136,7 @@ void XNPROD31(int32_t a, int32_t b,
                   [t] "r" (_t), [v] "r" (_v) \
                   : "cc");
 
+#define INCL_OPTIMIZED_XPROD31_R
 #define XPROD31_R(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "mac.l %[b], %[v], %%acc0;" \
@@ -149,6 +149,7 @@ void XNPROD31(int32_t a, int32_t b,
                   [t] "r" (_t), [v] "r" (_v) \
                   : "cc");
 
+#define INCL_OPTIMIZED_XNPROD31_R
 #define XNPROD31_R(_a, _b, _t, _v, _x, _y) \
     asm volatile ("mac.l %[a], %[t], %%acc0;" \
                   "msac.l %[b], %[v], %%acc0;" \
@@ -336,7 +337,6 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 
 #endif
 
-#endif
 /* not used anymore */
 /*
 #ifndef _V_CLIP_MATH
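The `#if 0` block removed here was the canonical Tremor cross product; it survives as the generic XPROD32 fallback in codeclib_misc.h, which is what the retained comment's "the above version" now points at. Its semantics, restated as a portable sketch (xprod32_ref is an illustrative name, not from the tree):

```c
#include <stdint.h>

/* Canonical XPROD32: each product is truncated to its high 32 bits
   (MULT32) before the add/subtract. The EMAC version instead keeps
   full precision inside %acc0, so the two can differ in the LSB,
   as the retained comment warns. */
static inline void xprod32_ref(int32_t a, int32_t b, int32_t t, int32_t v,
                               int32_t *x, int32_t *y)
{
    *x = (int32_t)(((int64_t)a * t) >> 32) + (int32_t)(((int64_t)b * v) >> 32);
    *y = (int32_t)(((int64_t)b * t) >> 32) - (int32_t)(((int64_t)a * v) >> 32);
}
```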
diff --git a/apps/codecs/lib/codeclib_misc.h b/apps/codecs/lib/codeclib_misc.h
index f3b1805e26..08be93716f 100644
--- a/apps/codecs/lib/codeclib_misc.h
+++ b/apps/codecs/lib/codeclib_misc.h
@@ -15,32 +15,15 @@
 
 ********************************************************************/
 
-//#include "config-tremor.h"
-
-#ifndef _V_RANDOM_H_
-#define _V_RANDOM_H_
-//#include "ivorbiscodec.h"
-//#include "os_types.h"
+#ifndef _CODECLIB_MISC_H_
+#define _CODECLIB_MISC_H_
 
+#include <stdint.h>
 #include "asm_arm.h"
 #include "asm_mcf5249.h"
 
-
-/* Some prototypes that were not defined elsewhere */
-//void *_vorbis_block_alloc(vorbis_block *vb,long bytes);
-//void _vorbis_block_ripcord(vorbis_block *vb);
-//extern int _ilog(unsigned int v);
-
-#ifndef _V_WIDE_MATH
-#define _V_WIDE_MATH
-
-#ifndef ROCKBOX
-#include <inttypes.h>
-#endif /* ROCKBOX */
-
 #ifndef _LOW_ACCURACY_
 /* 64 bit multiply */
-/* #include <sys/types.h> */
 
 #ifdef ROCKBOX_LITTLE_ENDIAN
 union magic {
@@ -60,29 +43,43 @@
 };
 #endif
 
+#ifndef INCL_OPTIMIZED_MULT32
+#define INCL_OPTIMIZED_MULT32
 static inline int32_t MULT32(int32_t x, int32_t y) {
   union magic magic;
   magic.whole = (int64_t)x * y;
   return magic.halves.hi;
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_MULT31
+#define INCL_OPTIMIZED_MULT31
 static inline int32_t MULT31(int32_t x, int32_t y) {
   return MULT32(x,y)<<1;
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_MULT31_SHIFT15
+#define INCL_OPTIMIZED_MULT31_SHIFT15
 static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   union magic magic;
   magic.whole = (int64_t)x * y;
   return ((uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_MULT31_SHIFT16
+#define INCL_OPTIMIZED_MULT31_SHIFT16
 static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
   union magic magic;
   magic.whole = (int64_t)x * y;
   return ((uint32_t)(magic.halves.lo)>>16) | ((magic.halves.hi)<<16);
 }
+#endif
 
 #else
+/* Rockbox: unused */
+#if 0
 /* 32 bit multiply, more portable but less accurate */
 
 /*
@@ -110,6 +107,7 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   return (x >> 6) * y; /* y preshifted >>9 */
 }
 #endif
+#endif
 
 /*
  * The XPROD functions are meant to optimize the cross products found all
@@ -121,13 +119,17 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  * macros.
  */
 
+#ifndef INCL_OPTIMIZED_XPROD32
+#define INCL_OPTIMIZED_XPROD32
 /* replaced XPROD32 with a macro to avoid memory reference
    _x, _y are the results (must be l-values) */
 #define XPROD32(_a, _b, _t, _v, _x, _y) \
 { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
   (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
+#endif
 
-
+/* Rockbox: Unused */
+/*
 #ifdef __i386__
 
 #define XPROD31(_a, _b, _t, _v, _x, _y) \
@@ -138,7 +140,10 @@ static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
   *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
 
 #else
+*/
 
+#ifndef INCL_OPTIMIZED_XPROD31
+#define INCL_OPTIMIZED_XPROD31
 static inline void XPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
@@ -146,7 +151,10 @@ static inline void XPROD31(int32_t a, int32_t b,
   *x = MULT31(a, t) + MULT31(b, v);
   *y = MULT31(b, t) - MULT31(a, v);
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_XNPROD31
+#define INCL_OPTIMIZED_XNPROD31
 static inline void XNPROD31(int32_t a, int32_t b,
                             int32_t t, int32_t v,
                             int32_t *x, int32_t *y)
@@ -155,19 +163,25 @@ static inline void XNPROD31(int32_t a, int32_t b,
   *y = MULT31(b, t) + MULT31(a, v);
 }
 #endif
+/*#endif*/
 
+#ifndef INCL_OPTIMIZED_XPROD31_R
+#define INCL_OPTIMIZED_XPROD31_R
 #define XPROD31_R(_a, _b, _t, _v, _x, _y)\
 {\
   _x = MULT31(_a, _t) + MULT31(_b, _v);\
   _y = MULT31(_b, _t) - MULT31(_a, _v);\
 }
+#endif
 
+#ifndef INCL_OPTIMIZED_XNPROD31_R
+#define INCL_OPTIMIZED_XNPROD31_R
 #define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
 {\
   _x = MULT31(_a, _t) - MULT31(_b, _v);\
   _y = MULT31(_b, _t) + MULT31(_a, _v);\
 }
-
+#endif
 
 #ifndef _V_VECT_OPS
 #define _V_VECT_OPS
@@ -213,7 +227,6 @@ void vect_mult_bw(int32_t *data, int32_t *window, int n)
 }
 #endif
 
-#endif
 /* not used anymore */
 /*
 #ifndef _V_CLIP_MATH
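With the guards in place, a codec simply includes codeclib_misc.h and transparently gets whichever mix of optimized and generic primitives the build target provides. A hypothetical consumer, not from the tree:

```c
#include "codeclib_misc.h"

/* Rotate a Q31 complex sample (re, im) by (c, s) = (cos, sin) using
   XPROD31: re' = re*c + im*s, im' = im*c - re*s. On ARM or Coldfire
   this resolves to the inline-asm versions; elsewhere, the C fallbacks. */
static void rotate_q31(int32_t *re, int32_t *im, int32_t c, int32_t s)
{
    int32_t x, y;
    XPROD31(*re, *im, c, s, &x, &y);
    *re = x;
    *im = y;
}
```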