path: root/apps/codecs/lib/codeclib_misc.h
Diffstat (limited to 'apps/codecs/lib/codeclib_misc.h')
-rw-r--r--  apps/codecs/lib/codeclib_misc.h  310
1 file changed, 0 insertions, 310 deletions
diff --git a/apps/codecs/lib/codeclib_misc.h b/apps/codecs/lib/codeclib_misc.h
deleted file mode 100644
index 8ebe22e37b..0000000000
--- a/apps/codecs/lib/codeclib_misc.h
+++ /dev/null
@@ -1,310 +0,0 @@
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/

#ifndef _CODECLIB_MISC_H_
#define _CODECLIB_MISC_H_

#include <stdint.h>
#include "asm_arm.h"
#include "asm_mcf5249.h"

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */

#ifdef ROCKBOX_LITTLE_ENDIAN
union magic {
    struct {
        int32_t lo;
        int32_t hi;
    } halves;
    int64_t whole;
};
#elif defined(ROCKBOX_BIG_ENDIAN)
union magic {
    struct {
        int32_t hi;
        int32_t lo;
    } halves;
    int64_t whole;
};
#endif

#ifndef INCL_OPTIMIZED_MULT32
#define INCL_OPTIMIZED_MULT32
static inline int32_t MULT32(int32_t x, int32_t y) {
    union magic magic;
    magic.whole = (int64_t)x * y;
    return magic.halves.hi;
}
#endif
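
/* Editor's sketch (hypothetical, compiled out): MULT32 returns the high
   half of the 64-bit product, i.e. ((int64_t)x * y) >> 32, so two Q31
   operands yield a Q30 result. The function name below is illustrative
   only and not part of the original header. */
#if 0
static inline int32_t mult32_example(void)
{
    int32_t half = 0x40000000;   /* 0.5 in Q31 */
    /* 0.5 * 0.5 = 0.25: MULT32 gives 0x10000000, which is 0.25 in Q30. */
    return MULT32(half, half);
}
#endif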

#ifndef INCL_OPTIMIZED_MULT31
#define INCL_OPTIMIZED_MULT31
static inline int32_t MULT31(int32_t x, int32_t y) {
    return MULT32(x,y)<<1;
}
#endif

#ifndef INCL_OPTIMIZED_MULT31_SHIFT15
#define INCL_OPTIMIZED_MULT31_SHIFT15
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
    union magic magic;
    magic.whole = (int64_t)x * y;
    return ((uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
#endif

#ifndef INCL_OPTIMIZED_MULT31_SHIFT16
#define INCL_OPTIMIZED_MULT31_SHIFT16
static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
    union magic magic;
    magic.whole = (int64_t)x * y;
    return ((uint32_t)(magic.halves.lo)>>16) | ((magic.halves.hi)<<16);
}
#endif
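
/* Editor's sketch (hypothetical, compiled out): the SHIFT variants simply
   keep a different 32-bit window of the same 64-bit product. The reference
   function below is illustrative, not part of the original API, and is
   bit-for-bit equivalent on the two's-complement targets Rockbox runs on. */
#if 0
static inline int32_t mult31_shift16_ref(int32_t x, int32_t y)
{
    /* Equivalent of MULT31_SHIFT16: take the full 64-bit product and
       keep bits 16..47 as a signed 32-bit value. */
    return (int32_t)(((int64_t)x * y) >> 16);
}
#endif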

#else
/* Rockbox: unused */
#if 0
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, therefore ordering
 * is important. Shift values were chosen for the best sound quality after
 * many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits. We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */

static inline int32_t MULT32(int32_t x, int32_t y) {
    return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline int32_t MULT31(int32_t x, int32_t y) {
    return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
    return (x >> 6) * y;  /* y preshifted >>9 */
}
#endif
#endif

/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering, which
 * avoids unnecessary register reloads as soon as memory is being written
 * to. However, this is only beneficial on CPUs with a sane number of
 * general-purpose registers, which excludes the Intel x86. On x86 it is
 * better to let the compiler reload registers directly from the original
 * memory by using macros.
 */

#ifndef INCL_OPTIMIZED_XPROD32
#define INCL_OPTIMIZED_XPROD32
/* XPROD32 is a macro rather than a function to avoid a memory reference;
   _x and _y receive the results and must be l-values. */
#define XPROD32(_a, _b, _t, _v, _x, _y) \
    { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
      (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#endif
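
/* Editor's sketch (hypothetical, compiled out): XPROD32 is the complex
   "rotate and cross" step used by the MDCT butterflies. Given a point
   (re, im) and a twiddle (cos_w, sin_w), it produces x = re*cos + im*sin
   and y = im*cos - re*sin. The helper name below is illustrative only. */
#if 0
static inline void xprod32_example(int32_t *re, int32_t *im,
                                   int32_t cos_w, int32_t sin_w)
{
    int32_t x, y;
    XPROD32(*re, *im, cos_w, sin_w, x, y);
    *re = x;   /* re*cos_w + im*sin_w */
    *im = y;   /* im*cos_w - re*sin_w */
}
#endif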

/* Rockbox: Unused */
/*
#ifdef __i386__

#define XPROD31(_a, _b, _t, _v, _x, _y) \
    { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
      *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y) \
    { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
      *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else
*/

#ifndef INCL_OPTIMIZED_XPROD31
#define INCL_OPTIMIZED_XPROD31
static inline void XPROD31(int32_t a, int32_t b,
                           int32_t t, int32_t v,
                           int32_t *x, int32_t *y)
{
    *x = MULT31(a, t) + MULT31(b, v);
    *y = MULT31(b, t) - MULT31(a, v);
}
#endif

#ifndef INCL_OPTIMIZED_XNPROD31
#define INCL_OPTIMIZED_XNPROD31
static inline void XNPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
{
    *x = MULT31(a, t) - MULT31(b, v);
    *y = MULT31(b, t) + MULT31(a, v);
}
#endif
/*#endif*/

#ifndef INCL_OPTIMIZED_XPROD31_R
#define INCL_OPTIMIZED_XPROD31_R
#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
    _x = MULT31(_a, _t) + MULT31(_b, _v);\
    _y = MULT31(_b, _t) - MULT31(_a, _v);\
}
#endif

#ifndef INCL_OPTIMIZED_XNPROD31_R
#define INCL_OPTIMIZED_XNPROD31_R
#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
{\
    _x = MULT31(_a, _t) - MULT31(_b, _v);\
    _y = MULT31(_b, _t) + MULT31(_a, _v);\
}
#endif

#ifndef _V_VECT_OPS
#define _V_VECT_OPS

static inline
void vect_add(int32_t *x, const int32_t *y, int n)
{
    while (n>0) {
        *x++ += *y++;
        n--;
    }
}

static inline
void vect_copy(int32_t *x, const int32_t *y, int n)
{
    while (n>0) {
        *x++ = *y++;
        n--;
    }
}

static inline
void vect_mult_fw(int32_t *data, const int32_t *window, int n)
{
    while (n>0) {
        *data = MULT31(*data, *window);
        data++;
        window++;
        n--;
    }
}

static inline
void vect_mult_bw(int32_t *data, const int32_t *window, int n)
{
    while (n>0) {
        *data = MULT31(*data, *window);
        data++;
        window--;
        n--;
    }
}
#endif
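
/* Editor's sketch (hypothetical, compiled out): a typical overlap-add
   windowing step might apply the window forwards over one half of a block
   and mirrored (backwards) over the other. The buffer layout and names
   here are assumptions for illustration, not the codecs' actual call
   sites. */
#if 0
static inline void window_block_example(int32_t *samples,
                                        const int32_t *window, int n)
{
    /* First half: window[0], window[1], ... */
    vect_mult_fw(samples, window, n / 2);
    /* Second half: window[n/2 - 1], window[n/2 - 2], ... (mirrored) */
    vect_mult_bw(samples + n / 2, window + n / 2 - 1, n / 2);
}
#endif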

/* not used anymore */
/*
#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline int32_t CLIP_TO_15(int32_t x) {
    int ret = x;
    ret -= ((x <= 32767) - 1) & (x - 32767);
    ret -= ((x >= -32768) - 1) & (x + 32768);
    return ret;
}

#endif
*/
static inline int32_t VFLOAT_MULT(int32_t a, int32_t ap,
                                  int32_t b, int32_t bp,
                                  int32_t *p) {
    if (a && b) {
#ifndef _LOW_ACCURACY_
        *p = ap + bp + 32;
        return MULT32(a, b);
#else
        *p = ap + bp + 31;
        return (a >> 15) * (b >> 16);
#endif
    } else
        return 0;
}
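
/* Editor's sketch (hypothetical, compiled out): VFLOAT values are a
   (mantissa, exponent) pair representing mantissa * 2^exponent. Squaring
   0.5 should give 0.25; the constants below assume the high-accuracy
   (_LOW_ACCURACY_ undefined) path. */
#if 0
static inline int32_t vfloat_mult_example(int32_t *exp_out)
{
    /* 0.5 is mantissa 0x40000000 with exponent -31. The product is
       mantissa 0x10000000 with *exp_out == -30: 2^28 * 2^-30 = 0.25. */
    return VFLOAT_MULT(0x40000000, -31, 0x40000000, -31, exp_out);
}
#endif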

/*static inline int32_t VFLOAT_MULTI(int32_t a, int32_t ap,
                                     int32_t i,
                                     int32_t *p) {

    int ip = _ilog(abs(i)) - 31;
    return VFLOAT_MULT(a, ap, i << -ip, ip, p);
}
*/
static inline int32_t VFLOAT_ADD(int32_t a, int32_t ap,
                                 int32_t b, int32_t bp,
                                 int32_t *p) {

    if (!a) {
        *p = bp;
        return b;
    } else if (!b) {
        *p = ap;
        return a;
    }

    /* yes, this can leak a bit. */
    if (ap > bp) {
        int shift = ap - bp + 1;
        *p = ap + 1;
        a >>= 1;
        if (shift < 32) {
            b = (b + (1 << (shift - 1))) >> shift;
        } else {
            b = 0;
        }
    } else {
        int shift = bp - ap + 1;
        *p = bp + 1;
        b >>= 1;
        if (shift < 32) {
            a = (a + (1 << (shift - 1))) >> shift;
        } else {
            a = 0;
        }
    }

    a += b;
    if ((a & 0xc0000000) == 0xc0000000 ||
        (a & 0xc0000000) == 0) {
        a <<= 1;
        (*p)--;
    }
    return a;
}
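
/* Editor's sketch (hypothetical, compiled out): VFLOAT_ADD first aligns
   the two exponents (rounding the smaller operand), then renormalises so
   the mantissa keeps its leading bits in range. */
#if 0
static inline int32_t vfloat_add_example(int32_t *exp_out)
{
    /* 0.5 + 0.25: returns mantissa 0x60000000 with *exp_out == -31,
       i.e. 1.5 * 2^30 * 2^-31 = 0.75. */
    return VFLOAT_ADD(0x40000000, -31,    /* 0.5  */
                      0x40000000, -32,    /* 0.25 */
                      exp_out);
}
#endif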

#endif