summaryrefslogtreecommitdiff
path: root/apps/codecs/libwma/misc.h
diff options
context:
space:
mode:
Diffstat (limited to 'apps/codecs/libwma/misc.h')
-rw-r--r--apps/codecs/libwma/misc.h291
1 file changed, 291 insertions, 0 deletions
diff --git a/apps/codecs/libwma/misc.h b/apps/codecs/libwma/misc.h
new file mode 100644
index 0000000000..59760bf885
--- /dev/null
+++ b/apps/codecs/libwma/misc.h
@@ -0,0 +1,291 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: miscellaneous math and prototypes
15
16 ********************************************************************/
17
18//#include "config-tremor.h"
19
20#ifndef _V_RANDOM_H_
21#define _V_RANDOM_H_
22//#include "ivorbiscodec.h"
23//#include "os_types.h"
24
25//#include "asm_arm.h"
26//#include "asm_mcf5249.h"
27
28
29/* Some prototypes that were not defined elsewhere */
30//void *_vorbis_block_alloc(vorbis_block *vb,long bytes);
31//void _vorbis_block_ripcord(vorbis_block *vb);
32//extern int _ilog(unsigned int v);
33
34#ifndef _V_WIDE_MATH
35#define _V_WIDE_MATH
36
37#ifndef _LOW_ACCURACY_
38/* 64 bit multiply */
39/* #include <sys/types.h> */
40
#if BYTE_ORDER==LITTLE_ENDIAN
/* Overlays a 64-bit product with its two 32-bit halves so the high
 * word can be read directly instead of via a 64-bit shift.  Member
 * order inside `halves' depends on byte order, hence the two
 * endian-specific variants of the union. */
union magic {
  struct {
    ogg_int32_t lo;
    ogg_int32_t hi;
  } halves;
  ogg_int64_t whole;
};
#elif BYTE_ORDER==BIG_ENDIAN
union magic {
  struct {
    ogg_int32_t hi;
    ogg_int32_t lo;
  } halves;
  ogg_int64_t whole;
};
#endif
58
59static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
60 union magic magic;
61 magic.whole = (ogg_int64_t)x * y;
62 return magic.halves.hi;
63}
64static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
65 return MULT32(x,y)<<1;
66}
67
68static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
69 union magic magic;
70 magic.whole = (ogg_int64_t)x * y;
71 return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
72}
73
74#else
75/* 32 bit multiply, more portable but less accurate */
76
77/*
78 * Note: Precision is biased towards the first argument therefore ordering
79 * is important. Shift values were chosen for the best sound quality after
80 * many listening tests.
81 */
82
83/*
84 * For MULT32 and MULT31: The second argument is always a lookup table
85 * value already preshifted from 31 to 8 bits. We therefore take the
86 * opportunity to save on text space and use unsigned char for those
87 * tables in this case.
88 */
89
90static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
91 return (x >> 9) * y; /* y preshifted >>23 */
92}
93
94static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
95 return (x >> 8) * y; /* y preshifted >>23 */
96}
97
98static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
99 return (x >> 6) * y; /* y preshifted >>9 */
100}
101#endif
102
/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory. Might or might not be beneficial
 * depending on the architecture and compiler.
 */
#define MB()
109
110/*
111 * The XPROD functions are meant to optimize the cross products found all
112 * over the place in mdct.c by forcing memory operation ordering to avoid
113 * unnecessary register reloads as soon as memory is being written to.
114 * However this is only beneficial on CPUs with a sane number of general
115 * purpose registers which exclude the Intel x86. On Intel, better let the
116 * compiler actually reload registers directly from original memory by using
117 * macros.
118 */
119
/* replaced XPROD32 with a macro to avoid memory reference
   _x, _y are the results (must be l-values) */
/* NOTE: every argument is evaluated more than once -- do not pass
   expressions with side effects. */
#define XPROD32(_a, _b, _t, _v, _x, _y) \
 { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
 (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
125
126
#ifdef __i386__

/* On x86, with its few general-purpose registers, it is cheaper to let
   the compiler reload operands straight from memory, so XPROD31 and
   XNPROD31 are macros here.  _x and _y are pointers to the results.
   NOTE: arguments are evaluated more than once -- no side effects. */
#define XPROD31(_a, _b, _t, _v, _x, _y) \
 { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
 *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y) \
 { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
 *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
135
136#else
137
138static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
139 ogg_int32_t t, ogg_int32_t v,
140 ogg_int32_t *x, ogg_int32_t *y)
141{
142 *x = MULT31(a, t) + MULT31(b, v);
143 *y = MULT31(b, t) - MULT31(a, v);
144}
145
146static inline void XNPROD31(ogg_int32_t a, ogg_int32_t b,
147 ogg_int32_t t, ogg_int32_t v,
148 ogg_int32_t *x, ogg_int32_t *y)
149{
150 *x = MULT31(a, t) - MULT31(b, v);
151 *y = MULT31(b, t) + MULT31(a, v);
152}
153#endif
154
155#ifndef _V_VECT_OPS
156#define _V_VECT_OPS
157
158static inline
159void vect_add(ogg_int32_t *x, ogg_int32_t *y, int n)
160{
161 while (n>0) {
162 *x++ += *y++;
163 n--;
164 }
165}
166
167static inline
168void vect_copy(ogg_int32_t *x, ogg_int32_t *y, int n)
169{
170 while (n>0) {
171 *x++ = *y++;
172 n--;
173 }
174}
175
176static inline
177void vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
178{
179 while(n>0) {
180 *data = MULT31(*data, *window);
181 data++;
182 window++;
183 n--;
184 }
185}
186
187static inline
188void vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
189{
190 while(n>0) {
191 *data = MULT31(*data, *window);
192 data++;
193 window--;
194 n--;
195 }
196}
197#endif
198
199#endif
200
201#ifndef _V_CLIP_MATH
202#define _V_CLIP_MATH
203
/* Branchless clamp of x to the signed 16-bit range [-32768, 32767].
 * (x<=32767)-1 is all-ones exactly when x exceeds 32767, in which
 * case the excess (x-32767) is subtracted, leaving 32767; the second
 * line symmetrically raises values below -32768.  In range, both
 * masks are zero and x passes through unchanged. */
static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}
210
211#endif
212
213static inline ogg_int32_t VFLOAT_MULT(ogg_int32_t a,ogg_int32_t ap,
214 ogg_int32_t b,ogg_int32_t bp,
215 ogg_int32_t *p){
216 if(a && b){
217#ifndef _LOW_ACCURACY_
218 *p=ap+bp+32;
219 return MULT32(a,b);
220#else
221 *p=ap+bp+31;
222 return (a>>15)*(b>>16);
223#endif
224 }else
225 return 0;
226}
227
228/*static inline ogg_int32_t VFLOAT_MULTI(ogg_int32_t a,ogg_int32_t ap,
229 ogg_int32_t i,
230 ogg_int32_t *p){
231
232 int ip=_ilog(abs(i))-31;
233 return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
234}
235*/
/* Add two pseudo-floating-point values: mantissas a and b with
 * exponents ap and bp.  Writes the result exponent through p and
 * returns the result mantissa.  The smaller-exponent operand is
 * rounded and shifted down to the common scale before the add; the
 * result is then renormalized by one bit if it has headroom. */
static inline ogg_int32_t VFLOAT_ADD(ogg_int32_t a,ogg_int32_t ap,
                                     ogg_int32_t b,ogg_int32_t bp,
                                     ogg_int32_t *p){

  /* A zero operand: return the other one verbatim. */
  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  /* Align both mantissas to exponent max(ap,bp)+1; the extra bit of
     headroom guarantees the sum below cannot overflow. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      /* Round-to-nearest before the down-shift. */
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      /* Shifting by >= the type width is undefined; b underflows to 0. */
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  /* If the top two bits agree the sum fits in one fewer bit:
     renormalize by shifting up and decrementing the exponent. */
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
277
/* Branch-prediction hint: EXPECT(expr, likely_value).  Maps onto
 * GCC's __builtin_expect where available (GCC >= 3); on other
 * compilers it evaluates to the expression unchanged. */
#ifdef __GNUC__
#if __GNUC__ >= 3
#define EXPECT(a, b) __builtin_expect((a), (b))
#else
#define EXPECT(a, b) (a)
#endif
#else
#define EXPECT(a, b) (a)
#endif
287
288#endif
289
290
291