Diffstat (limited to 'lib/rbcodec/codecs/libtremor/misc.h')
-rw-r--r--    lib/rbcodec/codecs/libtremor/misc.h    276
 1 file changed, 276 insertions(+), 0 deletions(-)
diff --git a/lib/rbcodec/codecs/libtremor/misc.h b/lib/rbcodec/codecs/libtremor/misc.h
new file mode 100644
index 0000000000..592a60ffd8
--- /dev/null
+++ b/lib/rbcodec/codecs/libtremor/misc.h
@@ -0,0 +1,276 @@
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.   *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS     *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.       *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002    *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                  *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/

#include "config-tremor.h"

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
#include "ivorbiscodec.h"
#include "os_types.h"

#include "codeclib_misc.h"

#include "asm_arm.h"
#include "asm_mcf5249.h"

/* Some prototypes that were not defined elsewhere */
void *_vorbis_block_alloc(vorbis_block *vb, long bytes);
void _vorbis_block_ripcord(vorbis_block *vb);
extern int _ilog(unsigned int v);

#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */
/* #include <sys/types.h> */
#if 0
#if BYTE_ORDER==LITTLE_ENDIAN
union magic {
  struct {
    ogg_int32_t lo;
    ogg_int32_t hi;
  } halves;
  ogg_int64_t whole;
};
#elif BYTE_ORDER==BIG_ENDIAN
union magic {
  struct {
    ogg_int32_t hi;
    ogg_int32_t lo;
  } halves;
  ogg_int64_t whole;
};
#endif

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return magic.halves.hi;
}
static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
#endif
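
/* Illustration (disabled, not from the original source): a minimal,
   portable sketch of what MULT32 above computes -- the high 32 bits of
   the signed 64-bit product, i.e. a Q31 x Q31 multiply whose result is
   scaled down by one extra bit. Assumes a hosted C99 environment; the
   test values are arbitrary. */
#if 0
#include <stdint.h>
#include <stdio.h>

static int32_t mult32_ref(int32_t x, int32_t y)
{
    /* same value as magic.halves.hi above */
    return (int32_t)(((int64_t)x * y) >> 32);
}

int main(void)
{
    int32_t half = 1 << 30;  /* 0.5 in Q31 */
    /* 0.5 * 0.5 = 0.25; MULT32 returns it scaled down one bit: 2^28 */
    printf("%ld\n", (long)mult32_ref(half, half));
    return 0;
}
#endif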
#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument; therefore
 * ordering is important. Shift values were chosen for the best sound
 * quality after many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits. We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}
#endif
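
/* Illustration (disabled, not from the original source): a sketch of the
   low-accuracy preshift convention described above. With the table value
   preshifted right by 23 bits, (x >> 9) * y lands on the same scale as a
   full (x * y) >> 32. Assumes a hosted C99 environment. */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t x      = 1 << 30;       /* 0.5 in Q31 */
    int32_t y_full = 1 << 30;       /* 0.5 in Q31, before preshifting */
    int32_t y_pre  = y_full >> 23;  /* preshifted table entry: 128 */
    /* both lines print 268435456 (= 2^28) for these round inputs */
    printf("%ld\n", (long)((x >> 9) * y_pre));
    printf("%ld\n", (long)((int32_t)(((int64_t)x * y_full) >> 32)));
    return 0;
}
#endif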

/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory. Might or might not be
 * beneficial depending on the architecture and compiler.
 */
#define MB()
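
/* Sketch (disabled, not from the original source) of one plausible
   non-empty MB() definition for GCC-style compilers: an empty asm with a
   "memory" clobber acts as a compiler barrier, forcing register-cached
   values back to memory without emitting any instruction. */
#if 0
#define MB() asm volatile ("" ::: "memory")
#endif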

/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However, this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86. On Intel, it is
 * better to let the compiler reload registers directly from the original
 * memory by using macros.
 */

/* replaced XPROD32 with a macro to avoid memory references;
   _x, _y are the results (must be l-values) */
/*
#define XPROD32(_a, _b, _t, _v, _x, _y) \
  { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
    (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
*/

#ifdef __i386__

#define XPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else
/*
static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
                           ogg_int32_t t, ogg_int32_t v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

static inline void XNPROD31(ogg_int32_t a, ogg_int32_t b,
                            ogg_int32_t t, ogg_int32_t v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}
*/
#endif
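
/* Illustration (disabled, not from the original source): what XPROD31
   computes, spelled out with the MULT31 definition from the 64-bit branch
   above. For a unit-length (t, v) pair in Q31 this is a rotation of the
   vector (a, b), which is how mdct.c uses it. Test values are arbitrary. */
#if 0
#include <stdint.h>
#include <stdio.h>

static int32_t mult31_ref(int32_t x, int32_t y)
{
    /* identical to MULT32(x,y)<<1 above */
    return (int32_t)((((int64_t)x * y) >> 32) << 1);
}

int main(void)
{
    int32_t a = 1 << 29, b = 0;  /* input vector (0.25, 0) in Q31 */
    int32_t t = 1518500250;      /* ~cos(pi/4) in Q31 */
    int32_t v = 1518500250;      /* ~sin(pi/4) in Q31 */
    int32_t x = mult31_ref(a, t) + mult31_ref(b, v);
    int32_t y = mult31_ref(b, t) - mult31_ref(a, v);
    printf("x=%ld y=%ld\n", (long)x, (long)y);  /* ~(0.177, -0.177) in Q31 */
    return 0;
}
#endif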

#if 0
#ifndef _V_VECT_OPS
#define _V_VECT_OPS

/* The generic misc.h has symmetrical versions of vect_add_right_left
   and vect_add_left_right (since the generic versions of vect_mult_fw
   and vect_mult_bw are symmetrical, i.e. both use MULT31) */
static inline
void vect_add_right_left(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  while (n>0) {
    *x++ += *y++;
    n--;
  }
}

static inline
void vect_add_left_right(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  vect_add_right_left(x,y,n);
}

static inline
void ogg_vect_mult_fw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}

static inline
void ogg_vect_mult_bw(ogg_int32_t *data, LOOKUP_T *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}

/* generic memcpy is probably optimal */
static inline void vect_copy(ogg_int32_t *x, const ogg_int32_t *y, int n)
{
  memcpy(x,y,n*sizeof(ogg_int32_t));
}
#endif
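
/* Usage sketch (disabled, not from the original source): applying a Q31
   window with ogg_vect_mult_fw above; assumes LOOKUP_T is ogg_int32_t in
   this configuration. */
#if 0
static void window_example(void)
{
    ogg_int32_t pcm[4] = { 1 << 30, 1 << 30, 1 << 30, 1 << 30 };  /* 0.5 */
    LOOKUP_T win[4]    = { 0, 1 << 29, 1 << 30, 0x7fffffff };     /* ramp */
    ogg_vect_mult_fw(pcm, win, 4);  /* pcm[i] = MULT31(pcm[i], win[i]) */
}
#endif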

static inline ogg_int32_t VFLOAT_MULT(ogg_int32_t a, ogg_int32_t ap,
                                      ogg_int32_t b, ogg_int32_t bp,
                                      ogg_int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p=ap+bp+32;
    return MULT32(a,b);
#else
    *p=ap+bp+31;
    return (a>>15)*(b>>16);
#endif
  }else
    return 0;
}
#endif
#endif
static inline ogg_int32_t VFLOAT_MULTI(ogg_int32_t a, ogg_int32_t ap,
                                       ogg_int32_t i,
                                       ogg_int32_t *p){

  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}
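
/* Illustration (disabled, not from the original source): the VFLOAT
   routines implement a pseudo-float, a 32-bit mantissa plus a separate
   exponent, so a value is mantissa * 2^exp. A minimal sketch, assuming
   VFLOAT_MULT is compiled in (it sits in an #if 0 block here): */
#if 0
static void vfloat_example(void)
{
    ogg_int32_t p;
    /* 0.5 * 0.5: each operand is mantissa 2^30 with exponent -31 */
    ogg_int32_t m = VFLOAT_MULT(1 << 30, -31, 1 << 30, -31, &p);
    /* result: m = 2^28, p = -31 + -31 + 32 = -30, so m * 2^p = 0.25 */
    (void)m; (void)p;
}
#endif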
#if 0
static inline ogg_int32_t VFLOAT_ADD(ogg_int32_t a, ogg_int32_t ap,
                                     ogg_int32_t b, ogg_int32_t bp,
                                     ogg_int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
#endif
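
/* Illustration (disabled, not from the original source): VFLOAT_ADD first
   aligns both mantissas to a common exponent (rounding the smaller one,
   the "leak" noted above), adds, then renormalizes the sum left by one
   bit when it has headroom. A minimal sketch under the same assumptions
   as the VFLOAT_MULT example: */
#if 0
static void vfloat_add_example(void)
{
    ogg_int32_t p;
    /* 0.5 + 0.25: mantissas 2^30 and 2^29, both with exponent -31 */
    ogg_int32_t m = VFLOAT_ADD(1 << 30, -31, 1 << 29, -31, &p);
    /* result: m = 0x60000000, p = -31, so m * 2^p = 0.75 */
    (void)m; (void)p;
}
#endif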
#endif