Diffstat (limited to 'apps/codecs/Tremor/misc.h')
-rw-r--r--  apps/codecs/Tremor/misc.h  |  241
1 file changed, 241 insertions, 0 deletions
diff --git a/apps/codecs/Tremor/misc.h b/apps/codecs/Tremor/misc.h
new file mode 100644
index 0000000000..ffa9c42f1b
--- /dev/null
+++ b/apps/codecs/Tremor/misc.h
@@ -0,0 +1,241 @@
/********************************************************************
 *                                                                  *
 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE.  *
 *                                                                  *
 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS    *
 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING.      *
 *                                                                  *
 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002   *
 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/                 *
 *                                                                  *
 ********************************************************************

 function: miscellaneous math and prototypes

 ********************************************************************/

#include "config.h"

#ifndef _V_RANDOM_H_
#define _V_RANDOM_H_
#include "ivorbiscodec.h"
#include "os_types.h"

#include "asm_arm.h"

#ifndef _V_WIDE_MATH
#define _V_WIDE_MATH

#ifndef _LOW_ACCURACY_
/* 64 bit multiply */

//#include <sys/types.h>

#if BYTE_ORDER==LITTLE_ENDIAN
union magic {
  struct {
    ogg_int32_t lo;
    ogg_int32_t hi;
  } halves;
  ogg_int64_t whole;
};
#endif

#if BYTE_ORDER==BIG_ENDIAN
union magic {
  struct {
    ogg_int32_t hi;
    ogg_int32_t lo;
  } halves;
  ogg_int64_t whole;
};
#endif

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return magic.halves.hi;
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return MULT32(x,y)<<1;
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  union magic magic;
  magic.whole = (ogg_int64_t)x * y;
  return ((ogg_uint32_t)(magic.halves.lo)>>15) | ((magic.halves.hi)<<17);
}
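
/*
 * Illustration (not in the original source): with x and y taken as Q31
 * fractions (value = integer / 2^31), MULT32 keeps the top word of the
 * 64 bit product, i.e. a Q31 x Q31 multiply returning Q30, and MULT31
 * shifts that back up to Q31.  For example, MULT31(0x40000000,
 * 0x40000000), i.e. 0.5 * 0.5, returns 0x20000000 (0.25).
 * MULT31_SHIFT15 returns the low 32 bits of (x*y)>>15, reassembled
 * from the two 32 bit halves of the product.
 */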

#else
/* 32 bit multiply, more portable but less accurate */

/*
 * Note: Precision is biased towards the first argument, so the ordering
 * of the arguments is important.  The shift values were chosen for the
 * best sound quality after many listening tests.
 */

/*
 * For MULT32 and MULT31: The second argument is always a lookup table
 * value already preshifted from 31 to 8 bits.  We therefore take the
 * opportunity to save on text space and use unsigned char for those
 * tables in this case.
 */

static inline ogg_int32_t MULT32(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 9) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 8) * y;  /* y preshifted >>23 */
}

static inline ogg_int32_t MULT31_SHIFT15(ogg_int32_t x, ogg_int32_t y) {
  return (x >> 6) * y;  /* y preshifted >>9 */
}
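
/*
 * Illustration (not in the original source): with the table value y
 * preshifted right by 23 bits, (x >> 9) * y approximates the exact
 * (x * y) >> 32 of the wide version above, trading the low-order bits
 * of both operands for a single 32 bit multiply.
 */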

#endif

/*
 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory.  It might or might not be
 * beneficial depending on the architecture and compiler.
 */
#define MB()
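
/*
 * Illustration (not in the original source): on GCC-style compilers a
 * pure compiler barrier could be written as
 *
 *     #define MB() asm volatile ("" : : : "memory")
 *
 * which emits no instructions but prevents the compiler from keeping
 * memory values cached in registers across the barrier.
 */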

/*
 * The XPROD functions are meant to optimize the cross products found all
 * over the place in mdct.c by forcing memory operation ordering to avoid
 * unnecessary register reloads as soon as memory is being written to.
 * However, this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86.  On Intel it is better
 * to let the compiler reload registers directly from the original memory
 * by using macros.
 */

#ifdef __i386__

#define XPROD32(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT32(_a,_t)+MULT32(_b,_v); \
    *(_y)=MULT32(_b,_t)-MULT32(_a,_v); }
#define XPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
#define XNPROD31(_a, _b, _t, _v, _x, _y) \
  { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
    *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }

#else

static inline void XPROD32(ogg_int32_t a, ogg_int32_t b,
                           ogg_int32_t t, ogg_int32_t v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT32(a, t) + MULT32(b, v);
  *y = MULT32(b, t) - MULT32(a, v);
}

static inline void XPROD31(ogg_int32_t a, ogg_int32_t b,
                           ogg_int32_t t, ogg_int32_t v,
                           ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) + MULT31(b, v);
  *y = MULT31(b, t) - MULT31(a, v);
}

static inline void XNPROD31(ogg_int32_t a, ogg_int32_t b,
                            ogg_int32_t t, ogg_int32_t v,
                            ogg_int32_t *x, ogg_int32_t *y)
{
  *x = MULT31(a, t) - MULT31(b, v);
  *y = MULT31(b, t) + MULT31(a, v);
}

#endif
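
/*
 * Illustration (not in the original source): treating (t,v) and (a,b)
 * as complex numbers, XPROD31 computes x + jy = (a + jb)*(t - jv) and
 * XNPROD31 computes x + jy = (a + jb)*(t + jv), i.e. fixed-point
 * complex multiplies by a twiddle factor and by its conjugate, the
 * kind of rotation used throughout mdct.c.
 */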

#endif

#ifndef _V_CLIP_MATH
#define _V_CLIP_MATH

static inline ogg_int32_t CLIP_TO_15(ogg_int32_t x) {
  int ret=x;
  ret-= ((x<=32767)-1)&(x-32767);
  ret-= ((x>=-32768)-1)&(x+32768);
  return(ret);
}
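
/*
 * Illustration (not in the original source): this clamps x to the
 * signed 16 bit range [-32768, 32767] without branching.  Each
 * comparison yields 0 or 1; subtracting 1 turns that into an all-ones
 * or all-zeros mask, so the correction term (x - 32767) or (x + 32768)
 * is subtracted only when the corresponding bound is exceeded.  E.g.
 * for x = 40000, ret = 40000 - (40000 - 32767) = 32767.
 */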

#endif

static inline ogg_int32_t VFLOAT_MULT(ogg_int32_t a,ogg_int32_t ap,
                                      ogg_int32_t b,ogg_int32_t bp,
                                      ogg_int32_t *p){
  if(a && b){
#ifndef _LOW_ACCURACY_
    *p=ap+bp+32;
    return MULT32(a,b);
#else
    *p=ap+bp+31;
    return (a>>15)*(b>>16);
#endif
  }else
    return 0;
}
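
/*
 * Illustration (not in the original source): the VFLOAT_* helpers work
 * on a pseudo floating point format, a 32 bit mantissa plus a separate
 * exponent, so a value is mantissa * 2^exponent.  Multiplying (a, ap)
 * by (b, bp) yields mantissa MULT32(a,b) = (a*b)>>32 with exponent
 * ap + bp + 32; the dropped factor of 2^-32 is folded into the
 * exponent, so the represented value is unchanged.
 */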

static inline ogg_int32_t VFLOAT_MULTI(ogg_int32_t a,ogg_int32_t ap,
                                       ogg_int32_t i,
                                       ogg_int32_t *p){

  int ip=_ilog(abs(i))-31;
  return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
}

static inline ogg_int32_t VFLOAT_ADD(ogg_int32_t a,ogg_int32_t ap,
                                     ogg_int32_t b,ogg_int32_t bp,
                                     ogg_int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit of precision. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;
    }else{
      a=0;
    }
  }

  a+=b;
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a<<=1;
    (*p)--;
  }
  return(a);
}
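
/*
 * Illustration (not in the original source): the addition first aligns
 * both operands to the larger exponent plus one (guarding against
 * overflow), rounding the smaller operand as it is shifted down, then
 * adds the mantissas.  The final test on the top two bits renormalizes
 * the result: if they are equal (00 or 11) the mantissa has headroom,
 * so it is shifted left one bit and the exponent decremented.
 */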

#endif