summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMichael Giacomelli <giac2000@hotmail.com>2008-09-04 18:03:30 +0000
committerMichael Giacomelli <giac2000@hotmail.com>2008-09-04 18:03:30 +0000
commit850adb40d732b69b94f5bb5f691b88aa14c075a0 (patch)
treed34595fb9b7f4955821615ee0f8062535d16b5db
parent46f85c4c547188d08fd90bad7734d38c654f13ea (diff)
downloadrockbox-850adb40d732b69b94f5bb5f691b88aa14c075a0.tar.gz
rockbox-850adb40d732b69b94f5bb5f691b88aa14c075a0.zip
More files that were missed in the previous commit.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@18413 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--apps/codecs/lib/asm_arm.h230
-rw-r--r--apps/codecs/lib/asm_mcf5249.h327
-rw-r--r--apps/codecs/lib/mdct2.c521
-rw-r--r--apps/codecs/lib/mdct2.h75
-rw-r--r--apps/codecs/lib/mdct_arm.S429
-rw-r--r--apps/codecs/lib/mdct_lookup.h544
-rw-r--r--apps/codecs/lib/misc.h291
7 files changed, 2417 insertions, 0 deletions
diff --git a/apps/codecs/lib/asm_arm.h b/apps/codecs/lib/asm_arm.h
new file mode 100644
index 0000000000..0db868dcb3
--- /dev/null
+++ b/apps/codecs/lib/asm_arm.h
@@ -0,0 +1,230 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: arm7 and later wide math functions
15
16 ********************************************************************/
17#ifdef CPU_ARM
18
19#if !defined(_V_WIDE_MATH) && !defined(_LOW_ACCURACY_)
20#define _V_WIDE_MATH
21
/* Return the upper 32 bits of the 64-bit product x*y, i.e. (x*y)>>32,
 * using the ARM smull (signed 32x32->64 multiply) instruction.
 * The low half is discarded. */
static inline int32_t MULT32(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull\t%0, %1, %2, %3"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)   /* "%": operands may be commuted */
               : "cc");
  return(hi);
}
30
/* Q31 fixed-point multiply: (x*y)>>31, obtained by doubling the
 * >>32 result of MULT32. */
static inline int32_t MULT31(int32_t x, int32_t y) {
  return MULT32(x,y)<<1;
}
34
/* Fixed-point multiply returning (x*y)>>15: smull produces the 64-bit
 * product, then bits [46:15] are assembled from lo>>15 and hi<<17.
 * The movs/adc pair folds the last bit shifted out of lo back in as
 * carry — presumably for rounding; confirm against the reference C. */
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  int lo,hi;
  asm volatile("smull  %0, %1, %2, %3\n\t"
               "movs   %0, %0, lsr #15\n\t"
               "adc    %1, %0, %1, lsl #17\n\t"
               : "=&r"(lo),"=&r"(hi)
               : "%r"(x),"r"(y)
               : "cc");
  return(hi);
}
45
46#define MB() asm volatile ("" : : : "memory")
47
/* Cross product, >>32 scaling:
 *   x = ( a*t + b*v ) >> 32
 *   y = ( b*t - a*v ) >> 32
 * Implemented with smull/smlal pairs; `a` is negated into a scratch
 * register ("rsb") so the subtraction can be done as an accumulate.
 * NOTE: `a` is listed as an output ("=r") and is clobbered. */
#define XPROD32(a, b, t, v, x, y) \
{ \
    long l; \
    asm( "smull %0, %1, %4, %6\n\t" \
         "smlal %0, %1, %5, %7\n\t" \
         "rsb %3, %4, #0\n\t" \
         "smull %0, %2, %5, %6\n\t" \
         "smlal %0, %2, %3, %7" \
         : "=&r" (l), "=&r" (x), "=&r" (y), "=r" ((a)) \
         : "3" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) \
         : "cc" ); \
}
60
/* Cross product, >>31 scaling (Q31):
 *   *x = ( a*t + b*v ) >> 31
 *   *y = ( b*t - a*v ) >> 31
 * Same instruction pattern as XPROD32; the <<1 on the stored values
 * converts the >>32 asm result to >>31. MB() is a compiler memory
 * barrier between the two stores. */
static inline void XPROD31(int32_t  a, int32_t  b,
                           int32_t  t, int32_t  v,
                           int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "smull %0, %1, %4, %6\n\t"
       "smlal %0, %1, %5, %7\n\t"
       "rsb %3, %4, #0\n\t"
       "smull %0, %2, %5, %6\n\t"
       "smlal %0, %2, %3, %7"
       : "=&r" (l), "=&r" (x1), "=&r" (y1), "=r" (a)
       : "3" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
78
/* Negated cross product, >>31 scaling (Q31):
 *   *x = ( a*t - b*v ) >> 31
 *   *y = ( b*t + a*v ) >> 31
 * Here `b` is negated ("rsb") into the y1 scratch register before the
 * multiplies; unlike XPROD31, no input operand is clobbered. */
static inline void XNPROD31(int32_t  a, int32_t  b,
                            int32_t  t, int32_t  v,
                            int32_t *x, int32_t *y)
{
  int x1, y1, l;
  asm( "rsb %2, %4, #0\n\t"
       "smull %0, %1, %3, %5\n\t"
       "smlal %0, %1, %2, %6\n\t"
       "smull %0, %2, %4, %5\n\t"
       "smlal %0, %2, %3, %6"
       : "=&r" (l), "=&r" (x1), "=&r" (y1)
       : "r" (a), "r" (b), "r" (t), "r" (v)
       : "cc" );
  *x = x1 << 1;
  MB();
  *y = y1 << 1;
}
96
97#ifndef _V_VECT_OPS
98#define _V_VECT_OPS
99
100/* asm versions of vector operations for block.c, window.c */
/* x[i] += y[i] for i in [0,n): four elements per iteration via
 * ldm/stm block transfers, then a scalar loop for the final 0-3. */
static inline
void vect_add(int32_t *x, int32_t *y, int n)
{
    while (n>=4) {
        /* load 4 from x and 4 from y, add pairwise, store back to x;
           y and x advance 16 bytes via the "!" writeback */
        asm volatile ("ldmia %[x], {r0, r1, r2, r3};"
                      "ldmia %[y]!, {r4, r5, r6, r7};"
                      "add r0, r0, r4;"
                      "add r1, r1, r5;"
                      "add r2, r2, r6;"
                      "add r3, r3, r7;"
                      "stmia %[x]!, {r0, r1, r2, r3};"
                      : [x] "+r" (x), [y] "+r" (y)
                      : : "r0", "r1", "r2", "r3",
                          "r4", "r5", "r6", "r7",
                          "memory");
        n -= 4;
    }
    /* add final elements */
    while (n>0) {
        *x++ += *y++;
        n--;
    }
}
124
/* Copy n int32 values from y to x (x is the destination), four at a
 * time with ldm/stm, then a scalar loop for the final 0-3. */
static inline
void vect_copy(int32_t *x, int32_t *y, int n)
{
    while (n>=4) {
        asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
                      "stmia %[x]!, {r0, r1, r2, r3};"
                      : [x] "+r" (x), [y] "+r" (y)
                      : : "r0", "r1", "r2", "r3",
                          "memory");
        n -= 4;
    }
    /* copy final elements */
    while (n>0) {
        *x++ = *y++;
        n--;
    }
}
142
/* In-place windowing, forward direction:
 *   data[i] = MULT31(data[i], window[i]) for i in [0,n).
 * Each smull keeps the high product word, and the lsl #1 turns the
 * >>32 result into the >>31 (Q31) product — same math as MULT31,
 * which the scalar tail loop uses for the final 0-3 elements. */
static inline
void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
    while (n>=4) {
        asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                      "ldmia %[w]!, {r4, r5, r6, r7;}"
                      "smull r8, r9, r0, r4;"
                      "mov   r0, r9, lsl #1;"
                      "smull r8, r9, r1, r5;"
                      "mov   r1, r9, lsl #1;"
                      "smull r8, r9, r2, r6;"
                      "mov   r2, r9, lsl #1;"
                      "smull r8, r9, r3, r7;"
                      "mov   r3, r9, lsl #1;"
                      "stmia %[d]!, {r0, r1, r2, r3};"
                      : [d] "+r" (data), [w] "+r" (window)
                      : : "r0", "r1", "r2", "r3",
                          "r4", "r5", "r6", "r7", "r8", "r9",
                          "memory", "cc");
        n -= 4;
    }
    while(n>0) {
        *data = MULT31(*data, *window);
        data++;
        window++;
        n--;
    }
}
171
/* In-place windowing, backward direction: like vect_mult_fw but the
 * window pointer walks downward (ldmda = load multiple, decrement
 * after), so data[0] pairs with the highest loaded window word (r7),
 * data[1] with r6, etc. The scalar tail decrements window. */
static inline
void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
    while (n>=4) {
        asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
                      "ldmda %[w]!, {r4, r5, r6, r7};"
                      "smull r8, r9, r0, r7;"
                      "mov   r0, r9, lsl #1;"
                      "smull r8, r9, r1, r6;"
                      "mov   r1, r9, lsl #1;"
                      "smull r8, r9, r2, r5;"
                      "mov   r2, r9, lsl #1;"
                      "smull r8, r9, r3, r4;"
                      "mov   r3, r9, lsl #1;"
                      "stmia %[d]!, {r0, r1, r2, r3};"
                      : [d] "+r" (data), [w] "+r" (window)
                      : : "r0", "r1", "r2", "r3",
                          "r4", "r5", "r6", "r7", "r8", "r9",
                          "memory", "cc");
        n -= 4;
    }
    while(n>0) {
        *data = MULT31(*data, *window);
        data++;
        window--;
        n--;
    }
}
200
201#endif
202
203#endif
204
205#ifndef _V_CLIP_MATH
206#define _V_CLIP_MATH
207
/* Clamp x to the signed 16-bit range using conditional execution:
 * if x - 32768 is non-negative, x is forced to 0x7fff (the two-step
 * movpl/orrpl builds the constant, which won't fit one ARM immediate);
 * if x + 32768 is negative, x is set via "mov #0x8000".
 * NOTE(review): mov #0x8000 leaves x = +32768 in the 32-bit register,
 * not -32768; presumably callers only use the low 16 bits — confirm. */
static inline int32_t CLIP_TO_15(int32_t x) {
  int tmp;
  asm volatile("subs  %1, %0, #32768\n\t"
               "movpl %0, #0x7f00\n\t"
               "orrpl %0, %0, #0xff\n"
               "adds  %1, %0, #32768\n\t"
               "movmi %0, #0x8000"
               : "+r"(x),"=r"(tmp)
               :
               : "cc");
  return(x);
}
220
221#endif
222
223#ifndef _V_LSP_MATH_ASM
224#define _V_LSP_MATH_ASM
225
226
227
228#endif
229#endif
230
diff --git a/apps/codecs/lib/asm_mcf5249.h b/apps/codecs/lib/asm_mcf5249.h
new file mode 100644
index 0000000000..20899f0a5b
--- /dev/null
+++ b/apps/codecs/lib/asm_mcf5249.h
@@ -0,0 +1,327 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 *
9 * Copyright (C) 2005 by Pedro Vasconcelos
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 *
16 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
17 * KIND, either express or implied.
18 *
19 ****************************************************************************/
20/* asm routines for wide math on the MCF5249 */
21
22//#include "os_types.h"
23
24#if defined(CPU_COLDFIRE)
25
26/* attribute for 16-byte alignment */
27#define LINE_ATTR __attribute__ ((aligned (16)))
28
29#ifndef _V_WIDE_MATH
30#define _V_WIDE_MATH
31
32#define MB()
33
/* ColdFire EMAC version of MULT32: return (x*y)>>32.
 * Assumes the EMAC unit is in fractional mode (the accumulator holds
 * the product shifted left by one, consistent with MULT31 below using
 * the raw accumulator as the >>31 result); asr #1 undoes that shift. */
static inline int32_t MULT32(int32_t x, int32_t y) {

  asm volatile ("mac.l %[x], %[y], %%acc0;"  /* multiply & shift  */
                "movclr.l %%acc0, %[x];"     /* move & clear acc  */
                "asr.l #1, %[x];"            /* no overflow test  */
                : [x] "+&d" (x)
                : [y] "r" (y)
                : "cc");
  return x;
}
44
/* ColdFire EMAC version of MULT31: Q31 multiply, (x*y)>>31.
 * In fractional mode the accumulator already holds the >>31 result,
 * so a single mac.l / movclr.l pair suffices. */
static inline int32_t MULT31(int32_t x, int32_t y) {

  asm volatile ("mac.l %[x], %[y], %%acc0;"  /* multiply       */
                "movclr.l %%acc0, %[x];"     /* move and clear */
                : [x] "+&r" (x)
                : [y] "r" (y)
                : "cc");
  return x;
}
54
55
/* ColdFire version of (x*y)>>15: the EMAC supplies the high half of
 * the product while a plain mulu.l (issued in between to avoid an
 * EMAC result stall) supplies the low half; the two asl/lsr pairs
 * position hi<<16 and lo>>15 before or-ing them together. */
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  int32_t r;

  asm volatile ("mac.l %[x], %[y], %%acc0;"  /* multiply */
                "mulu.l %[y], %[x];"  /* get lower half, avoid emac stall */
                "movclr.l %%acc0, %[r];"     /* get higher half */
                "asl.l #8, %[r];"            /* hi<<16, plus one free */
                "asl.l #8, %[r];"
                "lsr.l #8, %[x];"            /* (unsigned)lo >> 15 */
                "lsr.l #7, %[x];"
                "or.l %[x], %[r];"           /* logical-or results */
                : [r] "=&d" (r), [x] "+d" (x)
                : [y] "d" (y)
                : "cc");
  return r;
}
72
73
/* ColdFire cross product, Q31 results (cf. the ARM XPROD31):
 *   *x = (a*t + b*v) >> 31   (acc0: mac + mac)
 *   *y = (b*t - a*v) >> 31   (acc1: mac + msac)
 * Both accumulators run in parallel; `a` is reused as the scratch
 * register for the stores and is therefore clobbered. */
static inline
void XPROD31(int32_t  a, int32_t  b,
             int32_t  t, int32_t  v,
             int32_t *x, int32_t *y)
{
  asm volatile ("mac.l %[a], %[t], %%acc0;"
                "mac.l %[b], %[v], %%acc0;"
                "mac.l %[b], %[t], %%acc1;"
                "msac.l %[a], %[v], %%acc1;"
                "movclr.l %%acc0, %[a];"
                "move.l %[a], (%[x]);"
                "movclr.l %%acc1, %[a];"
                "move.l %[a], (%[y]);"
                : [a] "+&r" (a)
                : [x] "a" (x), [y] "a" (y),
                  [b] "r" (b), [t] "r" (t), [v] "r" (v)
                : "cc", "memory");
}
92
93
/* ColdFire negated cross product, Q31 results (cf. ARM XNPROD31):
 *   *x = (a*t - b*v) >> 31   (acc0: mac + msac)
 *   *y = (b*t + a*v) >> 31   (acc1: mac + mac)
 * `a` doubles as the store scratch register and is clobbered. */
static inline
void XNPROD31(int32_t  a, int32_t  b,
              int32_t  t, int32_t  v,
              int32_t *x, int32_t *y)
{
  asm volatile ("mac.l %[a], %[t], %%acc0;"
                "msac.l %[b], %[v], %%acc0;"
                "mac.l %[b], %[t], %%acc1;"
                "mac.l %[a], %[v], %%acc1;"
                "movclr.l %%acc0, %[a];"
                "move.l %[a], (%[x]);"
                "movclr.l %%acc1, %[a];"
                "move.l %[a], (%[y]);"
                : [a] "+&r" (a)
                : [x] "a" (x), [y] "a" (y),
                  [b] "r" (b), [t] "r" (t), [v] "r" (v)
                : "cc", "memory");
}
112
113
114#if 0 /* canonical Tremor definition */
115#define XPROD32(_a, _b, _t, _v, _x, _y) \
116 { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
117 (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
118#endif
119
120/* this could lose the LSB by overflow, but i don't think it'll ever happen.
121 if anyone think they can hear a bug caused by this, please try the above
122 version. */
/* ColdFire XPROD32:
 *   _x = (_a*_t + _b*_v) >> 32,   _y = (_b*_t - _a*_v) >> 32
 * Like XPROD31 but with an extra asr #1 on each accumulator result
 * to convert >>31 to >>32; see the note above about the possible LSB
 * loss relative to the canonical Tremor definition. */
#define XPROD32(_a, _b, _t, _v, _x, _y)      \
  asm volatile ("mac.l %[a], %[t], %%acc0;"  \
                "mac.l %[b], %[v], %%acc0;"  \
                "mac.l %[b], %[t], %%acc1;"  \
                "msac.l %[a], %[v], %%acc1;" \
                "movclr.l %%acc0, %[x];"     \
                "asr.l #1, %[x];"            \
                "movclr.l %%acc1, %[y];"     \
                "asr.l #1, %[y];"            \
                : [x] "=&d" (_x), [y] "=&d" (_y) \
                : [a] "r" (_a), [b] "r" (_b),    \
                  [t] "r" (_t), [v] "r" (_v)     \
                : "cc");
136
137#ifndef _V_VECT_OPS
138#define _V_VECT_OPS
139
140/* asm versions of vector operations for block.c, window.c */
141/* assumes MAC is initialized & accumulators cleared */
142static inline
143void vect_add(int32_t *x, int32_t *y, int n)
144{
145 /* align to 16 bytes */
146 while(n>0 && (int)x&16) {
147 *x++ += *y++;
148 n--;
149 }
150 asm volatile ("bra 1f;"
151 "0:" /* loop start */
152 "movem.l (%[x]), %%d0-%%d3;" /* fetch values */
153 "movem.l (%[y]), %%a0-%%a3;"
154 /* add */
155 "add.l %%a0, %%d0;"
156 "add.l %%a1, %%d1;"
157 "add.l %%a2, %%d2;"
158 "add.l %%a3, %%d3;"
159 /* store and advance */
160 "movem.l %%d0-%%d3, (%[x]);"
161 "lea.l (4*4, %[x]), %[x];"
162 "lea.l (4*4, %[y]), %[y];"
163 "subq.l #4, %[n];" /* done 4 elements */
164 "1: cmpi.l #4, %[n];"
165 "bge 0b;"
166 : [n] "+d" (n), [x] "+a" (x), [y] "+a" (y)
167 : : "%d0", "%d1", "%d2", "%d3", "%a0", "%a1", "%a2", "%a3",
168 "cc", "memory");
169 /* add final elements */
170 while (n>0) {
171 *x++ += *y++;
172 n--;
173 }
174}
175
176static inline
177void vect_copy(int32_t *x, int32_t *y, int n)
178{
179 /* align to 16 bytes */
180 while(n>0 && (int)x&16) {
181 *x++ = *y++;
182 n--;
183 }
184 asm volatile ("bra 1f;"
185 "0:" /* loop start */
186 "movem.l (%[y]), %%d0-%%d3;" /* fetch values */
187 "movem.l %%d0-%%d3, (%[x]);" /* store */
188 "lea.l (4*4, %[x]), %[x];" /* advance */
189 "lea.l (4*4, %[y]), %[y];"
190 "subq.l #4, %[n];" /* done 4 elements */
191 "1: cmpi.l #4, %[n];"
192 "bge 0b;"
193 : [n] "+d" (n), [x] "+a" (x), [y] "+a" (y)
194 : : "%d0", "%d1", "%d2", "%d3", "cc", "memory");
195 /* copy final elements */
196 while (n>0) {
197 *x++ = *y++;
198 n--;
199 }
200}
201
202
/* In-place forward windowing: data[i] = MULT31(data[i], window[i]).
 * After the scalar loop aligns `data` to 16 bytes, the asm runs a
 * software-pipelined loop: each mac.l multiplies a pre-fetched pair
 * while loading the next window word, using acc0-acc3 for four
 * independent products.  Unlike the ARM version, the final 0-3
 * elements are also handled inside the asm, reusing the values that
 * were pre-fetched for the (not taken) next iteration. */
static inline
void vect_mult_fw(int32_t *data, int32_t *window, int n)
{
  /* ensure data is aligned to 16-bytes */
  while(n>0 && (int)data%16) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
  asm volatile ("movem.l (%[d]), %%d0-%%d3;"  /* loop start */
                "movem.l (%[w]), %%a0-%%a3;"  /* pre-fetch registers */
                "lea.l (4*4, %[w]), %[w];"
                "bra 1f;"                     /* jump to loop condition */
                "0:"                          /* loop body */
                /* multiply and load next window values */
                "mac.l %%d0, %%a0, (%[w])+, %%a0, %%acc0;"
                "mac.l %%d1, %%a1, (%[w])+, %%a1, %%acc1;"
                "mac.l %%d2, %%a2, (%[w])+, %%a2, %%acc2;"
                "mac.l %%d3, %%a3, (%[w])+, %%a3, %%acc3;"
                "movclr.l %%acc0, %%d0;"      /* get the products */
                "movclr.l %%acc1, %%d1;"
                "movclr.l %%acc2, %%d2;"
                "movclr.l %%acc3, %%d3;"
                /* store and advance */
                "movem.l %%d0-%%d3, (%[d]);"
                "lea.l (4*4, %[d]), %[d];"
                "movem.l (%[d]), %%d0-%%d3;"
                "subq.l #4, %[n];"            /* done 4 elements */
                "1: cmpi.l #4, %[n];"
                "bge 0b;"
                /* multiply final elements */
                "tst.l %[n];"
                "beq 1f;"                     /* n=0 */
                "mac.l %%d0, %%a0, %%acc0;"
                "movclr.l %%acc0, %%d0;"
                "move.l %%d0, (%[d])+;"
                "subq.l #1, %[n];"
                "beq 1f;"                     /* n=1 */
                "mac.l %%d1, %%a1, %%acc0;"
                "movclr.l %%acc0, %%d1;"
                "move.l %%d1, (%[d])+;"
                "subq.l #1, %[n];"
                "beq 1f;"                     /* n=2 */
                /* otherwise n = 3 */
                "mac.l %%d2, %%a2, %%acc0;"
                "movclr.l %%acc0, %%d2;"
                "move.l %%d2, (%[d])+;"
                "1:"
                : [n] "+d" (n), [d] "+a" (data), [w] "+a" (window)
                : : "%d0", "%d1", "%d2", "%d3", "%a0", "%a1", "%a2", "%a3",
                    "cc", "memory");
}
256
/* In-place backward windowing: data[i] = MULT31(data[i], window[-i]),
 * i.e. the window pointer walks downward.  The lea.l (-3*4, w) plus
 * movem load window[-3..0] ascending into a0-a3, so data[0] pairs
 * with a3 (the original *window), data[1] with a2, and so on; each
 * mac.l then pre-decrements w for the next group.  The asm tail
 * handles the final 0-3 elements like vect_mult_fw. */
static inline
void vect_mult_bw(int32_t *data, int32_t *window, int n)
{
  /* ensure at least data is aligned to 16-bytes */
  while(n>0 && (int)data%16) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
  asm volatile ("lea.l (-3*4, %[w]), %[w];"   /* loop start */
                "movem.l (%[d]), %%d0-%%d3;"  /* pre-fetch registers */
                "movem.l (%[w]), %%a0-%%a3;"
                "bra 1f;"                     /* jump to loop condition */
                "0:"                          /* loop body */
                /* multiply and load next window value */
                "mac.l %%d0, %%a3, -(%[w]), %%a3, %%acc0;"
                "mac.l %%d1, %%a2, -(%[w]), %%a2, %%acc1;"
                "mac.l %%d2, %%a1, -(%[w]), %%a1, %%acc2;"
                "mac.l %%d3, %%a0, -(%[w]), %%a0, %%acc3;"
                "movclr.l %%acc0, %%d0;"      /* get the products */
                "movclr.l %%acc1, %%d1;"
                "movclr.l %%acc2, %%d2;"
                "movclr.l %%acc3, %%d3;"
                /* store and advance */
                "movem.l %%d0-%%d3, (%[d]);"
                "lea.l (4*4, %[d]), %[d];"
                "movem.l (%[d]), %%d0-%%d3;"
                "subq.l #4, %[n];"            /* done 4 elements */
                "1: cmpi.l #4, %[n];"
                "bge 0b;"
                /* multiply final elements */
                "tst.l %[n];"
                "beq 1f;"                     /* n=0 */
                "mac.l %%d0, %%a3, %%acc0;"
                "movclr.l %%acc0, %%d0;"
                "move.l %%d0, (%[d])+;"
                "subq.l #1, %[n];"
                "beq 1f;"                     /* n=1 */
                "mac.l %%d1, %%a2, %%acc0;"
                "movclr.l %%acc0, %%d1;"
                "move.l %%d1, (%[d])+;"
                "subq.l #1, %[n];"
                "beq 1f;"                     /* n=2 */
                /* otherwise n = 3 */
                "mac.l %%d2, %%a1, %%acc0;"
                "movclr.l %%acc0, %%d2;"
                "move.l %%d2, (%[d])+;"
                "1:"
                : [n] "+d" (n), [d] "+a" (data), [w] "+a" (window)
                : : "%d0", "%d1", "%d2", "%d3", "%a0", "%a1", "%a2", "%a3",
                    "cc", "memory");
}
310
311#endif
312
313#endif
314
315#ifndef _V_CLIP_MATH
316#define _V_CLIP_MATH
317
/* Clamp x to the signed 16-bit range [-32768, 32767].
 * Plain portable C; used when no asm version is available. */
static inline int32_t CLIP_TO_15(register int32_t x) {
  if (x >= 32767)
    return 32767;
  if (x <= -32768)
    return -32768;
  return x;
}
323
324#endif
325#else
326#define LINE_ATTR
327#endif
diff --git a/apps/codecs/lib/mdct2.c b/apps/codecs/lib/mdct2.c
new file mode 100644
index 0000000000..c38f92db2c
--- /dev/null
+++ b/apps/codecs/lib/mdct2.c
@@ -0,0 +1,521 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: normalized modified discrete cosine transform
15 power of two length transform only [64 <= n ]
16
17
18 Original algorithm adapted long ago from _The use of multirate filter
19 banks for coding of high quality digital audio_, by T. Sporer,
20 K. Brandenburg and B. Edler, collection of the European Signal
21 Processing Conference (EUSIPCO), Amsterdam, June 1992, Vol.1, pp
22 211-214
23
24 The below code implements an algorithm that no longer looks much like
25 that presented in the paper, but the basic structure remains if you
26 dig deep enough to see it.
27
28 This module DOES NOT INCLUDE code to generate/apply the window
29 function. Everybody has their own weird favorite including me... I
30 happen to like the properties of y=sin(.5PI*sin^2(x)), but others may
31 vehemently disagree.
32
33 ********************************************************************/
34
35/*Tremor IMDCT adapted for use with libwmai*/
36
37
38#include "mdct2.h"
39#include "mdct_lookup.h"
40#include <codecs/lib/codeclib.h>
41
42#if defined(CPU_ARM) && CONFIG_CPU != S3C2440
43/* C code is faster on S3C2440 */
44
45extern void mdct_butterfly_32(int32_t *x);
46extern void mdct_butterfly_generic_loop(int32_t *x1, int32_t *x2,
47 const int32_t *T0, int step,
48 const int32_t *Ttop);
49
/* Thin wrapper dispatching one generic butterfly stage to the ARM
 * assembly loop in mdct_arm.S.  The trig table runs from
 * sincos_lookup0 to sincos_lookup0+1024, traversed with `step`. */
static inline void mdct_butterfly_generic(int32_t *x,int points, int step){
    mdct_butterfly_generic_loop(x + points, x + (points>>1), sincos_lookup0, step, sincos_lookup0+1024);
}
53
54#else
55
/* 8 point butterfly (in place): forms pairwise sums and differences of
 * x[k] and x[k+4], then recombines them into the output ordering.
 * MB() is a compiler memory barrier after the stores. */
static inline void mdct_butterfly_8(int32_t *x){
    register int32_t sum04  = x[4] + x[0];
    register int32_t diff04 = x[4] - x[0];
    register int32_t sum15  = x[5] + x[1];
    register int32_t diff15 = x[5] - x[1];
    register int32_t sum26  = x[6] + x[2];
    register int32_t diff26 = x[6] - x[2];
    register int32_t sum37  = x[7] + x[3];
    register int32_t diff37 = x[7] - x[3];

    x[0] = diff26 + diff15;
    x[1] = diff37 - diff04;
    x[2] = diff26 - diff15;
    x[3] = diff37 + diff04;
    x[4] = sum26  - sum04;
    x[5] = sum37  - sum15;
    x[6] = sum26  + sum04;
    x[7] = sum37  + sum15;
    MB();
}
77
/* 16 point butterfly (in place, 4 register): combines each element of
 * the lower half with its partner in the upper half (sums stay in the
 * upper half), rotating two of the difference pairs by pi/4 via the
 * Q31 constant cPI2_8, then runs an 8-point butterfly on each half. */
static inline void mdct_butterfly_16(int32_t *x){

    register int32_t r0, r1;

    r0 = x[ 0] - x[ 8]; x[ 8] += x[ 0];
    r1 = x[ 1] - x[ 9]; x[ 9] += x[ 1];
    x[ 0] = MULT31((r0 + r1) , cPI2_8);  /* rotate difference by pi/4 */
    x[ 1] = MULT31((r1 - r0) , cPI2_8);
    MB();

    r0 = x[10] - x[ 2]; x[10] += x[ 2];
    r1 = x[ 3] - x[11]; x[11] += x[ 3];
    x[ 2] = r1; x[ 3] = r0;              /* rotation by pi/2 is a swap */
    MB();

    r0 = x[12] - x[ 4]; x[12] += x[ 4];
    r1 = x[13] - x[ 5]; x[13] += x[ 5];
    x[ 4] = MULT31((r0 - r1) , cPI2_8);
    x[ 5] = MULT31((r0 + r1) , cPI2_8);
    MB();

    r0 = x[14] - x[ 6]; x[14] += x[ 6];
    r1 = x[15] - x[ 7]; x[15] += x[ 7];
    x[ 6] = r0; x[ 7] = r1;
    MB();

    mdct_butterfly_8(x);
    mdct_butterfly_8(x+8);
}
108
/* 32 point butterfly (in place, 4 register): pairs each lower-half
 * element with its upper-half partner (sums accumulate in the upper
 * half), rotating the differences by multiples of pi/8 using the
 * cPI1_8/cPI2_8/cPI3_8 constants and the XPROD31/XNPROD31 helpers,
 * then runs a 16-point butterfly on each half. */
static inline void mdct_butterfly_32(int32_t *x){

    register int32_t r0, r1;

    r0 = x[30] - x[14]; x[30] += x[14];
    r1 = x[31] - x[15]; x[31] += x[15];
    x[14] = r0; x[15] = r1;
    MB();

    r0 = x[28] - x[12]; x[28] += x[12];
    r1 = x[29] - x[13]; x[29] += x[13];
    XNPROD31( r0, r1, cPI1_8, cPI3_8, &x[12], &x[13] );
    MB();

    r0 = x[26] - x[10]; x[26] += x[10];
    r1 = x[27] - x[11]; x[27] += x[11];
    x[10] = MULT31((r0 - r1) , cPI2_8);
    x[11] = MULT31((r0 + r1) , cPI2_8);
    MB();

    r0 = x[24] - x[ 8]; x[24] += x[ 8];
    r1 = x[25] - x[ 9]; x[25] += x[ 9];
    XNPROD31( r0, r1, cPI3_8, cPI1_8, &x[ 8], &x[ 9] );
    MB();

    r0 = x[22] - x[ 6]; x[22] += x[ 6];
    r1 = x[ 7] - x[23]; x[23] += x[ 7];
    x[ 6] = r1; x[ 7] = r0;
    MB();

    r0 = x[ 4] - x[20]; x[20] += x[ 4];
    r1 = x[ 5] - x[21]; x[21] += x[ 5];
    XPROD31 ( r0, r1, cPI3_8, cPI1_8, &x[ 4], &x[ 5] );
    MB();

    r0 = x[ 2] - x[18]; x[18] += x[ 2];
    r1 = x[ 3] - x[19]; x[19] += x[ 3];
    x[ 2] = MULT31((r1 + r0) , cPI2_8);
    x[ 3] = MULT31((r1 - r0) , cPI2_8);
    MB();

    r0 = x[ 0] - x[16]; x[16] += x[ 0];
    r1 = x[ 1] - x[17]; x[17] += x[ 1];
    XPROD31 ( r0, r1, cPI1_8, cPI3_8, &x[ 0], &x[ 1] );
    MB();

    mdct_butterfly_16(x);
    mdct_butterfly_16(x+16);
}
159
/* N/stage point generic N stage butterfly (in place, 4 register).
 * C fallback for non-ARM targets.  Walks x1 (upper half) and x2
 * (lower half) downward, 8 elements per iteration, combining pairs
 * and rotating the differences with sin/cos values read from
 * sincos_lookup0 at stride `step`.  The four do/while loops cover the
 * four quadrants: T sweeps up the table, back down, up, and down
 * again, with the XPROD31/XNPROD31 variant and operand order chosen
 * per quadrant so the table only needs to span a quarter cycle. */
void mdct_butterfly_generic(int32_t *x,int points, int step)
    ICODE_ATTR_TREMOR_MDCT;
void mdct_butterfly_generic(int32_t *x,int points, int step){
    const int32_t *T = sincos_lookup0;
    int32_t *x1 = x + points      - 8;
    int32_t *x2 = x + (points>>1) - 8;
    register int32_t r0;
    register int32_t r1;
    register int32_t r2;
    register int32_t r3;

    /* quadrant 1: T ascending, XPROD31 */
    do{
        r0 = x1[6] - x2[6]; x1[6] += x2[6];
        r1 = x2[7] - x1[7]; x1[7] += x2[7];
        r2 = x1[4] - x2[4]; x1[4] += x2[4];
        r3 = x2[5] - x1[5]; x1[5] += x2[5];
        XPROD31( r1, r0, T[0], T[1], &x2[6], &x2[7] ); T+=step;
        XPROD31( r3, r2, T[0], T[1], &x2[4], &x2[5] ); T+=step;

        r0 = x1[2] - x2[2]; x1[2] += x2[2];
        r1 = x2[3] - x1[3]; x1[3] += x2[3];
        r2 = x1[0] - x2[0]; x1[0] += x2[0];
        r3 = x2[1] - x1[1]; x1[1] += x2[1];
        XPROD31( r1, r0, T[0], T[1], &x2[2], &x2[3] ); T+=step;
        XPROD31( r3, r2, T[0], T[1], &x2[0], &x2[1] ); T+=step;

        x1-=8; x2-=8;
    }while(T<sincos_lookup0+1024);
    /* quadrant 2: T descending, XNPROD31 */
    do{
        r0 = x1[6] - x2[6]; x1[6] += x2[6];
        r1 = x1[7] - x2[7]; x1[7] += x2[7];
        r2 = x1[4] - x2[4]; x1[4] += x2[4];
        r3 = x1[5] - x2[5]; x1[5] += x2[5];
        XNPROD31( r0, r1, T[0], T[1], &x2[6], &x2[7] ); T-=step;
        XNPROD31( r2, r3, T[0], T[1], &x2[4], &x2[5] ); T-=step;

        r0 = x1[2] - x2[2]; x1[2] += x2[2];
        r1 = x1[3] - x2[3]; x1[3] += x2[3];
        r2 = x1[0] - x2[0]; x1[0] += x2[0];
        r3 = x1[1] - x2[1]; x1[1] += x2[1];
        XNPROD31( r0, r1, T[0], T[1], &x2[2], &x2[3] ); T-=step;
        XNPROD31( r2, r3, T[0], T[1], &x2[0], &x2[1] ); T-=step;

        x1-=8; x2-=8;
    }while(T>sincos_lookup0);
    /* quadrant 3: T ascending again, differences negated */
    do{
        r0 = x2[6] - x1[6]; x1[6] += x2[6];
        r1 = x2[7] - x1[7]; x1[7] += x2[7];
        r2 = x2[4] - x1[4]; x1[4] += x2[4];
        r3 = x2[5] - x1[5]; x1[5] += x2[5];
        XPROD31( r0, r1, T[0], T[1], &x2[6], &x2[7] ); T+=step;
        XPROD31( r2, r3, T[0], T[1], &x2[4], &x2[5] ); T+=step;

        r0 = x2[2] - x1[2]; x1[2] += x2[2];
        r1 = x2[3] - x1[3]; x1[3] += x2[3];
        r2 = x2[0] - x1[0]; x1[0] += x2[0];
        r3 = x2[1] - x1[1]; x1[1] += x2[1];
        XPROD31( r0, r1, T[0], T[1], &x2[2], &x2[3] ); T+=step;
        XPROD31( r2, r3, T[0], T[1], &x2[0], &x2[1] ); T+=step;

        x1-=8; x2-=8;
    }while(T<sincos_lookup0+1024);
    /* quadrant 4: T descending, swapped operands into XNPROD31 */
    do{
        r0 = x1[6] - x2[6]; x1[6] += x2[6];
        r1 = x2[7] - x1[7]; x1[7] += x2[7];
        r2 = x1[4] - x2[4]; x1[4] += x2[4];
        r3 = x2[5] - x1[5]; x1[5] += x2[5];
        XNPROD31( r1, r0, T[0], T[1], &x2[6], &x2[7] ); T-=step;
        XNPROD31( r3, r2, T[0], T[1], &x2[4], &x2[5] ); T-=step;

        r0 = x1[2] - x2[2]; x1[2] += x2[2];
        r1 = x2[3] - x1[3]; x1[3] += x2[3];
        r2 = x1[0] - x2[0]; x1[0] += x2[0];
        r3 = x2[1] - x1[1]; x1[1] += x2[1];
        XNPROD31( r1, r0, T[0], T[1], &x2[2], &x2[3] ); T-=step;
        XNPROD31( r3, r2, T[0], T[1], &x2[0], &x2[1] ); T-=step;

        x1-=8; x2-=8;
    }while(T>sincos_lookup0);
}
241
242#endif /* CPU_ARM */
243
/* Run all butterfly stages over a `points`-element buffer: for each of
 * the (8-shift-1) generic stages, stage i splits the buffer into 2^i
 * sub-blocks of points>>i elements with table stride 4<<(i+shift),
 * then the fixed 32-point butterflies finish the recursion. */
static inline void mdct_butterflies(int32_t *x,int points,int shift) {

    int stages=8-shift;
    int i,j;

    for(i=0;--stages>0;i++){
        for(j=0;j<(1<<i);j++)
            mdct_butterfly_generic(x+(points>>i)*j,points>>i,4<<(i+shift));
    }

    for(j=0;j<points;j+=32)
        mdct_butterfly_32(x+j);
}
257
258
/* 4-bit bit-reversal table: bitrev[i] is i with its four bits reversed
 * (e.g. 0001 -> 1000).  Used nibble-wise by bitrev12 below. */
static const unsigned char bitrev[16] ICONST_ATTR =
    {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
261
262static inline int bitrev12(int x){
263 return bitrev[x>>8]|(bitrev[(x&0x0f0)>>4]<<4)|(((int)bitrev[x&0x00f])<<8);
264}
265
/* Bit-reversal pass combined with a complex rotation.  Reads element
 * pairs from the upper half of x at bit-reversed positions (bitrev12
 * of an incrementing counter, split into an x0/x1 mirror pair), forms
 * sum/difference terms, rotates them by the table values via XPROD32,
 * and writes the results outward from both ends (w0 ascending from
 * the bottom, w1 descending from the middle).  The first loop sweeps
 * T up to Ttop with T[1],T[0] operand order; the second sweeps T back
 * down with the order swapped, until the two write pointers meet. */
static inline void mdct_bitreverse(int32_t *x,int n,int step,int shift) {

    int          bit   = 0;
    int32_t  *w0    = x;
    int32_t  *w1    = x = w0+(n>>1);
    const int32_t *T = (step>=4)?(sincos_lookup0+(step>>1)):sincos_lookup1;
    const int32_t *Ttop  = T+1024;
    register int32_t    r2;

    do{
        /* two bit-reversed source pairs per iteration */
        register int32_t r3     = bitrev12(bit++);
        int32_t *x0    = x + ((r3 ^ 0xfff)>>shift) -1;
        int32_t *x1    = x + (r3>>shift);

        register int32_t  r0     = x0[0]  + x1[0];
        register int32_t  r1     = x1[1]  - x0[1];

        XPROD32( r0, r1, T[1], T[0], r2, r3 ); T+=step;

        w1    -= 4;

        r0     = (x0[1] + x1[1])>>1;
        r1     = (x0[0] - x1[0])>>1;
        w0[0]  = r0     + r2;
        w0[1]  = r1     + r3;
        w1[2]  = r0     - r2;
        w1[3]  = r3     - r1;

        r3     = bitrev12(bit++);
        x0     = x + ((r3 ^ 0xfff)>>shift) -1;
        x1     = x + (r3>>shift);

        r0     = x0[0]  + x1[0];
        r1     = x1[1]  - x0[1];

        XPROD32( r0, r1, T[1], T[0], r2, r3 ); T+=step;

        r0     = (x0[1] + x1[1])>>1;
        r1     = (x0[0] - x1[0])>>1;
        w0[2]  = r0     + r2;
        w0[3]  = r1     + r3;
        w1[0]  = r0     - r2;
        w1[1]  = r3     - r1;

        w0    += 4;
    }while(T<Ttop);
    do{
        /* second half-sweep: T descends, operand order T[0],T[1] */
        register int32_t r3     = bitrev12(bit++);
        int32_t *x0    = x + ((r3 ^ 0xfff)>>shift) -1;
        int32_t *x1    = x + (r3>>shift);

        register int32_t  r0     = x0[0]  + x1[0];
        register int32_t  r1     = x1[1]  - x0[1];

        T-=step; XPROD32( r0, r1, T[0], T[1], r2, r3 );

        w1    -= 4;

        r0     = (x0[1] + x1[1])>>1;
        r1     = (x0[0] - x1[0])>>1;
        w0[0]  = r0     + r2;
        w0[1]  = r1     + r3;
        w1[2]  = r0     - r2;
        w1[3]  = r3     - r1;

        r3     = bitrev12(bit++);
        x0     = x + ((r3 ^ 0xfff)>>shift) -1;
        x1     = x + (r3>>shift);

        r0     = x0[0]  + x1[0];
        r1     = x1[1]  - x0[1];

        T-=step; XPROD32( r0, r1, T[0], T[1], r2, r3 );

        r0     = (x0[1] + x1[1])>>1;
        r1     = (x0[0] - x1[0])>>1;
        w0[2]  = r0     + r2;
        w0[3]  = r1     + r3;
        w1[0]  = r0     - r2;
        w1[1]  = r3     - r1;

        w0    += 4;
    }while(w0<w1);
}
350
351
/* Inverse (backward) MDCT of length n (a power of two; the file
 * header states 64 <= n).  `in` holds n/2 spectral coefficients in
 * every other slot (stride 2); `out` receives n output samples.
 * Window application is NOT included (see file header).
 *
 * Stages:
 *   1. pre-rotation of the input into out[n/2..n) (two loop pairs,
 *      sweeping the quarter-cycle sin/cos table up then down),
 *   2. mdct_butterflies over the upper half,
 *   3. mdct_bitreverse over the whole buffer,
 *   4. post-rotation plus mirroring that expands the result to all
 *      n output samples (with step-dependent table interpolation). */
void mdct_backward(int n, int32_t *in, int32_t *out)
    ICODE_ATTR_TREMOR_MDCT;
void mdct_backward(int n, int32_t *in, int32_t *out) {
    int n2=n>>1;
    int n4=n>>2;
    int32_t *iX;
    int32_t *oX;
    const int32_t *T;
    const int32_t *V;
    int shift;
    int step;
    /* locate log2(n) (lowest set bit, starting the scan at bit 6) */
    for (shift=6;!(n&(1<<shift));shift++);
    shift=13-shift;
    step=2<<shift;      /* table stride for this transform size */

    /* rotate */

    iX            = in+n2-7;
    oX            = out+n2+n4;
    T             = sincos_lookup0;

    do{
        oX-=4;
        XPROD31( iX[4], iX[6], T[0], T[1], &oX[2], &oX[3] ); T+=step;
        XPROD31( iX[0], iX[2], T[0], T[1], &oX[0], &oX[1] ); T+=step;
        iX-=8;
    }while(iX>=in+n4);
    do{
        oX-=4;
        XPROD31( iX[4], iX[6], T[1], T[0], &oX[2], &oX[3] ); T-=step;
        XPROD31( iX[0], iX[2], T[1], T[0], &oX[0], &oX[1] ); T-=step;
        iX-=8;
    }while(iX>=in);

    iX            = in+n2-8;
    oX            = out+n2+n4;
    T             = sincos_lookup0;

    do{
        T+=step; XNPROD31( iX[6], iX[4], T[0], T[1], &oX[0], &oX[1] );
        T+=step; XNPROD31( iX[2], iX[0], T[0], T[1], &oX[2], &oX[3] );
        iX-=8;
        oX+=4;
    }while(iX>=in+n4);
    do{
        T-=step; XNPROD31( iX[6], iX[4], T[1], T[0], &oX[0], &oX[1] );
        T-=step; XNPROD31( iX[2], iX[0], T[1], T[0], &oX[2], &oX[3] );
        iX-=8;
        oX+=4;
    }while(iX>=in);

    mdct_butterflies(out+n2,n2,shift);
    mdct_bitreverse(out,n,step,shift);
    /* rotate + window */

    step>>=2;
    {
        int32_t *oX1=out+n2+n4;
        int32_t *oX2=out+n2+n4;
        int32_t *iX =out;

        /* step selects how table values are obtained: directly
           (default), or linearly interpolated between the two lookup
           tables when the transform is too large for direct stride. */
        switch(step) {
            default: {
                T=(step>=4)?(sincos_lookup0+(step>>1)):sincos_lookup1;
                do{
                    oX1-=4;
                    XPROD31( iX[0], -iX[1], T[0], T[1], &oX1[3], &oX2[0] ); T+=step;
                    XPROD31( iX[2], -iX[3], T[0], T[1], &oX1[2], &oX2[1] ); T+=step;
                    XPROD31( iX[4], -iX[5], T[0], T[1], &oX1[1], &oX2[2] ); T+=step;
                    XPROD31( iX[6], -iX[7], T[0], T[1], &oX1[0], &oX2[3] ); T+=step;
                    oX2+=4;
                    iX+=8;
                }while(iX<oX1);
                break;
            }

            case 1: {
                /* linear interpolation between table values: offset=0.5, step=1 */
                register int32_t  t0,t1,v0,v1;
                T         = sincos_lookup0;
                V         = sincos_lookup1;
                t0        = (*T++)>>1;
                t1        = (*T++)>>1;
                do{
                    oX1-=4;

                    t0 += (v0 = (*V++)>>1);
                    t1 += (v1 = (*V++)>>1);
                    XPROD31( iX[0], -iX[1], t0, t1, &oX1[3], &oX2[0] );
                    v0 += (t0 = (*T++)>>1);
                    v1 += (t1 = (*T++)>>1);
                    XPROD31( iX[2], -iX[3], v0, v1, &oX1[2], &oX2[1] );
                    t0 += (v0 = (*V++)>>1);
                    t1 += (v1 = (*V++)>>1);
                    XPROD31( iX[4], -iX[5], t0, t1, &oX1[1], &oX2[2] );
                    v0 += (t0 = (*T++)>>1);
                    v1 += (t1 = (*T++)>>1);
                    XPROD31( iX[6], -iX[7], v0, v1, &oX1[0], &oX2[3] );

                    oX2+=4;
                    iX+=8;
                }while(iX<oX1);
                break;
            }

            case 0: {
                /* linear interpolation between table values: offset=0.25, step=0.5 */
                register int32_t  t0,t1,v0,v1,q0,q1;
                T         = sincos_lookup0;
                V         = sincos_lookup1;
                t0        = *T++;
                t1        = *T++;
                do{
                    oX1-=4;

                    v0  = *V++;
                    v1  = *V++;
                    t0 += (q0 = (v0-t0)>>2);
                    t1 += (q1 = (v1-t1)>>2);
                    XPROD31( iX[0], -iX[1], t0, t1, &oX1[3], &oX2[0] );
                    t0  = v0-q0;
                    t1  = v1-q1;
                    XPROD31( iX[2], -iX[3], t0, t1, &oX1[2], &oX2[1] );

                    t0  = *T++;
                    t1  = *T++;
                    v0 += (q0 = (t0-v0)>>2);
                    v1 += (q1 = (t1-v1)>>2);
                    XPROD31( iX[4], -iX[5], v0, v1, &oX1[1], &oX2[2] );
                    v0  = t0-q0;
                    v1  = t1-q1;
                    XPROD31( iX[6], -iX[7], v0, v1, &oX1[0], &oX2[3] );

                    oX2+=4;
                    iX+=8;
                }while(iX<oX1);
                break;
            }
        }

        /* mirror the second quarter into the first, negated */
        iX=out+n2+n4;
        oX1=out+n4;
        oX2=oX1;

        do{
            oX1-=4;
            iX-=4;

            oX2[0] = -(oX1[3] = iX[3]);
            oX2[1] = -(oX1[2] = iX[2]);
            oX2[2] = -(oX1[1] = iX[1]);
            oX2[3] = -(oX1[0] = iX[0]);

            oX2+=4;
        }while(oX2<iX);

        /* reverse the third quarter in place */
        iX=out+n2+n4;
        oX1=out+n2+n4;
        oX2=out+n2;

        do{
            oX1-=4;
            oX1[0]= iX[3];
            oX1[1]= iX[2];
            oX1[2]= iX[1];
            oX1[3]= iX[0];
            iX+=4;
        }while(oX1>oX2);
    }
}
diff --git a/apps/codecs/lib/mdct2.h b/apps/codecs/lib/mdct2.h
new file mode 100644
index 0000000000..b03430b21a
--- /dev/null
+++ b/apps/codecs/lib/mdct2.h
@@ -0,0 +1,75 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: modified discrete cosine transform prototypes
15
16 ********************************************************************/
17
18#ifndef _OGG_mdct_H_
19#define _OGG_mdct_H_
20
21
22
23#ifdef _LOW_ACCURACY_
24# define X(n) (((((n)>>22)+1)>>1) - ((((n)>>22)+1)>>9))
25# //define LOOKUP_T const unsigned char
26#else
27# define X(n) (n)
28# //define LOOKUP_T const ogg_int32_t
29#endif
30
31#include <codecs.h>
32#include "asm_arm.h"
33#include "asm_mcf5249.h"
34#include "misc.h"
35
36#ifndef ICONST_ATTR_TREMOR_WINDOW
37#define ICONST_ATTR_TREMOR_WINDOW ICONST_ATTR
38#endif
39
40#ifndef ICODE_ATTR_TREMOR_MDCT
41#define ICODE_ATTR_TREMOR_MDCT ICODE_ATTR
42#endif
43
44#ifndef ICODE_ATTR_TREMOR_NOT_MDCT
45#define ICODE_ATTR_TREMOR_NOT_MDCT ICODE_ATTR
46#endif
47
48
49
50#ifdef _LOW_ACCURACY_
51#define cPI3_8 (0x0062)
52#define cPI2_8 (0x00b5)
53#define cPI1_8 (0x00ed)
54#else
55#define cPI3_8 (0x30fbc54d)
56#define cPI2_8 (0x5a82799a)
57#define cPI1_8 (0x7641af3d)
58#endif
59
60
61extern void mdct_backward(int n, int32_t *in, int32_t *out);
62
63#endif
64
65
66
67
68
69
70
71
72
73
74
75
diff --git a/apps/codecs/lib/mdct_arm.S b/apps/codecs/lib/mdct_arm.S
new file mode 100644
index 0000000000..d80c1e37d3
--- /dev/null
+++ b/apps/codecs/lib/mdct_arm.S
@@ -0,0 +1,429 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id: mdct_arm.S 18084 2008-07-17 17:43:49Z saratoga $
9 *
10 * Copyright (C) 2007 by Tomasz Malesinski
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#include "config.h"
23/* Codecs should not normally do this, but we need to check a macro, and
24 * codecs.h would confuse the assembler. */
25
26#define cPI3_8 (0x30fbc54d)
27#define cPI2_8 (0x5a82799a)
28#define cPI1_8 (0x7641af3d)
29
30#ifdef USE_IRAM
31 .section .icode,"ax",%progbits
32#else
33 .text
34#endif
35 .align
36
37 .global mdct_butterfly_32
38 .global mdct_butterfly_generic_loop
39
@ -----------------------------------------------------------------------
@ mdct_butterfly_8 -- final 8-point butterfly, in place.
@ In:  r0 -> x[0..7]; the caller has already loaded x0..x3 into r1-r4
@      and x4..x7 into r5, r6, r10, r11 (see mdct_butterfly_16).
@ Out: y0..y7 stored back to [r0]; r0 itself is preserved (no writeback).
@ Leaf routine: returns via mov pc, lr; clobbers r1-r12.
@ -----------------------------------------------------------------------
40mdct_butterfly_8:
41 add r9, r5, r1 @ x4 + x0
42 sub r5, r5, r1 @ x4 - x0
43 add r7, r6, r2 @ x5 + x1
44 sub r6, r6, r2 @ x5 - x1
45 add r8, r10, r3 @ x6 + x2
46 sub r10, r10, r3 @ x6 - x2
47 add r12, r11, r4 @ x7 + x3
48 sub r11, r11, r4 @ x7 - x3
49
50 add r1, r10, r6 @ y0 = (x6 - x2) + (x5 - x1)
51 sub r2, r11, r5 @ y1 = (x7 - x3) - (x4 - x0)
52 sub r3, r10, r6 @ y2 = (x6 - x2) - (x5 - x1)
53 add r4, r11, r5 @ y3 = (x7 - x3) + (x4 - x0)
54 sub r5, r8, r9 @ y4 = (x6 + x2) - (x4 + x0)
55 sub r6, r12, r7 @ y5 = (x7 + x3) - (x5 + x1)
56 add r10, r8, r9 @ y6 = (x6 + x2) + (x4 + x0)
57 add r11, r12, r7 @ y7 = (x7 + x3) + (x5 + x1)
58 stmia r0, {r1, r2, r3, r4, r5, r6, r10, r11}
59
60 mov pc, lr
61
@ -----------------------------------------------------------------------
@ mdct_butterfly_16 -- 16-point butterfly stage, in place at [r0].
@ Sums y8..y15 go to the second half, rotated differences (by cPI2_8 =
@ cos(PI/4), kept in r12 throughout) to the first half; then
@ mdct_butterfly_8 is run on each half.
@ On return r0 points at x[8] (advanced past the first half).
@ -----------------------------------------------------------------------
62mdct_butterfly_16:
63 str lr, [sp, #-4]!
64 add r1, r0, #8*4
65
66 ldmia r0, {r2, r3, r4, r5}
67 ldmia r1, {r6, r7, r8, r9}
68 add r6, r6, r2 @ y8 = x8 + x0
69 rsb r2, r6, r2, asl #1 @ x0 - x8
70 add r7, r7, r3 @ y9 = x9 + x1
71 rsb r3, r7, r3, asl #1 @ x1 - x9
72 add r8, r8, r4 @ y10 = x10 + x2
73 sub r11, r8, r4, asl #1 @ x10 - x2
74 add r9, r9, r5 @ y11 = x11 + x3
75 rsb r10, r9, r5, asl #1 @ x3 - x11
76
77 stmia r1!, {r6, r7, r8, r9}
78
79 add r2, r2, r3 @ (x0 - x8) + (x1 - x9)
80 rsb r3, r2, r3, asl #1 @ (x1 - x9) - (x0 - x8)
81
82 ldr r12, =cPI2_8
83 smull r8, r5, r2, r12
84 mov r5, r5, asl #1
85 smull r8, r6, r3, r12
86 mov r6, r6, asl #1
87
88 stmia r0!, {r5, r6, r10, r11}
89
90 ldmia r0, {r2, r3, r4, r5}
91 ldmia r1, {r6, r7, r8, r9}
92 add r6, r6, r2 @ y12 = x12 + x4
93 sub r2, r6, r2, asl #1 @ x12 - x4
94 add r7, r7, r3 @ y13 = x13 + x5
95 sub r3, r7, r3, asl #1 @ x13 - x5
96 add r8, r8, r4 @ y14 = x14 + x6
97 sub r10, r8, r4, asl #1 @ x14 - x6
98 add r9, r9, r5 @ y15 = x15 + x7
99 sub r11, r9, r5, asl #1 @ x15 - x7
100
101 stmia r1, {r6, r7, r8, r9}
102
103 sub r2, r2, r3 @ (x12 - x4) - (x13 - x5)
104 add r3, r2, r3, asl #1 @ (x12 - x4) + (x13 - x5)
105
106 smull r8, r5, r2, r12
107 mov r5, r5, asl #1
108 smull r8, r6, r3, r12
109 mov r6, r6, asl #1
110 @ no stmia here, r5, r6, r10, r11 are passed to mdct_butterfly_8
111
112 sub r0, r0, #4*4
113 ldmia r0, {r1, r2, r3, r4}
114 bl mdct_butterfly_8
115 add r0, r0, #8*4
116 ldmia r0, {r1, r2, r3, r4, r5, r6, r10, r11}
117 bl mdct_butterfly_8
118
119 ldr pc, [sp], #4
120
@ -----------------------------------------------------------------------
@ mdct_butterfly_32 -- 32-point butterfly stage, in place at [r0].
@ Same scheme as mdct_butterfly_16: sums y16..y31 go to the upper half,
@ rotated differences (by cPI1_8 / cPI2_8 / cPI3_8) to the lower half,
@ then mdct_butterfly_16 is run on each half.
@ r12 = cPI1_8 and lr = cPI3_8 are loaded once and kept live across the
@ whole body; cPI2_8 is reloaded into r11 when needed.
@ -----------------------------------------------------------------------
121mdct_butterfly_32:
122 stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
123
124 add r1, r0, #16*4
125
126 ldmia r0, {r2, r3, r4, r5}
127 ldmia r1, {r6, r7, r8, r9}
128 add r6, r6, r2 @ y16 = x16 + x0
129 rsb r2, r6, r2, asl #1 @ x0 - x16
130 add r7, r7, r3 @ y17 = x17 + x1
131 rsb r3, r7, r3, asl #1 @ x1 - x17
132 add r8, r8, r4 @ y18 = x18 + x2
133 rsb r4, r8, r4, asl #1 @ x2 - x18
134 add r9, r9, r5 @ y19 = x19 + x3
135 rsb r5, r9, r5, asl #1 @ x3 - x19
136
137 stmia r1!, {r6, r7, r8, r9}
138
139 ldr r12, =cPI1_8
140 ldr lr, =cPI3_8
141 smull r10, r6, r2, r12
142 smlal r10, r6, r3, lr
143 rsb r2, r2, #0
144 smull r10, r7, r3, r12
145 smlal r10, r7, r2, lr
146 mov r6, r6, asl #1
147 mov r7, r7, asl #1
148
149 add r4, r4, r5 @ (x3 - x19) + (x2 - x18)
150 rsb r5, r4, r5, asl #1 @ (x3 - x19) - (x2 - x18)
151
152 ldr r11, =cPI2_8
153 smull r10, r8, r4, r11
154 mov r8, r8, asl #1
155 smull r10, r9, r5, r11
156 mov r9, r9, asl #1
157
158 stmia r0!, {r6, r7, r8, r9}
159
160 ldmia r0, {r2, r3, r4, r5}
161 ldmia r1, {r6, r7, r8, r9}
162 add r6, r6, r2 @ y20 = x20 + x4
163 rsb r2, r6, r2, asl #1 @ x4 - x20
164 add r7, r7, r3 @ y21 = x21 + x5
165 rsb r3, r7, r3, asl #1 @ x5 - x21
166 add r8, r8, r4 @ y22 = x22 + x6
167 sub r4, r8, r4, asl #1 @ x22 - x6
168 add r9, r9, r5 @ y23 = x23 + x7
169 rsb r5, r9, r5, asl #1 @ x7 - x23
170
171 stmia r1!, {r6, r7, r8, r9}
172
173 smull r10, r6, r2, lr
174 smlal r10, r6, r3, r12
175 rsb r2, r2, #0
176 smull r10, r7, r3, lr
177 smlal r10, r7, r2, r12
178 mov r6, r6, asl #1
179 mov r7, r7, asl #1
180
181 mov r8, r5
182 mov r9, r4
183 stmia r0!, {r6, r7, r8, r9}
184
185 ldmia r0, {r2, r3, r4, r5}
186 ldmia r1, {r6, r7, r8, r9}
187 add r6, r6, r2 @ y24 = x24 + x8
188 sub r2, r6, r2, asl #1 @ x24 - x8
189 add r7, r7, r3 @ y25 = x25 + x9
190 sub r3, r7, r3, asl #1 @ x25 - x9
191 add r8, r8, r4 @ y26 = x26 + x10
192 sub r4, r8, r4, asl #1 @ x26 - x10
193 add r9, r9, r5 @ y27 = x27 + x11
194 sub r5, r9, r5, asl #1 @ x27 - x11
195
196 stmia r1!, {r6, r7, r8, r9}
197
198 smull r10, r7, r2, r12
199 smlal r10, r7, r3, lr
200 rsb r3, r3, #0
201 smull r10, r6, r3, r12
202 smlal r10, r6, r2, lr
203 mov r6, r6, asl #1
204 mov r7, r7, asl #1
205
206 sub r4, r4, r5 @ (x26 - x10) - (x27 - x11)
207 add r5, r4, r5, asl #1 @ (x26 - x10) + (x27 - x11)
208
209 ldr r11, =cPI2_8
210 smull r10, r8, r4, r11
211 mov r8, r8, asl #1
212 smull r10, r9, r5, r11
213 mov r9, r9, asl #1
214
215 stmia r0!, {r6, r7, r8, r9}
216
217 ldmia r0, {r2, r3, r4, r5}
218 ldmia r1, {r6, r7, r8, r9}
219 add r6, r6, r2 @ y28 = x28 + x12
220 sub r2, r6, r2, asl #1 @ x28 - x12
221 add r7, r7, r3 @ y29 = x29 + x13
222 sub r3, r7, r3, asl #1 @ x29 - x13
223 add r8, r8, r4 @ y30 = x30 + x14
224 sub r4, r8, r4, asl #1 @ x30 - x14
225 add r9, r9, r5 @ y31 = x31 + x15
226 sub r5, r9, r5, asl #1 @ x31 - x15
227
228 stmia r1, {r6, r7, r8, r9}
229
230 smull r10, r7, r2, lr
231 smlal r10, r7, r3, r12
232 rsb r3, r3, #0
233 smull r10, r6, r3, lr
234 smlal r10, r6, r2, r12
235 mov r6, r6, asl #1
236 mov r7, r7, asl #1
237
238 mov r8, r4
239 mov r9, r5
240 stmia r0, {r6, r7, r8, r9}
241
@ Rewind to the block base, then run the 16-point stage on each half.
242 sub r0, r0, #12*4
243 str r0, [sp, #-4]!
244 bl mdct_butterfly_16
245
246 ldr r0, [sp], #4
247 add r0, r0, #16*4
248 bl mdct_butterfly_16
249
250 ldmia sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, pc}
251
@ -----------------------------------------------------------------------
@ Generic butterfly stage for block sizes larger than 32.
@ Arguments: r0 = x1 (one past the end of the first half, walked
@ downwards), r1 = x2 (likewise for the second half), r2 = T0 (twiddle
@ pointer into the sin/cos table), r3 = step (table stride in entries),
@ fifth argument Ttop passed on the stack.
@ Four loops cover the four quadrants of the sin/cos table: T walks up
@ to Ttop (blo), back down to T0 (bhi), up again, and down again, with
@ the add/sub/rsb and multiply-operand patterns adjusted per quadrant.
252 @ mdct_butterfly_generic_loop(x1, x2, T0, step, Ttop)
253mdct_butterfly_generic_loop:
254 stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, lr}
@ Stash T0 in the word just below sp; it is reloaded at passes 2 and 4.
@ NOTE(review): the slot is never reserved by adjusting sp -- this only
@ works because nothing below pushes or calls before the reloads; confirm
@ no interrupt handler can clobber memory below sp on the target.
255 str r2, [sp, #-4]
@ After pushing 10 registers the 5th argument (Ttop) sits at [sp, #40].
256 ldr r4, [sp, #40]
@ Pass 1: T ascending toward Ttop.
2571:
258 ldmdb r0, {r6, r7, r8, r9}
259 ldmdb r1, {r10, r11, r12, r14}
260
261 add r6, r6, r10
262 sub r10, r6, r10, asl #1
263 add r7, r7, r11
264 rsb r11, r7, r11, asl #1
265 add r8, r8, r12
266 sub r12, r8, r12, asl #1
267 add r9, r9, r14
268 rsb r14, r9, r14, asl #1
269
270 stmdb r0!, {r6, r7, r8, r9}
271
272 ldmia r2, {r6, r7}
273 smull r5, r8, r14, r6
274 smlal r5, r8, r12, r7
275 rsb r14, r14, #0
276 smull r5, r9, r12, r6
277 smlal r5, r9, r14, r7
278
279 mov r8, r8, asl #1
280 mov r9, r9, asl #1
281 stmdb r1!, {r8, r9}
282 add r2, r2, r3, asl #2
283
284 ldmia r2, {r6, r7}
285 smull r5, r8, r11, r6
286 smlal r5, r8, r10, r7
287 rsb r11, r11, #0
288 smull r5, r9, r10, r6
289 smlal r5, r9, r11, r7
290
291 mov r8, r8, asl #1
292 mov r9, r9, asl #1
293 stmdb r1!, {r8, r9}
294 add r2, r2, r3, asl #2
295
296 cmp r2, r4
297 blo 1b
298
@ Pass 2: T descending back toward T0 (reloaded from below sp).
299 ldr r4, [sp, #-4]
3001:
301 ldmdb r0, {r6, r7, r8, r9}
302 ldmdb r1, {r10, r11, r12, r14}
303
304 add r6, r6, r10
305 sub r10, r6, r10, asl #1
306 add r7, r7, r11
307 sub r11, r7, r11, asl #1
308 add r8, r8, r12
309 sub r12, r8, r12, asl #1
310 add r9, r9, r14
311 sub r14, r9, r14, asl #1
312
313 stmdb r0!, {r6, r7, r8, r9}
314
315 ldmia r2, {r6, r7}
316 smull r5, r9, r14, r6
317 smlal r5, r9, r12, r7
318 rsb r14, r14, #0
319 smull r5, r8, r12, r6
320 smlal r5, r8, r14, r7
321
322 mov r8, r8, asl #1
323 mov r9, r9, asl #1
324 stmdb r1!, {r8, r9}
325 sub r2, r2, r3, asl #2
326
327 ldmia r2, {r6, r7}
328 smull r5, r9, r11, r6
329 smlal r5, r9, r10, r7
330 rsb r11, r11, #0
331 smull r5, r8, r10, r6
332 smlal r5, r8, r11, r7
333
334 mov r8, r8, asl #1
335 mov r9, r9, asl #1
336 stmdb r1!, {r8, r9}
337 sub r2, r2, r3, asl #2
338
339 cmp r2, r4
340 bhi 1b
341
@ Pass 3: T ascending again; the difference terms use rsb throughout.
342 ldr r4, [sp, #40]
3431:
344 ldmdb r0, {r6, r7, r8, r9}
345 ldmdb r1, {r10, r11, r12, r14}
346
347 add r6, r6, r10
348 rsb r10, r6, r10, asl #1
349 add r7, r7, r11
350 rsb r11, r7, r11, asl #1
351 add r8, r8, r12
352 rsb r12, r8, r12, asl #1
353 add r9, r9, r14
354 rsb r14, r9, r14, asl #1
355
356 stmdb r0!, {r6, r7, r8, r9}
357
358 ldmia r2, {r6, r7}
359 smull r5, r8, r12, r6
360 smlal r5, r8, r14, r7
361 rsb r12, r12, #0
362 smull r5, r9, r14, r6
363 smlal r5, r9, r12, r7
364
365 mov r8, r8, asl #1
366 mov r9, r9, asl #1
367 stmdb r1!, {r8, r9}
368 add r2, r2, r3, asl #2
369
370 ldmia r2, {r6, r7}
371 smull r5, r8, r10, r6
372 smlal r5, r8, r11, r7
373 rsb r10, r10, #0
374 smull r5, r9, r11, r6
375 smlal r5, r9, r10, r7
376
377 mov r8, r8, asl #1
378 mov r9, r9, asl #1
379 stmdb r1!, {r8, r9}
380 add r2, r2, r3, asl #2
381
382 cmp r2, r4
383 blo 1b
384
@ Pass 4: final quadrant, T descending to T0 again.
385 ldr r4, [sp, #-4]
3861:
387 ldmdb r0, {r6, r7, r8, r9}
388 ldmdb r1, {r10, r11, r12, r14}
389
390 add r6, r6, r10
391 sub r10, r6, r10, asl #1
392 add r7, r7, r11
393 rsb r11, r7, r11, asl #1
394 add r8, r8, r12
395 sub r12, r8, r12, asl #1
396 add r9, r9, r14
397 rsb r14, r9, r14, asl #1
398
399 stmdb r0!, {r6, r7, r8, r9}
400
401 ldmia r2, {r6, r7}
402 smull r5, r9, r12, r6
403 smlal r5, r9, r14, r7
404 rsb r12, r12, #0
405 smull r5, r8, r14, r6
406 smlal r5, r8, r12, r7
407
408 mov r8, r8, asl #1
409 mov r9, r9, asl #1
410 stmdb r1!, {r8, r9}
411 sub r2, r2, r3, asl #2
412
413 ldmia r2, {r6, r7}
414 smull r5, r9, r10, r6
415 smlal r5, r9, r11, r7
416 rsb r10, r10, #0
417 smull r5, r8, r11, r6
418 smlal r5, r8, r10, r7
419
420 mov r8, r8, asl #1
421 mov r9, r9, asl #1
422 stmdb r1!, {r8, r9}
423 sub r2, r2, r3, asl #2
424
425 cmp r2, r4
426 bhi 1b
427
428 ldmia sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, pc}
429
diff --git a/apps/codecs/lib/mdct_lookup.h b/apps/codecs/lib/mdct_lookup.h
new file mode 100644
index 0000000000..65f1e49316
--- /dev/null
+++ b/apps/codecs/lib/mdct_lookup.h
@@ -0,0 +1,544 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: sin,cos lookup tables
15
16 ********************************************************************/
17
18
19
20
21
22
23
 24/* {sin(2*i*PI/4096), cos(2*i*PI/4096)}, with i = 0 to 512 */
/* Q31 fixed point (0x7fffffff ~= +1.0): 513 interleaved {sin,cos} pairs,
 * i.e. 1026 words. X() (mdct2.h) rescales each entry in _LOW_ACCURACY_
 * builds and is the identity otherwise. */
 25static const int32_t sincos_lookup0[1026] ICONST_ATTR = {
 26 X(0x00000000), X(0x7fffffff), X(0x003243f5), X(0x7ffff621),
 27 X(0x006487e3), X(0x7fffd886), X(0x0096cbc1), X(0x7fffa72c),
 28 X(0x00c90f88), X(0x7fff6216), X(0x00fb5330), X(0x7fff0943),
 29 X(0x012d96b1), X(0x7ffe9cb2), X(0x015fda03), X(0x7ffe1c65),
 30 X(0x01921d20), X(0x7ffd885a), X(0x01c45ffe), X(0x7ffce093),
 31 X(0x01f6a297), X(0x7ffc250f), X(0x0228e4e2), X(0x7ffb55ce),
 32 X(0x025b26d7), X(0x7ffa72d1), X(0x028d6870), X(0x7ff97c18),
 33 X(0x02bfa9a4), X(0x7ff871a2), X(0x02f1ea6c), X(0x7ff75370),
 34 X(0x03242abf), X(0x7ff62182), X(0x03566a96), X(0x7ff4dbd9),
 35 X(0x0388a9ea), X(0x7ff38274), X(0x03bae8b2), X(0x7ff21553),
 36 X(0x03ed26e6), X(0x7ff09478), X(0x041f6480), X(0x7feeffe1),
 37 X(0x0451a177), X(0x7fed5791), X(0x0483ddc3), X(0x7feb9b85),
 38 X(0x04b6195d), X(0x7fe9cbc0), X(0x04e8543e), X(0x7fe7e841),
 39 X(0x051a8e5c), X(0x7fe5f108), X(0x054cc7b1), X(0x7fe3e616),
 40 X(0x057f0035), X(0x7fe1c76b), X(0x05b137df), X(0x7fdf9508),
 41 X(0x05e36ea9), X(0x7fdd4eec), X(0x0615a48b), X(0x7fdaf519),
 42 X(0x0647d97c), X(0x7fd8878e), X(0x067a0d76), X(0x7fd6064c),
 43 X(0x06ac406f), X(0x7fd37153), X(0x06de7262), X(0x7fd0c8a3),
 44 X(0x0710a345), X(0x7fce0c3e), X(0x0742d311), X(0x7fcb3c23),
 45 X(0x077501be), X(0x7fc85854), X(0x07a72f45), X(0x7fc560cf),
 46 X(0x07d95b9e), X(0x7fc25596), X(0x080b86c2), X(0x7fbf36aa),
 47 X(0x083db0a7), X(0x7fbc040a), X(0x086fd947), X(0x7fb8bdb8),
 48 X(0x08a2009a), X(0x7fb563b3), X(0x08d42699), X(0x7fb1f5fc),
 49 X(0x09064b3a), X(0x7fae7495), X(0x09386e78), X(0x7faadf7c),
 50 X(0x096a9049), X(0x7fa736b4), X(0x099cb0a7), X(0x7fa37a3c),
 51 X(0x09cecf89), X(0x7f9faa15), X(0x0a00ece8), X(0x7f9bc640),
 52 X(0x0a3308bd), X(0x7f97cebd), X(0x0a6522fe), X(0x7f93c38c),
 53 X(0x0a973ba5), X(0x7f8fa4b0), X(0x0ac952aa), X(0x7f8b7227),
 54 X(0x0afb6805), X(0x7f872bf3), X(0x0b2d7baf), X(0x7f82d214),
 55 X(0x0b5f8d9f), X(0x7f7e648c), X(0x0b919dcf), X(0x7f79e35a),
 56 X(0x0bc3ac35), X(0x7f754e80), X(0x0bf5b8cb), X(0x7f70a5fe),
 57 X(0x0c27c389), X(0x7f6be9d4), X(0x0c59cc68), X(0x7f671a05),
 58 X(0x0c8bd35e), X(0x7f62368f), X(0x0cbdd865), X(0x7f5d3f75),
 59 X(0x0cefdb76), X(0x7f5834b7), X(0x0d21dc87), X(0x7f531655),
 60 X(0x0d53db92), X(0x7f4de451), X(0x0d85d88f), X(0x7f489eaa),
 61 X(0x0db7d376), X(0x7f434563), X(0x0de9cc40), X(0x7f3dd87c),
 62 X(0x0e1bc2e4), X(0x7f3857f6), X(0x0e4db75b), X(0x7f32c3d1),
 63 X(0x0e7fa99e), X(0x7f2d1c0e), X(0x0eb199a4), X(0x7f2760af),
 64 X(0x0ee38766), X(0x7f2191b4), X(0x0f1572dc), X(0x7f1baf1e),
 65 X(0x0f475bff), X(0x7f15b8ee), X(0x0f7942c7), X(0x7f0faf25),
 66 X(0x0fab272b), X(0x7f0991c4), X(0x0fdd0926), X(0x7f0360cb),
 67 X(0x100ee8ad), X(0x7efd1c3c), X(0x1040c5bb), X(0x7ef6c418),
 68 X(0x1072a048), X(0x7ef05860), X(0x10a4784b), X(0x7ee9d914),
 69 X(0x10d64dbd), X(0x7ee34636), X(0x11082096), X(0x7edc9fc6),
 70 X(0x1139f0cf), X(0x7ed5e5c6), X(0x116bbe60), X(0x7ecf1837),
 71 X(0x119d8941), X(0x7ec8371a), X(0x11cf516a), X(0x7ec14270),
 72 X(0x120116d5), X(0x7eba3a39), X(0x1232d979), X(0x7eb31e78),
 73 X(0x1264994e), X(0x7eabef2c), X(0x1296564d), X(0x7ea4ac58),
 74 X(0x12c8106f), X(0x7e9d55fc), X(0x12f9c7aa), X(0x7e95ec1a),
 75 X(0x132b7bf9), X(0x7e8e6eb2), X(0x135d2d53), X(0x7e86ddc6),
 76 X(0x138edbb1), X(0x7e7f3957), X(0x13c0870a), X(0x7e778166),
 77 X(0x13f22f58), X(0x7e6fb5f4), X(0x1423d492), X(0x7e67d703),
 78 X(0x145576b1), X(0x7e5fe493), X(0x148715ae), X(0x7e57dea7),
 79 X(0x14b8b17f), X(0x7e4fc53e), X(0x14ea4a1f), X(0x7e47985b),
 80 X(0x151bdf86), X(0x7e3f57ff), X(0x154d71aa), X(0x7e37042a),
 81 X(0x157f0086), X(0x7e2e9cdf), X(0x15b08c12), X(0x7e26221f),
 82 X(0x15e21445), X(0x7e1d93ea), X(0x16139918), X(0x7e14f242),
 83 X(0x16451a83), X(0x7e0c3d29), X(0x1676987f), X(0x7e0374a0),
 84 X(0x16a81305), X(0x7dfa98a8), X(0x16d98a0c), X(0x7df1a942),
 85 X(0x170afd8d), X(0x7de8a670), X(0x173c6d80), X(0x7ddf9034),
 86 X(0x176dd9de), X(0x7dd6668f), X(0x179f429f), X(0x7dcd2981),
 87 X(0x17d0a7bc), X(0x7dc3d90d), X(0x1802092c), X(0x7dba7534),
 88 X(0x183366e9), X(0x7db0fdf8), X(0x1864c0ea), X(0x7da77359),
 89 X(0x18961728), X(0x7d9dd55a), X(0x18c7699b), X(0x7d9423fc),
 90 X(0x18f8b83c), X(0x7d8a5f40), X(0x192a0304), X(0x7d808728),
 91 X(0x195b49ea), X(0x7d769bb5), X(0x198c8ce7), X(0x7d6c9ce9),
 92 X(0x19bdcbf3), X(0x7d628ac6), X(0x19ef0707), X(0x7d58654d),
 93 X(0x1a203e1b), X(0x7d4e2c7f), X(0x1a517128), X(0x7d43e05e),
 94 X(0x1a82a026), X(0x7d3980ec), X(0x1ab3cb0d), X(0x7d2f0e2b),
 95 X(0x1ae4f1d6), X(0x7d24881b), X(0x1b161479), X(0x7d19eebf),
 96 X(0x1b4732ef), X(0x7d0f4218), X(0x1b784d30), X(0x7d048228),
 97 X(0x1ba96335), X(0x7cf9aef0), X(0x1bda74f6), X(0x7ceec873),
 98 X(0x1c0b826a), X(0x7ce3ceb2), X(0x1c3c8b8c), X(0x7cd8c1ae),
 99 X(0x1c6d9053), X(0x7ccda169), X(0x1c9e90b8), X(0x7cc26de5),
 100 X(0x1ccf8cb3), X(0x7cb72724), X(0x1d00843d), X(0x7cabcd28),
 101 X(0x1d31774d), X(0x7ca05ff1), X(0x1d6265dd), X(0x7c94df83),
 102 X(0x1d934fe5), X(0x7c894bde), X(0x1dc4355e), X(0x7c7da505),
 103 X(0x1df5163f), X(0x7c71eaf9), X(0x1e25f282), X(0x7c661dbc),
 104 X(0x1e56ca1e), X(0x7c5a3d50), X(0x1e879d0d), X(0x7c4e49b7),
 105 X(0x1eb86b46), X(0x7c4242f2), X(0x1ee934c3), X(0x7c362904),
 106 X(0x1f19f97b), X(0x7c29fbee), X(0x1f4ab968), X(0x7c1dbbb3),
 107 X(0x1f7b7481), X(0x7c116853), X(0x1fac2abf), X(0x7c0501d2),
 108 X(0x1fdcdc1b), X(0x7bf88830), X(0x200d888d), X(0x7bebfb70),
 109 X(0x203e300d), X(0x7bdf5b94), X(0x206ed295), X(0x7bd2a89e),
 110 X(0x209f701c), X(0x7bc5e290), X(0x20d0089c), X(0x7bb9096b),
 111 X(0x21009c0c), X(0x7bac1d31), X(0x21312a65), X(0x7b9f1de6),
 112 X(0x2161b3a0), X(0x7b920b89), X(0x219237b5), X(0x7b84e61f),
 113 X(0x21c2b69c), X(0x7b77ada8), X(0x21f3304f), X(0x7b6a6227),
 114 X(0x2223a4c5), X(0x7b5d039e), X(0x225413f8), X(0x7b4f920e),
 115 X(0x22847de0), X(0x7b420d7a), X(0x22b4e274), X(0x7b3475e5),
 116 X(0x22e541af), X(0x7b26cb4f), X(0x23159b88), X(0x7b190dbc),
 117 X(0x2345eff8), X(0x7b0b3d2c), X(0x23763ef7), X(0x7afd59a4),
 118 X(0x23a6887f), X(0x7aef6323), X(0x23d6cc87), X(0x7ae159ae),
 119 X(0x24070b08), X(0x7ad33d45), X(0x243743fa), X(0x7ac50dec),
 120 X(0x24677758), X(0x7ab6cba4), X(0x2497a517), X(0x7aa8766f),
 121 X(0x24c7cd33), X(0x7a9a0e50), X(0x24f7efa2), X(0x7a8b9348),
 122 X(0x25280c5e), X(0x7a7d055b), X(0x2558235f), X(0x7a6e648a),
 123 X(0x2588349d), X(0x7a5fb0d8), X(0x25b84012), X(0x7a50ea47),
 124 X(0x25e845b6), X(0x7a4210d8), X(0x26184581), X(0x7a332490),
 125 X(0x26483f6c), X(0x7a24256f), X(0x26783370), X(0x7a151378),
 126 X(0x26a82186), X(0x7a05eead), X(0x26d809a5), X(0x79f6b711),
 127 X(0x2707ebc7), X(0x79e76ca7), X(0x2737c7e3), X(0x79d80f6f),
 128 X(0x27679df4), X(0x79c89f6e), X(0x27976df1), X(0x79b91ca4),
 129 X(0x27c737d3), X(0x79a98715), X(0x27f6fb92), X(0x7999dec4),
 130 X(0x2826b928), X(0x798a23b1), X(0x2856708d), X(0x797a55e0),
 131 X(0x288621b9), X(0x796a7554), X(0x28b5cca5), X(0x795a820e),
 132 X(0x28e5714b), X(0x794a7c12), X(0x29150fa1), X(0x793a6361),
 133 X(0x2944a7a2), X(0x792a37fe), X(0x29743946), X(0x7919f9ec),
 134 X(0x29a3c485), X(0x7909a92d), X(0x29d34958), X(0x78f945c3),
 135 X(0x2a02c7b8), X(0x78e8cfb2), X(0x2a323f9e), X(0x78d846fb),
 136 X(0x2a61b101), X(0x78c7aba2), X(0x2a911bdc), X(0x78b6fda8),
 137 X(0x2ac08026), X(0x78a63d11), X(0x2aefddd8), X(0x789569df),
 138 X(0x2b1f34eb), X(0x78848414), X(0x2b4e8558), X(0x78738bb3),
 139 X(0x2b7dcf17), X(0x786280bf), X(0x2bad1221), X(0x7851633b),
 140 X(0x2bdc4e6f), X(0x78403329), X(0x2c0b83fa), X(0x782ef08b),
 141 X(0x2c3ab2b9), X(0x781d9b65), X(0x2c69daa6), X(0x780c33b8),
 142 X(0x2c98fbba), X(0x77fab989), X(0x2cc815ee), X(0x77e92cd9),
 143 X(0x2cf72939), X(0x77d78daa), X(0x2d263596), X(0x77c5dc01),
 144 X(0x2d553afc), X(0x77b417df), X(0x2d843964), X(0x77a24148),
 145 X(0x2db330c7), X(0x7790583e), X(0x2de2211e), X(0x777e5cc3),
 146 X(0x2e110a62), X(0x776c4edb), X(0x2e3fec8b), X(0x775a2e89),
 147 X(0x2e6ec792), X(0x7747fbce), X(0x2e9d9b70), X(0x7735b6af),
 148 X(0x2ecc681e), X(0x77235f2d), X(0x2efb2d95), X(0x7710f54c),
 149 X(0x2f29ebcc), X(0x76fe790e), X(0x2f58a2be), X(0x76ebea77),
 150 X(0x2f875262), X(0x76d94989), X(0x2fb5fab2), X(0x76c69647),
 151 X(0x2fe49ba7), X(0x76b3d0b4), X(0x30133539), X(0x76a0f8d2),
 152 X(0x3041c761), X(0x768e0ea6), X(0x30705217), X(0x767b1231),
 153 X(0x309ed556), X(0x76680376), X(0x30cd5115), X(0x7654e279),
 154 X(0x30fbc54d), X(0x7641af3d), X(0x312a31f8), X(0x762e69c4),
 155 X(0x3158970e), X(0x761b1211), X(0x3186f487), X(0x7607a828),
 156 X(0x31b54a5e), X(0x75f42c0b), X(0x31e39889), X(0x75e09dbd),
 157 X(0x3211df04), X(0x75ccfd42), X(0x32401dc6), X(0x75b94a9c),
 158 X(0x326e54c7), X(0x75a585cf), X(0x329c8402), X(0x7591aedd),
 159 X(0x32caab6f), X(0x757dc5ca), X(0x32f8cb07), X(0x7569ca99),
 160 X(0x3326e2c3), X(0x7555bd4c), X(0x3354f29b), X(0x75419de7),
 161 X(0x3382fa88), X(0x752d6c6c), X(0x33b0fa84), X(0x751928e0),
 162 X(0x33def287), X(0x7504d345), X(0x340ce28b), X(0x74f06b9e),
 163 X(0x343aca87), X(0x74dbf1ef), X(0x3468aa76), X(0x74c7663a),
 164 X(0x34968250), X(0x74b2c884), X(0x34c4520d), X(0x749e18cd),
 165 X(0x34f219a8), X(0x7489571c), X(0x351fd918), X(0x74748371),
 166 X(0x354d9057), X(0x745f9dd1), X(0x357b3f5d), X(0x744aa63f),
 167 X(0x35a8e625), X(0x74359cbd), X(0x35d684a6), X(0x74208150),
 168 X(0x36041ad9), X(0x740b53fb), X(0x3631a8b8), X(0x73f614c0),
 169 X(0x365f2e3b), X(0x73e0c3a3), X(0x368cab5c), X(0x73cb60a8),
 170 X(0x36ba2014), X(0x73b5ebd1), X(0x36e78c5b), X(0x73a06522),
 171 X(0x3714f02a), X(0x738acc9e), X(0x37424b7b), X(0x73752249),
 172 X(0x376f9e46), X(0x735f6626), X(0x379ce885), X(0x73499838),
 173 X(0x37ca2a30), X(0x7333b883), X(0x37f76341), X(0x731dc70a),
 174 X(0x382493b0), X(0x7307c3d0), X(0x3851bb77), X(0x72f1aed9),
 175 X(0x387eda8e), X(0x72db8828), X(0x38abf0ef), X(0x72c54fc1),
 176 X(0x38d8fe93), X(0x72af05a7), X(0x39060373), X(0x7298a9dd),
 177 X(0x3932ff87), X(0x72823c67), X(0x395ff2c9), X(0x726bbd48),
 178 X(0x398cdd32), X(0x72552c85), X(0x39b9bebc), X(0x723e8a20),
 179 X(0x39e6975e), X(0x7227d61c), X(0x3a136712), X(0x7211107e),
 180 X(0x3a402dd2), X(0x71fa3949), X(0x3a6ceb96), X(0x71e35080),
 181 X(0x3a99a057), X(0x71cc5626), X(0x3ac64c0f), X(0x71b54a41),
 182 X(0x3af2eeb7), X(0x719e2cd2), X(0x3b1f8848), X(0x7186fdde),
 183 X(0x3b4c18ba), X(0x716fbd68), X(0x3b78a007), X(0x71586b74),
 184 X(0x3ba51e29), X(0x71410805), X(0x3bd19318), X(0x7129931f),
 185 X(0x3bfdfecd), X(0x71120cc5), X(0x3c2a6142), X(0x70fa74fc),
 186 X(0x3c56ba70), X(0x70e2cbc6), X(0x3c830a50), X(0x70cb1128),
 187 X(0x3caf50da), X(0x70b34525), X(0x3cdb8e09), X(0x709b67c0),
 188 X(0x3d07c1d6), X(0x708378ff), X(0x3d33ec39), X(0x706b78e3),
 189 X(0x3d600d2c), X(0x70536771), X(0x3d8c24a8), X(0x703b44ad),
 190 X(0x3db832a6), X(0x7023109a), X(0x3de4371f), X(0x700acb3c),
 191 X(0x3e10320d), X(0x6ff27497), X(0x3e3c2369), X(0x6fda0cae),
 192 X(0x3e680b2c), X(0x6fc19385), X(0x3e93e950), X(0x6fa90921),
 193 X(0x3ebfbdcd), X(0x6f906d84), X(0x3eeb889c), X(0x6f77c0b3),
 194 X(0x3f1749b8), X(0x6f5f02b2), X(0x3f430119), X(0x6f463383),
 195 X(0x3f6eaeb8), X(0x6f2d532c), X(0x3f9a5290), X(0x6f1461b0),
 196 X(0x3fc5ec98), X(0x6efb5f12), X(0x3ff17cca), X(0x6ee24b57),
 197 X(0x401d0321), X(0x6ec92683), X(0x40487f94), X(0x6eaff099),
 198 X(0x4073f21d), X(0x6e96a99d), X(0x409f5ab6), X(0x6e7d5193),
 199 X(0x40cab958), X(0x6e63e87f), X(0x40f60dfb), X(0x6e4a6e66),
 200 X(0x4121589b), X(0x6e30e34a), X(0x414c992f), X(0x6e174730),
 201 X(0x4177cfb1), X(0x6dfd9a1c), X(0x41a2fc1a), X(0x6de3dc11),
 202 X(0x41ce1e65), X(0x6dca0d14), X(0x41f93689), X(0x6db02d29),
 203 X(0x42244481), X(0x6d963c54), X(0x424f4845), X(0x6d7c3a98),
 204 X(0x427a41d0), X(0x6d6227fa), X(0x42a5311b), X(0x6d48047e),
 205 X(0x42d0161e), X(0x6d2dd027), X(0x42faf0d4), X(0x6d138afb),
 206 X(0x4325c135), X(0x6cf934fc), X(0x4350873c), X(0x6cdece2f),
 207 X(0x437b42e1), X(0x6cc45698), X(0x43a5f41e), X(0x6ca9ce3b),
 208 X(0x43d09aed), X(0x6c8f351c), X(0x43fb3746), X(0x6c748b3f),
 209 X(0x4425c923), X(0x6c59d0a9), X(0x4450507e), X(0x6c3f055d),
 210 X(0x447acd50), X(0x6c242960), X(0x44a53f93), X(0x6c093cb6),
 211 X(0x44cfa740), X(0x6bee3f62), X(0x44fa0450), X(0x6bd3316a),
 212 X(0x452456bd), X(0x6bb812d1), X(0x454e9e80), X(0x6b9ce39b),
 213 X(0x4578db93), X(0x6b81a3cd), X(0x45a30df0), X(0x6b66536b),
 214 X(0x45cd358f), X(0x6b4af279), X(0x45f7526b), X(0x6b2f80fb),
 215 X(0x4621647d), X(0x6b13fef5), X(0x464b6bbe), X(0x6af86c6c),
 216 X(0x46756828), X(0x6adcc964), X(0x469f59b4), X(0x6ac115e2),
 217 X(0x46c9405c), X(0x6aa551e9), X(0x46f31c1a), X(0x6a897d7d),
 218 X(0x471cece7), X(0x6a6d98a4), X(0x4746b2bc), X(0x6a51a361),
 219 X(0x47706d93), X(0x6a359db9), X(0x479a1d67), X(0x6a1987b0),
 220 X(0x47c3c22f), X(0x69fd614a), X(0x47ed5be6), X(0x69e12a8c),
 221 X(0x4816ea86), X(0x69c4e37a), X(0x48406e08), X(0x69a88c19),
 222 X(0x4869e665), X(0x698c246c), X(0x48935397), X(0x696fac78),
 223 X(0x48bcb599), X(0x69532442), X(0x48e60c62), X(0x69368bce),
 224 X(0x490f57ee), X(0x6919e320), X(0x49389836), X(0x68fd2a3d),
 225 X(0x4961cd33), X(0x68e06129), X(0x498af6df), X(0x68c387e9),
 226 X(0x49b41533), X(0x68a69e81), X(0x49dd282a), X(0x6889a4f6),
 227 X(0x4a062fbd), X(0x686c9b4b), X(0x4a2f2be6), X(0x684f8186),
 228 X(0x4a581c9e), X(0x683257ab), X(0x4a8101de), X(0x68151dbe),
 229 X(0x4aa9dba2), X(0x67f7d3c5), X(0x4ad2a9e2), X(0x67da79c3),
 230 X(0x4afb6c98), X(0x67bd0fbd), X(0x4b2423be), X(0x679f95b7),
 231 X(0x4b4ccf4d), X(0x67820bb7), X(0x4b756f40), X(0x676471c0),
 232 X(0x4b9e0390), X(0x6746c7d8), X(0x4bc68c36), X(0x67290e02),
 233 X(0x4bef092d), X(0x670b4444), X(0x4c177a6e), X(0x66ed6aa1),
 234 X(0x4c3fdff4), X(0x66cf8120), X(0x4c6839b7), X(0x66b187c3),
 235 X(0x4c9087b1), X(0x66937e91), X(0x4cb8c9dd), X(0x6675658c),
 236 X(0x4ce10034), X(0x66573cbb), X(0x4d092ab0), X(0x66390422),
 237 X(0x4d31494b), X(0x661abbc5), X(0x4d595bfe), X(0x65fc63a9),
 238 X(0x4d8162c4), X(0x65ddfbd3), X(0x4da95d96), X(0x65bf8447),
 239 X(0x4dd14c6e), X(0x65a0fd0b), X(0x4df92f46), X(0x65826622),
 240 X(0x4e210617), X(0x6563bf92), X(0x4e48d0dd), X(0x6545095f),
 241 X(0x4e708f8f), X(0x6526438f), X(0x4e984229), X(0x65076e25),
 242 X(0x4ebfe8a5), X(0x64e88926), X(0x4ee782fb), X(0x64c99498),
 243 X(0x4f0f1126), X(0x64aa907f), X(0x4f369320), X(0x648b7ce0),
 244 X(0x4f5e08e3), X(0x646c59bf), X(0x4f857269), X(0x644d2722),
 245 X(0x4faccfab), X(0x642de50d), X(0x4fd420a4), X(0x640e9386),
 246 X(0x4ffb654d), X(0x63ef3290), X(0x50229da1), X(0x63cfc231),
 247 X(0x5049c999), X(0x63b0426d), X(0x5070e92f), X(0x6390b34a),
 248 X(0x5097fc5e), X(0x637114cc), X(0x50bf031f), X(0x635166f9),
 249 X(0x50e5fd6d), X(0x6331a9d4), X(0x510ceb40), X(0x6311dd64),
 250 X(0x5133cc94), X(0x62f201ac), X(0x515aa162), X(0x62d216b3),
 251 X(0x518169a5), X(0x62b21c7b), X(0x51a82555), X(0x6292130c),
 252 X(0x51ced46e), X(0x6271fa69), X(0x51f576ea), X(0x6251d298),
 253 X(0x521c0cc2), X(0x62319b9d), X(0x524295f0), X(0x6211557e),
 254 X(0x5269126e), X(0x61f1003f), X(0x528f8238), X(0x61d09be5),
 255 X(0x52b5e546), X(0x61b02876), X(0x52dc3b92), X(0x618fa5f7),
 256 X(0x53028518), X(0x616f146c), X(0x5328c1d0), X(0x614e73da),
 257 X(0x534ef1b5), X(0x612dc447), X(0x537514c2), X(0x610d05b7),
 258 X(0x539b2af0), X(0x60ec3830), X(0x53c13439), X(0x60cb5bb7),
 259 X(0x53e73097), X(0x60aa7050), X(0x540d2005), X(0x60897601),
 260 X(0x5433027d), X(0x60686ccf), X(0x5458d7f9), X(0x604754bf),
 261 X(0x547ea073), X(0x60262dd6), X(0x54a45be6), X(0x6004f819),
 262 X(0x54ca0a4b), X(0x5fe3b38d), X(0x54efab9c), X(0x5fc26038),
 263 X(0x55153fd4), X(0x5fa0fe1f), X(0x553ac6ee), X(0x5f7f8d46),
 264 X(0x556040e2), X(0x5f5e0db3), X(0x5585adad), X(0x5f3c7f6b),
 265 X(0x55ab0d46), X(0x5f1ae274), X(0x55d05faa), X(0x5ef936d1),
 266 X(0x55f5a4d2), X(0x5ed77c8a), X(0x561adcb9), X(0x5eb5b3a2),
 267 X(0x56400758), X(0x5e93dc1f), X(0x566524aa), X(0x5e71f606),
 268 X(0x568a34a9), X(0x5e50015d), X(0x56af3750), X(0x5e2dfe29),
 269 X(0x56d42c99), X(0x5e0bec6e), X(0x56f9147e), X(0x5de9cc33),
 270 X(0x571deefa), X(0x5dc79d7c), X(0x5742bc06), X(0x5da5604f),
 271 X(0x57677b9d), X(0x5d8314b1), X(0x578c2dba), X(0x5d60baa7),
 272 X(0x57b0d256), X(0x5d3e5237), X(0x57d5696d), X(0x5d1bdb65),
 273 X(0x57f9f2f8), X(0x5cf95638), X(0x581e6ef1), X(0x5cd6c2b5),
 274 X(0x5842dd54), X(0x5cb420e0), X(0x58673e1b), X(0x5c9170bf),
 275 X(0x588b9140), X(0x5c6eb258), X(0x58afd6bd), X(0x5c4be5b0),
 276 X(0x58d40e8c), X(0x5c290acc), X(0x58f838a9), X(0x5c0621b2),
 277 X(0x591c550e), X(0x5be32a67), X(0x594063b5), X(0x5bc024f0),
 278 X(0x59646498), X(0x5b9d1154), X(0x598857b2), X(0x5b79ef96),
 279 X(0x59ac3cfd), X(0x5b56bfbd), X(0x59d01475), X(0x5b3381ce),
 280 X(0x59f3de12), X(0x5b1035cf), X(0x5a1799d1), X(0x5aecdbc5),
 281 X(0x5a3b47ab), X(0x5ac973b5), X(0x5a5ee79a), X(0x5aa5fda5),
 282 X(0x5a82799a), X(0x5a82799a)
 283 };
284
285 /* {sin((2*i+1)*PI/4096), cos((2*i+1)*PI/4096)}, with i = 0 to 511 */
286static const int32_t sincos_lookup1[1024] ICONST_ATTR = {
287 X(0x001921fb), X(0x7ffffd88), X(0x004b65ee), X(0x7fffe9cb),
288 X(0x007da9d4), X(0x7fffc251), X(0x00afeda8), X(0x7fff8719),
289 X(0x00e23160), X(0x7fff3824), X(0x011474f6), X(0x7ffed572),
290 X(0x0146b860), X(0x7ffe5f03), X(0x0178fb99), X(0x7ffdd4d7),
291 X(0x01ab3e97), X(0x7ffd36ee), X(0x01dd8154), X(0x7ffc8549),
292 X(0x020fc3c6), X(0x7ffbbfe6), X(0x024205e8), X(0x7ffae6c7),
293 X(0x027447b0), X(0x7ff9f9ec), X(0x02a68917), X(0x7ff8f954),
294 X(0x02d8ca16), X(0x7ff7e500), X(0x030b0aa4), X(0x7ff6bcf0),
295 X(0x033d4abb), X(0x7ff58125), X(0x036f8a51), X(0x7ff4319d),
296 X(0x03a1c960), X(0x7ff2ce5b), X(0x03d407df), X(0x7ff1575d),
297 X(0x040645c7), X(0x7fefcca4), X(0x04388310), X(0x7fee2e30),
298 X(0x046abfb3), X(0x7fec7c02), X(0x049cfba7), X(0x7feab61a),
299 X(0x04cf36e5), X(0x7fe8dc78), X(0x05017165), X(0x7fe6ef1c),
300 X(0x0533ab20), X(0x7fe4ee06), X(0x0565e40d), X(0x7fe2d938),
301 X(0x05981c26), X(0x7fe0b0b1), X(0x05ca5361), X(0x7fde7471),
302 X(0x05fc89b8), X(0x7fdc247a), X(0x062ebf22), X(0x7fd9c0ca),
303 X(0x0660f398), X(0x7fd74964), X(0x06932713), X(0x7fd4be46),
304 X(0x06c5598a), X(0x7fd21f72), X(0x06f78af6), X(0x7fcf6ce8),
305 X(0x0729bb4e), X(0x7fcca6a7), X(0x075bea8c), X(0x7fc9ccb2),
306 X(0x078e18a7), X(0x7fc6df08), X(0x07c04598), X(0x7fc3dda9),
307 X(0x07f27157), X(0x7fc0c896), X(0x08249bdd), X(0x7fbd9fd0),
308 X(0x0856c520), X(0x7fba6357), X(0x0888ed1b), X(0x7fb7132b),
309 X(0x08bb13c5), X(0x7fb3af4e), X(0x08ed3916), X(0x7fb037bf),
310 X(0x091f5d06), X(0x7facac7f), X(0x09517f8f), X(0x7fa90d8e),
311 X(0x0983a0a7), X(0x7fa55aee), X(0x09b5c048), X(0x7fa1949e),
312 X(0x09e7de6a), X(0x7f9dbaa0), X(0x0a19fb04), X(0x7f99ccf4),
313 X(0x0a4c1610), X(0x7f95cb9a), X(0x0a7e2f85), X(0x7f91b694),
314 X(0x0ab0475c), X(0x7f8d8de1), X(0x0ae25d8d), X(0x7f895182),
315 X(0x0b147211), X(0x7f850179), X(0x0b4684df), X(0x7f809dc5),
316 X(0x0b7895f0), X(0x7f7c2668), X(0x0baaa53b), X(0x7f779b62),
317 X(0x0bdcb2bb), X(0x7f72fcb4), X(0x0c0ebe66), X(0x7f6e4a5e),
318 X(0x0c40c835), X(0x7f698461), X(0x0c72d020), X(0x7f64aabf),
319 X(0x0ca4d620), X(0x7f5fbd77), X(0x0cd6da2d), X(0x7f5abc8a),
320 X(0x0d08dc3f), X(0x7f55a7fa), X(0x0d3adc4e), X(0x7f507fc7),
321 X(0x0d6cda53), X(0x7f4b43f2), X(0x0d9ed646), X(0x7f45f47b),
322 X(0x0dd0d01f), X(0x7f409164), X(0x0e02c7d7), X(0x7f3b1aad),
323 X(0x0e34bd66), X(0x7f359057), X(0x0e66b0c3), X(0x7f2ff263),
324 X(0x0e98a1e9), X(0x7f2a40d2), X(0x0eca90ce), X(0x7f247ba5),
325 X(0x0efc7d6b), X(0x7f1ea2dc), X(0x0f2e67b8), X(0x7f18b679),
326 X(0x0f604faf), X(0x7f12b67c), X(0x0f923546), X(0x7f0ca2e7),
327 X(0x0fc41876), X(0x7f067bba), X(0x0ff5f938), X(0x7f0040f6),
328 X(0x1027d784), X(0x7ef9f29d), X(0x1059b352), X(0x7ef390ae),
329 X(0x108b8c9b), X(0x7eed1b2c), X(0x10bd6356), X(0x7ee69217),
330 X(0x10ef377d), X(0x7edff570), X(0x11210907), X(0x7ed94538),
331 X(0x1152d7ed), X(0x7ed28171), X(0x1184a427), X(0x7ecbaa1a),
332 X(0x11b66dad), X(0x7ec4bf36), X(0x11e83478), X(0x7ebdc0c6),
333 X(0x1219f880), X(0x7eb6aeca), X(0x124bb9be), X(0x7eaf8943),
334 X(0x127d7829), X(0x7ea85033), X(0x12af33ba), X(0x7ea1039b),
335 X(0x12e0ec6a), X(0x7e99a37c), X(0x1312a230), X(0x7e922fd6),
336 X(0x13445505), X(0x7e8aa8ac), X(0x137604e2), X(0x7e830dff),
337 X(0x13a7b1bf), X(0x7e7b5fce), X(0x13d95b93), X(0x7e739e1d),
338 X(0x140b0258), X(0x7e6bc8eb), X(0x143ca605), X(0x7e63e03b),
339 X(0x146e4694), X(0x7e5be40c), X(0x149fe3fc), X(0x7e53d462),
340 X(0x14d17e36), X(0x7e4bb13c), X(0x1503153a), X(0x7e437a9c),
341 X(0x1534a901), X(0x7e3b3083), X(0x15663982), X(0x7e32d2f4),
342 X(0x1597c6b7), X(0x7e2a61ed), X(0x15c95097), X(0x7e21dd73),
343 X(0x15fad71b), X(0x7e194584), X(0x162c5a3b), X(0x7e109a24),
344 X(0x165dd9f0), X(0x7e07db52), X(0x168f5632), X(0x7dff0911),
345 X(0x16c0cef9), X(0x7df62362), X(0x16f2443e), X(0x7ded2a47),
346 X(0x1723b5f9), X(0x7de41dc0), X(0x17552422), X(0x7ddafdce),
347 X(0x17868eb3), X(0x7dd1ca75), X(0x17b7f5a3), X(0x7dc883b4),
348 X(0x17e958ea), X(0x7dbf298d), X(0x181ab881), X(0x7db5bc02),
349 X(0x184c1461), X(0x7dac3b15), X(0x187d6c82), X(0x7da2a6c6),
350 X(0x18aec0db), X(0x7d98ff17), X(0x18e01167), X(0x7d8f4409),
351 X(0x19115e1c), X(0x7d85759f), X(0x1942a6f3), X(0x7d7b93da),
352 X(0x1973ebe6), X(0x7d719eba), X(0x19a52ceb), X(0x7d679642),
353 X(0x19d669fc), X(0x7d5d7a74), X(0x1a07a311), X(0x7d534b50),
354 X(0x1a38d823), X(0x7d4908d9), X(0x1a6a0929), X(0x7d3eb30f),
355 X(0x1a9b361d), X(0x7d3449f5), X(0x1acc5ef6), X(0x7d29cd8c),
356 X(0x1afd83ad), X(0x7d1f3dd6), X(0x1b2ea43a), X(0x7d149ad5),
357 X(0x1b5fc097), X(0x7d09e489), X(0x1b90d8bb), X(0x7cff1af5),
358 X(0x1bc1ec9e), X(0x7cf43e1a), X(0x1bf2fc3a), X(0x7ce94dfb),
359 X(0x1c240786), X(0x7cde4a98), X(0x1c550e7c), X(0x7cd333f3),
360 X(0x1c861113), X(0x7cc80a0f), X(0x1cb70f43), X(0x7cbcccec),
361 X(0x1ce80906), X(0x7cb17c8d), X(0x1d18fe54), X(0x7ca618f3),
362 X(0x1d49ef26), X(0x7c9aa221), X(0x1d7adb73), X(0x7c8f1817),
363 X(0x1dabc334), X(0x7c837ad8), X(0x1ddca662), X(0x7c77ca65),
364 X(0x1e0d84f5), X(0x7c6c06c0), X(0x1e3e5ee5), X(0x7c602fec),
365 X(0x1e6f342c), X(0x7c5445e9), X(0x1ea004c1), X(0x7c4848ba),
366 X(0x1ed0d09d), X(0x7c3c3860), X(0x1f0197b8), X(0x7c3014de),
367 X(0x1f325a0b), X(0x7c23de35), X(0x1f63178f), X(0x7c179467),
368 X(0x1f93d03c), X(0x7c0b3777), X(0x1fc4840a), X(0x7bfec765),
369 X(0x1ff532f2), X(0x7bf24434), X(0x2025dcec), X(0x7be5ade6),
370 X(0x205681f1), X(0x7bd9047c), X(0x208721f9), X(0x7bcc47fa),
371 X(0x20b7bcfe), X(0x7bbf7860), X(0x20e852f6), X(0x7bb295b0),
372 X(0x2118e3dc), X(0x7ba59fee), X(0x21496fa7), X(0x7b989719),
373 X(0x2179f64f), X(0x7b8b7b36), X(0x21aa77cf), X(0x7b7e4c45),
374 X(0x21daf41d), X(0x7b710a49), X(0x220b6b32), X(0x7b63b543),
375 X(0x223bdd08), X(0x7b564d36), X(0x226c4996), X(0x7b48d225),
376 X(0x229cb0d5), X(0x7b3b4410), X(0x22cd12bd), X(0x7b2da2fa),
377 X(0x22fd6f48), X(0x7b1feee5), X(0x232dc66d), X(0x7b1227d3),
378 X(0x235e1826), X(0x7b044dc7), X(0x238e646a), X(0x7af660c2),
379 X(0x23beab33), X(0x7ae860c7), X(0x23eeec78), X(0x7ada4dd8),
380 X(0x241f2833), X(0x7acc27f7), X(0x244f5e5c), X(0x7abdef25),
381 X(0x247f8eec), X(0x7aafa367), X(0x24afb9da), X(0x7aa144bc),
382 X(0x24dfdf20), X(0x7a92d329), X(0x250ffeb7), X(0x7a844eae),
383 X(0x25401896), X(0x7a75b74f), X(0x25702cb7), X(0x7a670d0d),
384 X(0x25a03b11), X(0x7a584feb), X(0x25d0439f), X(0x7a497feb),
385 X(0x26004657), X(0x7a3a9d0f), X(0x26304333), X(0x7a2ba75a),
386 X(0x26603a2c), X(0x7a1c9ece), X(0x26902b39), X(0x7a0d836d),
387 X(0x26c01655), X(0x79fe5539), X(0x26effb76), X(0x79ef1436),
388 X(0x271fda96), X(0x79dfc064), X(0x274fb3ae), X(0x79d059c8),
389 X(0x277f86b5), X(0x79c0e062), X(0x27af53a6), X(0x79b15435),
390 X(0x27df1a77), X(0x79a1b545), X(0x280edb23), X(0x79920392),
391 X(0x283e95a1), X(0x79823f20), X(0x286e49ea), X(0x797267f2),
392 X(0x289df7f8), X(0x79627e08), X(0x28cd9fc1), X(0x79528167),
393 X(0x28fd4140), X(0x79427210), X(0x292cdc6d), X(0x79325006),
394 X(0x295c7140), X(0x79221b4b), X(0x298bffb2), X(0x7911d3e2),
395 X(0x29bb87bc), X(0x790179cd), X(0x29eb0957), X(0x78f10d0f),
396 X(0x2a1a847b), X(0x78e08dab), X(0x2a49f920), X(0x78cffba3),
397 X(0x2a796740), X(0x78bf56f9), X(0x2aa8ced3), X(0x78ae9fb0),
398 X(0x2ad82fd2), X(0x789dd5cb), X(0x2b078a36), X(0x788cf94c),
399 X(0x2b36ddf7), X(0x787c0a36), X(0x2b662b0e), X(0x786b088c),
400 X(0x2b957173), X(0x7859f44f), X(0x2bc4b120), X(0x7848cd83),
401 X(0x2bf3ea0d), X(0x7837942b), X(0x2c231c33), X(0x78264849),
402 X(0x2c52478a), X(0x7814e9df), X(0x2c816c0c), X(0x780378f1),
403 X(0x2cb089b1), X(0x77f1f581), X(0x2cdfa071), X(0x77e05f91),
404 X(0x2d0eb046), X(0x77ceb725), X(0x2d3db928), X(0x77bcfc3f),
405 X(0x2d6cbb10), X(0x77ab2ee2), X(0x2d9bb5f6), X(0x77994f11),
406 X(0x2dcaa9d5), X(0x77875cce), X(0x2df996a3), X(0x7775581d),
407 X(0x2e287c5a), X(0x776340ff), X(0x2e575af3), X(0x77511778),
408 X(0x2e863267), X(0x773edb8b), X(0x2eb502ae), X(0x772c8d3a),
409 X(0x2ee3cbc1), X(0x771a2c88), X(0x2f128d99), X(0x7707b979),
410 X(0x2f41482e), X(0x76f5340e), X(0x2f6ffb7a), X(0x76e29c4b),
411 X(0x2f9ea775), X(0x76cff232), X(0x2fcd4c19), X(0x76bd35c7),
412 X(0x2ffbe95d), X(0x76aa670d), X(0x302a7f3a), X(0x76978605),
413 X(0x30590dab), X(0x768492b4), X(0x308794a6), X(0x76718d1c),
414 X(0x30b61426), X(0x765e7540), X(0x30e48c22), X(0x764b4b23),
415 X(0x3112fc95), X(0x76380ec8), X(0x31416576), X(0x7624c031),
416 X(0x316fc6be), X(0x76115f63), X(0x319e2067), X(0x75fdec60),
417 X(0x31cc7269), X(0x75ea672a), X(0x31fabcbd), X(0x75d6cfc5),
418 X(0x3228ff5c), X(0x75c32634), X(0x32573a3f), X(0x75af6a7b),
419 X(0x32856d5e), X(0x759b9c9b), X(0x32b398b3), X(0x7587bc98),
420 X(0x32e1bc36), X(0x7573ca75), X(0x330fd7e1), X(0x755fc635),
421 X(0x333debab), X(0x754bafdc), X(0x336bf78f), X(0x7537876c),
422 X(0x3399fb85), X(0x75234ce8), X(0x33c7f785), X(0x750f0054),
423 X(0x33f5eb89), X(0x74faa1b3), X(0x3423d78a), X(0x74e63108),
424 X(0x3451bb81), X(0x74d1ae55), X(0x347f9766), X(0x74bd199f),
425 X(0x34ad6b32), X(0x74a872e8), X(0x34db36df), X(0x7493ba34),
426 X(0x3508fa66), X(0x747eef85), X(0x3536b5be), X(0x746a12df),
427 X(0x356468e2), X(0x74552446), X(0x359213c9), X(0x744023bc),
428 X(0x35bfb66e), X(0x742b1144), X(0x35ed50c9), X(0x7415ece2),
429 X(0x361ae2d3), X(0x7400b69a), X(0x36486c86), X(0x73eb6e6e),
430 X(0x3675edd9), X(0x73d61461), X(0x36a366c6), X(0x73c0a878),
431 X(0x36d0d746), X(0x73ab2ab4), X(0x36fe3f52), X(0x73959b1b),
432 X(0x372b9ee3), X(0x737ff9ae), X(0x3758f5f2), X(0x736a4671),
433 X(0x37864477), X(0x73548168), X(0x37b38a6d), X(0x733eaa96),
434 X(0x37e0c7cc), X(0x7328c1ff), X(0x380dfc8d), X(0x7312c7a5),
435 X(0x383b28a9), X(0x72fcbb8c), X(0x38684c19), X(0x72e69db7),
436 X(0x389566d6), X(0x72d06e2b), X(0x38c278d9), X(0x72ba2cea),
437 X(0x38ef821c), X(0x72a3d9f7), X(0x391c8297), X(0x728d7557),
438 X(0x39497a43), X(0x7276ff0d), X(0x39766919), X(0x7260771b),
439 X(0x39a34f13), X(0x7249dd86), X(0x39d02c2a), X(0x72333251),
440 X(0x39fd0056), X(0x721c7580), X(0x3a29cb91), X(0x7205a716),
441 X(0x3a568dd4), X(0x71eec716), X(0x3a834717), X(0x71d7d585),
442 X(0x3aaff755), X(0x71c0d265), X(0x3adc9e86), X(0x71a9bdba),
443 X(0x3b093ca3), X(0x71929789), X(0x3b35d1a5), X(0x717b5fd3),
444 X(0x3b625d86), X(0x7164169d), X(0x3b8ee03e), X(0x714cbbeb),
445 X(0x3bbb59c7), X(0x71354fc0), X(0x3be7ca1a), X(0x711dd220),
446 X(0x3c143130), X(0x7106430e), X(0x3c408f03), X(0x70eea28e),
447 X(0x3c6ce38a), X(0x70d6f0a4), X(0x3c992ec0), X(0x70bf2d53),
448 X(0x3cc5709e), X(0x70a7589f), X(0x3cf1a91c), X(0x708f728b),
449 X(0x3d1dd835), X(0x70777b1c), X(0x3d49fde1), X(0x705f7255),
450 X(0x3d761a19), X(0x70475839), X(0x3da22cd7), X(0x702f2ccd),
451 X(0x3dce3614), X(0x7016f014), X(0x3dfa35c8), X(0x6ffea212),
452 X(0x3e262bee), X(0x6fe642ca), X(0x3e52187f), X(0x6fcdd241),
453 X(0x3e7dfb73), X(0x6fb5507a), X(0x3ea9d4c3), X(0x6f9cbd79),
454 X(0x3ed5a46b), X(0x6f841942), X(0x3f016a61), X(0x6f6b63d8),
455 X(0x3f2d26a0), X(0x6f529d40), X(0x3f58d921), X(0x6f39c57d),
456 X(0x3f8481dd), X(0x6f20dc92), X(0x3fb020ce), X(0x6f07e285),
457 X(0x3fdbb5ec), X(0x6eeed758), X(0x40074132), X(0x6ed5bb10),
458 X(0x4032c297), X(0x6ebc8db0), X(0x405e3a16), X(0x6ea34f3d),
459 X(0x4089a7a8), X(0x6e89ffb9), X(0x40b50b46), X(0x6e709f2a),
460 X(0x40e064ea), X(0x6e572d93), X(0x410bb48c), X(0x6e3daaf8),
461 X(0x4136fa27), X(0x6e24175c), X(0x416235b2), X(0x6e0a72c5),
462 X(0x418d6729), X(0x6df0bd35), X(0x41b88e84), X(0x6dd6f6b1),
463 X(0x41e3abbc), X(0x6dbd1f3c), X(0x420ebecb), X(0x6da336dc),
464 X(0x4239c7aa), X(0x6d893d93), X(0x4264c653), X(0x6d6f3365),
465 X(0x428fbabe), X(0x6d551858), X(0x42baa4e6), X(0x6d3aec6e),
466 X(0x42e584c3), X(0x6d20afac), X(0x43105a50), X(0x6d066215),
467 X(0x433b2585), X(0x6cec03af), X(0x4365e65b), X(0x6cd1947c),
468 X(0x43909ccd), X(0x6cb71482), X(0x43bb48d4), X(0x6c9c83c3),
469 X(0x43e5ea68), X(0x6c81e245), X(0x44108184), X(0x6c67300b),
470 X(0x443b0e21), X(0x6c4c6d1a), X(0x44659039), X(0x6c319975),
471 X(0x449007c4), X(0x6c16b521), X(0x44ba74bd), X(0x6bfbc021),
472 X(0x44e4d71c), X(0x6be0ba7b), X(0x450f2edb), X(0x6bc5a431),
473 X(0x45397bf4), X(0x6baa7d49), X(0x4563be60), X(0x6b8f45c7),
474 X(0x458df619), X(0x6b73fdae), X(0x45b82318), X(0x6b58a503),
475 X(0x45e24556), X(0x6b3d3bcb), X(0x460c5cce), X(0x6b21c208),
476 X(0x46366978), X(0x6b0637c1), X(0x46606b4e), X(0x6aea9cf8),
477 X(0x468a624a), X(0x6acef1b2), X(0x46b44e65), X(0x6ab335f4),
478 X(0x46de2f99), X(0x6a9769c1), X(0x470805df), X(0x6a7b8d1e),
479 X(0x4731d131), X(0x6a5fa010), X(0x475b9188), X(0x6a43a29a),
480 X(0x478546de), X(0x6a2794c1), X(0x47aef12c), X(0x6a0b7689),
481 X(0x47d8906d), X(0x69ef47f6), X(0x48022499), X(0x69d3090e),
482 X(0x482badab), X(0x69b6b9d3), X(0x48552b9b), X(0x699a5a4c),
483 X(0x487e9e64), X(0x697dea7b), X(0x48a805ff), X(0x69616a65),
484 X(0x48d16265), X(0x6944da10), X(0x48fab391), X(0x6928397e),
485 X(0x4923f97b), X(0x690b88b5), X(0x494d341e), X(0x68eec7b9),
486 X(0x49766373), X(0x68d1f68f), X(0x499f8774), X(0x68b5153a),
487 X(0x49c8a01b), X(0x689823bf), X(0x49f1ad61), X(0x687b2224),
488 X(0x4a1aaf3f), X(0x685e106c), X(0x4a43a5b0), X(0x6840ee9b),
489 X(0x4a6c90ad), X(0x6823bcb7), X(0x4a957030), X(0x68067ac3),
490 X(0x4abe4433), X(0x67e928c5), X(0x4ae70caf), X(0x67cbc6c0),
491 X(0x4b0fc99d), X(0x67ae54ba), X(0x4b387af9), X(0x6790d2b6),
492 X(0x4b6120bb), X(0x677340ba), X(0x4b89badd), X(0x67559eca),
493 X(0x4bb24958), X(0x6737ecea), X(0x4bdacc28), X(0x671a2b20),
494 X(0x4c034345), X(0x66fc596f), X(0x4c2baea9), X(0x66de77dc),
495 X(0x4c540e4e), X(0x66c0866d), X(0x4c7c622d), X(0x66a28524),
496 X(0x4ca4aa41), X(0x66847408), X(0x4ccce684), X(0x6666531d),
497 X(0x4cf516ee), X(0x66482267), X(0x4d1d3b7a), X(0x6629e1ec),
498 X(0x4d455422), X(0x660b91af), X(0x4d6d60df), X(0x65ed31b5),
499 X(0x4d9561ac), X(0x65cec204), X(0x4dbd5682), X(0x65b0429f),
500 X(0x4de53f5a), X(0x6591b38c), X(0x4e0d1c30), X(0x657314cf),
501 X(0x4e34ecfc), X(0x6554666d), X(0x4e5cb1b9), X(0x6535a86b),
502 X(0x4e846a60), X(0x6516dacd), X(0x4eac16eb), X(0x64f7fd98),
503 X(0x4ed3b755), X(0x64d910d1), X(0x4efb4b96), X(0x64ba147d),
504 X(0x4f22d3aa), X(0x649b08a0), X(0x4f4a4f89), X(0x647bed3f),
505 X(0x4f71bf2e), X(0x645cc260), X(0x4f992293), X(0x643d8806),
506 X(0x4fc079b1), X(0x641e3e38), X(0x4fe7c483), X(0x63fee4f8),
507 X(0x500f0302), X(0x63df7c4d), X(0x50363529), X(0x63c0043b),
508 X(0x505d5af1), X(0x63a07cc7), X(0x50847454), X(0x6380e5f6),
509 X(0x50ab814d), X(0x63613fcd), X(0x50d281d5), X(0x63418a50),
510 X(0x50f975e6), X(0x6321c585), X(0x51205d7b), X(0x6301f171),
511 X(0x5147388c), X(0x62e20e17), X(0x516e0715), X(0x62c21b7e),
512 X(0x5194c910), X(0x62a219aa), X(0x51bb7e75), X(0x628208a1),
513 X(0x51e22740), X(0x6261e866), X(0x5208c36a), X(0x6241b8ff),
514 X(0x522f52ee), X(0x62217a72), X(0x5255d5c5), X(0x62012cc2),
515 X(0x527c4bea), X(0x61e0cff5), X(0x52a2b556), X(0x61c06410),
516 X(0x52c91204), X(0x619fe918), X(0x52ef61ee), X(0x617f5f12),
517 X(0x5315a50e), X(0x615ec603), X(0x533bdb5d), X(0x613e1df0),
518 X(0x536204d7), X(0x611d66de), X(0x53882175), X(0x60fca0d2),
519 X(0x53ae3131), X(0x60dbcbd1), X(0x53d43406), X(0x60bae7e1),
520 X(0x53fa29ed), X(0x6099f505), X(0x542012e1), X(0x6078f344),
521 X(0x5445eedb), X(0x6057e2a2), X(0x546bbdd7), X(0x6036c325),
522 X(0x54917fce), X(0x601594d1), X(0x54b734ba), X(0x5ff457ad),
523 X(0x54dcdc96), X(0x5fd30bbc), X(0x5502775c), X(0x5fb1b104),
524 X(0x55280505), X(0x5f90478a), X(0x554d858d), X(0x5f6ecf53),
525 X(0x5572f8ed), X(0x5f4d4865), X(0x55985f20), X(0x5f2bb2c5),
526 X(0x55bdb81f), X(0x5f0a0e77), X(0x55e303e6), X(0x5ee85b82),
527 X(0x5608426e), X(0x5ec699e9), X(0x562d73b2), X(0x5ea4c9b3),
528 X(0x565297ab), X(0x5e82eae5), X(0x5677ae54), X(0x5e60fd84),
529 X(0x569cb7a8), X(0x5e3f0194), X(0x56c1b3a1), X(0x5e1cf71c),
530 X(0x56e6a239), X(0x5dfade20), X(0x570b8369), X(0x5dd8b6a7),
531 X(0x5730572e), X(0x5db680b4), X(0x57551d80), X(0x5d943c4e),
532 X(0x5779d65b), X(0x5d71e979), X(0x579e81b8), X(0x5d4f883b),
533 X(0x57c31f92), X(0x5d2d189a), X(0x57e7afe4), X(0x5d0a9a9a),
534 X(0x580c32a7), X(0x5ce80e41), X(0x5830a7d6), X(0x5cc57394),
535 X(0x58550f6c), X(0x5ca2ca99), X(0x58796962), X(0x5c801354),
536 X(0x589db5b3), X(0x5c5d4dcc), X(0x58c1f45b), X(0x5c3a7a05),
537 X(0x58e62552), X(0x5c179806), X(0x590a4893), X(0x5bf4a7d2),
538 X(0x592e5e19), X(0x5bd1a971), X(0x595265df), X(0x5bae9ce7),
539 X(0x59765fde), X(0x5b8b8239), X(0x599a4c12), X(0x5b68596d),
540 X(0x59be2a74), X(0x5b452288), X(0x59e1faff), X(0x5b21dd90),
541 X(0x5a05bdae), X(0x5afe8a8b), X(0x5a29727b), X(0x5adb297d),
542 X(0x5a4d1960), X(0x5ab7ba6c), X(0x5a70b258), X(0x5a943d5e),
543};
544
diff --git a/apps/codecs/lib/misc.h b/apps/codecs/lib/misc.h
new file mode 100644
index 0000000000..5ab78d62e7
--- /dev/null
+++ b/apps/codecs/lib/misc.h
@@ -0,0 +1,291 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: miscellaneous math and prototypes
15
16 ********************************************************************/
17
18//#include "config-tremor.h"
19
20#ifndef _V_RANDOM_H_
21#define _V_RANDOM_H_
22//#include "ivorbiscodec.h"
23//#include "os_types.h"
24
25//#include "asm_arm.h"
26//#include "asm_mcf5249.h"
27
28
29/* Some prototypes that were not defined elsewhere */
30//void *_vorbis_block_alloc(vorbis_block *vb,long bytes);
31//void _vorbis_block_ripcord(vorbis_block *vb);
32//extern int _ilog(unsigned int v);
33
34#ifndef _V_WIDE_MATH
35#define _V_WIDE_MATH
36
37#ifndef _LOW_ACCURACY_
38/* 64 bit multiply */
39/* #include <sys/types.h> */
40
#if BYTE_ORDER==LITTLE_ENDIAN
/* Type-punning helper for the wide-math functions below: write a 64-bit
   product into 'whole', then read the 32-bit halves back through 'halves'.
   The struct layout is selected on byte order so that 'hi' and 'lo' always
   name the correct half of the 64-bit value.
   NOTE(review): if BYTE_ORDER / LITTLE_ENDIAN are not defined by the build,
   the preprocessor evaluates both sides of this #if to 0 and silently picks
   this little-endian branch — confirm the build defines them. */
union magic {
  struct {
    int32_t lo;
    int32_t hi;
  } halves;
  int64_t whole;
};
#elif BYTE_ORDER==BIG_ENDIAN
/* Big-endian layout: the high half comes first in memory. */
union magic {
  struct {
    int32_t hi;
    int32_t lo;
  } halves;
  int64_t whole;
};
#endif
58
/* Fixed-point multiply: returns the upper 32 bits of the 64-bit product
   x*y (i.e. (x*y) >> 32).  Done with shifts on unsigned values rather
   than the BYTE_ORDER-dependent 'union magic' punning: the union variant
   silently selects the little-endian layout when the BYTE_ORDER macros
   are not defined, while this form is endianness-independent. */
static inline int32_t MULT32(int32_t x, int32_t y) {
  return (int32_t)(uint32_t)((uint64_t)((int64_t)x * y) >> 32);
}
/* Fixed-point 1.31 multiply: equivalent to MULT32(x,y)<<1, but the final
   shift is performed on an unsigned value — left-shifting a negative
   signed int (as the original MULT32(x,y)<<1 could do) is undefined
   behavior in C (C99 6.5.7). */
static inline int32_t MULT31(int32_t x, int32_t y) {
  return (int32_t)((uint32_t)((uint64_t)((int64_t)x * y) >> 32) << 1);
}
67
/* Fixed-point multiply keeping bits [46:15] of the 64-bit product, i.e.
   (x*y) >> 15 truncated to 32 bits.  The original form OR'd
   (lo >> 15) with (hi << 17); that left shift on a signed 'hi' is
   undefined behavior when hi is negative.  A logical shift of the
   unsigned 64-bit product yields the identical bit pattern with no UB. */
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  return (int32_t)(uint32_t)((uint64_t)((int64_t)x * y) >> 15);
}
73
74#else
75/* 32 bit multiply, more portable but less accurate */
76
77/*
78 * Note: Precision is biased towards the first argument therefore ordering
79 * is important. Shift values were chosen for the best sound quality after
80 * many listening tests.
81 */
82
83/*
84 * For MULT32 and MULT31: The second argument is always a lookup table
85 * value already preshifted from 31 to 8 bits. We therefore take the
86 * opportunity to save on text space and use unsigned char for those
87 * tables in this case.
88 */
89
/* Low-accuracy MULT32: y arrives preshifted >>23, so scaling x down by
   another 9 bits gives the same net >>32 as the wide 64-bit multiply. */
static inline int32_t MULT32(int32_t x, int32_t y) {
  int32_t x_scaled = x >> 9;   /* y preshifted >>23 */
  return x_scaled * y;
}
93
/* Low-accuracy MULT31: y arrives preshifted >>23; x is scaled by >>8 so
   the combined scaling matches the wide version's >>31. */
static inline int32_t MULT31(int32_t x, int32_t y) {
  int32_t x_scaled = x >> 8;   /* y preshifted >>23 */
  return x_scaled * y;
}
97
/* Low-accuracy MULT31_SHIFT15: y arrives preshifted >>9; x is scaled by
   >>6 so the combined scaling matches the wide version's >>15. */
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
  int32_t x_scaled = x >> 6;   /* y preshifted >>9 */
  return x_scaled * y;
}
101#endif
102
103/*
104 * This should be used as a memory barrier, forcing all cached values in
 * registers to be written back to memory. Might or might not be beneficial
106 * depending on the architecture and compiler.
107 */
108#define MB()
109
110/*
111 * The XPROD functions are meant to optimize the cross products found all
112 * over the place in mdct.c by forcing memory operation ordering to avoid
113 * unnecessary register reloads as soon as memory is being written to.
114 * However this is only beneficial on CPUs with a sane number of general
 * purpose registers, which excludes the Intel x86. On Intel, better let the
116 * compiler actually reload registers directly from original memory by using
117 * macros.
118 */
119
120/* replaced XPROD32 with a macro to avoid memory reference
121 _x, _y are the results (must be l-values) */
/* Cross-product macro (avoids a memory reference on register-starved
   targets).  _x, _y are the results and must be l-values.  Arguments are
   parenthesized so expression arguments keep their precedence, and the
   body is do{}while(0) so the macro is statement-safe inside if/else.
   NOTE: each argument may be evaluated twice — avoid side effects. */
#define XPROD32(_a, _b, _t, _v, _x, _y)                  \
  do { (_x)=MULT32((_a),(_t))+MULT32((_b),(_v));         \
       (_y)=MULT32((_b),(_t))-MULT32((_a),(_v)); } while(0)
125
126
127#ifdef __i386__
128
/* x86 macro variants of XPROD31/XNPROD31: let the compiler reload values
   directly from memory instead of holding them in registers.  Arguments
   are parenthesized against precedence surprises and the bodies are
   do{}while(0) so the macros are statement-safe inside if/else.
   NOTE: arguments may be evaluated twice — avoid side effects. */
#define XPROD31(_a, _b, _t, _v, _x, _y)                   \
  do { *(_x)=MULT31((_a),(_t))+MULT31((_b),(_v));         \
       *(_y)=MULT31((_b),(_t))-MULT31((_a),(_v)); } while(0)
#define XNPROD31(_a, _b, _t, _v, _x, _y)                  \
  do { *(_x)=MULT31((_a),(_t))-MULT31((_b),(_v));         \
       *(_y)=MULT31((_b),(_t))+MULT31((_a),(_v)); } while(0)
135
136#else
137
/* Plain-C cross product in 1.31 fixed point:
   *x = a*t + b*v, *y = b*t - a*v (each product via MULT31). */
static inline void XPROD31(int32_t a, int32_t b,
                           int32_t t, int32_t v,
                           int32_t *x, int32_t *y)
{
  const int32_t at = MULT31(a, t);
  const int32_t av = MULT31(a, v);
  const int32_t bt = MULT31(b, t);
  const int32_t bv = MULT31(b, v);

  *x = at + bv;
  *y = bt - av;
}
145
/* Plain-C negated cross product in 1.31 fixed point:
   *x = a*t - b*v, *y = b*t + a*v (each product via MULT31). */
static inline void XNPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
{
  const int32_t at = MULT31(a, t);
  const int32_t av = MULT31(a, v);
  const int32_t bt = MULT31(b, t);
  const int32_t bv = MULT31(b, v);

  *x = at - bv;
  *y = bt + av;
}
153#endif
154
155#ifndef _V_VECT_OPS
156#define _V_VECT_OPS
157
/* Element-wise accumulate: x[i] += y[i] for i in [0, n).
   'y' is only read, so it is const-qualified; n <= 0 is a no-op. */
static inline
void vect_add(int32_t *x, const int32_t *y, int n)
{
  int i;
  for (i = 0; i < n; i++)
    x[i] += y[i];
}
166
/* Element-wise copy: x[i] = y[i] for i in [0, n), in forward order.
   'y' is only read, so it is const-qualified; n <= 0 is a no-op.
   (Kept as an explicit forward loop rather than memcpy so behavior on
   overlapping buffers matches the original forward traversal.) */
static inline
void vect_copy(int32_t *x, const int32_t *y, int n)
{
  int i;
  for (i = 0; i < n; i++)
    x[i] = y[i];
}
175
/* In-place forward windowing: data[i] = MULT31(data[i], window[i]) for
   i in [0, n); both pointers walk forward.  'window' is only read, so it
   is const-qualified. */
static inline
void vect_mult_fw(int32_t *data, const int32_t *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window++;
    n--;
  }
}
186
/* In-place backward windowing: data walks forward while window walks
   backward, i.e. data[i] = MULT31(data[i], window[-i]) for i in [0, n).
   The caller must pass 'window' pointing at the first coefficient to use,
   with the preceding n-1 coefficients valid below it.  'window' is only
   read, so it is const-qualified. */
static inline
void vect_mult_bw(int32_t *data, const int32_t *window, int n)
{
  while(n>0) {
    *data = MULT31(*data, *window);
    data++;
    window--;
    n--;
  }
}
197#endif
198
199#endif
200
201#ifndef _V_CLIP_MATH
202#define _V_CLIP_MATH
203
/* Saturate x to the signed 16-bit range [-32768, 32767].
   (Equivalent to the original branchless masked-subtract form; written
   with explicit guards for readability — compilers emit conditional
   moves for this shape anyway.) */
static inline int32_t CLIP_TO_15(int32_t x) {
  if (x > 32767)
    return 32767;
  if (x < -32768)
    return -32768;
  return x;
}
210
211#endif
212
/* Multiply two pseudo-floats (mantissa a/b, exponent ap/bp).  Returns the
   result mantissa and stores the result exponent through p.  If either
   mantissa is zero, returns 0 and leaves *p untouched. */
static inline int32_t VFLOAT_MULT(int32_t a,int32_t ap,
                                  int32_t b,int32_t bp,
                                  int32_t *p){
  if (!a || !b)
    return 0;

#ifndef _LOW_ACCURACY_
  *p = ap + bp + 32;
  return MULT32(a, b);
#else
  *p = ap + bp + 31;
  return (a >> 15) * (b >> 16);
#endif
}
227
228/*static inline int32_t VFLOAT_MULTI(int32_t a,int32_t ap,
229 int32_t i,
230 int32_t *p){
231
232 int ip=_ilog(abs(i))-31;
233 return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
234}
235*/
/* Add two pseudo-floats (mantissa a/b, exponent ap/bp).  Aligns both
   operands to a common exponent (rounding the smaller one), adds, then
   renormalizes by at most one bit.  Stores the result exponent through p
   and returns the result mantissa.  A zero operand returns the other
   operand unchanged. */
static inline int32_t VFLOAT_ADD(int32_t a,int32_t ap,
                                 int32_t b,int32_t bp,
                                 int32_t *p){

  if(!a){
    *p=bp;
    return b;
  }else if(!b){
    *p=ap;
    return a;
  }

  /* yes, this can leak a bit. */
  if(ap>bp){
    int shift=ap-bp+1;
    *p=ap+1;
    a>>=1;
    if(shift<32){
      b=(b+(1<<(shift-1)))>>shift;  /* round to nearest before aligning */
    }else{
      b=0;
    }
  }else{
    int shift=bp-ap+1;
    *p=bp+1;
    b>>=1;
    if(shift<32){
      a=(a+(1<<(shift-1)))>>shift;  /* round to nearest before aligning */
    }else{
      a=0;
    }
  }

  a+=b;
  /* Top two bits equal means one spare bit of headroom: renormalize.
     The shift is done on an unsigned value because left-shifting a
     negative signed int (a may be negative here) is undefined behavior. */
  if((a&0xc0000000)==0xc0000000 ||
     (a&0xc0000000)==0){
    a=(int32_t)((uint32_t)a<<1);
    (*p)--;
  }
  return(a);
}
277
/* Branch-prediction hint: EXPECT(expr, likely_value) tells the compiler
   which value 'expr' usually takes.  Maps to __builtin_expect on GCC >= 3
   (where the builtin exists) and degrades to the bare expression on older
   GCC and on other compilers. */
#ifdef __GNUC__
#if __GNUC__ >= 3
#define EXPECT(a, b) __builtin_expect((a), (b))
#else
#define EXPECT(a, b) (a)
#endif
#else
#define EXPECT(a, b) (a)
#endif
287
288#endif
289
290
291