summaryrefslogtreecommitdiff
path: root/apps/codecs/lib
diff options
context:
space:
mode:
Diffstat (limited to 'apps/codecs/lib')
-rw-r--r--apps/codecs/lib/SOURCES12
-rw-r--r--apps/codecs/lib/asm_arm.h292
-rw-r--r--apps/codecs/lib/asm_mcf5249.h353
-rw-r--r--apps/codecs/lib/codeclib.c182
-rw-r--r--apps/codecs/lib/codeclib.h163
-rw-r--r--apps/codecs/lib/codeclib_misc.h310
-rw-r--r--apps/codecs/lib/ffmpeg_bitstream.c374
-rw-r--r--apps/codecs/lib/ffmpeg_bswap.h150
-rw-r--r--apps/codecs/lib/ffmpeg_get_bits.h743
-rw-r--r--apps/codecs/lib/ffmpeg_intreadwrite.h484
-rw-r--r--apps/codecs/lib/ffmpeg_put_bits.h323
-rw-r--r--apps/codecs/lib/fft-ffmpeg.c473
-rw-r--r--apps/codecs/lib/fft-ffmpeg_arm.h456
-rw-r--r--apps/codecs/lib/fft-ffmpeg_cf.h370
-rw-r--r--apps/codecs/lib/fft.h64
-rw-r--r--apps/codecs/lib/fixedpoint.c1
-rw-r--r--apps/codecs/lib/fixedpoint.h49
-rw-r--r--apps/codecs/lib/libcodec.make37
-rw-r--r--apps/codecs/lib/mdct.c644
-rw-r--r--apps/codecs/lib/mdct.h139
-rw-r--r--apps/codecs/lib/mdct_lookup.c872
-rw-r--r--apps/codecs/lib/mdct_lookup.h24
-rw-r--r--apps/codecs/lib/osx.dummy.c0
23 files changed, 0 insertions, 6515 deletions
diff --git a/apps/codecs/lib/SOURCES b/apps/codecs/lib/SOURCES
deleted file mode 100644
index 257dcb5838..0000000000
--- a/apps/codecs/lib/SOURCES
+++ /dev/null
@@ -1,12 +0,0 @@
1#if CONFIG_CODEC == SWCODEC /* software codec platforms */
2codeclib.c
3fixedpoint.c
4ffmpeg_bitstream.c
5
6mdct_lookup.c
7fft-ffmpeg.c
8mdct.c
9
10#elif (CONFIG_PLATFORM & PLATFORM_HOSTED) && defined(__APPLE__)
11osx.dummy.c
12#endif
diff --git a/apps/codecs/lib/asm_arm.h b/apps/codecs/lib/asm_arm.h
deleted file mode 100644
index 8e5d0e68df..0000000000
--- a/apps/codecs/lib/asm_arm.h
+++ /dev/null
@@ -1,292 +0,0 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: arm7 and later wide math functions
15
16 ********************************************************************/
17#ifdef CPU_ARM
18
19#define INCL_OPTIMIZED_MULT32
20#if ARM_ARCH >= 6
21static inline int32_t MULT32(int32_t x, int32_t y) {
22 int32_t hi;
23 asm volatile("smmul %[hi], %[x], %[y] \n\t"
24 : [hi] "=&r" (hi)
25 : [x] "r" (x), [y] "r" (y) );
26 return(hi);
27}
28#else
29static inline int32_t MULT32(int32_t x, int32_t y) {
30 int32_t lo, hi;
31 asm volatile("smull\t%0, %1, %2, %3 \n\t"
32 : "=&r"(lo),"=&r"(hi)
33 : "r"(x),"r"(y) );
34 return(hi);
35}
36#endif
37
38#define INCL_OPTIMIZED_MULT31
39static inline int32_t MULT31(int32_t x, int32_t y) {
40 return MULT32(x,y)<<1;
41}
42
43#define INCL_OPTIMIZED_MULT31_SHIFT15
44static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
45 int32_t lo,hi;
46 asm volatile("smull %0, %1, %2, %3\n\t"
47 "movs %0, %0, lsr #15\n\t"
48 "adc %1, %0, %1, lsl #17\n\t"
49 : "=&r"(lo),"=&r"(hi)
50 : "r"(x),"r"(y)
51 : "cc" );
52 return(hi);
53}
54
55#define INCL_OPTIMIZED_MULT31_SHIFT16
56static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
57 int32_t lo,hi;
58 asm volatile("smull %0, %1, %2, %3\n\t"
59 "movs %0, %0, lsr #16\n\t"
60 "adc %1, %0, %1, lsl #16\n\t"
61 : "=&r"(lo),"=&r"(hi)
62 : "r"(x),"r"(y)
63 : "cc" );
64 return(hi);
65}
66
67#define INCL_OPTIMIZED_XPROD32
68#define XPROD32(a, b, t, v, x, y) \
69{ \
70 int32_t l; \
71 asm("smull %0, %1, %3, %5\n\t" \
72 "rsb %2, %6, #0\n\t" \
73 "smlal %0, %1, %4, %6\n\t" \
74 "smull %0, %2, %3, %2\n\t" \
75 "smlal %0, %2, %4, %5" \
76 : "=&r" (l), "=&r" (x), "=&r" (y) \
77 : "r" ((a)), "r" ((b)), "r" ((t)), "r" ((v)) ); \
78}
79
80#define INCL_OPTIMIZED_XPROD31_R
81#define INCL_OPTIMIZED_XNPROD31_R
82#if ARM_ARCH >= 6
83/* These may yield slightly different result from the macros below
84 because only the high 32 bits of the multiplications are accumulated while
85 the below macros use a 64 bit accumulator that is truncated to 32 bits.*/
86#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
87{\
88 int32_t x1, y1;\
89 asm("smmul %[x1], %[t], %[a] \n\t"\
90 "smmul %[y1], %[t], %[b] \n\t"\
91 "smmla %[x1], %[v], %[b], %[x1] \n\t"\
92 "smmls %[y1], %[v], %[a], %[y1] \n\t"\
93 : [x1] "=&r" (x1), [y1] "=&r" (y1)\
94 : [a] "r" (_a), [b] "r" (_b), [t] "r" (_t), [v] "r" (_v) );\
95 _x = x1 << 1;\
96 _y = y1 << 1;\
97}
98
99#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
100{\
101 int32_t x1, y1;\
102 asm("smmul %[x1], %[t], %[a] \n\t"\
103 "smmul %[y1], %[t], %[b] \n\t"\
104 "smmls %[x1], %[v], %[b], %[x1] \n\t"\
105 "smmla %[y1], %[v], %[a], %[y1] \n\t"\
106 : [x1] "=&r" (x1), [y1] "=&r" (y1)\
107 : [a] "r" (_a), [b] "r" (_b), [t] "r" (_t), [v] "r" (_v) );\
108 _x = x1 << 1;\
109 _y = y1 << 1;\
110}
111#else
112#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
113{\
114 int32_t x1, y1, l;\
115 asm("smull %0, %1, %5, %3\n\t"\
116 "rsb %2, %3, #0\n\t"\
117 "smlal %0, %1, %6, %4\n\t"\
118 "smull %0, %2, %6, %2\n\t"\
119 "smlal %0, %2, %5, %4"\
120 : "=&r" (l), "=&r" (x1), "=&r" (y1)\
121 : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
122 _x = x1 << 1;\
123 _y = y1 << 1;\
124}
125
126#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
127{\
128 int32_t x1, y1, l;\
129 asm("smull %0, %1, %5, %3\n\t"\
130 "rsb %2, %4, #0\n\t"\
131 "smlal %0, %1, %6, %2\n\t"\
132 "smull %0, %2, %5, %4\n\t"\
133 "smlal %0, %2, %6, %3"\
134 : "=&r" (l), "=&r" (x1), "=&r" (y1)\
135 : "r" (_a), "r" (_b), "r" (_t), "r" (_v) );\
136 _x = x1 << 1;\
137 _y = y1 << 1;\
138}
139#endif
140
141#define INCL_OPTIMIZED_XPROD31
142static inline void XPROD31(int32_t a, int32_t b,
143 int32_t t, int32_t v,
144 int32_t *x, int32_t *y)
145{
146 int32_t _x1, _y1;
147 XPROD31_R(a, b, t, v, _x1, _y1);
148 *x = _x1;
149 *y = _y1;
150}
151
152#define INCL_OPTIMIZED_XNPROD31
153static inline void XNPROD31(int32_t a, int32_t b,
154 int32_t t, int32_t v,
155 int32_t *x, int32_t *y)
156{
157 int32_t _x1, _y1;
158 XNPROD31_R(a, b, t, v, _x1, _y1);
159 *x = _x1;
160 *y = _y1;
161}
162
163
164#ifndef _V_VECT_OPS
165#define _V_VECT_OPS
166
167/* asm versions of vector operations for block.c, window.c */
168static inline
169void vect_add(int32_t *x, const int32_t *y, int n)
170{
171 while (n>=4) {
172 asm volatile ("ldmia %[x], {r0, r1, r2, r3};"
173 "ldmia %[y]!, {r4, r5, r6, r7};"
174 "add r0, r0, r4;"
175 "add r1, r1, r5;"
176 "add r2, r2, r6;"
177 "add r3, r3, r7;"
178 "stmia %[x]!, {r0, r1, r2, r3};"
179 : [x] "+r" (x), [y] "+r" (y)
180 : : "r0", "r1", "r2", "r3",
181 "r4", "r5", "r6", "r7",
182 "memory");
183 n -= 4;
184 }
185 /* add final elements */
186 while (n>0) {
187 *x++ += *y++;
188 n--;
189 }
190}
191
192static inline
193void vect_copy(int32_t *x, const int32_t *y, int n)
194{
195 while (n>=4) {
196 asm volatile ("ldmia %[y]!, {r0, r1, r2, r3};"
197 "stmia %[x]!, {r0, r1, r2, r3};"
198 : [x] "+r" (x), [y] "+r" (y)
199 : : "r0", "r1", "r2", "r3",
200 "memory");
201 n -= 4;
202 }
203 /* copy final elements */
204 while (n>0) {
205 *x++ = *y++;
206 n--;
207 }
208}
209
210static inline
211void vect_mult_fw(int32_t *data, const int32_t *window, int n)
212{
213 while (n>=4) {
214 asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
215 "ldmia %[w]!, {r4, r5, r6, r7};"
216 "smull r8, r9, r0, r4;"
217 "mov r0, r9, lsl #1;"
218 "smull r8, r9, r1, r5;"
219 "mov r1, r9, lsl #1;"
220 "smull r8, r9, r2, r6;"
221 "mov r2, r9, lsl #1;"
222 "smull r8, r9, r3, r7;"
223 "mov r3, r9, lsl #1;"
224 "stmia %[d]!, {r0, r1, r2, r3};"
225 : [d] "+r" (data), [w] "+r" (window)
226 : : "r0", "r1", "r2", "r3",
227 "r4", "r5", "r6", "r7", "r8", "r9",
228 "memory" );
229 n -= 4;
230 }
231 while(n>0) {
232 *data = MULT31(*data, *window);
233 data++;
234 window++;
235 n--;
236 }
237}
238
239static inline
240void vect_mult_bw(int32_t *data, const int32_t *window, int n)
241{
242 while (n>=4) {
243 asm volatile ("ldmia %[d], {r0, r1, r2, r3};"
244 "ldmda %[w]!, {r4, r5, r6, r7};"
245 "smull r8, r9, r0, r7;"
246 "mov r0, r9, lsl #1;"
247 "smull r8, r9, r1, r6;"
248 "mov r1, r9, lsl #1;"
249 "smull r8, r9, r2, r5;"
250 "mov r2, r9, lsl #1;"
251 "smull r8, r9, r3, r4;"
252 "mov r3, r9, lsl #1;"
253 "stmia %[d]!, {r0, r1, r2, r3};"
254 : [d] "+r" (data), [w] "+r" (window)
255 : : "r0", "r1", "r2", "r3",
256 "r4", "r5", "r6", "r7", "r8", "r9",
257 "memory" );
258 n -= 4;
259 }
260 while(n>0) {
261 *data = MULT31(*data, *window);
262 data++;
263 window--;
264 n--;
265 }
266}
267
268#endif
269
270/* not used anymore */
271/*
272#ifndef _V_CLIP_MATH
273#define _V_CLIP_MATH
274
275static inline int32_t CLIP_TO_15(int32_t x) {
276 int tmp;
277 asm volatile("subs %1, %0, #32768\n\t"
278 "movpl %0, #0x7f00\n\t"
279 "orrpl %0, %0, #0xff\n"
280 "adds %1, %0, #32768\n\t"
281 "movmi %0, #0x8000"
282 : "+r"(x),"=r"(tmp)
283 :
284 : "cc");
285 return(x);
286}
287
288#endif
289*/
290
291#endif
292
diff --git a/apps/codecs/lib/asm_mcf5249.h b/apps/codecs/lib/asm_mcf5249.h
deleted file mode 100644
index 841c413a94..0000000000
--- a/apps/codecs/lib/asm_mcf5249.h
+++ /dev/null
@@ -1,353 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 *
9 * Copyright (C) 2005 by Pedro Vasconcelos
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version 2
14 * of the License, or (at your option) any later version.
15 *
16 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
17 * KIND, either express or implied.
18 *
19 ****************************************************************************/
20/* asm routines for wide math on the MCF5249 */
21
22#if defined(CPU_COLDFIRE)
23
24#define INCL_OPTIMIZED_MULT32
25static inline int32_t MULT32(int32_t x, int32_t y) {
26
27 asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply & shift */
28 "movclr.l %%acc0, %[x];" /* move & clear acc */
29 "asr.l #1, %[x];" /* no overflow test */
30 : [x] "+&d" (x)
31 : [y] "r" (y)
32 : "cc");
33 return x;
34}
35
36#define INCL_OPTIMIZED_MULT31
37static inline int32_t MULT31(int32_t x, int32_t y) {
38 asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply */
39 "movclr.l %%acc0, %[x];" /* move and clear */
40 : [x] "+&r" (x)
41 : [y] "r" (y)
42 : "cc");
43 return x;
44}
45
46#define INCL_OPTIMIZED_MULT31_SHIFT15
47/* NOTE: this requires that the emac is *NOT* rounding */
48static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
49 int32_t r;
50
51 asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply */
52 "mulu.l %[y], %[x];" /* get lower half, avoid emac stall */
53 "movclr.l %%acc0, %[r];" /* get higher half */
54 "swap %[r];" /* hi<<16, plus one free */
55 "lsr.l #8, %[x];" /* (unsigned)lo >> 15 */
56 "lsr.l #7, %[x];"
57 "move.w %[x], %[r];" /* logical-or results */
58 : [r] "=&d" (r), [x] "+d" (x)
59 : [y] "d" (y)
60 : "cc");
61 return r;
62}
63
64#define INCL_OPTIMIZED_MULT31_SHIFT16
65static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
66 int32_t r;
67
68 asm volatile ("mac.l %[x], %[y], %%acc0;" /* multiply */
69 "mulu.l %[y], %[x];" /* get lower half, avoid emac stall */
70 "movclr.l %%acc0, %[r];" /* get higher half */
71 "lsr.l #1, %[r];" /* hi >> 1, to compensate emac shift */
72 "move.w %[r], %[x];" /* x = x & 0xffff0000 | r & 0xffff */
73 "swap %[x];" /* x = (unsigned)x << 16 | (unsigned)x >> 16 */
74 : [r] "=&d" (r), [x] "+d" (x)
75 : [y] "d" (y)
76 : "cc");
77 return x;
78}
79
80#define INCL_OPTIMIZED_XPROD31
81static inline
82void XPROD31(int32_t a, int32_t b,
83 int32_t t, int32_t v,
84 int32_t *x, int32_t *y)
85{
86 asm volatile ("mac.l %[a], %[t], %%acc0;"
87 "mac.l %[b], %[v], %%acc0;"
88 "mac.l %[b], %[t], %%acc1;"
89 "msac.l %[a], %[v], %%acc1;"
90 "movclr.l %%acc0, %[a];"
91 "move.l %[a], (%[x]);"
92 "movclr.l %%acc1, %[a];"
93 "move.l %[a], (%[y]);"
94 : [a] "+&r" (a)
95 : [x] "a" (x), [y] "a" (y),
96 [b] "r" (b), [t] "r" (t), [v] "r" (v)
97 : "cc", "memory");
98}
99
100#define INCL_OPTIMIZED_XNPROD31
101static inline
102void XNPROD31(int32_t a, int32_t b,
103 int32_t t, int32_t v,
104 int32_t *x, int32_t *y)
105{
106 asm volatile ("mac.l %[a], %[t], %%acc0;"
107 "msac.l %[b], %[v], %%acc0;"
108 "mac.l %[b], %[t], %%acc1;"
109 "mac.l %[a], %[v], %%acc1;"
110 "movclr.l %%acc0, %[a];"
111 "move.l %[a], (%[x]);"
112 "movclr.l %%acc1, %[a];"
113 "move.l %[a], (%[y]);"
114 : [a] "+&r" (a)
115 : [x] "a" (x), [y] "a" (y),
116 [b] "r" (b), [t] "r" (t), [v] "r" (v)
117 : "cc", "memory");
118}
119
120
121/* this could lose the LSB by overflow, but i don't think it'll ever happen.
122 if anyone think they can hear a bug caused by this, please try the above
123 version. */
124#define INCL_OPTIMIZED_XPROD32
125#define XPROD32(_a, _b, _t, _v, _x, _y) \
126 asm volatile ("mac.l %[a], %[t], %%acc0;" \
127 "mac.l %[b], %[v], %%acc0;" \
128 "mac.l %[b], %[t], %%acc1;" \
129 "msac.l %[a], %[v], %%acc1;" \
130 "movclr.l %%acc0, %[x];" \
131 "asr.l #1, %[x];" \
132 "movclr.l %%acc1, %[y];" \
133 "asr.l #1, %[y];" \
134 : [x] "=d" (_x), [y] "=d" (_y) \
135 : [a] "r" (_a), [b] "r" (_b), \
136 [t] "r" (_t), [v] "r" (_v) \
137 : "cc");
138
139#define INCL_OPTIMIZED_XPROD31_R
140#define XPROD31_R(_a, _b, _t, _v, _x, _y) \
141 asm volatile ("mac.l %[a], %[t], %%acc0;" \
142 "mac.l %[b], %[v], %%acc0;" \
143 "mac.l %[b], %[t], %%acc1;" \
144 "msac.l %[a], %[v], %%acc1;" \
145 "movclr.l %%acc0, %[x];" \
146 "movclr.l %%acc1, %[y];" \
147 : [x] "=r" (_x), [y] "=r" (_y) \
148 : [a] "r" (_a), [b] "r" (_b), \
149 [t] "r" (_t), [v] "r" (_v) \
150 : "cc");
151
152#define INCL_OPTIMIZED_XNPROD31_R
153#define XNPROD31_R(_a, _b, _t, _v, _x, _y) \
154 asm volatile ("mac.l %[a], %[t], %%acc0;" \
155 "msac.l %[b], %[v], %%acc0;" \
156 "mac.l %[b], %[t], %%acc1;" \
157 "mac.l %[a], %[v], %%acc1;" \
158 "movclr.l %%acc0, %[x];" \
159 "movclr.l %%acc1, %[y];" \
160 : [x] "=r" (_x), [y] "=r" (_y) \
161 : [a] "r" (_a), [b] "r" (_b), \
162 [t] "r" (_t), [v] "r" (_v) \
163 : "cc");
164
165#ifndef _V_VECT_OPS
166#define _V_VECT_OPS
167
168/* asm versions of vector operations for block.c, window.c */
169/* assumes MAC is initialized & accumulators cleared */
170static inline
171void vect_add(int32_t *x, const int32_t *y, int n)
172{
173 /* align to 16 bytes */
174 while(n>0 && (int)x&15) {
175 *x++ += *y++;
176 n--;
177 }
178 asm volatile ("bra 1f;"
179 "0:" /* loop start */
180 "movem.l (%[x]), %%d0-%%d3;" /* fetch values */
181 "movem.l (%[y]), %%a0-%%a3;"
182 /* add */
183 "add.l %%a0, %%d0;"
184 "add.l %%a1, %%d1;"
185 "add.l %%a2, %%d2;"
186 "add.l %%a3, %%d3;"
187 /* store and advance */
188 "movem.l %%d0-%%d3, (%[x]);"
189 "lea.l (4*4, %[x]), %[x];"
190 "lea.l (4*4, %[y]), %[y];"
191 "subq.l #4, %[n];" /* done 4 elements */
192 "1: cmpi.l #4, %[n];"
193 "bge 0b;"
194 : [n] "+d" (n), [x] "+a" (x), [y] "+a" (y)
195 : : "%d0", "%d1", "%d2", "%d3", "%a0", "%a1", "%a2", "%a3",
196 "cc", "memory");
197 /* add final elements */
198 while (n>0) {
199 *x++ += *y++;
200 n--;
201 }
202}
203
204static inline
205void vect_copy(int32_t *x, const int32_t *y, int n)
206{
207 /* align to 16 bytes */
208 while(n>0 && (int)x&15) {
209 *x++ = *y++;
210 n--;
211 }
212 asm volatile ("bra 1f;"
213 "0:" /* loop start */
214 "movem.l (%[y]), %%d0-%%d3;" /* fetch values */
215 "movem.l %%d0-%%d3, (%[x]);" /* store */
216 "lea.l (4*4, %[x]), %[x];" /* advance */
217 "lea.l (4*4, %[y]), %[y];"
218 "subq.l #4, %[n];" /* done 4 elements */
219 "1: cmpi.l #4, %[n];"
220 "bge 0b;"
221 : [n] "+d" (n), [x] "+a" (x), [y] "+a" (y)
222 : : "%d0", "%d1", "%d2", "%d3", "cc", "memory");
223 /* copy final elements */
224 while (n>0) {
225 *x++ = *y++;
226 n--;
227 }
228}
229
230static inline
231void vect_mult_fw(int32_t *data, const int32_t *window, int n)
232{
233 /* ensure data is aligned to 16-bytes */
234 while(n>0 && (int)data&15) {
235 *data = MULT31(*data, *window);
236 data++;
237 window++;
238 n--;
239 }
240 asm volatile ("movem.l (%[d]), %%d0-%%d3;" /* loop start */
241 "movem.l (%[w]), %%a0-%%a3;" /* pre-fetch registers */
242 "lea.l (4*4, %[w]), %[w];"
243 "bra 1f;" /* jump to loop condition */
244 "0:" /* loop body */
245 /* multiply and load next window values */
246 "mac.l %%d0, %%a0, (%[w])+, %%a0, %%acc0;"
247 "mac.l %%d1, %%a1, (%[w])+, %%a1, %%acc1;"
248 "mac.l %%d2, %%a2, (%[w])+, %%a2, %%acc2;"
249 "mac.l %%d3, %%a3, (%[w])+, %%a3, %%acc3;"
250 "movclr.l %%acc0, %%d0;" /* get the products */
251 "movclr.l %%acc1, %%d1;"
252 "movclr.l %%acc2, %%d2;"
253 "movclr.l %%acc3, %%d3;"
254 /* store and advance */
255 "movem.l %%d0-%%d3, (%[d]);"
256 "lea.l (4*4, %[d]), %[d];"
257 "movem.l (%[d]), %%d0-%%d3;"
258 "subq.l #4, %[n];" /* done 4 elements */
259 "1: cmpi.l #4, %[n];"
260 "bge 0b;"
261 /* multiply final elements */
262 "tst.l %[n];"
263 "beq 1f;" /* n=0 */
264 "mac.l %%d0, %%a0, %%acc0;"
265 "movclr.l %%acc0, %%d0;"
266 "move.l %%d0, (%[d])+;"
267 "subq.l #1, %[n];"
268 "beq 1f;" /* n=1 */
269 "mac.l %%d1, %%a1, %%acc0;"
270 "movclr.l %%acc0, %%d1;"
271 "move.l %%d1, (%[d])+;"
272 "subq.l #1, %[n];"
273 "beq 1f;" /* n=2 */
274 /* otherwise n = 3 */
275 "mac.l %%d2, %%a2, %%acc0;"
276 "movclr.l %%acc0, %%d2;"
277 "move.l %%d2, (%[d])+;"
278 "1:"
279 : [n] "+d" (n), [d] "+a" (data), [w] "+a" (window)
280 : : "%d0", "%d1", "%d2", "%d3", "%a0", "%a1", "%a2", "%a3",
281 "cc", "memory");
282}
283
284static inline
285void vect_mult_bw(int32_t *data, const int32_t *window, int n)
286{
287 /* ensure at least data is aligned to 16-bytes */
288 while(n>0 && (int)data&15) {
289 *data = MULT31(*data, *window);
290 data++;
291 window--;
292 n--;
293 }
294 asm volatile ("lea.l (-3*4, %[w]), %[w];" /* loop start */
295 "movem.l (%[d]), %%d0-%%d3;" /* pre-fetch registers */
296 "movem.l (%[w]), %%a0-%%a3;"
297 "bra 1f;" /* jump to loop condition */
298 "0:" /* loop body */
299 /* multiply and load next window value */
300 "mac.l %%d0, %%a3, -(%[w]), %%a3, %%acc0;"
301 "mac.l %%d1, %%a2, -(%[w]), %%a2, %%acc1;"
302 "mac.l %%d2, %%a1, -(%[w]), %%a1, %%acc2;"
303 "mac.l %%d3, %%a0, -(%[w]), %%a0, %%acc3;"
304 "movclr.l %%acc0, %%d0;" /* get the products */
305 "movclr.l %%acc1, %%d1;"
306 "movclr.l %%acc2, %%d2;"
307 "movclr.l %%acc3, %%d3;"
308 /* store and advance */
309 "movem.l %%d0-%%d3, (%[d]);"
310 "lea.l (4*4, %[d]), %[d];"
311 "movem.l (%[d]), %%d0-%%d3;"
312 "subq.l #4, %[n];" /* done 4 elements */
313 "1: cmpi.l #4, %[n];"
314 "bge 0b;"
315 /* multiply final elements */
316 "tst.l %[n];"
317 "beq 1f;" /* n=0 */
318 "mac.l %%d0, %%a3, %%acc0;"
319 "movclr.l %%acc0, %%d0;"
320 "move.l %%d0, (%[d])+;"
321 "subq.l #1, %[n];"
322 "beq 1f;" /* n=1 */
323 "mac.l %%d1, %%a2, %%acc0;"
324 "movclr.l %%acc0, %%d1;"
325 "move.l %%d1, (%[d])+;"
326 "subq.l #1, %[n];"
327 "beq 1f;" /* n=2 */
328 /* otherwise n = 3 */
329 "mac.l %%d2, %%a1, %%acc0;"
330 "movclr.l %%acc0, %%d2;"
331 "move.l %%d2, (%[d])+;"
332 "1:"
333 : [n] "+d" (n), [d] "+a" (data), [w] "+a" (window)
334 : : "%d0", "%d1", "%d2", "%d3", "%a0", "%a1", "%a2", "%a3",
335 "cc", "memory");
336}
337
338#endif
339
340/* not used anymore */
341/*
342#ifndef _V_CLIP_MATH
343#define _V_CLIP_MATH
344
345* this is portable C and simple; why not use this as default?
346static inline int32_t CLIP_TO_15(register int32_t x) {
347 register int32_t hi=32767, lo=-32768;
348 return (x>=hi ? hi : (x<=lo ? lo : x));
349}
350
351#endif
352*/
353#endif
diff --git a/apps/codecs/lib/codeclib.c b/apps/codecs/lib/codeclib.c
deleted file mode 100644
index 36f4279941..0000000000
--- a/apps/codecs/lib/codeclib.c
+++ /dev/null
@@ -1,182 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2005 Dave Chapman
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22/* "helper functions" common to all codecs */
23
24#include <string.h>
25#include "codecs.h"
26#include "dsp.h"
27#include "codeclib.h"
28#include "metadata.h"
29
30/* The following variables are used by codec_malloc() to make use of free RAM
31 * within the statically allocated codec buffer. */
32static size_t mem_ptr = 0;
33static size_t bufsize = 0;
34static unsigned char* mallocbuf = NULL;
35
36int codec_init(void)
37{
38 /* codec_get_buffer() aligns the resulting point to CACHEALIGN_SIZE. */
39 mem_ptr = 0;
40 mallocbuf = (unsigned char *)ci->codec_get_buffer((size_t *)&bufsize);
41
42 return 0;
43}
44
45void codec_set_replaygain(const struct mp3entry *id3)
46{
47 ci->configure(DSP_SET_TRACK_GAIN, id3->track_gain);
48 ci->configure(DSP_SET_ALBUM_GAIN, id3->album_gain);
49 ci->configure(DSP_SET_TRACK_PEAK, id3->track_peak);
50 ci->configure(DSP_SET_ALBUM_PEAK, id3->album_peak);
51}
52
53/* Various "helper functions" common to all the xxx2wav decoder plugins */
54
55
56void* codec_malloc(size_t size)
57{
58 void* x;
59
60 if (mem_ptr + (long)size > bufsize)
61 return NULL;
62
63 x=&mallocbuf[mem_ptr];
64
65 /* Keep memory aligned to CACHEALIGN_SIZE. */
66 mem_ptr += (size + (CACHEALIGN_SIZE-1)) & ~(CACHEALIGN_SIZE-1);
67
68 return(x);
69}
70
71void* codec_calloc(size_t nmemb, size_t size)
72{
73 void* x;
74 x = codec_malloc(nmemb*size);
75 if (x == NULL)
76 return NULL;
77 ci->memset(x,0,nmemb*size);
78 return(x);
79}
80
81void codec_free(void* ptr) {
82 (void)ptr;
83}
84
85void* codec_realloc(void* ptr, size_t size)
86{
87 void* x;
88 (void)ptr;
89 x = codec_malloc(size);
90 return(x);
91}
92
93size_t strlen(const char *s)
94{
95 return(ci->strlen(s));
96}
97
98char *strcpy(char *dest, const char *src)
99{
100 return(ci->strcpy(dest,src));
101}
102
103char *strcat(char *dest, const char *src)
104{
105 return(ci->strcat(dest,src));
106}
107
108int strcmp(const char *s1, const char *s2)
109{
110 return(ci->strcmp(s1,s2));
111}
112
113void *memcpy(void *dest, const void *src, size_t n)
114{
115 return(ci->memcpy(dest,src,n));
116}
117
118void *memset(void *s, int c, size_t n)
119{
120 return(ci->memset(s,c,n));
121}
122
123int memcmp(const void *s1, const void *s2, size_t n)
124{
125 return(ci->memcmp(s1,s2,n));
126}
127
128void* memchr(const void *s, int c, size_t n)
129{
130 return(ci->memchr(s,c,n));
131}
132
133void *memmove(void *dest, const void *src, size_t n)
134{
135 return(ci->memmove(dest,src,n));
136}
137
138void qsort(void *base, size_t nmemb, size_t size,
139 int(*compar)(const void *, const void *))
140{
141 ci->qsort(base,nmemb,size,compar);
142}
143
144/* From ffmpeg - libavutil/common.h */
145const uint8_t bs_log2_tab[256] ICONST_ATTR = {
146 0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
147 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
148 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
149 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
150 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
151 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
152 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
153 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
154};
155
156const uint8_t bs_clz_tab[256] ICONST_ATTR = {
157 8,7,6,6,5,5,5,5,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
158 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
159 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
160 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
161 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
162 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
163 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
164 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
165};
166
167#ifdef RB_PROFILE
168void __cyg_profile_func_enter(void *this_fn, void *call_site) {
169/* This workaround is required for coldfire gcc 3.4 but is broken for 4.4
170 and 4.5, but for those the other way works. */
171#if defined(CPU_COLDFIRE) && defined(__GNUC__) && __GNUC__ < 4
172 (void)call_site;
173 ci->profile_func_enter(this_fn, __builtin_return_address(1));
174#else
175 ci->profile_func_enter(this_fn, call_site);
176#endif
177}
178
179void __cyg_profile_func_exit(void *this_fn, void *call_site) {
180 ci->profile_func_exit(this_fn,call_site);
181}
182#endif
diff --git a/apps/codecs/lib/codeclib.h b/apps/codecs/lib/codeclib.h
deleted file mode 100644
index d0f985b8e1..0000000000
--- a/apps/codecs/lib/codeclib.h
+++ /dev/null
@@ -1,163 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2005 Dave Chapman
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21
22#ifndef __CODECLIB_H__
23#define __CODECLIB_H__
24
25#include <inttypes.h>
26#include <string.h>
27#include "config.h"
28#include "codecs.h"
29#include "mdct.h"
30#include "fft.h"
31
32extern struct codec_api *ci;
33
34/* Standard library functions that are used by the codecs follow here */
35
36/* Get these functions 'out of the way' of the standard functions. Not doing
37 * so confuses the cygwin linker, and maybe others. These functions need to
38 * be implemented elsewhere */
39#define malloc(x) codec_malloc(x)
40#define calloc(x,y) codec_calloc(x,y)
41#define realloc(x,y) codec_realloc(x,y)
42#define free(x) codec_free(x)
43#undef alloca
44#define alloca(x) __builtin_alloca(x)
45
46void* codec_malloc(size_t size);
47void* codec_calloc(size_t nmemb, size_t size);
48void* codec_realloc(void* ptr, size_t size);
49void codec_free(void* ptr);
50
51void *memcpy(void *dest, const void *src, size_t n);
52void *memset(void *s, int c, size_t n);
53int memcmp(const void *s1, const void *s2, size_t n);
54void *memmove(void *s1, const void *s2, size_t n);
55
56size_t strlen(const char *s);
57char *strcpy(char *dest, const char *src);
58char *strcat(char *dest, const char *src);
59
60/* on some platforms strcmp() seems to be a tricky define which
61 * breaks if we write down strcmp's prototype */
62#undef strcmp
63int strcmp(const char *s1, const char *s2);
64
65void qsort(void *base, size_t nmemb, size_t size, int(*compar)(const void *, const void *));
66
67/*MDCT library functions*/
68/* -1- Tremor mdct */
69extern void mdct_backward(int n, int32_t *in, int32_t *out);
70/* -2- ffmpeg fft-based mdct */
71extern void ff_imdct_half(unsigned int nbits, int32_t *output, const int32_t *input);
72extern void ff_imdct_calc(unsigned int nbits, int32_t *output, const int32_t *input);
73/*ffmpeg fft (can be used without mdct)*/
74extern void ff_fft_calc_c(int nbits, FFTComplex *z);
75
76#if !defined(CPU_ARM) || ARM_ARCH < 5
77/* From libavutil/common.h */
78extern const uint8_t bs_log2_tab[256] ICONST_ATTR;
79extern const uint8_t bs_clz_tab[256] ICONST_ATTR;
80#endif
81
82#define BS_LOG2 0 /* default personality, equivalent floor(log2(x)) */
83#define BS_CLZ 1 /* alternate personality, Count Leading Zeros */
84#define BS_SHORT 2 /* input guaranteed not to exceed 16 bits */
85#define BS_0_0 4 /* guarantee mapping of 0 input to 0 output */
86
87/* Generic bit-scanning function, used to wrap platform CLZ instruction or
88 scan-and-lookup code, and to provide control over output for 0 inputs. */
89static inline unsigned int bs_generic(unsigned int v, int mode)
90{
91#if defined(CPU_ARM) && ARM_ARCH >= 5
92 unsigned int r = __builtin_clz(v);
93 if (mode & BS_CLZ)
94 {
95 if (mode & BS_0_0)
96 r &= 31;
97 } else {
98 r = 31 - r;
99 /* If mode is constant, this is a single conditional instruction */
100 if (mode & BS_0_0 && (signed)r < 0)
101 r += 1;
102 }
103#else
104 const uint8_t *bs_tab;
105 unsigned int r;
106 unsigned int n = v;
107 int inc;
108 /* Set up table, increment, and initial result value based on
109 personality. */
110 if (mode & BS_CLZ)
111 {
112 bs_tab = bs_clz_tab;
113 r = 24;
114 inc = -16;
115 } else {
116 bs_tab = bs_log2_tab;
117 r = 0;
118 inc = 16;
119 }
120 if (!(mode & BS_SHORT) && n >= 0x10000) {
121 n >>= 16;
122 r += inc;
123 }
124 if (n > 0xff) {
125 n >>= 8;
126 r += inc / 2;
127 }
128#ifdef CPU_COLDFIRE
129 /* The high 24 bits of n are guaranteed empty after the above, so a
130 superfluous ext.b instruction can be saved by loading the LUT value over
131 n with asm */
132 asm volatile (
133 "move.b (%1,%0.l),%0"
134 : "+d" (n)
135 : "a" (bs_tab)
136 );
137#else
138 n = bs_tab[n];
139#endif
140 r += n;
141 if (mode & BS_CLZ && mode & BS_0_0 && v == 0)
142 r = 0;
143#endif
144 return r;
145}
146
147/* TODO figure out if we really need to care about calculating
148 av_log2(0) */
149#define av_log2(v) bs_generic(v, BS_0_0)
150
151/* Various codec helper functions */
152
153int codec_init(void);
154void codec_set_replaygain(const struct mp3entry *id3);
155
156#ifdef RB_PROFILE
157void __cyg_profile_func_enter(void *this_fn, void *call_site)
158 NO_PROF_ATTR ICODE_ATTR;
159void __cyg_profile_func_exit(void *this_fn, void *call_site)
160 NO_PROF_ATTR ICODE_ATTR;
161#endif
162
163#endif /* __CODECLIB_H__ */
diff --git a/apps/codecs/lib/codeclib_misc.h b/apps/codecs/lib/codeclib_misc.h
deleted file mode 100644
index 8ebe22e37b..0000000000
--- a/apps/codecs/lib/codeclib_misc.h
+++ /dev/null
@@ -1,310 +0,0 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: miscellaneous math and prototypes
15
16 ********************************************************************/
17
18#ifndef _CODECLIB_MISC_H_
19#define _CODECLIB_MISC_H_
20
21#include <stdint.h>
22#include "asm_arm.h"
23#include "asm_mcf5249.h"
24
25#ifndef _LOW_ACCURACY_
26/* 64 bit multiply */
27
28#ifdef ROCKBOX_LITTLE_ENDIAN
29union magic {
30 struct {
31 int32_t lo;
32 int32_t hi;
33 } halves;
34 int64_t whole;
35};
36#elif defined(ROCKBOX_BIG_ENDIAN)
37union magic {
38 struct {
39 int32_t hi;
40 int32_t lo;
41 } halves;
42 int64_t whole;
43};
44#endif
45
46#ifndef INCL_OPTIMIZED_MULT32
47#define INCL_OPTIMIZED_MULT32
/* Fixed-point multiply: return the high 32 bits of the 64-bit product
   of x and y. Same result as reading the hi half through a union, but
   written as a single arithmetic shift with no endianness dependency. */
static inline int32_t MULT32(int32_t x, int32_t y) {
    return (int32_t)(((int64_t)x * y) >> 32);
}
53#endif
54
55#ifndef INCL_OPTIMIZED_MULT31
56#define INCL_OPTIMIZED_MULT31
/* Q31 multiply: high half of the 64-bit product, doubled. The shift-left
   is done in 64-bit arithmetic before truncating, so the result (with its
   always-zero LSB) matches the original MULT32(x,y)<<1 exactly. */
static inline int32_t MULT31(int32_t x, int32_t y) {
    return (int32_t)((((int64_t)x * y) >> 32) << 1);
}
60#endif
61
62#ifndef INCL_OPTIMIZED_MULT31_SHIFT15
63#define INCL_OPTIMIZED_MULT31_SHIFT15
/* Multiply two fixed-point values and keep bits 15..46 of the 64-bit
   product, i.e. the product scaled down by 2^15. Identical to the
   original lo>>15 | hi<<17 recombination, expressed as one shift. */
static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
    return (int32_t)(((int64_t)x * y) >> 15);
}
69#endif
70
71#ifndef INCL_OPTIMIZED_MULT31_SHIFT16
72#define INCL_OPTIMIZED_MULT31_SHIFT16
/* Multiply two fixed-point values and keep bits 16..47 of the 64-bit
   product (product scaled down by 2^16). Identical to the original
   lo>>16 | hi<<16 recombination, expressed as one shift. */
static inline int32_t MULT31_SHIFT16(int32_t x, int32_t y) {
    return (int32_t)(((int64_t)x * y) >> 16);
}
78#endif
79
80#else
81/* Rockbox: unused */
82#if 0
83/* 32 bit multiply, more portable but less accurate */
84
85/*
86 * Note: Precision is biased towards the first argument therefore ordering
87 * is important. Shift values were chosen for the best sound quality after
88 * many listening tests.
89 */
90
91/*
92 * For MULT32 and MULT31: The second argument is always a lookup table
93 * value already preshifted from 31 to 8 bits. We therefore take the
94 * opportunity to save on text space and use unsigned char for those
95 * tables in this case.
96 */
97
98static inline int32_t MULT32(int32_t x, int32_t y) {
99 return (x >> 9) * y; /* y preshifted >>23 */
100}
101
102static inline int32_t MULT31(int32_t x, int32_t y) {
103 return (x >> 8) * y; /* y preshifted >>23 */
104}
105
106static inline int32_t MULT31_SHIFT15(int32_t x, int32_t y) {
107 return (x >> 6) * y; /* y preshifted >>9 */
108}
109#endif
110#endif
111
112/*
113 * The XPROD functions are meant to optimize the cross products found all
114 * over the place in mdct.c by forcing memory operation ordering to avoid
115 * unnecessary register reloads as soon as memory is being written to.
116 * However this is only beneficial on CPUs with a sane number of general
117 * purpose registers which exclude the Intel x86. On Intel, better let the
118 * compiler actually reload registers directly from original memory by using
119 * macros.
120 */
121
122#ifndef INCL_OPTIMIZED_XPROD32
123#define INCL_OPTIMIZED_XPROD32
124/* replaced XPROD32 with a macro to avoid memory reference
125 _x, _y are the results (must be l-values) */
126#define XPROD32(_a, _b, _t, _v, _x, _y) \
127 { (_x)=MULT32(_a,_t)+MULT32(_b,_v); \
128 (_y)=MULT32(_b,_t)-MULT32(_a,_v); }
129#endif
130
131/* Rockbox: Unused */
132/*
133#ifdef __i386__
134
135#define XPROD31(_a, _b, _t, _v, _x, _y) \
136 { *(_x)=MULT31(_a,_t)+MULT31(_b,_v); \
137 *(_y)=MULT31(_b,_t)-MULT31(_a,_v); }
138#define XNPROD31(_a, _b, _t, _v, _x, _y) \
139 { *(_x)=MULT31(_a,_t)-MULT31(_b,_v); \
140 *(_y)=MULT31(_b,_t)+MULT31(_a,_v); }
141
142#else
143*/
144
145#ifndef INCL_OPTIMIZED_XPROD31
146#define INCL_OPTIMIZED_XPROD31
/* Q31 cross product used throughout mdct.c:
   *x = a*t + b*v,  *y = b*t - a*v  (all operands Q31). */
static inline void XPROD31(int32_t a, int32_t b,
                           int32_t t, int32_t v,
                           int32_t *x, int32_t *y)
{
    *x = MULT31(a, t) + MULT31(b, v);
    *y = MULT31(b, t) - MULT31(a, v);
}
154#endif
155
156#ifndef INCL_OPTIMIZED_XNPROD31
157#define INCL_OPTIMIZED_XNPROD31
/* Negated-twiddle variant of XPROD31:
   *x = a*t - b*v,  *y = b*t + a*v  (all operands Q31). */
static inline void XNPROD31(int32_t a, int32_t b,
                            int32_t t, int32_t v,
                            int32_t *x, int32_t *y)
{
    *x = MULT31(a, t) - MULT31(b, v);
    *y = MULT31(b, t) + MULT31(a, v);
}
165#endif
166/*#endif*/
167
168#ifndef INCL_OPTIMIZED_XPROD31_R
169#define INCL_OPTIMIZED_XPROD31_R
170#define XPROD31_R(_a, _b, _t, _v, _x, _y)\
171{\
172 _x = MULT31(_a, _t) + MULT31(_b, _v);\
173 _y = MULT31(_b, _t) - MULT31(_a, _v);\
174}
175#endif
176
177#ifndef INCL_OPTIMIZED_XNPROD31_R
178#define INCL_OPTIMIZED_XNPROD31_R
179#define XNPROD31_R(_a, _b, _t, _v, _x, _y)\
180{\
181 _x = MULT31(_a, _t) - MULT31(_b, _v);\
182 _y = MULT31(_b, _t) + MULT31(_a, _v);\
183}
184#endif
185
186#ifndef _V_VECT_OPS
187#define _V_VECT_OPS
188
/* Element-wise in-place addition: x[i] += y[i] for the first n elements.
 * n <= 0 is a no-op, exactly like the original while-loop form. */
static inline
void vect_add(int32_t *x, const int32_t *y, int n)
{
    for (int i = 0; i < n; i++)
        x[i] += y[i];
}
197
/* Copy the first n elements of y into x, walking forwards like the
 * original loop. n <= 0 copies nothing. */
static inline
void vect_copy(int32_t *x, const int32_t *y, int n)
{
    for (int i = 0; i < n; i++)
        x[i] = y[i];
}
206
/* In-place Q31 windowing: data[i] = data[i] * window[i], walking the
   window forwards. n <= 0 is a no-op. */
static inline
void vect_mult_fw(int32_t *data, const int32_t *window, int n)
{
    while(n>0) {
        *data = MULT31(*data, *window);
        data++;
        window++;
        n--;
    }
}
217
/* In-place Q31 windowing with the window applied in reverse: the window
   pointer is decremented each step, so the caller passes a pointer to the
   last window element to be used first. n <= 0 is a no-op. */
static inline
void vect_mult_bw(int32_t *data, const int32_t *window, int n)
{
    while(n>0) {
        *data = MULT31(*data, *window);
        data++;
        window--;
        n--;
    }
}
228#endif
229
230/* not used anymore */
231/*
232#ifndef _V_CLIP_MATH
233#define _V_CLIP_MATH
234
235static inline int32_t CLIP_TO_15(int32_t x) {
236 int ret=x;
237 ret-= ((x<=32767)-1)&(x-32767);
238 ret-= ((x>=-32768)-1)&(x+32768);
239 return(ret);
240}
241
242#endif
243*/
/* Multiply two pseudo-float values, each represented as a (mantissa,
   exponent) pair. Writes the result exponent to *p and returns the
   result mantissa. Either operand mantissa being 0 yields 0 (and *p is
   left untouched in that case — callers must treat a 0 mantissa as 0). */
static inline int32_t VFLOAT_MULT(int32_t a,int32_t ap,
                                  int32_t b,int32_t bp,
                                  int32_t *p){
    if(a && b){
#ifndef _LOW_ACCURACY_
        /* MULT32 drops 32 bits of the product, hence the +32 on the
           exponent. */
        *p=ap+bp+32;
        return MULT32(a,b);
#else
        /* Low-accuracy path: 31 bits are discarded across the two
           pre-shifts. */
        *p=ap+bp+31;
        return (a>>15)*(b>>16);
#endif
    }else
        return 0;
}
258
259/*static inline int32_t VFLOAT_MULTI(int32_t a,int32_t ap,
260 int32_t i,
261 int32_t *p){
262
263 int ip=_ilog(abs(i))-31;
264 return VFLOAT_MULT(a,ap,i<<-ip,ip,p);
265}
266*/
/* Add two pseudo-float (mantissa, exponent) values. Writes the result
   exponent to *p and returns the result mantissa. The smaller-exponent
   operand is shifted down (with rounding) to align, the sum is computed
   one bit down to avoid overflow, then the result is renormalized by at
   most one bit. A mantissa of 0 stands for the value 0. */
static inline int32_t VFLOAT_ADD(int32_t a,int32_t ap,
                                 int32_t b,int32_t bp,
                                 int32_t *p){

    if(!a){
        *p=bp;
        return b;
    }else if(!b){
        *p=ap;
        return a;
    }

    /* yes, this can leak a bit. */
    if(ap>bp){
        /* a has the larger exponent: align b down to a's scale (+1 spare
           bit so the addition below cannot overflow). */
        int shift=ap-bp+1;
        *p=ap+1;
        a>>=1;
        if(shift<32){
            b=(b+(1<<(shift-1)))>>shift;  /* round-to-nearest before shift */
        }else{
            b=0;  /* shifted out entirely; avoids UB from a >=32-bit shift */
        }
    }else{
        int shift=bp-ap+1;
        *p=bp+1;
        b>>=1;
        if(shift<32){
            a=(a+(1<<(shift-1)))>>shift;
        }else{
            a=0;
        }
    }

    a+=b;
    /* Renormalize by one bit if the top two bits are equal (i.e. there is
       headroom: both set for negative, both clear for positive). */
    if((a&0xc0000000)==0xc0000000 ||
       (a&0xc0000000)==0){
        a<<=1;
        (*p)--;
    }
    return(a);
}
308
309#endif
310
diff --git a/apps/codecs/lib/ffmpeg_bitstream.c b/apps/codecs/lib/ffmpeg_bitstream.c
deleted file mode 100644
index e16df8dcce..0000000000
--- a/apps/codecs/lib/ffmpeg_bitstream.c
+++ /dev/null
@@ -1,374 +0,0 @@
1/*
2 * Common bit i/o utils
3 * Copyright (c) 2000, 2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5 * Copyright (c) 2010 Loren Merritt
6 *
7 * alternative bitstream reader & writer by Michael Niedermayer <michaelni@gmx.at>
8 *
9 * This file is part of FFmpeg.
10 *
11 * FFmpeg is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
15 *
16 * FFmpeg is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with FFmpeg; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24 */
25
26/**
27 * @file
28 * bitstream api.
29 */
30
31//#include "avcodec.h"
32#include "ffmpeg_get_bits.h"
33#include "ffmpeg_put_bits.h"
34#include "ffmpeg_intreadwrite.h"
35
36#define av_log(...)
37
38#ifdef ROCKBOX
39#undef DEBUGF
40#define DEBUGF(...)
41#endif
42
43const uint8_t ff_log2_run[32]={
44 0, 0, 0, 0, 1, 1, 1, 1,
45 2, 2, 2, 2, 3, 3, 3, 3,
46 4, 4, 5, 5, 6, 6, 7, 7,
47 8, 9,10,11,12,13,14,15
48};
49
50#if 0 // unused in rockbox
51void align_put_bits(PutBitContext *s)
52{
53#ifdef ALT_BITSTREAM_WRITER
54 put_bits(s,( - s->index) & 7,0);
55#else
56 put_bits(s,s->bit_left & 7,0);
57#endif
58}
59
60void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
61{
62 while(*string){
63 put_bits(pb, 8, *string);
64 string++;
65 }
66 if(terminate_string)
67 put_bits(pb, 8, 0);
68}
69#endif
70
/* Append 'length' bits from src (big-endian bit order) to the put-bit
   context pb. Chooses between a simple 16-bit-at-a-time loop and, for
   long byte-aligned runs, a bulk memcpy after padding pb to a 32-bit
   boundary. */
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
{
    int words= length>>4;   /* whole 16-bit words to copy */
    int bits= length&15;    /* trailing bits (0..15) */
    int i;

    if(length==0) return;

    /* Short runs, or a destination not on a byte boundary: put_bits only. */
    if(words < 16 || put_bits_count(pb)&7){
        for(i=0; i<words; i++) put_bits(pb, 16, AV_RB16(src + 2*i));
    }else{
        /* Pad to a 32-bit boundary, flush, then memcpy the bulk. */
        for(i=0; put_bits_count(pb)&31; i++)
            put_bits(pb, 8, src[i]);
        flush_put_bits(pb);
        memcpy(put_bits_ptr(pb), src+i, 2*words-i);
        skip_put_bytes(pb, 2*words-i);
    }

    /* Trailing 0-15 bits, taken from the top of the next word of src. */
    put_bits(pb, bits, AV_RB16(src + 2*words)>>(16-bits));
}
91
92/* VLC decoding */
93
94//#define DEBUG_VLC
95
96#define GET_DATA(v, table, i, wrap, size) \
97{\
98 const uint8_t *ptr = (const uint8_t *)table + i * wrap;\
99 switch(size) {\
100 case 1:\
101 v = *(const uint8_t *)ptr;\
102 break;\
103 case 2:\
104 v = *(const uint16_t *)ptr;\
105 break;\
106 default:\
107 v = *(const uint32_t *)ptr;\
108 break;\
109 }\
110}
111
112
/* Reserve 'size' entries at the end of vlc->table and return the index of
   the first reserved entry, or -1 on overflow. Rockbox builds only into
   preallocated static tables, so there is no dynamic growth path — the
   original av_realloc fallback is kept commented out below. */
static int alloc_table(VLC *vlc, int size, int use_static)
{
    int index;
    index = vlc->table_size;
    vlc->table_size += size;
    if (vlc->table_size > vlc->table_allocated) {
        if(use_static)
        {
            DEBUGF("init_vlc() used with too little memory : table_size > allocated_memory\n");
            return -1;
        }
//        abort(); //cant do anything, init_vlc() is used with too little memory
//        vlc->table_allocated += (1 << vlc->bits);
//        vlc->table = av_realloc(vlc->table,
//                                sizeof(VLC_TYPE) * 2 * vlc->table_allocated);
        if (!vlc->table)
            return -1;
    }
    return index;
}
133
134/*
135static av_always_inline uint32_t bitswap_32(uint32_t x) {
136 return av_reverse[x&0xFF]<<24
137 | av_reverse[(x>>8)&0xFF]<<16
138 | av_reverse[(x>>16)&0xFF]<<8
139 | av_reverse[x>>24];
140}
141*/
142
143typedef struct {
144 uint8_t bits;
145 uint16_t symbol;
146 /** codeword, with the first bit-to-be-read in the msb
147 * (even if intended for a little-endian bitstream reader) */
148 uint32_t code;
149} __attribute__((__packed__)) VLCcode; /* packed to save space */
150
151static int compare_vlcspec(const void *a, const void *b)
152{
153 const VLCcode *sa=a, *sb=b;
154 return (sa->code >> 1) - (sb->code >> 1);
155}
156
157/**
158 * Build VLC decoding tables suitable for use with get_vlc().
159 *
160 * @param vlc the context to be initted
161 *
162 * @param table_nb_bits max length of vlc codes to store directly in this table
163 * (Longer codes are delegated to subtables.)
164 *
165 * @param nb_codes number of elements in codes[]
166 *
167 * @param codes descriptions of the vlc codes
168 * These must be ordered such that codes going into the same subtable are contiguous.
169 * Sorting by VLCcode.code is sufficient, though not necessary.
170 */
static int build_table(VLC *vlc, int table_nb_bits, int nb_codes,
                       VLCcode *codes, int flags)
{
    int table_size, table_index, index, symbol, subtable_bits;
    int i, j, k, n, nb, inc;
    uint32_t code, code_prefix;
    VLC_TYPE (*table)[2];   /* each entry: [0]=symbol or subtable index, [1]=bits */

    table_size = 1 << table_nb_bits;
    table_index = alloc_table(vlc, table_size, flags & INIT_VLC_USE_NEW_STATIC);
#ifdef DEBUG_VLC
    av_log(NULL,AV_LOG_DEBUG,"new table index=%d size=%d\n",
           table_index, table_size);
#endif
    if (table_index < 0)
        return -1;
    table = &vlc->table[table_index];

    /* Mark every slot free: bits == 0 means unassigned. */
    for (i = 0; i < table_size; i++) {
        table[i][1] = 0; //bits
        table[i][0] = -1; //codes
    }

    /* first pass: map codes and compute auxiliary table sizes */
    for (i = 0; i < nb_codes; i++) {
        n = codes[i].bits;
        code = codes[i].code;   /* first bit to read is in the MSB */
        symbol = codes[i].symbol;
#if defined(DEBUG_VLC) && 0
        av_log(NULL,AV_LOG_DEBUG,"i=%d n=%d code=0x%x\n", i, n, code);
#endif
        if (n <= table_nb_bits) {
            /* no need to add another table */
            j = code >> (32 - table_nb_bits);
            /* A short code owns 2^(table_nb_bits - n) consecutive slots
               (every possible suffix). */
            nb = 1 << (table_nb_bits - n);
            inc = 1;
/*            if (flags & INIT_VLC_LE) {
                j = bitswap_32(code);
                inc = 1 << n;
            } */
            for (k = 0; k < nb; k++) {
#ifdef DEBUG_VLC
                av_log(NULL, AV_LOG_DEBUG, "%4x: code=%d n=%d\n",
                       j, i, n);
#endif
                /* A slot already claimed means the code set is not
                   prefix-free. */
                if (table[j][1] /*bits*/ != 0) {
                    av_log(NULL, AV_LOG_ERROR, "incorrect codes\n");
                    return -1;
                }
                table[j][1] = n; //bits
                table[j][0] = symbol;
                j += inc;
            }
        } else {
            /* fill auxiliary table recursively */
            n -= table_nb_bits;
            code_prefix = code >> (32 - table_nb_bits);
            subtable_bits = n;
            /* Strip the consumed prefix from this code and every following
               code that shares it; they all go into one subtable. Relies on
               codes[] being sorted so same-prefix entries are contiguous. */
            codes[i].bits = n;
            codes[i].code = code << table_nb_bits;
            for (k = i+1; k < nb_codes; k++) {
                n = codes[k].bits - table_nb_bits;
                if (n <= 0)
                    break;
                code = codes[k].code;
                if (code >> (32 - table_nb_bits) != code_prefix)
                    break;
                codes[k].bits = n;
                codes[k].code = code << table_nb_bits;
                subtable_bits = FFMAX(subtable_bits, n);
            }
            subtable_bits = FFMIN(subtable_bits, table_nb_bits);
            j = /*(flags & INIT_VLC_LE) ? bitswap_32(code_prefix) >> (32 - table_nb_bits) :*/ code_prefix;
            /* Negative bits value flags this slot as a subtable pointer. */
            table[j][1] = -subtable_bits;
#ifdef DEBUG_VLC
            av_log(NULL,AV_LOG_DEBUG,"%4x: n=%d (subtable)\n",
                   j, codes[i].bits + table_nb_bits);
#endif
            index = build_table(vlc, subtable_bits, k-i, codes+i, flags);
            if (index < 0)
                return -1;
            /* note: realloc has been done, so reload tables */
            table = &vlc->table[table_index];
            table[j][0] = index; //code
            i = k-1;
        }
    }
    return table_index;
}
260
261
262/* Build VLC decoding tables suitable for use with get_vlc().
263
   264 'nb_bits' set the decoding table size (2^nb_bits entries). The
265 bigger it is, the faster is the decoding. But it should not be too
266 big to save memory and L1 cache. '9' is a good compromise.
267
268 'nb_codes' : number of vlcs codes
269
270 'bits' : table which gives the size (in bits) of each vlc code.
271
   272 'codes' : table which gives the bit pattern of each vlc code.
273
274 'symbols' : table which gives the values to be returned from get_vlc().
275
276 'xxx_wrap' : give the number of bytes between each entry of the
277 'bits' or 'codes' tables.
278
279 'xxx_size' : gives the number of bytes of each entry of the 'bits'
280 or 'codes' tables.
281
282 'wrap' and 'size' allows to use any memory configuration and types
283 (byte/word/long) to store the 'bits', 'codes', and 'symbols' tables.
284
285 'use_static' should be set to 1 for tables, which should be freed
286 with av_free_static(), 0 if free_vlc() will be used.
287*/
288
289/* Rockbox: support for INIT_VLC_LE is currently disabled since none of our
290 codecs use it, there's a LUT based bit reverse function for this commented
291 out above (bitswap_32) and an inline asm version in libtremor/codebook.c
292 if we ever want this */
293
294static VLCcode buf[1336+1]; /* worst case is wma, which has one table with 1336 entries */
295
/* Build the VLC decoding tables from caller-supplied bits/codes/symbols
   arrays (each with its own wrap/size layout, see the comment block above).
   Returns 0 on success, -1 on error. Uses the file-static 'buf' as scratch,
   so this is NOT reentrant. */
int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
                    const void *bits, int bits_wrap, int bits_size,
                    const void *codes, int codes_wrap, int codes_size,
                    const void *symbols, int symbols_wrap, int symbols_size,
                    int flags)
{
    /* Reject inputs that do not fit the static scratch buffer. */
    if (nb_codes+1 > (int)(sizeof (buf)/ sizeof (VLCcode)))
    {
        DEBUGF("Table is larger than temp buffer!\n");
        return -1;
    }

    int i, j, ret;

    vlc->bits = nb_bits;
    if(flags & INIT_VLC_USE_NEW_STATIC){
        if(vlc->table_size && vlc->table_size == vlc->table_allocated){
            /* Static table already fully built on a previous call. */
            return 0;
        }else if(vlc->table_size){
            DEBUGF("fatal error, we are called on a partially initialized table\n");
            return -1;
//            abort(); // fatal error, we are called on a partially initialized table
        }
    }else {
        vlc->table = NULL;
        vlc->table_allocated = 0;
        vlc->table_size = 0;
    }

#ifdef DEBUG_VLC
    av_log(NULL,AV_LOG_DEBUG,"build table nb_codes=%d\n", nb_codes);
#endif

//    buf = av_malloc((nb_codes+1)*sizeof(VLCcode));

//    assert(symbols_size <= 2 || !symbols);
    j = 0;
    /* Gather (bits, code, symbol) triples into buf[], left-aligning each
       code so the first bit to decode sits in the MSB. */
#define COPY(condition)\
    for (i = 0; i < nb_codes; i++) {\
        GET_DATA(buf[j].bits, bits, i, bits_wrap, bits_size);\
        if (!(condition))\
            continue;\
        GET_DATA(buf[j].code, codes, i, codes_wrap, codes_size);\
/*        if (flags & INIT_VLC_LE)*/\
/*            buf[j].code = bitswap_32(buf[j].code);*/\
/*        else*/\
            buf[j].code <<= 32 - buf[j].bits;\
        if (symbols)\
            GET_DATA(buf[j].symbol, symbols, i, symbols_wrap, symbols_size)\
        else\
            buf[j].symbol = i;\
        j++;\
    }
    /* Long codes first (only they need sorting for subtable grouping),
       then the short codes are appended unsorted. */
    COPY(buf[j].bits > nb_bits);
    // qsort is the slowest part of init_vlc, and could probably be improved or avoided
    qsort(buf, j, sizeof(VLCcode), compare_vlcspec);
    COPY(buf[j].bits && buf[j].bits <= nb_bits);
    nb_codes = j;

    ret = build_table(vlc, nb_bits, nb_codes, buf, flags);

//    av_free(buf);
    if (ret < 0) {
//        av_freep(&vlc->table);
        return -1;
    }
    /* For static tables, a size mismatch indicates a wrongly sized
       preallocation; log it but still report success. */
    if((flags & INIT_VLC_USE_NEW_STATIC) && vlc->table_size != vlc->table_allocated) {
        av_log(NULL, AV_LOG_ERROR, "needed %d had %d\n", vlc->table_size, vlc->table_allocated);
    }
    return 0;
}
367
368/* not used in rockbox
369void free_vlc(VLC *vlc)
370{
371 av_freep(&vlc->table);
372}
373*/
374
diff --git a/apps/codecs/lib/ffmpeg_bswap.h b/apps/codecs/lib/ffmpeg_bswap.h
deleted file mode 100644
index 24a2aab7ea..0000000000
--- a/apps/codecs/lib/ffmpeg_bswap.h
+++ /dev/null
@@ -1,150 +0,0 @@
1/**
2 * @file bswap.h
3 * byte swap.
4 */
5
6#ifndef __BSWAP_H__
7#define __BSWAP_H__
8
9#ifdef HAVE_BYTESWAP_H
10#include <byteswap.h>
11#else
12
13#ifdef ROCKBOX
14#include "codecs.h"
15
16/* rockbox' optimised inline functions */
17#define bswap_16(x) swap16(x)
18#define bswap_32(x) swap32(x)
19
/* 64-bit byte swap built from two 32-bit swaps: byte-swap each half of x
   and store them crosswise through the union. Works on either host
   endianness: the field that aliases the low-address half always receives
   the swapped low half of x, which is exactly where a full byte reversal
   puts it. */
static inline uint64_t ByteSwap64(uint64_t x)
{
    union {
        uint64_t ll;
        struct {
           uint32_t l,h;
        } l;
    } r;
    r.l.l = bswap_32 (x);
    r.l.h = bswap_32 (x>>32);
    return r.ll;
}
32#define bswap_64(x) ByteSwap64(x)
33
34#elif defined(ARCH_X86)
/* x86: swap the two bytes of x by exchanging %b0 (low byte) with
   %h0 (second byte); "q" restricts to registers with byte-addressable
   halves (a/b/c/d). */
static inline unsigned short ByteSwap16(unsigned short x)
{
    __asm("xchgb %b0,%h0" :
          "=q" (x) :
          "0" (x));
    return x;
}
42#define bswap_16(x) ByteSwap16(x)
43
/* x86 32-bit byte swap: a single bswap instruction on 486+, or the
   classic xchg/ror/xchg sequence on a plain 386 (which lacks bswap). */
static inline unsigned int ByteSwap32(unsigned int x)
{
#if __CPU__ > 386
    __asm("bswap %0":
          "=r" (x) :
#else
    __asm("xchgb %b0,%h0\n"
          " rorl $16,%0\n"
          " xchgb %b0,%h0":
          "=q" (x) :
#endif
          "0" (x));
    return x;
}
58#define bswap_32(x) ByteSwap32(x)
59
/* x86 64-bit byte swap: byte-swap each 32-bit half with bswap_32, then
   exchange the halves via xchgl into the union's two words. */
static inline unsigned long long int ByteSwap64(unsigned long long int x)
{
    register union { __extension__ uint64_t __ll;
                     uint32_t __l[2]; } __x;
    asm("xchgl %0,%1":
        "=r"(__x.__l[0]),"=r"(__x.__l[1]):
        "0"(bswap_32((unsigned long)x)),"1"(bswap_32((unsigned long)(x>>32))));
    return __x.__ll;
}
69#define bswap_64(x) ByteSwap64(x)
70
71#elif defined(ARCH_SH4)
72
/* SH4: swap.b exchanges the two low bytes of the register. */
static inline uint16_t ByteSwap16(uint16_t x) {
    __asm__("swap.b %0,%0":"=r"(x):"0"(x));
    return x;
}
77
/* SH4 32-bit byte swap: swap bytes within the low word, swap the two
   16-bit words, then swap bytes within the (new) low word — together a
   full byte reversal. */
static inline uint32_t ByteSwap32(uint32_t x) {
    __asm__(
        "swap.b %0,%0\n"
        "swap.w %0,%0\n"
        "swap.b %0,%0\n"
        :"=r"(x):"0"(x));
    return x;
}
86
87#define bswap_16(x) ByteSwap16(x)
88#define bswap_32(x) ByteSwap32(x)
89
/* SH4 64-bit byte swap: byte-swap each 32-bit half and store them
   crosswise through the union (same construction as the Rockbox
   variant above). */
static inline uint64_t ByteSwap64(uint64_t x)
{
    union {
        uint64_t ll;
        struct {
           uint32_t l,h;
        } l;
    } r;
    r.l.l = bswap_32 (x);
    r.l.h = bswap_32 (x>>32);
    return r.ll;
}
102#define bswap_64(x) ByteSwap64(x)
103
104#else
105
106#define bswap_16(x) (((x) & 0x00ff) << 8 | ((x) & 0xff00) >> 8)
107
108
109// code from bits/byteswap.h (C) 1997, 1998 Free Software Foundation, Inc.
110#define bswap_32(x) \
111 ((((x) & 0xff000000) >> 24) | (((x) & 0x00ff0000) >> 8) | \
112 (((x) & 0x0000ff00) << 8) | (((x) & 0x000000ff) << 24))
113
/* Portable 64-bit byte swap: view x as two 32-bit words, byte-swap each
   and write them back in exchanged positions. Endian-agnostic by
   construction. */
static inline uint64_t ByteSwap64(uint64_t x)
{
    union {
        uint64_t ll;
        uint32_t l[2];
    } w, r;
    w.ll = x;
    r.l[0] = bswap_32 (w.l[1]);
    r.l[1] = bswap_32 (w.l[0]);
    return r.ll;
}
125#define bswap_64(x) ByteSwap64(x)
126
127#endif /* !ARCH_X86 */
128
129#endif /* !HAVE_BYTESWAP_H */
130
131// be2me ... BigEndian to MachineEndian
132// le2me ... LittleEndian to MachineEndian
133
134#ifdef ROCKBOX_BIG_ENDIAN
135#define be2me_16(x) (x)
136#define be2me_32(x) (x)
137#define be2me_64(x) (x)
138#define le2me_16(x) bswap_16(x)
139#define le2me_32(x) bswap_32(x)
140#define le2me_64(x) bswap_64(x)
141#else
142#define be2me_16(x) bswap_16(x)
143#define be2me_32(x) bswap_32(x)
144#define be2me_64(x) bswap_64(x)
145#define le2me_16(x) (x)
146#define le2me_32(x) (x)
147#define le2me_64(x) (x)
148#endif
149
150#endif /* __BSWAP_H__ */
diff --git a/apps/codecs/lib/ffmpeg_get_bits.h b/apps/codecs/lib/ffmpeg_get_bits.h
deleted file mode 100644
index 04eda021a7..0000000000
--- a/apps/codecs/lib/ffmpeg_get_bits.h
+++ /dev/null
@@ -1,743 +0,0 @@
1/*
2 * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21/**
22 * @file
23 * bitstream reader API header.
24 */
25
26#ifndef AVCODEC_GET_BITS_H
27#define AVCODEC_GET_BITS_H
28
29#include <stdint.h>
30#include <stdlib.h>
31#include "ffmpeg_intreadwrite.h"
32//#include <assert.h>
33//#include "libavutil/bswap.h"
34//#include "libavutil/common.h"
35//#include "libavutil/intreadwrite.h"
36//#include "libavutil/log.h"
37//#include "mathops.h"
38
39#include "codecs.h"
40
41/* rockbox' optimised inline functions */
42#define bswap_16(x) swap16(x)
43#define bswap_32(x) swap32(x)
44
45#ifdef ROCKBOX_BIG_ENDIAN
46#define be2me_16(x) (x)
47#define be2me_32(x) (x)
48#define le2me_16(x) bswap_16(x)
49#define le2me_32(x) bswap_32(x)
50#else
51#define be2me_16(x) bswap_16(x)
52#define be2me_32(x) bswap_32(x)
53#define le2me_16(x) (x)
54#define le2me_32(x) (x)
55#endif
56
57#define av_const __attribute__((const))
58#define av_always_inline inline __attribute__((always_inline))
59
60/* The following is taken from mathops.h */
61
62#ifndef sign_extend
/**
 * Sign-extend the low 'bits' bits of val to a full int.
 * bits must be in 1..(8*sizeof(int)).
 *
 * The original shifted a signed int left, which is undefined behaviour
 * when value bits are shifted into or past the sign bit (C99 6.5.7).
 * Do the left shift in unsigned arithmetic and type-pun back through a
 * union (well-defined), then use the implementation's arithmetic right
 * shift — the same property the original already relied on.
 * (__attribute__((const)) is what the file's av_const macro expands to.)
 */
static inline __attribute__((const)) int sign_extend(int val, unsigned bits)
{
    unsigned shift = 8 * sizeof(int) - bits;
    union { unsigned u; int s; } v = { (unsigned)val << shift };
    return v.s >> shift;
}
67#endif
68
69#ifndef NEG_SSR32
70# define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s)))
71#endif
72
73#ifndef NEG_USR32
74# define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s)))
75#endif
76
77/* these 2 are from libavutil/common.h */
78
79#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
80#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
81
82#if defined(ALT_BITSTREAM_READER_LE) && !defined(ALT_BITSTREAM_READER)
83# define ALT_BITSTREAM_READER
84#endif
85
86/*
87#if !defined(LIBMPEG2_BITSTREAM_READER) && !defined(A32_BITSTREAM_READER) && !defined(ALT_BITSTREAM_READER)
88# if ARCH_ARM && !HAVE_FAST_UNALIGNED
89# define A32_BITSTREAM_READER
90# else
91*/
92# define ALT_BITSTREAM_READER
93/*
94//#define LIBMPEG2_BITSTREAM_READER
95//#define A32_BITSTREAM_READER
96# endif
97#endif
98*/
99
100/* bit input */
101/* buffer, buffer_end and size_in_bits must be present and used by every reader */
102typedef struct GetBitContext {
103 const uint8_t *buffer, *buffer_end;
104#ifdef ALT_BITSTREAM_READER
105 int index;
106#elif defined LIBMPEG2_BITSTREAM_READER
107 uint8_t *buffer_ptr;
108 uint32_t cache;
109 int bit_count;
110#elif defined A32_BITSTREAM_READER
111 uint32_t *buffer_ptr;
112 uint32_t cache0;
113 uint32_t cache1;
114 int bit_count;
115#endif
116 int size_in_bits;
117} GetBitContext;
118
119#define VLC_TYPE int16_t
120
121typedef struct VLC {
122 int bits;
123 VLC_TYPE (*table)[2]; ///< code, bits
124 int table_size, table_allocated;
125} VLC;
126
127typedef struct RL_VLC_ELEM {
128 int16_t level;
129 int8_t len;
130 uint8_t run;
131} RL_VLC_ELEM;
132
133/* Bitstream reader API docs:
134name
135 arbitrary name which is used as prefix for the internal variables
136
137gb
138 getbitcontext
139
140OPEN_READER(name, gb)
141 loads gb into local variables
142
143CLOSE_READER(name, gb)
144 stores local vars in gb
145
146UPDATE_CACHE(name, gb)
147 refills the internal cache from the bitstream
148 after this call at least MIN_CACHE_BITS will be available,
149
150GET_CACHE(name, gb)
151 will output the contents of the internal cache, next bit is MSB of 32 or 64 bit (FIXME 64bit)
152
153SHOW_UBITS(name, gb, num)
154 will return the next num bits
155
156SHOW_SBITS(name, gb, num)
157 will return the next num bits and do sign extension
158
159SKIP_BITS(name, gb, num)
160 will skip over the next num bits
161 note, this is equivalent to SKIP_CACHE; SKIP_COUNTER
162
163SKIP_CACHE(name, gb, num)
164 will remove the next num bits from the cache (note SKIP_COUNTER MUST be called before UPDATE_CACHE / CLOSE_READER)
165
166SKIP_COUNTER(name, gb, num)
167 will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS)
168
169LAST_SKIP_CACHE(name, gb, num)
170 will remove the next num bits from the cache if it is needed for UPDATE_CACHE otherwise it will do nothing
171
172LAST_SKIP_BITS(name, gb, num)
173 is equivalent to LAST_SKIP_CACHE; SKIP_COUNTER
174
175for examples see get_bits, show_bits, skip_bits, get_vlc
176*/
177
178#ifdef ALT_BITSTREAM_READER
179# define MIN_CACHE_BITS 25
180
181
182/* ROCKBOX: work around "set but not used" warning */
183# define OPEN_READER(name, gb)\
184 unsigned int name##_index= (gb)->index;\
185 int name##_cache __attribute__((unused)) = 0;\
186
187# define CLOSE_READER(name, gb)\
188 (gb)->index= name##_index;\
189
190# ifdef ALT_BITSTREAM_READER_LE
191# define UPDATE_CACHE(name, gb)\
192 name##_cache= AV_RL32( ((const uint8_t *)(gb)->buffer)+(name##_index>>3) ) >> (name##_index&0x07);\
193
194# define SKIP_CACHE(name, gb, num)\
195 name##_cache >>= (num);
196# else
197# define UPDATE_CACHE(name, gb)\
198 name##_cache= AV_RB32( ((const uint8_t *)(gb)->buffer)+(name##_index>>3) ) << (name##_index&0x07);\
199
200# define SKIP_CACHE(name, gb, num)\
201 name##_cache <<= (num);
202# endif
203
204// FIXME name?
205# define SKIP_COUNTER(name, gb, num)\
206 name##_index += (num);\
207
208# define SKIP_BITS(name, gb, num)\
209 {\
210 SKIP_CACHE(name, gb, num)\
211 SKIP_COUNTER(name, gb, num)\
212 }\
213
214# define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num)
215# define LAST_SKIP_CACHE(name, gb, num) ;
216
217# ifdef ALT_BITSTREAM_READER_LE
218# define SHOW_UBITS(name, gb, num)\
219 zero_extend(name##_cache, num)
220
221# define SHOW_SBITS(name, gb, num)\
222 sign_extend(name##_cache, num)
223# else
224# define SHOW_UBITS(name, gb, num)\
225 NEG_USR32(name##_cache, num)
226
227# define SHOW_SBITS(name, gb, num)\
228 NEG_SSR32(name##_cache, num)
229# endif
230
231# define GET_CACHE(name, gb)\
232 ((uint32_t)name##_cache)
233
/* ALT reader: the absolute bit position is stored directly in the
   context, so reporting it is a plain field read. */
static inline int get_bits_count(const GetBitContext *s){
    return s->index;
}
237
/* ALT reader: no cache to maintain, so skipping any number of bits is
   just advancing the bit index. */
static inline void skip_bits_long(GetBitContext *s, int n){
    s->index += n;
}
241
242#elif defined LIBMPEG2_BITSTREAM_READER
243//libmpeg2 like reader
244
245# define MIN_CACHE_BITS 17
246
247# define OPEN_READER(name, gb)\
248 int name##_bit_count=(gb)->bit_count;\
249 int name##_cache= (gb)->cache;\
250 uint8_t * name##_buffer_ptr=(gb)->buffer_ptr;\
251
252# define CLOSE_READER(name, gb)\
253 (gb)->bit_count= name##_bit_count;\
254 (gb)->cache= name##_cache;\
255 (gb)->buffer_ptr= name##_buffer_ptr;\
256
257# define UPDATE_CACHE(name, gb)\
258 if(name##_bit_count >= 0){\
259 name##_cache+= AV_RB16(name##_buffer_ptr) << name##_bit_count; \
260 name##_buffer_ptr+=2;\
261 name##_bit_count-= 16;\
262 }\
263
264# define SKIP_CACHE(name, gb, num)\
265 name##_cache <<= (num);\
266
267# define SKIP_COUNTER(name, gb, num)\
268 name##_bit_count += (num);\
269
270# define SKIP_BITS(name, gb, num)\
271 {\
272 SKIP_CACHE(name, gb, num)\
273 SKIP_COUNTER(name, gb, num)\
274 }\
275
276# define LAST_SKIP_BITS(name, gb, num) SKIP_BITS(name, gb, num)
277# define LAST_SKIP_CACHE(name, gb, num) SKIP_CACHE(name, gb, num)
278
279# define SHOW_UBITS(name, gb, num)\
280 NEG_USR32(name##_cache, num)
281
282# define SHOW_SBITS(name, gb, num)\
283 NEG_SSR32(name##_cache, num)
284
285# define GET_CACHE(name, gb)\
286 ((uint32_t)name##_cache)
287
288static inline int get_bits_count(const GetBitContext *s){
289 return (s->buffer_ptr - s->buffer)*8 - 16 + s->bit_count;
290}
291
292static inline void skip_bits_long(GetBitContext *s, int n){
293 OPEN_READER(re, s)
294 re_bit_count += n;
295 re_buffer_ptr += 2*(re_bit_count>>4);
296 re_bit_count &= 15;
297 re_cache = ((re_buffer_ptr[-2]<<8) + re_buffer_ptr[-1]) << (16+re_bit_count);
298 UPDATE_CACHE(re, s)
299 CLOSE_READER(re, s)
300}
301
302#elif defined A32_BITSTREAM_READER
303
304# define MIN_CACHE_BITS 32
305
306# define OPEN_READER(name, gb)\
307 int name##_bit_count=(gb)->bit_count;\
308 uint32_t name##_cache0= (gb)->cache0;\
309 uint32_t name##_cache1= (gb)->cache1;\
310 uint32_t * name##_buffer_ptr=(gb)->buffer_ptr;\
311
312# define CLOSE_READER(name, gb)\
313 (gb)->bit_count= name##_bit_count;\
314 (gb)->cache0= name##_cache0;\
315 (gb)->cache1= name##_cache1;\
316 (gb)->buffer_ptr= name##_buffer_ptr;\
317
318# define UPDATE_CACHE(name, gb)\
319 if(name##_bit_count > 0){\
320 const uint32_t next= av_be2ne32( *name##_buffer_ptr );\
321 name##_cache0 |= NEG_USR32(next,name##_bit_count);\
322 name##_cache1 |= next<<name##_bit_count;\
323 name##_buffer_ptr++;\
324 name##_bit_count-= 32;\
325 }\
326
327#if ARCH_X86
328# define SKIP_CACHE(name, gb, num)\
329 __asm__(\
330 "shldl %2, %1, %0 \n\t"\
331 "shll %2, %1 \n\t"\
332 : "+r" (name##_cache0), "+r" (name##_cache1)\
333 : "Ic" ((uint8_t)(num))\
334 );
335#else
336# define SKIP_CACHE(name, gb, num)\
337 name##_cache0 <<= (num);\
338 name##_cache0 |= NEG_USR32(name##_cache1,num);\
339 name##_cache1 <<= (num);
340#endif
341
342# define SKIP_COUNTER(name, gb, num)\
343 name##_bit_count += (num);\
344
345# define SKIP_BITS(name, gb, num)\
346 {\
347 SKIP_CACHE(name, gb, num)\
348 SKIP_COUNTER(name, gb, num)\
349 }\
350
351# define LAST_SKIP_BITS(name, gb, num) SKIP_BITS(name, gb, num)
352# define LAST_SKIP_CACHE(name, gb, num) SKIP_CACHE(name, gb, num)
353
354# define SHOW_UBITS(name, gb, num)\
355 NEG_USR32(name##_cache0, num)
356
357# define SHOW_SBITS(name, gb, num)\
358 NEG_SSR32(name##_cache0, num)
359
360# define GET_CACHE(name, gb)\
361 (name##_cache0)
362
363static inline int get_bits_count(const GetBitContext *s){
364 return ((uint8_t*)s->buffer_ptr - s->buffer)*8 - 32 + s->bit_count;
365}
366
367static inline void skip_bits_long(GetBitContext *s, int n){
368 OPEN_READER(re, s)
369 re_bit_count += n;
370 re_buffer_ptr += re_bit_count>>5;
371 re_bit_count &= 31;
372 re_cache0 = av_be2ne32( re_buffer_ptr[-1] ) << re_bit_count;
373 re_cache1 = 0;
374 UPDATE_CACHE(re, s)
375 CLOSE_READER(re, s)
376}
377
378#endif
379
380/**
381 * read mpeg1 dc style vlc (sign bit + mantisse with no MSB).
382 * if MSB not set it is negative
383 * @param n length in bits
384 * @author BERO
385 */
386static inline int get_xbits(GetBitContext *s, int n){
387 register int sign;
388 register int32_t cache;
389 OPEN_READER(re, s)
390 UPDATE_CACHE(re, s)
391 cache = GET_CACHE(re,s);
392 sign=(~cache)>>31;
393 LAST_SKIP_BITS(re, s, n)
394 CLOSE_READER(re, s)
395 return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;
396}
397
398static inline int get_sbits(GetBitContext *s, int n){
399 register int tmp;
400 OPEN_READER(re, s)
401 UPDATE_CACHE(re, s)
402 tmp= SHOW_SBITS(re, s, n);
403 LAST_SKIP_BITS(re, s, n)
404 CLOSE_READER(re, s)
405 return tmp;
406}
407
408/**
409 * reads 1-17 bits.
410 * Note, the alt bitstream reader can read up to 25 bits, but the libmpeg2 reader can't
411 */
412static inline unsigned int get_bits(GetBitContext *s, int n){
413 register int tmp;
414 OPEN_READER(re, s)
415 UPDATE_CACHE(re, s)
416 tmp= SHOW_UBITS(re, s, n);
417 LAST_SKIP_BITS(re, s, n)
418 CLOSE_READER(re, s)
419 return tmp;
420}
421
422/**
423 * shows 1-17 bits.
424 * Note, the alt bitstream reader can read up to 25 bits, but the libmpeg2 reader can't
425 */
426static inline unsigned int show_bits(GetBitContext *s, int n){
427 register int tmp;
428 OPEN_READER(re, s)
429 UPDATE_CACHE(re, s)
430 tmp= SHOW_UBITS(re, s, n);
431// CLOSE_READER(re, s)
432 return tmp;
433}
434
435static inline void skip_bits(GetBitContext *s, int n){
436 //Note gcc seems to optimize this to s->index+=n for the ALT_READER :))
437 OPEN_READER(re, s)
438 UPDATE_CACHE(re, s)
439 LAST_SKIP_BITS(re, s, n)
440 CLOSE_READER(re, s)
441}
442
443static inline unsigned int get_bits1(GetBitContext *s){
444#ifdef ALT_BITSTREAM_READER
445 unsigned int index= s->index;
446 uint8_t result= s->buffer[ index>>3 ];
447#ifdef ALT_BITSTREAM_READER_LE
448 result>>= (index&0x07);
449 result&= 1;
450#else
451 result<<= (index&0x07);
452 result>>= 8 - 1;
453#endif
454 index++;
455 s->index= index;
456
457 return result;
458#else
459 return get_bits(s, 1);
460#endif
461}
462
463static inline unsigned int show_bits1(GetBitContext *s){
464 return show_bits(s, 1);
465}
466
467static inline void skip_bits1(GetBitContext *s){
468 skip_bits(s, 1);
469}
470
471/**
472 * reads 0-32 bits.
473 */
474static inline unsigned int get_bits_long(GetBitContext *s, int n){
475 if(n<=MIN_CACHE_BITS) return get_bits(s, n);
476 else{
477#ifdef ALT_BITSTREAM_READER_LE
478 int ret= get_bits(s, 16);
479 return ret | (get_bits(s, n-16) << 16);
480#else
481 int ret= get_bits(s, 16) << (n-16);
482 return ret | get_bits(s, n-16);
483#endif
484 }
485}
486
487/**
488 * reads 0-32 bits as a signed integer.
489 */
490static inline int get_sbits_long(GetBitContext *s, int n) {
491 return sign_extend(get_bits_long(s, n), n);
492}
493
494/**
495 * shows 0-32 bits.
496 */
497static inline unsigned int show_bits_long(GetBitContext *s, int n){
498 if(n<=MIN_CACHE_BITS) return show_bits(s, n);
499 else{
500 GetBitContext gb= *s;
501 return get_bits_long(&gb, n);
502 }
503}
504
505/* not used
506static inline int check_marker(GetBitContext *s, const char *msg)
507{
508 int bit= get_bits1(s);
509 if(!bit)
510 av_log(NULL, AV_LOG_INFO, "Marker bit missing %s\n", msg);
511
512 return bit;
513}
514*/
515
516/**
517 * init GetBitContext.
518 * @param buffer bitstream buffer, must be FF_INPUT_BUFFER_PADDING_SIZE bytes larger then the actual read bits
519 * because some optimized bitstream readers read 32 or 64 bit at once and could read over the end
520 * @param bit_size the size of the buffer in bits
521 *
522 * While GetBitContext stores the buffer size, for performance reasons you are
523 * responsible for checking for the buffer end yourself (take advantage of the padding)!
524 */
525static inline void init_get_bits(GetBitContext *s,
526 const uint8_t *buffer, int bit_size)
527{
528 int buffer_size= (bit_size+7)>>3;
529 if(buffer_size < 0 || bit_size < 0) {
530 buffer_size = bit_size = 0;
531 buffer = NULL;
532 }
533
534 s->buffer= buffer;
535 s->size_in_bits= bit_size;
536 s->buffer_end= buffer + buffer_size;
537#ifdef ALT_BITSTREAM_READER
538 s->index=0;
539#elif defined LIBMPEG2_BITSTREAM_READER
540 s->buffer_ptr = (uint8_t*)((intptr_t)buffer&(~1));
541 s->bit_count = 16 + 8*((intptr_t)buffer&1);
542 skip_bits_long(s, 0);
543#elif defined A32_BITSTREAM_READER
544 s->buffer_ptr = (uint32_t*)((intptr_t)buffer&(~3));
545 s->bit_count = 32 + 8*((intptr_t)buffer&3);
546 skip_bits_long(s, 0);
547#endif
548}
549
550static inline void align_get_bits(GetBitContext *s)
551{
552 int n= (-get_bits_count(s)) & 7;
553 if(n) skip_bits(s, n);
554}
555
556#define init_vlc(vlc, nb_bits, nb_codes,\
557 bits, bits_wrap, bits_size,\
558 codes, codes_wrap, codes_size,\
559 flags)\
560 init_vlc_sparse(vlc, nb_bits, nb_codes,\
561 bits, bits_wrap, bits_size,\
562 codes, codes_wrap, codes_size,\
563 NULL, 0, 0, flags)
564
565int init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
566 const void *bits, int bits_wrap, int bits_size,
567 const void *codes, int codes_wrap, int codes_size,
568 const void *symbols, int symbols_wrap, int symbols_size,
569 int flags);
570#define INIT_VLC_LE 2
571#define INIT_VLC_USE_NEW_STATIC 4
572void free_vlc(VLC *vlc);
573
574#define INIT_VLC_STATIC(vlc, bits, a,b,c,d,e,f,g, static_size, attr)\
575{\
576 static VLC_TYPE table[static_size][2] attr;\
577 (vlc)->table= table;\
578 (vlc)->table_allocated= static_size;\
579 init_vlc(vlc, bits, a,b,c,d,e,f,g, INIT_VLC_USE_NEW_STATIC);\
580}
581
582
583/**
584 *
585 * If the vlc code is invalid and max_depth=1, then no bits will be removed.
586 * If the vlc code is invalid and max_depth>1, then the number of bits removed
587 * is undefined.
588 */
589#define GET_VLC(code, name, gb, table, bits, max_depth)\
590{\
591 int n, nb_bits;\
592 unsigned int index;\
593\
594 index= SHOW_UBITS(name, gb, bits);\
595 code = table[index][0];\
596 n = table[index][1];\
597\
598 if(max_depth > 1 && n < 0){\
599 LAST_SKIP_BITS(name, gb, bits)\
600 UPDATE_CACHE(name, gb)\
601\
602 nb_bits = -n;\
603\
604 index= SHOW_UBITS(name, gb, nb_bits) + code;\
605 code = table[index][0];\
606 n = table[index][1];\
607 if(max_depth > 2 && n < 0){\
608 LAST_SKIP_BITS(name, gb, nb_bits)\
609 UPDATE_CACHE(name, gb)\
610\
611 nb_bits = -n;\
612\
613 index= SHOW_UBITS(name, gb, nb_bits) + code;\
614 code = table[index][0];\
615 n = table[index][1];\
616 }\
617 }\
618 SKIP_BITS(name, gb, n)\
619}
620
621#define GET_RL_VLC(level, run, name, gb, table, bits, max_depth, need_update)\
622{\
623 int n, nb_bits;\
624 unsigned int index;\
625\
626 index= SHOW_UBITS(name, gb, bits);\
627 level = table[index].level;\
628 n = table[index].len;\
629\
630 if(max_depth > 1 && n < 0){\
631 SKIP_BITS(name, gb, bits)\
632 if(need_update){\
633 UPDATE_CACHE(name, gb)\
634 }\
635\
636 nb_bits = -n;\
637\
638 index= SHOW_UBITS(name, gb, nb_bits) + level;\
639 level = table[index].level;\
640 n = table[index].len;\
641 }\
642 run= table[index].run;\
643 SKIP_BITS(name, gb, n)\
644}
645
646
647/**
648 * parses a vlc code, faster then get_vlc()
649 * @param bits is the number of bits which will be read at once, must be
650 * identical to nb_bits in init_vlc()
651 * @param max_depth is the number of times bits bits must be read to completely
652 * read the longest vlc code
653 * = (max_vlc_length + bits - 1) / bits
654 */
655static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
656 int bits, int max_depth)
657{
658 int code;
659
660 OPEN_READER(re, s)
661 UPDATE_CACHE(re, s)
662
663 GET_VLC(code, re, s, table, bits, max_depth)
664
665 CLOSE_READER(re, s)
666 return code;
667}
668
669//#define TRACE
670
671#ifdef TRACE
672static inline void print_bin(int bits, int n){
673 int i;
674
675 for(i=n-1; i>=0; i--){
676 av_log(NULL, AV_LOG_DEBUG, "%d", (bits>>i)&1);
677 }
678 for(i=n; i<24; i++)
679 av_log(NULL, AV_LOG_DEBUG, " ");
680}
681
682static inline int get_bits_trace(GetBitContext *s, int n, char *file, const char *func, int line){
683 int r= get_bits(s, n);
684
685 print_bin(r, n);
686 av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d bit @%5d in %s %s:%d\n", r, n, r, get_bits_count(s)-n, file, func, line);
687 return r;
688}
689static inline int get_vlc_trace(GetBitContext *s, VLC_TYPE (*table)[2], int bits, int max_depth, char *file, const char *func, int line){
690 int show= show_bits(s, 24);
691 int pos= get_bits_count(s);
692 int r= get_vlc2(s, table, bits, max_depth);
693 int len= get_bits_count(s) - pos;
694 int bits2= show>>(24-len);
695
696 print_bin(bits2, len);
697
698 av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d vlc @%5d in %s %s:%d\n", bits2, len, r, pos, file, func, line);
699 return r;
700}
701static inline int get_xbits_trace(GetBitContext *s, int n, char *file, const char *func, int line){
702 int show= show_bits(s, n);
703 int r= get_xbits(s, n);
704
705 print_bin(show, n);
706 av_log(NULL, AV_LOG_DEBUG, "%5d %2d %3d xbt @%5d in %s %s:%d\n", show, n, r, get_bits_count(s)-n, file, func, line);
707 return r;
708}
709
710#define get_bits(s, n) get_bits_trace(s, n, __FILE__, __PRETTY_FUNCTION__, __LINE__)
711#define get_bits1(s) get_bits_trace(s, 1, __FILE__, __PRETTY_FUNCTION__, __LINE__)
712#define get_xbits(s, n) get_xbits_trace(s, n, __FILE__, __PRETTY_FUNCTION__, __LINE__)
713#define get_vlc(s, vlc) get_vlc_trace(s, (vlc)->table, (vlc)->bits, 3, __FILE__, __PRETTY_FUNCTION__, __LINE__)
714#define get_vlc2(s, tab, bits, max) get_vlc_trace(s, tab, bits, max, __FILE__, __PRETTY_FUNCTION__, __LINE__)
715
716#define tprintf(p, ...) av_log(p, AV_LOG_DEBUG, __VA_ARGS__)
717
718#else //TRACE
719#define tprintf(p, ...) {}
720#endif
721
722static inline int decode012(GetBitContext *gb){
723 int n;
724 n = get_bits1(gb);
725 if (n == 0)
726 return 0;
727 else
728 return get_bits1(gb) + 1;
729}
730
731static inline int decode210(GetBitContext *gb){
732 if (get_bits1(gb))
733 return 0;
734 else
735 return 2 - get_bits1(gb);
736}
737
738static inline int get_bits_left(GetBitContext *gb)
739{
740 return gb->size_in_bits - get_bits_count(gb);
741}
742
743#endif /* AVCODEC_GET_BITS_H */
diff --git a/apps/codecs/lib/ffmpeg_intreadwrite.h b/apps/codecs/lib/ffmpeg_intreadwrite.h
deleted file mode 100644
index 24f03292e8..0000000000
--- a/apps/codecs/lib/ffmpeg_intreadwrite.h
+++ /dev/null
@@ -1,484 +0,0 @@
1/*
2 * This file is part of FFmpeg.
3 *
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#ifndef AVUTIL_INTREADWRITE_H
20#define AVUTIL_INTREADWRITE_H
21
22#include <stdint.h>
23/*
24 * Arch-specific headers can provide any combination of
25 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
26 * Preprocessor symbols must be defined, even if these are implemented
27 * as inline functions.
28 */
29
30/*
31 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
32 */
33#define HAVE_BIGENDIAN 0
34#if HAVE_BIGENDIAN
35
36# if defined(AV_RN16) && !defined(AV_RB16)
37# define AV_RB16(p) AV_RN16(p)
38# elif !defined(AV_RN16) && defined(AV_RB16)
39# define AV_RN16(p) AV_RB16(p)
40# endif
41
42# if defined(AV_WN16) && !defined(AV_WB16)
43# define AV_WB16(p, v) AV_WN16(p, v)
44# elif !defined(AV_WN16) && defined(AV_WB16)
45# define AV_WN16(p, v) AV_WB16(p, v)
46# endif
47
48# if defined(AV_RN24) && !defined(AV_RB24)
49# define AV_RB24(p) AV_RN24(p)
50# elif !defined(AV_RN24) && defined(AV_RB24)
51# define AV_RN24(p) AV_RB24(p)
52# endif
53
54# if defined(AV_WN24) && !defined(AV_WB24)
55# define AV_WB24(p, v) AV_WN24(p, v)
56# elif !defined(AV_WN24) && defined(AV_WB24)
57# define AV_WN24(p, v) AV_WB24(p, v)
58# endif
59
60# if defined(AV_RN32) && !defined(AV_RB32)
61# define AV_RB32(p) AV_RN32(p)
62# elif !defined(AV_RN32) && defined(AV_RB32)
63# define AV_RN32(p) AV_RB32(p)
64# endif
65
66# if defined(AV_WN32) && !defined(AV_WB32)
67# define AV_WB32(p, v) AV_WN32(p, v)
68# elif !defined(AV_WN32) && defined(AV_WB32)
69# define AV_WN32(p, v) AV_WB32(p, v)
70# endif
71
72# if defined(AV_RN64) && !defined(AV_RB64)
73# define AV_RB64(p) AV_RN64(p)
74# elif !defined(AV_RN64) && defined(AV_RB64)
75# define AV_RN64(p) AV_RB64(p)
76# endif
77
78# if defined(AV_WN64) && !defined(AV_WB64)
79# define AV_WB64(p, v) AV_WN64(p, v)
80# elif !defined(AV_WN64) && defined(AV_WB64)
81# define AV_WN64(p, v) AV_WB64(p, v)
82# endif
83
84#else /* HAVE_BIGENDIAN */
85
86# if defined(AV_RN16) && !defined(AV_RL16)
87# define AV_RL16(p) AV_RN16(p)
88# elif !defined(AV_RN16) && defined(AV_RL16)
89# define AV_RN16(p) AV_RL16(p)
90# endif
91
92# if defined(AV_WN16) && !defined(AV_WL16)
93# define AV_WL16(p, v) AV_WN16(p, v)
94# elif !defined(AV_WN16) && defined(AV_WL16)
95# define AV_WN16(p, v) AV_WL16(p, v)
96# endif
97
98# if defined(AV_RN24) && !defined(AV_RL24)
99# define AV_RL24(p) AV_RN24(p)
100# elif !defined(AV_RN24) && defined(AV_RL24)
101# define AV_RN24(p) AV_RL24(p)
102# endif
103
104# if defined(AV_WN24) && !defined(AV_WL24)
105# define AV_WL24(p, v) AV_WN24(p, v)
106# elif !defined(AV_WN24) && defined(AV_WL24)
107# define AV_WN24(p, v) AV_WL24(p, v)
108# endif
109
110# if defined(AV_RN32) && !defined(AV_RL32)
111# define AV_RL32(p) AV_RN32(p)
112# elif !defined(AV_RN32) && defined(AV_RL32)
113# define AV_RN32(p) AV_RL32(p)
114# endif
115
116# if defined(AV_WN32) && !defined(AV_WL32)
117# define AV_WL32(p, v) AV_WN32(p, v)
118# elif !defined(AV_WN32) && defined(AV_WL32)
119# define AV_WN32(p, v) AV_WL32(p, v)
120# endif
121
122# if defined(AV_RN64) && !defined(AV_RL64)
123# define AV_RL64(p) AV_RN64(p)
124# elif !defined(AV_RN64) && defined(AV_RL64)
125# define AV_RN64(p) AV_RL64(p)
126# endif
127
128# if defined(AV_WN64) && !defined(AV_WL64)
129# define AV_WL64(p, v) AV_WN64(p, v)
130# elif !defined(AV_WN64) && defined(AV_WL64)
131# define AV_WN64(p, v) AV_WL64(p, v)
132# endif
133
134#endif /* !HAVE_BIGENDIAN */
135
136#define HAVE_ATTRIBUTE_PACKED 0
137#define HAVE_FAST_UNALIGNED 0
138/*
139 * Define AV_[RW]N helper macros to simplify definitions not provided
140 * by per-arch headers.
141 */
142
143#if HAVE_ATTRIBUTE_PACKED
144
145union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
146union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
147union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
148
149# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
150# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
151
152#elif defined(__DECC)
153
154# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
155# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
156
157#elif HAVE_FAST_UNALIGNED
158
159# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
160# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
161
162#else
163
164#ifndef AV_RB16
165# define AV_RB16(x) \
166 ((((const uint8_t*)(x))[0] << 8) | \
167 ((const uint8_t*)(x))[1])
168#endif
169#ifndef AV_WB16
170# define AV_WB16(p, d) do { \
171 ((uint8_t*)(p))[1] = (d); \
172 ((uint8_t*)(p))[0] = (d)>>8; \
173 } while(0)
174#endif
175
176#ifndef AV_RL16
177# define AV_RL16(x) \
178 ((((const uint8_t*)(x))[1] << 8) | \
179 ((const uint8_t*)(x))[0])
180#endif
181#ifndef AV_WL16
182# define AV_WL16(p, d) do { \
183 ((uint8_t*)(p))[0] = (d); \
184 ((uint8_t*)(p))[1] = (d)>>8; \
185 } while(0)
186#endif
187
188#ifndef AV_RB32
189/* Coldfire and ARMv6 and above support unaligned long reads */
190#if defined CPU_COLDFIRE || (defined CPU_ARM && ARM_ARCH >= 6)
191#define AV_RB32(x) (htobe32(*(const uint32_t*)(x)))
192#else
193# define AV_RB32(x) \
194 ((((const uint8_t*)(x))[0] << 24) | \
195 (((const uint8_t*)(x))[1] << 16) | \
196 (((const uint8_t*)(x))[2] << 8) | \
197 ((const uint8_t*)(x))[3])
198#endif
199#endif
200#ifndef AV_WB32
201# define AV_WB32(p, d) do { \
202 ((uint8_t*)(p))[3] = (d); \
203 ((uint8_t*)(p))[2] = (d)>>8; \
204 ((uint8_t*)(p))[1] = (d)>>16; \
205 ((uint8_t*)(p))[0] = (d)>>24; \
206 } while(0)
207#endif
208
209#ifndef AV_RL32
210# define AV_RL32(x) \
211 ((((const uint8_t*)(x))[3] << 24) | \
212 (((const uint8_t*)(x))[2] << 16) | \
213 (((const uint8_t*)(x))[1] << 8) | \
214 ((const uint8_t*)(x))[0])
215#endif
216#ifndef AV_WL32
217# define AV_WL32(p, d) do { \
218 ((uint8_t*)(p))[0] = (d); \
219 ((uint8_t*)(p))[1] = (d)>>8; \
220 ((uint8_t*)(p))[2] = (d)>>16; \
221 ((uint8_t*)(p))[3] = (d)>>24; \
222 } while(0)
223#endif
224
225#ifndef AV_RB64
226# define AV_RB64(x) \
227 (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
228 ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
229 ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
230 ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
231 ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
232 ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
233 ((uint64_t)((const uint8_t*)(x))[6] << 8) | \
234 (uint64_t)((const uint8_t*)(x))[7])
235#endif
236#ifndef AV_WB64
237# define AV_WB64(p, d) do { \
238 ((uint8_t*)(p))[7] = (d); \
239 ((uint8_t*)(p))[6] = (d)>>8; \
240 ((uint8_t*)(p))[5] = (d)>>16; \
241 ((uint8_t*)(p))[4] = (d)>>24; \
242 ((uint8_t*)(p))[3] = (d)>>32; \
243 ((uint8_t*)(p))[2] = (d)>>40; \
244 ((uint8_t*)(p))[1] = (d)>>48; \
245 ((uint8_t*)(p))[0] = (d)>>56; \
246 } while(0)
247#endif
248
249#ifndef AV_RL64
250# define AV_RL64(x) \
251 (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
252 ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
253 ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
254 ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
255 ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
256 ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
257 ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
258 (uint64_t)((const uint8_t*)(x))[0])
259#endif
260#ifndef AV_WL64
261# define AV_WL64(p, d) do { \
262 ((uint8_t*)(p))[0] = (d); \
263 ((uint8_t*)(p))[1] = (d)>>8; \
264 ((uint8_t*)(p))[2] = (d)>>16; \
265 ((uint8_t*)(p))[3] = (d)>>24; \
266 ((uint8_t*)(p))[4] = (d)>>32; \
267 ((uint8_t*)(p))[5] = (d)>>40; \
268 ((uint8_t*)(p))[6] = (d)>>48; \
269 ((uint8_t*)(p))[7] = (d)>>56; \
270 } while(0)
271#endif
272
273#if HAVE_BIGENDIAN
274# define AV_RN(s, p) AV_RB##s(p)
275# define AV_WN(s, p, v) AV_WB##s(p, v)
276#else
277# define AV_RN(s, p) AV_RL##s(p)
278# define AV_WN(s, p, v) AV_WL##s(p, v)
279#endif
280
281#endif /* HAVE_FAST_UNALIGNED */
282
283#ifndef AV_RN16
284# define AV_RN16(p) AV_RN(16, p)
285#endif
286
287#ifndef AV_RN32
288# define AV_RN32(p) AV_RN(32, p)
289#endif
290
291#ifndef AV_RN64
292# define AV_RN64(p) AV_RN(64, p)
293#endif
294
295#ifndef AV_WN16
296# define AV_WN16(p, v) AV_WN(16, p, v)
297#endif
298
299#ifndef AV_WN32
300# define AV_WN32(p, v) AV_WN(32, p, v)
301#endif
302
303#ifndef AV_WN64
304# define AV_WN64(p, v) AV_WN(64, p, v)
305#endif
306
307#if HAVE_BIGENDIAN
308# define AV_RB(s, p) AV_RN##s(p)
309# define AV_WB(s, p, v) AV_WN##s(p, v)
310# define AV_RL(s, p) bswap_##s(AV_RN##s(p))
311# define AV_WL(s, p, v) AV_WN##s(p, bswap_##s(v))
312#else
313# define AV_RB(s, p) bswap_##s(AV_RN##s(p))
314# define AV_WB(s, p, v) AV_WN##s(p, bswap_##s(v))
315# define AV_RL(s, p) AV_RN##s(p)
316# define AV_WL(s, p, v) AV_WN##s(p, v)
317#endif
318
319#define AV_RB8(x) (((const uint8_t*)(x))[0])
320#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0)
321
322#define AV_RL8(x) AV_RB8(x)
323#define AV_WL8(p, d) AV_WB8(p, d)
324
325#ifndef AV_RB16
326# define AV_RB16(p) AV_RB(16, p)
327#endif
328#ifndef AV_WB16
329# define AV_WB16(p, v) AV_WB(16, p, v)
330#endif
331
332#ifndef AV_RL16
333# define AV_RL16(p) AV_RL(16, p)
334#endif
335#ifndef AV_WL16
336# define AV_WL16(p, v) AV_WL(16, p, v)
337#endif
338
339#ifndef AV_RB32
340# define AV_RB32(p) AV_RB(32, p)
341#endif
342#ifndef AV_WB32
343# define AV_WB32(p, v) AV_WB(32, p, v)
344#endif
345
346#ifndef AV_RL32
347# define AV_RL32(p) AV_RL(32, p)
348#endif
349#ifndef AV_WL32
350# define AV_WL32(p, v) AV_WL(32, p, v)
351#endif
352
353#ifndef AV_RB64
354# define AV_RB64(p) AV_RB(64, p)
355#endif
356#ifndef AV_WB64
357# define AV_WB64(p, v) AV_WB(64, p, v)
358#endif
359
360#ifndef AV_RL64
361# define AV_RL64(p) AV_RL(64, p)
362#endif
363#ifndef AV_WL64
364# define AV_WL64(p, v) AV_WL(64, p, v)
365#endif
366
367#ifndef AV_RB24
368# define AV_RB24(x) \
369 ((((const uint8_t*)(x))[0] << 16) | \
370 (((const uint8_t*)(x))[1] << 8) | \
371 ((const uint8_t*)(x))[2])
372#endif
373#ifndef AV_WB24
374# define AV_WB24(p, d) do { \
375 ((uint8_t*)(p))[2] = (d); \
376 ((uint8_t*)(p))[1] = (d)>>8; \
377 ((uint8_t*)(p))[0] = (d)>>16; \
378 } while(0)
379#endif
380
381#ifndef AV_RL24
382# define AV_RL24(x) \
383 ((((const uint8_t*)(x))[2] << 16) | \
384 (((const uint8_t*)(x))[1] << 8) | \
385 ((const uint8_t*)(x))[0])
386#endif
387#ifndef AV_WL24
388# define AV_WL24(p, d) do { \
389 ((uint8_t*)(p))[0] = (d); \
390 ((uint8_t*)(p))[1] = (d)>>8; \
391 ((uint8_t*)(p))[2] = (d)>>16; \
392 } while(0)
393#endif
394
395/*
396 * The AV_[RW]NA macros access naturally aligned data
397 * in a type-safe way.
398 */
399
400#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s)
401#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
402
403#ifndef AV_RN16A
404# define AV_RN16A(p) AV_RNA(16, p)
405#endif
406
407#ifndef AV_RN32A
408# define AV_RN32A(p) AV_RNA(32, p)
409#endif
410
411#ifndef AV_RN64A
412# define AV_RN64A(p) AV_RNA(64, p)
413#endif
414
415#ifndef AV_WN16A
416# define AV_WN16A(p, v) AV_WNA(16, p, v)
417#endif
418
419#ifndef AV_WN32A
420# define AV_WN32A(p, v) AV_WNA(32, p, v)
421#endif
422
423#ifndef AV_WN64A
424# define AV_WN64A(p, v) AV_WNA(64, p, v)
425#endif
426
427/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
428 * naturally aligned. They may be implemented using MMX,
429 * so emms_c() must be called before using any float code
430 * afterwards.
431 */
432
433#define AV_COPY(n, d, s) \
434 (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
435
436#ifndef AV_COPY16
437# define AV_COPY16(d, s) AV_COPY(16, d, s)
438#endif
439
440#ifndef AV_COPY32
441# define AV_COPY32(d, s) AV_COPY(32, d, s)
442#endif
443
444#ifndef AV_COPY64
445# define AV_COPY64(d, s) AV_COPY(64, d, s)
446#endif
447
448#ifndef AV_COPY128
449# define AV_COPY128(d, s) \
450 do { \
451 AV_COPY64(d, s); \
452 AV_COPY64((char*)(d)+8, (char*)(s)+8); \
453 } while(0)
454#endif
455
456#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))
457
458#ifndef AV_SWAP64
459# define AV_SWAP64(a, b) AV_SWAP(64, a, b)
460#endif
461
462#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
463
464#ifndef AV_ZERO16
465# define AV_ZERO16(d) AV_ZERO(16, d)
466#endif
467
468#ifndef AV_ZERO32
469# define AV_ZERO32(d) AV_ZERO(32, d)
470#endif
471
472#ifndef AV_ZERO64
473# define AV_ZERO64(d) AV_ZERO(64, d)
474#endif
475
476#ifndef AV_ZERO128
477# define AV_ZERO128(d) \
478 do { \
479 AV_ZERO64(d); \
480 AV_ZERO64((char*)(d)+8); \
481 } while(0)
482#endif
483
484#endif /* AVUTIL_INTREADWRITE_H */
diff --git a/apps/codecs/lib/ffmpeg_put_bits.h b/apps/codecs/lib/ffmpeg_put_bits.h
deleted file mode 100644
index 38db55fe18..0000000000
--- a/apps/codecs/lib/ffmpeg_put_bits.h
+++ /dev/null
@@ -1,323 +0,0 @@
1/*
2 * copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21/**
22 * @file libavcodec/put_bits.h
23 * bitstream writer API
24 */
25
26#ifndef AVCODEC_PUT_BITS_H
27#define AVCODEC_PUT_BITS_H
28
29#include <stdint.h>
30#include <stdlib.h>
31#include "ffmpeg_bswap.h"
32#include "ffmpeg_intreadwrite.h"
33
34#define av_log(...)
35#define HAVE_FAST_UNALIGNED 0
36
37/* buf and buf_end must be present and used by every alternative writer. */
38typedef struct PutBitContext {
39#ifdef ALT_BITSTREAM_WRITER
40 uint8_t *buf, *buf_end;
41 int index;
42#else
43 uint32_t bit_buf;
44 int bit_left;
45 uint8_t *buf, *buf_ptr, *buf_end;
46#endif
47 int size_in_bits;
48} PutBitContext;
49
50/**
51 * Initializes the PutBitContext s.
52 *
53 * @param buffer the buffer where to put bits
54 * @param buffer_size the size in bytes of buffer
55 */
56static inline void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
57{
58 if(buffer_size < 0) {
59 buffer_size = 0;
60 buffer = NULL;
61 }
62
63 s->size_in_bits= 8*buffer_size;
64 s->buf = buffer;
65 s->buf_end = s->buf + buffer_size;
66#ifdef ALT_BITSTREAM_WRITER
67 s->index=0;
68 ((uint32_t*)(s->buf))[0]=0;
69// memset(buffer, 0, buffer_size);
70#else
71 s->buf_ptr = s->buf;
72 s->bit_left=32;
73 s->bit_buf=0;
74#endif
75}
76
77/**
78 * Returns the total number of bits written to the bitstream.
79 */
80static inline int put_bits_count(PutBitContext *s)
81{
82#ifdef ALT_BITSTREAM_WRITER
83 return s->index;
84#else
85 return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left;
86#endif
87}
88
89/**
90 * Pads the end of the output stream with zeros.
91 */
92static inline void flush_put_bits(PutBitContext *s)
93{
94#ifdef ALT_BITSTREAM_WRITER
95 align_put_bits(s);
96#else
97#ifndef BITSTREAM_WRITER_LE
98 s->bit_buf<<= s->bit_left;
99#endif
100 while (s->bit_left < 32) {
101 /* XXX: should test end of buffer */
102#ifdef BITSTREAM_WRITER_LE
103 *s->buf_ptr++=s->bit_buf;
104 s->bit_buf>>=8;
105#else
106 *s->buf_ptr++=s->bit_buf >> 24;
107 s->bit_buf<<=8;
108#endif
109 s->bit_left+=8;
110 }
111 s->bit_left=32;
112 s->bit_buf=0;
113#endif
114}
115
116#if defined(ALT_BITSTREAM_WRITER) || defined(BITSTREAM_WRITER_LE)
117#define align_put_bits align_put_bits_unsupported_here
118#define ff_put_string ff_put_string_unsupported_here
119#define ff_copy_bits ff_copy_bits_unsupported_here
120#else
121/**
122 * Pads the bitstream with zeros up to the next byte boundary.
123 */
124void align_put_bits(PutBitContext *s);
125
126/**
127 * Puts the string string in the bitstream.
128 *
129 * @param terminate_string 0-terminates the written string if value is 1
130 */
131void ff_put_string(PutBitContext *pb, const char *string, int terminate_string);
132
133/**
134 * Copies the content of src to the bitstream.
135 *
136 * @param length the number of bits of src to copy
137 */
138void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length);
139#endif
140
141/**
142 * Writes up to 31 bits into a bitstream.
143 * Use put_bits32 to write 32 bits.
144 */
145static inline void put_bits(PutBitContext *s, int n, unsigned int value)
146#ifndef ALT_BITSTREAM_WRITER
147{
148 unsigned int bit_buf;
149 int bit_left;
150
151 // printf("put_bits=%d %x\n", n, value);
152 //assert(n <= 31 && value < (1U << n));
153
154 bit_buf = s->bit_buf;
155 bit_left = s->bit_left;
156
157 // printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf);
158 /* XXX: optimize */
159#ifdef BITSTREAM_WRITER_LE
160 bit_buf |= value << (32 - bit_left);
161 if (n >= bit_left) {
162#if !HAVE_FAST_UNALIGNED
163 if (3 & (intptr_t) s->buf_ptr) {
164 AV_WL32(s->buf_ptr, bit_buf);
165 } else
166#endif
167 *(uint32_t *)s->buf_ptr = le2me_32(bit_buf);
168 s->buf_ptr+=4;
169 bit_buf = (bit_left==32)?0:value >> bit_left;
170 bit_left+=32;
171 }
172 bit_left-=n;
173#else
174 if (n < bit_left) {
175 bit_buf = (bit_buf<<n) | value;
176 bit_left-=n;
177 } else {
178 bit_buf<<=bit_left;
179 bit_buf |= value >> (n - bit_left);
180#if !HAVE_FAST_UNALIGNED
181 if (3 & (intptr_t) s->buf_ptr) {
182 AV_WB32(s->buf_ptr, bit_buf);
183 } else
184#endif
185 *(uint32_t *)s->buf_ptr = be2me_32(bit_buf);
186 //printf("bitbuf = %08x\n", bit_buf);
187 s->buf_ptr+=4;
188 bit_left+=32 - n;
189 bit_buf = value;
190 }
191#endif
192
193 s->bit_buf = bit_buf;
194 s->bit_left = bit_left;
195}
196#else /* ALT_BITSTREAM_WRITER defined */
197{
198# ifdef ALIGNED_BITSTREAM_WRITER
199# if ARCH_X86
200 __asm__ volatile(
201 "movl %0, %%ecx \n\t"
202 "xorl %%eax, %%eax \n\t"
203 "shrdl %%cl, %1, %%eax \n\t"
204 "shrl %%cl, %1 \n\t"
205 "movl %0, %%ecx \n\t"
206 "shrl $3, %%ecx \n\t"
207 "andl $0xFFFFFFFC, %%ecx \n\t"
208 "bswapl %1 \n\t"
209 "orl %1, (%2, %%ecx) \n\t"
210 "bswapl %%eax \n\t"
211 "addl %3, %0 \n\t"
212 "movl %%eax, 4(%2, %%ecx) \n\t"
213 : "=&r" (s->index), "=&r" (value)
214 : "r" (s->buf), "r" (n), "0" (s->index), "1" (value<<(-n))
215 : "%eax", "%ecx"
216 );
217# else
218 int index= s->index;
219 uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5);
220
221 value<<= 32-n;
222
223 ptr[0] |= be2me_32(value>>(index&31));
224 ptr[1] = be2me_32(value<<(32-(index&31)));
225//if(n>24) printf("%d %d\n", n, value);
226 index+= n;
227 s->index= index;
228# endif
229# else //ALIGNED_BITSTREAM_WRITER
230# if ARCH_X86
231 __asm__ volatile(
232 "movl $7, %%ecx \n\t"
233 "andl %0, %%ecx \n\t"
234 "addl %3, %%ecx \n\t"
235 "negl %%ecx \n\t"
236 "shll %%cl, %1 \n\t"
237 "bswapl %1 \n\t"
238 "movl %0, %%ecx \n\t"
239 "shrl $3, %%ecx \n\t"
240 "orl %1, (%%ecx, %2) \n\t"
241 "addl %3, %0 \n\t"
242 "movl $0, 4(%%ecx, %2) \n\t"
243 : "=&r" (s->index), "=&r" (value)
244 : "r" (s->buf), "r" (n), "0" (s->index), "1" (value)
245 : "%ecx"
246 );
247# else
248 int index= s->index;
249 uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3));
250
251 ptr[0] |= be2me_32(value<<(32-n-(index&7) ));
252 ptr[1] = 0;
253//if(n>24) printf("%d %d\n", n, value);
254 index+= n;
255 s->index= index;
256# endif
257# endif //!ALIGNED_BITSTREAM_WRITER
258}
259#endif
260
261static inline void put_sbits(PutBitContext *pb, int n, int32_t value)
262{
263 //assert(n >= 0 && n <= 31);
264
265 put_bits(pb, n, value & ((1<<n)-1));
266}
267
268/**
269 * Returns the pointer to the byte where the bitstream writer will put
270 * the next bit.
271 */
272static inline uint8_t* put_bits_ptr(PutBitContext *s)
273{
274#ifdef ALT_BITSTREAM_WRITER
275 return s->buf + (s->index>>3);
276#else
277 return s->buf_ptr;
278#endif
279}
280
281/**
282 * Skips the given number of bytes.
283 * PutBitContext must be flushed & aligned to a byte boundary before calling this.
284 */
285static inline void skip_put_bytes(PutBitContext *s, int n)
286{
287 //assert((put_bits_count(s)&7)==0);
288#ifdef ALT_BITSTREAM_WRITER
289 FIXME may need some cleaning of the buffer
290 s->index += n<<3;
291#else
292 //assert(s->bit_left==32);
293 s->buf_ptr += n;
294#endif
295}
296
297/**
298 * Skips the given number of bits.
299 * Must only be used if the actual values in the bitstream do not matter.
300 * If n is 0 the behavior is undefined.
301 */
302static inline void skip_put_bits(PutBitContext *s, int n)
303{
304#ifdef ALT_BITSTREAM_WRITER
305 s->index += n;
306#else
307 s->bit_left -= n;
308 s->buf_ptr-= 4*(s->bit_left>>5);
309 s->bit_left &= 31;
310#endif
311}
312
313/**
314 * Changes the end of the buffer.
315 *
316 * @param size the new size in bytes of the buffer where to put bits
317 */
318static inline void set_put_bits_buffer_size(PutBitContext *s, int size)
319{
320 s->buf_end= s->buf + size;
321}
322
323#endif /* AVCODEC_PUT_BITS_H */
diff --git a/apps/codecs/lib/fft-ffmpeg.c b/apps/codecs/lib/fft-ffmpeg.c
deleted file mode 100644
index 807f606038..0000000000
--- a/apps/codecs/lib/fft-ffmpeg.c
+++ /dev/null
@@ -1,473 +0,0 @@
1/*
2 * FFT/IFFT transforms converted to integer precision
3 * Copyright (c) 2010 Dave Hooper, Mohamed Tarek, Michael Giacomelli
4 * Copyright (c) 2008 Loren Merritt
5 * Copyright (c) 2002 Fabrice Bellard
6 * Partly based on libdjbfft by D. J. Bernstein
7 *
8 * This file is part of FFmpeg.
9 *
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
14 *
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
25/**
26 * @file libavcodec/fft.c
27 * FFT/IFFT transforms.
28 */
29
30
31#ifdef CPU_ARM
32// we definitely want CONFIG_SMALL undefined for ipod
33// so we get the inlined version of fft16 (which is measurably faster)
34#undef CONFIG_SMALL
35#else
36#undef CONFIG_SMALL
37#endif
38
39#include "fft.h"
40#include <string.h>
41#include <stdlib.h>
42#include <math.h>
43#include <inttypes.h>
44#include <time.h>
45#include <codecs/lib/codeclib.h>
46
47#include "codeclib_misc.h"
48#include "mdct_lookup.h"
49
50/* constants for fft_16 (same constants as in mdct_arm.S ... ) */
51#define cPI1_8 (0x7641af3d) /* cos(pi/8) s.31 */
52#define cPI2_8 (0x5a82799a) /* cos(2pi/8) = 1/sqrt(2) s.31 */
53#define cPI3_8 (0x30fbc54d) /* cos(3pi/8) s.31 */
54
55/* asm-optimised functions and/or macros */
56#include "fft-ffmpeg_arm.h"
57#include "fft-ffmpeg_cf.h"
58
59#ifndef ICODE_ATTR_TREMOR_MDCT
60#define ICODE_ATTR_TREMOR_MDCT ICODE_ATTR
61#endif
62
63#if 0
64static int split_radix_permutation(int i, int n, int inverse)
65{
66 int m;
67 if(n <= 2) return i&1;
68 m = n >> 1;
69 if(!(i&m)) return split_radix_permutation(i, m, inverse)*2;
70 m >>= 1;
71 if(inverse == !(i&m)) return split_radix_permutation(i, m, inverse)*4 + 1;
72 else return split_radix_permutation(i, m, inverse)*4 - 1;
73}
74
75static void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
76{
77 int j, k, np;
78 FFTComplex tmp;
79 //const uint16_t *revtab = s->revtab;
80 np = 1 << s->nbits;
81
82 const int revtab_shift = (12 - s->nbits);
83
84 /* reverse */
85 for(j=0;j<np;j++) {
86 k = revtab[j]>>revtab_shift;
87 if (k < j) {
88 tmp = z[k];
89 z[k] = z[j];
90 z[j] = tmp;
91 }
92 }
93}
94#endif
95
96#define BF(x,y,a,b) {\
97 x = a - b;\
98 y = a + b;\
99}
100
101#define BF_REV(x,y,a,b) {\
102 x = a + b;\
103 y = a - b;\
104}
105
106#ifndef FFT_FFMPEG_INCL_OPTIMISED_BUTTERFLIES
107#define BUTTERFLIES(a0,a1,a2,a3) {\
108 {\
109 FFTSample temp1,temp2;\
110 BF(temp1, temp2, t5, t1);\
111 BF(a2.re, a0.re, a0.re, temp2);\
112 BF(a3.im, a1.im, a1.im, temp1);\
113 }\
114 {\
115 FFTSample temp1,temp2;\
116 BF(temp1, temp2, t2, t6);\
117 BF(a3.re, a1.re, a1.re, temp1);\
118 BF(a2.im, a0.im, a0.im, temp2);\
119 }\
120}
121
122// force loading all the inputs before storing any.
123// this is slightly slower for small data, but avoids store->load aliasing
124// for addresses separated by large powers of 2.
125#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
126 FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
127 {\
128 FFTSample temp1, temp2;\
129 BF(temp1, temp2, t5, t1);\
130 BF(a2.re, a0.re, r0, temp2);\
131 BF(a3.im, a1.im, i1, temp1);\
132 }\
133 {\
134 FFTSample temp1, temp2;\
135 BF(temp1, temp2, t2, t6);\
136 BF(a3.re, a1.re, r1, temp1);\
137 BF(a2.im, a0.im, i0, temp2);\
138 }\
139}
140#endif
141
142/*
143 see conjugate pair description in
144 http://www.fftw.org/newsplit.pdf
145
146 a0 = z[k]
147 a1 = z[k+N/4]
148 a2 = z[k+2N/4]
149 a3 = z[k+3N/4]
150
151 result:
152 y[k] = z[k]+w(z[k+2N/4])+w'(z[k+3N/4])
153 y[k+N/4] = z[k+N/4]-iw(z[k+2N/4])+iw'(z[k+3N/4])
154 y[k+2N/4] = z[k]-w(z[k+2N/4])-w'(z[k+3N/4])
155 y[k+3N/4] = z[k+N/4]+iw(z[k+2N/4])-iw'(z[k+3N/4])
156
157 i.e.
158
159 a0 = a0 + (w.a2 + w'.a3)
160 a1 = a1 - i(w.a2 - w'.a3)
161 a2 = a0 - (w.a2 + w'.a3)
162 a3 = a1 + i(w.a2 - w'.a3)
163
164 note re(w') = re(w) and im(w') = -im(w)
165
166 so therefore
167
168 re(a0) = re(a0) + re(w.a2) + re(w.a3)
169 im(a0) = im(a0) + im(w.a2) - im(w.a3) etc
170
171 and remember also that
172 Re([s+it][u+iv]) = su-tv
173 Im([s+it][u+iv]) = sv+tu
174
175 so
176 Re(w'.(s+it)) = Re(w').s - Im(w').t = Re(w).s + Im(w).t
177 Im(w'.(s+it)) = Re(w').t + Im(w').s = Re(w).t - Im(w).s
178
179 For inverse dft we take the complex conjugate of all twiddle factors.
180 Hence
181
182 a0 = a0 + (w'.a2 + w.a3)
183 a1 = a1 - i(w'.a2 - w.a3)
184 a2 = a0 - (w'.a2 + w.a3)
185 a3 = a1 + i(w'.a2 - w.a3)
186
187 Define t1 = Re(w'.a2) = Re(w)*Re(a2) + Im(w)*Im(a2)
188 t2 = Im(w'.a2) = Re(w)*Im(a2) - Im(w)*Re(a2)
189 t5 = Re(w.a3) = Re(w)*Re(a3) - Im(w)*Im(a3)
190 t6 = Im(w.a3) = Re(w)*Im(a3) + Im(w)*Re(a3)
191
192 Then we just output:
193 a0.re = a0.re + ( t1 + t5 )
194 a0.im = a0.im + ( t2 + t6 )
195 a1.re = a1.re + ( t2 - t6 ) // since we multiply by -i and i(-i) = 1
196 a1.im = a1.im - ( t1 - t5 ) // since we multiply by -i and 1(-i) = -i
197 a2.re = a0.re - ( t1 + t5 )
198 a2.im = a0.im - ( t1 + t5 )
199 a3.re = a1.re - ( t2 - t6 ) // since we multiply by +i and i(+i) = -1
200 a3.im = a1.im + ( t1 - t5 ) // since we multiply by +i and 1(+i) = i
201
202
203*/
204
205#ifndef FFT_FFMPEG_INCL_OPTIMISED_TRANSFORM
206static inline FFTComplex* TRANSFORM(FFTComplex * z, unsigned int n, FFTSample wre, FFTSample wim)
207{
208 register FFTSample t1,t2,t5,t6,r_re,r_im;
209 r_re = z[n*2].re;
210 r_im = z[n*2].im;
211 XPROD31_R(r_re, r_im, wre, wim, t1,t2);
212 r_re = z[n*3].re;
213 r_im = z[n*3].im;
214 XNPROD31_R(r_re, r_im, wre, wim, t5,t6);
215 BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
216 return z+1;
217}
218
219static inline FFTComplex* TRANSFORM_W01(FFTComplex * z, unsigned int n, const FFTSample * w)
220{
221 register const FFTSample wre=w[0],wim=w[1];
222 register FFTSample t1,t2,t5,t6,r_re,r_im;
223 r_re = z[n*2].re;
224 r_im = z[n*2].im;
225 XPROD31_R(r_re, r_im, wre, wim, t1,t2);
226 r_re = z[n*3].re;
227 r_im = z[n*3].im;
228 XNPROD31_R(r_re, r_im, wre, wim, t5,t6);
229 BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
230 return z+1;
231}
232
233static inline FFTComplex* TRANSFORM_W10(FFTComplex * z, unsigned int n, const FFTSample * w)
234{
235 register const FFTSample wim=w[0],wre=w[1];
236 register FFTSample t1,t2,t5,t6,r_re,r_im;
237 r_re = z[n*2].re;
238 r_im = z[n*2].im;
239 XPROD31_R(r_re, r_im, wre, wim, t1,t2);
240 r_re = z[n*3].re;
241 r_im = z[n*3].im;
242 XNPROD31_R(r_re, r_im, wre, wim, t5,t6);
243 BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
244 return z+1;
245}
246
247static inline FFTComplex* TRANSFORM_EQUAL(FFTComplex * z, unsigned int n)
248{
249 register FFTSample t1,t2,t5,t6,temp1,temp2;
250 register FFTSample * my_z = (FFTSample *)(z);
251 my_z += n*4;
252 t2 = MULT31(my_z[0], cPI2_8);
253 temp1 = MULT31(my_z[1], cPI2_8);
254 my_z += n*2;
255 temp2 = MULT31(my_z[0], cPI2_8);
256 t5 = MULT31(my_z[1], cPI2_8);
257 t1 = ( temp1 + t2 );
258 t2 = ( temp1 - t2 );
259 t6 = ( temp2 + t5 );
260 t5 = ( temp2 - t5 );
261 my_z -= n*6;
262 BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
263 return z+1;
264}
265
266static inline FFTComplex* TRANSFORM_ZERO(FFTComplex * z, unsigned int n)
267{
268 FFTSample t1,t2,t5,t6;
269 t1 = z[n*2].re;
270 t2 = z[n*2].im;
271 t5 = z[n*3].re;
272 t6 = z[n*3].im;
273 BUTTERFLIES(z[0],z[n],z[n*2],z[n*3]);
274 return z+1;
275}
276#endif
277
278/* z[0...8n-1], w[1...2n-1] */
279static void pass(FFTComplex *z_arg, unsigned int STEP_arg, unsigned int n_arg) ICODE_ATTR_TREMOR_MDCT;
280static void pass(FFTComplex *z_arg, unsigned int STEP_arg, unsigned int n_arg)
281{
282 register FFTComplex * z = z_arg;
283 register unsigned int STEP = STEP_arg;
284 register unsigned int n = n_arg;
285
286 register const FFTSample *w = sincos_lookup0+STEP;
287 /* wre = *(wim+1) . ordering is sin,cos */
288 register const FFTSample *w_end = sincos_lookup0+1024;
289
290 /* first two are special (well, first one is special, but we need to do pairs) */
291 z = TRANSFORM_ZERO(z,n);
292 z = TRANSFORM_W10(z,n,w);
293 w += STEP;
294 /* first pass forwards through sincos_lookup0*/
295 do {
296 z = TRANSFORM_W10(z,n,w);
297 w += STEP;
298 z = TRANSFORM_W10(z,n,w);
299 w += STEP;
300 } while(LIKELY(w < w_end));
301 /* second half: pass backwards through sincos_lookup0*/
302 /* wim and wre are now in opposite places so ordering now [0],[1] */
303 w_end=sincos_lookup0;
304 while(LIKELY(w>w_end))
305 {
306 z = TRANSFORM_W01(z,n,w);
307 w -= STEP;
308 z = TRANSFORM_W01(z,n,w);
309 w -= STEP;
310 }
311}
312
313/* what is STEP?
314 sincos_lookup0 has sin,cos pairs for 1/4 cycle, in 1024 points
315 so half cycle would be 2048 points
316 ff_cos_16 has 8 elements corresponding to 4 cos points and 4 sin points
317 so each of the 4 points pairs corresponds to a 256*2-byte jump in sincos_lookup0
318 8192/16 (from "ff_cos_16") is 512 bytes.
319 i.e. for fft16, STEP = 8192/16 */
320#define DECL_FFT(n,n2,n4)\
321static void fft##n(FFTComplex *z) ICODE_ATTR_TREMOR_MDCT;\
322static void fft##n(FFTComplex *z)\
323{\
324 fft##n2(z);\
325 fft##n4(z+n4*2);\
326 fft##n4(z+n4*3);\
327 pass(z,8192/n,n4);\
328}
329
330#ifndef FFT_FFMPEG_INCL_OPTIMISED_FFT4
331static inline void fft4(FFTComplex *z)
332{
333 FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
334
335 BF(t3, t1, z[0].re, z[1].re); // t3=r1-r3 ; t1 = r1+r3
336 BF(t8, t6, z[3].re, z[2].re); // t8=r7-r5 ; t6 = r7+r5
337
338 BF(z[2].re, z[0].re, t1, t6); // r5=t1-t6 ; r1 = t1+t6
339
340 BF(t4, t2, z[0].im, z[1].im); // t4=r2-r4 ; t2 = r2+r4
341 BF(t7, t5, z[2].im, z[3].im); // t7=r6-r8 ; t5 = r6+r8
342
343 BF(z[3].im, z[1].im, t4, t8); // r8=t4-t8 ; r4 = t4+t8
344 BF(z[3].re, z[1].re, t3, t7); // r7=t3-t7 ; r3 = t3+t7
345 BF(z[2].im, z[0].im, t2, t5); // r6=t2-t5 ; r2 = t2+t5
346}
347#endif
348
349static void fft4_dispatch(FFTComplex *z)
350{
351 fft4(z);
352}
353
354#ifndef FFT_FFMPEG_INCL_OPTIMISED_FFT8
355static inline void fft8(FFTComplex *z)
356{
357 fft4(z);
358 FFTSample t1,t2,t3,t4,t7,t8;
359
360 BF(t1, z[5].re, z[4].re, -z[5].re);
361 BF(t2, z[5].im, z[4].im, -z[5].im);
362 BF(t3, z[7].re, z[6].re, -z[7].re);
363 BF(t4, z[7].im, z[6].im, -z[7].im);
364 BF(t8, t1, t3, t1);
365 BF(t7, t2, t2, t4);
366 BF(z[4].re, z[0].re, z[0].re, t1);
367 BF(z[4].im, z[0].im, z[0].im, t2);
368 BF(z[6].re, z[2].re, z[2].re, t7);
369 BF(z[6].im, z[2].im, z[2].im, t8);
370
371 z++;
372 TRANSFORM_EQUAL(z,2);
373}
374#endif
375
376static void fft8_dispatch(FFTComplex *z)
377{
378 fft8(z);
379}
380
381#ifndef CONFIG_SMALL
382static void fft16(FFTComplex *z) ICODE_ATTR_TREMOR_MDCT;
383static void fft16(FFTComplex *z)
384{
385 fft8(z);
386 fft4(z+8);
387 fft4(z+12);
388
389 TRANSFORM_ZERO(z,4);
390 z+=2;
391 TRANSFORM_EQUAL(z,4);
392 z-=1;
393 TRANSFORM(z,4,cPI1_8,cPI3_8);
394 z+=2;
395 TRANSFORM(z,4,cPI3_8,cPI1_8);
396}
397#else
398DECL_FFT(16,8,4)
399#endif
400DECL_FFT(32,16,8)
401DECL_FFT(64,32,16)
402DECL_FFT(128,64,32)
403DECL_FFT(256,128,64)
404DECL_FFT(512,256,128)
405DECL_FFT(1024,512,256)
406DECL_FFT(2048,1024,512)
407DECL_FFT(4096,2048,1024)
408
409static void (*fft_dispatch[])(FFTComplex*) = {
410 fft4_dispatch, fft8_dispatch, fft16, fft32, fft64, fft128, fft256, fft512, fft1024,
411 fft2048, fft4096
412};
413
414void ff_fft_calc_c(int nbits, FFTComplex *z)
415{
416 fft_dispatch[nbits-2](z);
417}
418
419#if 0
420int main (void)
421{
422#define PRECISION 16
423#define FFT_SIZE 1024
424#define ftofix32(x) ((fixed32)((x) * (float)(1 << PRECISION) + ((x) < 0 ? -0.5 : 0.5)))
425#define itofix32(x) ((x) << PRECISION)
426#define fixtoi32(x) ((x) >> PRECISION)
427
428 int j;
429 const long N = FFT_SIZE;
430 double r[FFT_SIZE] = {0.0}, i[FFT_SIZE] = {0.0};
431 long n;
432 double t;
433 double amp, phase;
434 clock_t start, end;
435 double exec_time = 0;
436 FFTContext s;
437 FFTComplex z[FFT_SIZE];
438 memset(z, 0, 64*sizeof(FFTComplex));
439
440 /* Generate saw-tooth test data */
441 for (n = 0; n < FFT_SIZE; n++)
442 {
443 t = (2 * M_PI * n)/N;
444 /*z[n].re = 1.1 + sin( t) +
445 0.5 * sin(2.0 * t) +
446 (1.0/3.0) * sin(3.0 * t) +
447 0.25 * sin(4.0 * t) +
448 0.2 * sin(5.0 * t) +
449 (1.0/6.0) * sin(6.0 * t) +
450 (1.0/7.0) * sin(7.0 * t) ;*/
451 z[n].re = ftofix32(cos(2*M_PI*n/64));
452 //printf("z[%d] = %f\n", n, z[n].re);
453 //getchar();
454 }
455
456 ff_fft_init(&s, 10, 1);
457//start = clock();
458//for(n = 0; n < 1000000; n++)
459 ff_fft_permute_c(&s, z);
460 ff_fft_calc_c(&s, z);
461//end = clock();
462//exec_time = (((double)end-(double)start)/CLOCKS_PER_SEC);
463 for(j = 0; j < FFT_SIZE; j++)
464 {
465 printf("%8.4f\n", sqrt(pow(fixtof32(z[j].re),2)+ pow(fixtof32(z[j].im), 2)));
466 //getchar();
467 }
468 printf("muls = %d, adds = %d\n", muls, adds);
469//printf(" Time elapsed = %f\n", exec_time);
470 //ff_fft_end(&s);
471
472}
473#endif
diff --git a/apps/codecs/lib/fft-ffmpeg_arm.h b/apps/codecs/lib/fft-ffmpeg_arm.h
deleted file mode 100644
index 073ad8ee46..0000000000
--- a/apps/codecs/lib/fft-ffmpeg_arm.h
+++ /dev/null
@@ -1,456 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2010 Dave Hooper
11 *
12 * ARM optimisations for ffmpeg's fft (used in fft-ffmpeg.c)
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
18 *
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
21 *
22 ****************************************************************************/
23
24#ifdef CPU_ARM
25
26/* Start off with optimised variants of the butterflies that work
27 nicely on arm */
28/* 1. where y and a share the same variable/register */
29#define BF_OPT(x,y,a,b) {\
30 y = a + b;\
31 x = y - (b<<1);\
32}
33
34/* 2. where y and b share the same variable/register */
35#define BF_OPT2(x,y,a,b) {\
36 x = a - b;\
37 y = x + (b<<1);\
38}
39
40/* 3. where y and b share the same variable/register (but y=(-b)) */
41#define BF_OPT2_REV(x,y,a,b) {\
42 x = a + b;\
43 y = x - (b<<1);\
44}
45
46
47/* standard BUTTERFLIES package. Note, we actually manually inline this
48 in all the TRANSFORM macros below anyway */
49#define FFT_FFMPEG_INCL_OPTIMISED_BUTTERFLIES
50#define BUTTERFLIES(a0,a1,a2,a3) {\
51 {\
52 BF_OPT(t1, t5, t5, t1);\
53 BF_OPT(t6, t2, t2, t6);\
54 BF_OPT(a2.re, a0.re, a0.re, t5);\
55 BF_OPT(a2.im, a0.im, a0.im, t2);\
56 BF_OPT(a3.re, a1.re, a1.re, t6);\
57 BF_OPT(a3.im, a1.im, a1.im, t1);\
58 }\
59}
60
61#define FFT_FFMPEG_INCL_OPTIMISED_TRANSFORM
62
63static inline FFTComplex* TRANSFORM( FFTComplex* z, int n, FFTSample wre, FFTSample wim )
64{
65 register FFTSample t1,t2 asm("r5"),t5 asm("r6"),t6 asm("r7"),r_re asm("r8"),r_im asm("r9");
66 z += n*2; /* z[o2] */
67 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
68 XPROD31_R(r_re, r_im, wre, wim, t1,t2);
69
70 z += n; /* z[o3] */
71 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
72 XNPROD31_R(r_re, r_im, wre, wim, t5,t6);
73
74 BF_OPT(t1, t5, t5, t1);
75 BF_OPT(t6, t2, t2, t6);
76
77 {
78 register FFTSample rt0temp asm("r4");
79 /*{*/
80 /* BF_OPT(t1, t5, t5, t1);*/
81 /* BF_OPT(t6, t2, t2, t6);*/
82 /* BF_OPT(a2.re, a0.re, a0.re, t5);*/
83 /* BF_OPT(a2.im, a0.im, a0.im, t2);*/
84 /* BF_OPT(a3.re, a1.re, a1.re, t6);*/
85 /* BF_OPT(a3.im, a1.im, a1.im, t1);*/
86 /*}*/
87 z -= n*3;
88 /* r_re = my_z[0]; r_im = my_z[1]; */
89 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
90 BF_OPT(rt0temp, r_re, r_re, t5);
91 BF_OPT(t2, r_im, r_im, t2);
92 /* my_z[0] = r_re; my_z[1] = r_im; */
93 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory" );
94 z += n;
95 /* r_re = my_z[0]; r_im = my_z[1]; */
96 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
97 BF_OPT(t5, r_re, r_re, t6);
98 BF_OPT(t6, r_im, r_im, t1);
99 /* my_z[0] = r_re; my_z[1] = r_im; */
100 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
101 z += n;
102 /* my_z[0] = rt0temp; my_z[1] = t2; */
103 asm volatile( "stmia %[my_z], {%[rt0temp],%[t2]}\n\t"::[my_z] "r" (z), [rt0temp] "r" (rt0temp), [t2] "r" (t2):"memory");
104 }
105 z += n;
106
107 /* my_z[0] = t5; my_z[1] = t6; */
108 asm volatile( "stmia %[my_z]!, {%[t5],%[t6]}\n\t":[my_z] "+r" (z) : [t5] "r" (t5), [t6] "r" (t6):"memory");
109 z -= n*3;
110 return(z);
111}
112
113static inline FFTComplex* TRANSFORM_W01( FFTComplex* z, int n, const FFTSample* w )
114{
115 register FFTSample t1,t2 asm("r5"),t5 asm("r6"),t6 asm("r7"),r_re asm("r8"),r_im asm("r9");
116
117 /* load wre,wim into t5,t6 */
118 asm volatile( "ldmia %[w], {%[wre], %[wim]}\n\t":[wre] "=r" (t5), [wim] "=r" (t6):[w] "r" (w));
119 z += n*2; /* z[o2] -- 2n * 2 since complex numbers */
120 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
121 XPROD31_R(r_re, r_im, t5 /*wre*/, t6 /*wim*/, t1,t2);
122
123 z += n; /* z[o3] */
124 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
125 XNPROD31_R(r_re, r_im, t5 /*wre*/, t6 /*wim*/, t5,t6);
126
127 BF_OPT(t1, t5, t5, t1);
128 BF_OPT(t6, t2, t2, t6);
129 {
130 register FFTSample rt0temp asm("r4");
131 /*{*/
132 /* BF_OPT(t1, t5, t5, t1);*/
133 /* BF_OPT(t6, t2, t2, t6);*/
134 /* BF_OPT(a2.re, a0.re, a0.re, t5);*/
135 /* BF_OPT(a2.im, a0.im, a0.im, t2);*/
136 /* BF_OPT(a3.re, a1.re, a1.re, t6);*/
137 /* BF_OPT(a3.im, a1.im, a1.im, t1);*/
138 /*}*/
139 z -= n*3;
140 /* r_re = my_z[0]; r_im = my_z[1]; */
141 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
142 BF_OPT(rt0temp, r_re, r_re, t5);
143 BF_OPT(t2, r_im, r_im, t2);
144 /* my_z[0] = r_re; my_z[1] = r_im; */
145 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
146 z += n;
147 /* r_re = my_z[0]; r_im = my_z[1]; */
148 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
149 BF_OPT(t5, r_re, r_re, t6);
150 BF_OPT(t6, r_im, r_im, t1);
151 /* my_z[0] = r_re; my_z[1] = r_im; */
152 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
153 z += n;
154 /* my_z[0] = rt0temp; my_z[1] = t2; */
155 asm volatile( "stmia %[my_z], {%[rt0temp],%[t2]}\n\t"::[my_z] "r" (z), [rt0temp] "r" (rt0temp), [t2] "r" (t2):"memory");
156 }
157 z += n;
158
159 /* my_z[0] = t5; my_z[1] = t6; */
160 asm volatile( "stmia %[my_z]!, {%[t5],%[t6]}\n\t":[my_z] "+r" (z) : [t5] "r" (t5), [t6] "r" (t6):"memory");
161 z -= n*3;
162 return(z);
163}
164
165static inline FFTComplex* TRANSFORM_W10( FFTComplex* z, int n, const FFTSample* w )
166{
167 register FFTSample t1,t2 asm("r5"),t5 asm("r6"),t6 asm("r7"),r_re asm("r8"),r_im asm("r9");
168
169 /* load wim,wre into t5,t6 */
170 asm volatile( "ldmia %[w], {%[wim], %[wre]}\n\t":[wim] "=r" (t5), [wre] "=r" (t6):[w] "r" (w));
171 z += n*2; /* z[o2] -- 2n * 2 since complex numbers */
172 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
173 XPROD31_R(r_re, r_im, t6 /*wim*/, t5 /*wre*/, t1,t2);
174
175 z += n; /* z[o3] */
176 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
177 XNPROD31_R(r_re, r_im, t6 /*wim*/, t5 /*wre*/, t5,t6);
178
179 BF_OPT(t1, t5, t5, t1);
180 BF_OPT(t6, t2, t2, t6);
181 {
182 register FFTSample rt0temp asm("r4");
183 /*{*/
184 /* BF_OPT(t1, t5, t5, t1);*/
185 /* BF_OPT(t6, t2, t2, t6);*/
186 /* BF_OPT(a2.re, a0.re, a0.re, t5);*/
187 /* BF_OPT(a2.im, a0.im, a0.im, t2);*/
188 /* BF_OPT(a3.re, a1.re, a1.re, t6);*/
189 /* BF_OPT(a3.im, a1.im, a1.im, t1);*/
190 /*}*/
191 z -= n*3;
192 /* r_re = my_z[0]; r_im = my_z[1]; */
193 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
194 BF_OPT(rt0temp, r_re, r_re, t5);
195 BF_OPT(t2, r_im, r_im, t2);
196 /* my_z[0] = r_re; my_z[1] = r_im; */
197 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
198 z += n;
199 /* r_re = my_z[0]; r_im = my_z[1]; */
200 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
201 BF_OPT(t5, r_re, r_re, t6);
202 BF_OPT(t6, r_im, r_im, t1);
203 /* my_z[0] = r_re; my_z[1] = r_im; */
204 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
205 z += n;
206 /* my_z[0] = rt0temp; my_z[1] = t2; */
207 asm volatile( "stmia %[my_z], {%[rt0temp],%[t2]}\n\t"::[my_z] "r" (z), [rt0temp] "r" (rt0temp), [t2] "r" (t2):"memory");
208 }
209 z += n;
210
211 /* my_z[0] = t5; my_z[1] = t6; */
212 asm volatile( "stmia %[my_z]!, {%[t5],%[t6]}\n\t":[my_z] "+r" (z) : [t5] "r" (t5), [t6] "r" (t6):"memory");
213 z -= n*3;
214 return(z);
215}
216
217static inline FFTComplex* TRANSFORM_EQUAL( FFTComplex* z, int n )
218{
219 register FFTSample t1,t2 asm("r5"),t5 asm("r6"),t6 asm("r7"),r_re asm("r8"),r_im asm("r9");
220
221 z += n*2; /* z[o2] -- 2n * 2 since complex numbers */
222 asm volatile( "ldmia %[my_z], {%[t5],%[t6]}\n\t":[t5] "=r" (t5), [t6] "=r" (t6):[my_z] "r" (z));
223 z += n; /* z[o3] */
224 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
225
226/**/
227/*t2 = MULT32(cPI2_8, t5);*/
228/*t1 = MULT31(cPI2_8, t6);*/
229/*t6 = MULT31(cPI2_8, r_re);*/
230/*t5 = MULT32(cPI2_8, r_im);*/
231
232/*t1 = ( t1 + (t2<<1) );*/
233/*t2 = ( t1 - (t2<<2) );*/
234/*t6 = ( t6 + (t5<<1) );*/
235/*t5 = ( t6 - (t5<<2) );*/
236/**/
237 t2 = MULT31(cPI2_8, t5);
238 t6 = MULT31(cPI2_8, t6);
239 r_re = MULT31(cPI2_8, r_re);
240 t5 = MULT31(cPI2_8, r_im);
241
242 t1 = ( t6 + t2 );
243 t2 = ( t6 - t2 );
244 t6 = ( r_re + t5 );
245 t5 = ( r_re - t5 );
246
247 BF_OPT(t1, t5, t5, t1);
248 BF_OPT(t6, t2, t2, t6);
249 {
250 register FFTSample rt0temp asm("r4");
251 /*{*/
252 /* BF_OPT(t1, t5, t5, t1);*/
253 /* BF_OPT(t6, t2, t2, t6);*/
254 /* BF_OPT(a2.re, a0.re, a0.re, t5);*/
255 /* BF_OPT(a2.im, a0.im, a0.im, t2);*/
256 /* BF_OPT(a3.re, a1.re, a1.re, t6);*/
257 /* BF_OPT(a3.im, a1.im, a1.im, t1);*/
258 /*}*/
259 z -= n*3;
260 /* r_re = my_z[0]; r_im = my_z[1]; */
261 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
262 BF_OPT(rt0temp, r_re, r_re, t5);
263 BF_OPT(t2, r_im, r_im, t2);
264 /* my_z[0] = r_re; my_z[1] = r_im; */
265 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
266 z += n;
267 /* r_re = my_z[0]; r_im = my_z[1]; */
268 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
269 BF_OPT(t5, r_re, r_re, t6);
270 BF_OPT(t6, r_im, r_im, t1);
271 /* my_z[0] = r_re; my_z[1] = r_im; */
272 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
273 z += n;
274 /* my_z[0] = rt0temp; my_z[1] = t2; */
275 asm volatile( "stmia %[my_z], {%[rt0temp],%[t2]}\n\t"::[my_z] "r" (z), [rt0temp] "r" (rt0temp), [t2] "r" (t2):"memory");
276 }
277 z += n;
278
279 /* my_z[0] = t5; my_z[1] = t6; */
280 asm volatile( "stmia %[my_z]!, {%[t5],%[t6]}\n\t":[my_z] "+r" (z) : [t5] "r" (t5), [t6] "r" (t6):"memory");
281 z -= n*3;
282 return(z);
283}
284
285static inline FFTComplex* TRANSFORM_ZERO( FFTComplex* z, int n )
286{
287 register FFTSample t1,t2 asm("r5"),t5 asm("r6"),t6 asm("r7"), r_re asm("r8"), r_im asm("r9");
288
289 z += n*2; /* z[o2] -- 2n * 2 since complex numbers */
290 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
291 z += n; /* z[o3] */
292 asm volatile( "ldmia %[my_z], {%[t5],%[t6]}\n\t":[t5] "=r" (t5), [t6] "=r" (t6):[my_z] "r" (z));
293
294 BF_OPT(t1, t5, t5, r_re);
295 BF_OPT(t6, t2, r_im, t6);
296 {
297 register FFTSample rt0temp asm("r4");
298 /*{*/
299 /* BF_OPT(t1, t5, t5, t1);*/
300 /* BF_OPT(t6, t2, t2, t6);*/
301 /* BF_OPT(a2.re, a0.re, a0.re, t5);*/
302 /* BF_OPT(a2.im, a0.im, a0.im, t2);*/
303 /* BF_OPT(a3.re, a1.re, a1.re, t6);*/
304 /* BF_OPT(a3.im, a1.im, a1.im, t1);*/
305 /*}*/
306 z -= n*3;
307 /* r_re = my_z[0]; r_im = my_z[1]; */
308 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
309 BF_OPT(rt0temp, r_re, r_re, t5);
310 BF_OPT(t2, r_im, r_im, t2);
311 /* my_z[0] = r_re; my_z[1] = r_im; */
312 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
313 z += n;
314 /* r_re = my_z[0]; r_im = my_z[1]; */
315 asm volatile( "ldmia %[my_z], {%[r_re],%[r_im]}\n\t":[r_re] "=r" (r_re), [r_im] "=r" (r_im):[my_z] "r" (z));
316 BF_OPT(t5, r_re, r_re, t6);
317 BF_OPT(t6, r_im, r_im, t1);
318 /* my_z[0] = r_re; my_z[1] = r_im; */
319 asm volatile( "stmia %[my_z], {%[r_re],%[r_im]}\n\t"::[my_z] "r" (z), [r_re] "r" (r_re), [r_im] "r" (r_im):"memory");
320 z += n;
321 /* my_z[0] = rt0temp; my_z[1] = t2; */
322 asm volatile( "stmia %[my_z], {%[rt0temp],%[t2]}\n\t"::[my_z] "r" (z), [rt0temp] "r" (rt0temp), [t2] "r" (t2):"memory");
323 }
324 z += n;
325
326 /* my_z[0] = t5; my_z[1] = t6; */
327 asm volatile( "stmia %[my_z]!, {%[t5],%[t6]}\n\t":[my_z] "+r" (z) : [t5] "r" (t5), [t6] "r" (t6):"memory");
328 z -= n*3;
329 return(z);
330}
331
332#define FFT_FFMPEG_INCL_OPTIMISED_FFT4
333static inline FFTComplex* fft4(FFTComplex * z)
334{
335 FFTSample temp;
336
337 /* input[0..7] -> output[0..7] */
338 /* load r1=z[0],r2=z[1],...,r8=z[7] */
339 asm volatile(
340 "ldmia %[z], {r1-r8}\n\t"
341 "add r1,r1,r3\n\t" /* r1 :=t1 */
342 "sub r3,r1,r3, lsl #1\n\t" /* r3 :=t3 */
343 "sub r7,r7,r5\n\t" /* r10:=t8 */
344 "add r5,r7,r5, lsl #1\n\t" /* r5 :=t6 */
345
346 "add r1,r1,r5\n\t" /* r1 = o[0] */
347 "sub r5,r1,r5, lsl #1\n\t" /* r5 = o[4] */
348
349 "add r2,r2,r4\n\t" /* r2 :=t2 */
350 "sub r4,r2,r4, lsl #1\n\t" /* r9 :=t4 */
351
352 "add %[temp],r6,r8\n\t" /* r10:=t5 */
353 "sub r6,r6,r8\n\t" /* r6 :=t7 */
354
355 "sub r8,r4,r7\n\t" /* r8 = o[7]*/
356 "add r4,r4,r7\n\t" /* r4 = o[3]*/
357 "sub r7,r3,r6\n\t" /* r7 = o[6]*/
358 "add r3,r3,r6\n\t" /* r3 = o[2]*/
359 "sub r6,r2,%[temp]\n\t" /* r6 = o[5]*/
360 "add r2,r2,%[temp]\n\t" /* r2 = o[1]*/
361
362 "stmia %[z]!, {r1-r8}\n\t"
363 : /* outputs */ [z] "+r" (z), [temp] "=r" (temp)
364 : /* inputs */
365 : /* clobbers */
366 "r1","r2","r3","r4","r5","r6","r7","r8","memory"
367 );
368 return z;
369}
370
371#define FFT_FFMPEG_INCL_OPTIMISED_FFT8
372 /* The chunk of asm below is equivalent to the following:
373
374 // first load in z[4].re thru z[7].im into local registers
375 // ...
376 BF_OPT2_REV(z[4].re, z[5].re, z[4].re, z[5].re); // x=a+b; y=x-(b<<1)
377 BF_OPT2_REV(z[4].im, z[5].im, z[4].im, z[5].im);
378 BF_REV (temp, z[7].re, z[6].re, z[7].re); // x=a+b; y=a-b;
379 BF_REV (z[6].re, z[7].im, z[6].im, z[7].im);
380 // save z[7].re and z[7].im as those are complete now
381 // z[5].re and z[5].im are also complete now but save these later on
382
383 BF(z[6].im, z[4].re, temp, z[4].re); // x=a-b; y=a+b
384 BF_OPT(z[6].re, z[4].im, z[4].im, z[6].re); // y=a+b; x=y-(b<<1)
385 // now load z[2].re and z[2].im
386 // ...
387 BF_OPT(z[6].re, z[2].re, z[2].re, z[6].re); // y=a+b; x=y-(b<<1)
388 BF_OPT(z[6].im, z[2].im, z[2].im, z[6].im); // y=a+b; x=y-(b<<1)
389 // Now save z[6].re and z[6].im, along with z[5].re and z[5].im
390 // for efficiency. Also save z[2].re and z[2].im.
391 // Now load z[0].re and z[0].im
392 // ...
393
394 BF_OPT(z[4].re, z[0].re, z[0].re, z[4].re); // y=a+b; x=y-(b<<1)
395 BF_OPT(z[4].im, z[0].im, z[0].im, z[4].im); // y=a+b; x=y-(b<<1)
396 // Finally save out z[4].re, z[4].im, z[0].re and z[0].im
397 // ...
398 */
399static inline void fft8(FFTComplex * z)
400{
401 FFTComplex* m4 = fft4(z);
402 {
403 /* note that we increment z_ptr on the final stmia, which
404 leaves z_ptr pointing to z[1].re ready for the Transform step */
405
406 register FFTSample temp;
407
408 asm volatile(
409 /* read in z[4].re thru z[7].im */
410 "ldmia %[z4_ptr]!, {r1-r8}\n\t"
411 /* (now points one word past &z[7].im) */
412 "add r1,r1,r3\n\t"
413 "sub r3,r1,r3,lsl #1\n\t"
414 "add r2,r2,r4\n\t"
415 "sub r4,r2,r4,lsl #1\n\t"
416 "add %[temp],r5,r7\n\t"
417 "sub r7,r5,r7\n\t"
418 "add r5,r6,r8\n\t"
419 "sub r8,r6,r8\n\t"
420
421 "stmdb %[z4_ptr]!, {r7,r8}\n\t" /* write z[7].re,z[7].im straight away */
422 /* Note, registers r7 & r8 now free */
423
424 "sub r6,%[temp],r1\n\t"
425 "add r1,%[temp],r1\n\t"
426 "add r2,r2,r5\n\t"
427 "sub r5,r2,r5,lsl #1\n\t"
428 "add %[temp], %[z_ptr], #16\n\t" /* point to &z[2].re */
429 "ldmia %[temp],{r7,r8}\n\t" /* load z[2].re and z[2].im */
430 "add r7,r7,r5\n\t"
431 "sub r5,r7,r5,lsl #1\n\t"
432 "add r8,r8,r6\n\t"
433 "sub r6,r8,r6,lsl #1\n\t"
434
435 /* write out z[5].re, z[5].im, z[6].re, z[6].im in one go*/
436 "stmdb %[z4_ptr]!, {r3-r6}\n\t"
437 "stmia %[temp],{r7,r8}\n\t" /* write out z[2].re, z[2].im */
438 "ldmia %[z_ptr],{r7,r8}\n\t" /* load r[0].re, r[0].im */
439
440 "add r7,r7,r1\n\t"
441 "sub r1,r7,r1,lsl #1\n\t"
442 "add r8,r8,r2\n\t"
443 "sub r2,r8,r2,lsl #1\n\t"
444
445 "stmia %[z_ptr]!,{r7,r8}\n\t" /* write out z[0].re, z[0].im */
446 "stmdb %[z4_ptr], {r1,r2}\n\t" /* write out z[4].re, z[4].im */
447 : [z4_ptr] "+r" (m4), [temp] "=r" (temp), [z_ptr] "+r" (z)
448 :
449 : "r1","r2","r3","r4","r5","r6","r7","r8","memory"
450 );
451 }
452
453 TRANSFORM_EQUAL(z,2);
454}
455
456#endif // CPU_ARM
diff --git a/apps/codecs/lib/fft-ffmpeg_cf.h b/apps/codecs/lib/fft-ffmpeg_cf.h
deleted file mode 100644
index a29464a23d..0000000000
--- a/apps/codecs/lib/fft-ffmpeg_cf.h
+++ /dev/null
@@ -1,370 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2010 Nils Wallménius
11 *
12 * Coldfire v2 optimisations for ffmpeg's fft (used in fft-ffmpeg.c)
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
18 *
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
21 *
22 ****************************************************************************/
23
24#ifdef CPU_COLDFIRE
25#define FFT_FFMPEG_INCL_OPTIMISED_FFT4
/* In-place 4-point FFT on z[0..3], ColdFire-optimised: the whole complex
   block is held in d0-d7 and combined with register-to-register add/sub
   butterflies only.  NOTE(review): hand-scheduled asm — comment-only edits. */
26static inline void fft4(FFTComplex * z)
27{
28    asm volatile ("movem.l (%[z]), %%d0-%%d7\n\t"
29                  "move.l %%d0, %%a0\n\t"
30                  "add.l %%d2, %%d0\n\t" /* d0 == t1 */
31                  "neg.l %%d2\n\t"
32                  "add.l %%a0, %%d2\n\t" /* d2 == t3, a0 free */
33                  "move.l %%d6, %%a0\n\t"
34                  "sub.l %%d4, %%d6\n\t" /* d6 == t8 */
35                  "add.l %%d4, %%a0\n\t" /* a0 == t6 */
36
37                  "move.l %%d0, %%d4\n\t"
38                  "sub.l %%a0, %%d4\n\t" /* z[2].re done */
39                  "add.l %%a0, %%d0\n\t" /* z[0].re done, a0 free */
40
41                  "move.l %%d5, %%a0\n\t"
42                  "sub.l %%d7, %%d5\n\t" /* d5 == t7 */
43                  "add.l %%d7, %%a0\n\t" /* a0 == t5 */
44
45                  "move.l %%d1, %%d7\n\t"
46                  "sub.l %%d3, %%d7\n\t" /* d7 == t4 */
47                  "add.l %%d3, %%d1\n\t" /* d1 == t2 */
48
49                  "move.l %%d7, %%d3\n\t"
50                  "sub.l %%d6, %%d7\n\t" /* z[3].im done */
51                  "add.l %%d6, %%d3\n\t" /* z[1].im done */
52
53                  "move.l %%d2, %%d6\n\t"
54                  "sub.l %%d5, %%d6\n\t" /* z[3].re done */
55                  "add.l %%d5, %%d2\n\t" /* z[1].re done */
56
57                  "move.l %%d1, %%d5\n\t"
58                  "sub.l %%a0, %%d5\n\t" /* z[2].im done */
59                  "add.l %%a0, %%d1\n\t" /* z[0].im done */
60
61                  "movem.l %%d0-%%d7, (%[z])\n\t"
62                  : :[z] "a" (z)
63                  : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
64                  "a0", "cc", "memory");
65
66}
67
68#define FFT_FFMPEG_INCL_OPTIMISED_FFT8
/* In-place 8-point FFT, ColdFire-optimised.  Fuses the z[4..7] butterflies,
   an inlined copy of fft4 for z[0..3], and the TRANSFORM_EQUAL twiddle step
   into one asm block so z[5]/z[7] never round-trip through memory: their
   cPI2_8 products are parked in the EMAC accumulators acc0-acc3 until the
   final stage.  NOTE(review): hand-scheduled asm — comment-only edits. */
69static inline void fft8(FFTComplex *z)
70{
71    asm volatile ("movem.l (4*8, %[z]), %%d0-%%d7\n\t"
72                  "move.l %%d0, %%a1\n\t"
73                  "add.l %%d2, %%a1\n\t" /* a1 == t1 */
74                  "sub.l %%d2, %%d0\n\t" /* d0 == z[5].re */
75
76                  "move.l %%d1, %%a2\n\t"
77                  "add.l %%d3, %%a2\n\t" /* a2 == t2 */
78                  "sub.l %%d3, %%d1\n\t" /* d1 == z[5].im */
79
80                  "move.l %%d4, %%d2\n\t"
81                  "add.l %%d6, %%d2\n\t" /* d2 == t3 */
82                  "sub.l %%d6, %%d4\n\t" /* d4 == z[7].re */
83
84                  "move.l %%d5, %%d3\n\t"
85                  "add.l %%d7, %%d3\n\t" /* d3 == t4 */
86                  "sub.l %%d7, %%d5\n\t" /* d5 == z[7].im */
87
88                  "move.l %%d2, %%a4\n\t"
89                  "sub.l %%a1, %%a4\n\t" /* a4 == t8 */
90                  "add.l %%d2, %%a1\n\t" /* a1 == t1, d2 free */
91
92                  "move.l %%a2, %%a3\n\t"
93                  "sub.l %%d3, %%a3\n\t" /* a3 == t7 */
94                  "add.l %%d3, %%a2\n\t" /* a2 == t2, d3 free */
95
96                  /* emac block from TRANSFORM_EQUAL, do this now
97                     so we don't need to store and load z[5] and z[7] */
98                  "move.l %[_cPI2_8], %%d2\n\t"
99                  "mac.l %%d2, %%d0, %%acc0\n\t"
100                  "mac.l %%d2, %%d1, %%acc1\n\t"
101                  "mac.l %%d2, %%d4, %%acc2\n\t"
102                  "mac.l %%d2, %%d5, %%acc3\n\t"
103
104                  /* fft4, clobbers all d regs and a0 */
105                  "movem.l (%[z]), %%d0-%%d7\n\t"
106                  "move.l %%d0, %%a0\n\t"
107                  "add.l %%d2, %%d0\n\t" /* d0 == t1 */
108                  "neg.l %%d2\n\t"
109                  "add.l %%a0, %%d2\n\t" /* d2 == t3, a0 free */
110                  "move.l %%d6, %%a0\n\t"
111                  "sub.l %%d4, %%d6\n\t" /* d6 == t8 */
112                  "add.l %%d4, %%a0\n\t" /* a0 == t6 */
113
114                  "move.l %%d0, %%d4\n\t"
115                  "sub.l %%a0, %%d4\n\t" /* z[2].re done */
116                  "add.l %%a0, %%d0\n\t" /* z[0].re done, a0 free */
117
118                  "move.l %%d5, %%a0\n\t"
119                  "sub.l %%d7, %%d5\n\t" /* d5 == t7 */
120                  "add.l %%d7, %%a0\n\t" /* a0 == t5 */
121
122                  "move.l %%d1, %%d7\n\t"
123                  "sub.l %%d3, %%d7\n\t" /* d7 == t4 */
124                  "add.l %%d3, %%d1\n\t" /* d1 == t2 */
125
126                  "move.l %%d7, %%d3\n\t"
127                  "sub.l %%d6, %%d7\n\t" /* z[3].im done */
128                  "add.l %%d6, %%d3\n\t" /* z[1].im done */
129
130                  "move.l %%d2, %%d6\n\t"
131                  "sub.l %%d5, %%d6\n\t" /* z[3].re done */
132                  "add.l %%d5, %%d2\n\t" /* z[1].re done */
133
134                  "move.l %%d1, %%d5\n\t"
135                  "sub.l %%a0, %%d5\n\t" /* z[2].im done */
136                  "add.l %%a0, %%d1\n\t" /* z[0].im done */
137                  /* end of fft4, but don't store yet */
138
139                  "move.l %%d0, %%a0\n\t"
140                  "add.l %%a1, %%d0\n\t"
141                  "sub.l %%a1, %%a0\n\t" /* z[4].re, z[0].re done, a1 free */
142
143                  "move.l %%d1, %%a1\n\t"
144                  "add.l %%a2, %%d1\n\t"
145                  "sub.l %%a2, %%a1\n\t" /* z[4].im, z[0].im done, a2 free */
146
147                  "move.l %%d4, %%a2\n\t"
148                  "add.l %%a3, %%d4\n\t"
149                  "sub.l %%a3, %%a2\n\t" /* z[6].re, z[2].re done, a3 free */
150
151                  "move.l %%d5, %%a3\n\t"
152                  "add.l %%a4, %%d5\n\t"
153                  "sub.l %%a4, %%a3\n\t" /* z[6].im, z[2].im done, a4 free */
154
155                  "movem.l %%d0-%%d1, (%[z])\n\t" /* save z[0] */
156                  "movem.l %%d4-%%d5, (2*8, %[z])\n\t" /* save z[2] */
157                  "movem.l %%a0-%%a1, (4*8, %[z])\n\t" /* save z[4] */
158                  "movem.l %%a2-%%a3, (6*8, %[z])\n\t" /* save z[6] */
159
160                  /* TRANSFORM_EQUAL */
161                  "movclr.l %%acc0, %%d0\n\t"
162                  "movclr.l %%acc1, %%d1\n\t"
163                  "movclr.l %%acc2, %%d4\n\t"
164                  "movclr.l %%acc3, %%d5\n\t"
165
166                  "move.l %%d1, %%a0\n\t"
167                  "add.l %%d0, %%a0\n\t" /* a0 == t1 */
168                  "sub.l %%d0, %%d1\n\t" /* d1 == t2 */
169
170                  "move.l %%d4, %%d0\n\t"
171                  "add.l %%d5, %%d0\n\t" /* d0 == t6 */
172                  "sub.l %%d5, %%d4\n\t" /* d4 == t5 */
173
174                  "move.l %%d4, %%a1\n\t"
175                  "sub.l %%a0, %%a1\n\t" /* a1 == temp1 */
176                  "add.l %%a0, %%d4\n\t" /* d4 == temp2 */
177
178                  "move.l %%d2, %%a2\n\t"
179                  "sub.l %%d4, %%a2\n\t" /* a2 == z[5].re */
180                  "add.l %%d4, %%d2\n\t" /* z[1].re done */
181
182                  "move.l %%d7, %%d5\n\t"
183                  "sub.l %%a1, %%d5\n\t" /* d5 == z[7].im */
184                  "add.l %%a1, %%d7\n\t" /* z[3].im done */
185
186                  "move.l %%d1, %%a0\n\t"
187                  "sub.l %%d0, %%a0\n\t" /* a0 == temp1 */
188                  "add.l %%d0, %%d1\n\t" /* d1 == temp2 */
189
190                  "move.l %%d6, %%d4\n\t"
191                  "sub.l %%a0, %%d4\n\t" /* d4 == z[7].re */
192                  "add.l %%a0, %%d6\n\t" /* z[3].re done */
193
194                  "move.l %%d3, %%a3\n\t"
195                  "sub.l %%d1, %%a3\n\t" /* a3 == z[5].im */
196                  "add.l %%d1, %%d3\n\t" /* z[1].im done */
197
198                  "movem.l %%d2-%%d3, (1*8, %[z])\n\t" /* save z[1] */
199                  "movem.l %%d6-%%d7, (3*8, %[z])\n\t" /* save z[3] */
200                  "movem.l %%a2-%%a3, (5*8, %[z])\n\t" /* save z[5] */
201                  "movem.l %%d4-%%d5, (7*8, %[z])\n\t" /* save z[7] */
202                  : :[z] "a" (z), [_cPI2_8] "i" (cPI2_8)
203                  : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
204                  "a0", "a1", "a2", "a3", "a4", "cc", "memory");
205}
206
207#define FFT_FFMPEG_INCL_OPTIMISED_TRANSFORM
208
/* Radix-4 twiddle/butterfly step on z[0], z[n], z[2n], z[3n] with twiddle
   factors (wre, wim), using the EMAC accumulators for the four fixed-point
   products.  Returns z advanced by one complex element (the two (%[z])+
   post-increments inside the asm advance the pointer by 8 bytes).
   NOTE(review): hand-scheduled asm — comment-only edits. */
209static inline FFTComplex* TRANSFORM(FFTComplex * z, unsigned int n, FFTSample wre, FFTSample wim)
210{
211    asm volatile ("move.l (%[z2]), %%d5\n\t"
212                  "mac.l %%d5, %[wre], (4, %[z2]), %%d4, %%acc0\n\t"
213                  "mac.l %%d4, %[wim], %%acc0\n\t"
214                  "mac.l %%d4, %[wre], (%[z3]), %%d6, %%acc1\n\t"
215                  "msac.l %%d5, %[wim], (4, %[z3]), %%d7, %%acc1\n\t"
216                  "mac.l %%d6, %[wre], (%[z])+, %%d4, %%acc2\n\t"
217                  "msac.l %%d7, %[wim], (%[z])+, %%d5, %%acc2\n\t"
218                  "mac.l %%d7, %[wre], %%acc3\n\t"
219                  "mac.l %%d6, %[wim], %%acc3\n\t"
220
221                  "movclr.l %%acc0, %[wre]\n\t" /* t1 */
222                  "movclr.l %%acc2, %[wim]\n\t" /* t5 */
223
224                  "move.l %%d4, %%d6\n\t"
225                  "move.l %[wim], %%d7\n\t"
226                  "sub.l %[wre], %[wim]\n\t" /* t5 = t5-t1 */
227                  "add.l %[wre], %%d7\n\t"
228                  "sub.l %%d7, %%d6\n\t" /* d6 = a0re - (t5+t1) => a2re */
229                  "add.l %%d7, %%d4\n\t" /* d4 = a0re + (t5+t1) => a0re */
230
231                  "movclr.l %%acc3, %%d7\n\t" /* t6 */
232                  "movclr.l %%acc1, %%d3\n\t" /* t2 */
233
234                  "move.l %%d3, %[wre]\n\t"
235                  "add.l %%d7, %[wre]\n\t"
236                  "sub.l %%d7, %%d3\n\t" /* t2 = t6-t2 */
237                  "move.l %%d5, %%d7\n\t"
238                  "sub.l %[wre], %%d7\n\t" /* d7 = a0im - (t2+t6) => a2im */
239
240                  "movem.l %%d6-%%d7, (%[z2])\n\t" /* store z2 */
241                  "add.l %[wre], %%d5\n\t" /* d5 = a0im + (t2+t6) => a0im */
242                  "movem.l %%d4-%%d5, (-8, %[z])\n\t" /* store z0 */
243
244                  "movem.l (%[z1]), %%d4-%%d5\n\t" /* load z1 */
245                  "move.l %%d4, %%d6\n\t"
246
247                  "sub.l %%d3, %%d6\n\t" /* d6 = a1re - (t2-t6) => a3re */
248                  "add.l %%d3, %%d4\n\t" /* d4 = a1re + (t2-t6) => a1re */
249
250                  "move.l %%d5, %%d7\n\t"
251                  "sub.l %[wim], %%d7\n\t"
252                  "movem.l %%d6-%%d7, (%[z3])\n\t" /* store z3 */
253                  "add.l %[wim], %%d5\n\t"
254                  "movem.l %%d4-%%d5, (%[z1])\n\t" /* store z1 */
255
256                  : [wre] "+r" (wre), [wim] "+r" (wim), /* we clobber these after using them */
257                  [z] "+a" (z)
258                  : [z1] "a" (&z[n]), [z2] "a" (&z[2*n]), [z3] "a" (&z[3*n])
259                  : "d3", "d4", "d5", "d6", "d7", "cc", "memory");
260    return z;
261}
262
/* TRANSFORM with the twiddle pair taken in (wre, wim) = (w[0], w[1]) order. */
263static inline FFTComplex* TRANSFORM_W01(FFTComplex * z, unsigned int n, const FFTSample * w)
264{
265    return TRANSFORM(z, n, w[0], w[1]);
266}
267
/* TRANSFORM with the twiddle pair swapped: (wre, wim) = (w[1], w[0]). */
268static inline FFTComplex* TRANSFORM_W10(FFTComplex * z, unsigned int n, const FFTSample * w)
269{
270    return TRANSFORM(z, n, w[1], w[0]);
271}
272
/* Butterfly step for the zero-angle twiddle (wre=1, wim=0): the complex
   multiplies degenerate to plain adds/subs, so no EMAC work is needed.
   Returns z+1, matching TRANSFORM's pointer advance.
   NOTE(review): hand-scheduled asm — comment-only edits. */
273static inline FFTComplex* TRANSFORM_ZERO(FFTComplex * z, unsigned int n)
274{
275    asm volatile("movem.l (%[z]), %%d4-%%d5\n\t" /* load z0 */
276                 "move.l %%d4, %%d6\n\t"
277                 "movem.l (%[z2]), %%d2-%%d3\n\t" /* load z2 */
278                 "movem.l (%[z3]), %%d0-%%d1\n\t" /* load z3 */
279                 "move.l %%d0, %%d7\n\t"
280                 "sub.l %%d2, %%d0\n\t"
281                 "add.l %%d2, %%d7\n\t"
282                 "sub.l %%d7, %%d6\n\t" /* d6 = a0re - (t5+t1) => a2re */
283                 "add.l %%d7, %%d4\n\t" /* d4 = a0re + (t5+t1) => a0re */
284
285                 "move.l %%d5, %%d7\n\t"
286                 "move.l %%d3, %%d2\n\t"
287                 "add.l %%d1, %%d2\n\t"
288                 "sub.l %%d2, %%d7\n\t" /* d7 = a0im - (t2+t6) => a2im */
289                 "movem.l %%d6-%%d7, (%[z2])\n\t" /* store z2 */
290                 "add.l %%d2, %%d5\n\t" /* d5 = a0im + (t2+t6) => a0im */
291                 "movem.l %%d4-%%d5, (%[z])\n\t" /* store z0 */
292
293                 "movem.l (%[z1]), %%d4-%%d5\n\t" /* load z1 */
294                 "move.l %%d4, %%d6\n\t"
295                 "sub.l %%d1, %%d3\n\t"
296                 "sub.l %%d3, %%d6\n\t" /* d6 = a1re - (t2-t6) => a3re */
297                 "add.l %%d3, %%d4\n\t" /* d4 = a1re + (t2-t6) => a1re */
298
299                 "move.l %%d5, %%d7\n\t"
300                 "sub.l %%d0, %%d7\n\t"
301                 "movem.l %%d6-%%d7, (%[z3])\n\t" /* store z3 */
302                 "add.l %%d0, %%d5\n\t"
303
304                 "movem.l %%d4-%%d5, (%[z1])\n\t" /* store z1 */
305
306                 :
307                 : [z] "a" (z), [z1] "a" (&z[n]), [z2] "a" (&z[2*n]), [z3] "a" (&z[3*n])
308                 : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "cc", "memory");
309    return z+1;
310}
311
/* Butterfly step for the 45-degree twiddle (wre == wim == cPI2_8): both
   complex multiplies share one constant, so only four EMAC products are
   needed.  Returns z+1, matching TRANSFORM's pointer advance.
   NOTE(review): hand-scheduled asm — comment-only edits. */
312static inline FFTComplex* TRANSFORM_EQUAL(FFTComplex * z, unsigned int n)
313{
314    asm volatile ("movem.l (%[z2]), %%d0-%%d1\n\t"
315                  "move.l %[_cPI2_8], %%d2\n\t"
316                  "mac.l %%d0, %%d2, (%[z3]), %%d0, %%acc0\n\t"
317                  "mac.l %%d1, %%d2, (4, %[z3]), %%d1, %%acc1\n\t"
318                  "mac.l %%d0, %%d2, (%[z]), %%d4, %%acc2\n\t"
319                  "mac.l %%d1, %%d2, (4, %[z]), %%d5, %%acc3\n\t"
320
321                  "movclr.l %%acc0, %%d0\n\t"
322                  "movclr.l %%acc1, %%d1\n\t"
323                  "movclr.l %%acc2, %%d2\n\t"
324                  "movclr.l %%acc3, %%d3\n\t"
325
326                  "move.l %%d0, %%d7\n\t"
327                  "add.l %%d1, %%d0\n\t" /* d0 == t1 */
328                  "sub.l %%d7, %%d1\n\t" /* d1 == t2 */
329
330                  "move.l %%d3, %%d7\n\t"
331                  "add.l %%d2, %%d3\n\t" /* d3 == t6 */
332                  "sub.l %%d7, %%d2\n\t" /* d2 == t5 */
333
334                  "move.l %%d4, %%d6\n\t"
335                  "move.l %%d2, %%d7\n\t"
336                  "sub.l %%d0, %%d2\n\t" /* t5 = t5-t1 */
337                  "add.l %%d0, %%d7\n\t"
338                  "sub.l %%d7, %%d6\n\t" /* d6 = a0re - (t5+t1) => a2re */
339                  "add.l %%d7, %%d4\n\t" /* d4 = a0re + (t5+t1) => a0re */
340
341                  "move.l %%d1, %%d0\n\t"
342                  "add.l %%d3, %%d0\n\t"
343                  "sub.l %%d3, %%d1\n\t" /* t2 = t6-t2 */
344                  "move.l %%d5, %%d7\n\t"
345                  "sub.l %%d0, %%d7\n\t" /* d7 = a0im - (t2+t6) => a2im */
346
347                  "movem.l %%d6-%%d7, (%[z2])\n\t" /* store z2 */
348                  "add.l %%d0, %%d5\n\t" /* d5 = a0im + (t2+t6) => a0im */
349                  "movem.l %%d4-%%d5, (%[z])\n\t" /* store z0 */
350
351                  "movem.l (%[z1]), %%d4-%%d5\n\t" /* load z1 */
352                  "move.l %%d4, %%d6\n\t"
353
354                  "sub.l %%d1, %%d6\n\t" /* d6 = a1re - (t2-t6) => a3re */
355                  "add.l %%d1, %%d4\n\t" /* d4 = a1re + (t2-t6) => a1re */
356
357                  "move.l %%d5, %%d7\n\t"
358                  "sub.l %%d2, %%d7\n\t"
359                  "movem.l %%d6-%%d7, (%[z3])\n\t" /* store z3 */
360                  "add.l %%d2, %%d5\n\t"
361                  "movem.l %%d4-%%d5, (%[z1])\n\t" /* store z1 */
362
363                  :: [z] "a" (z), [z1] "a" (&z[n]), [z2] "a" (&z[2*n]), [z3] "a" (&z[3*n]),
364                  [_cPI2_8] "i" (cPI2_8)
365                  : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "cc", "memory");
366
367    return z+1;
368}
369
370#endif /* CPU_COLDFIRE */
diff --git a/apps/codecs/lib/fft.h b/apps/codecs/lib/fft.h
deleted file mode 100644
index 302a3b3996..0000000000
--- a/apps/codecs/lib/fft.h
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * WMA compatible decoder
3 * Copyright (c) 2002 The FFmpeg Project.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#ifndef CODECLIB_FFT_H_INCLUDED
20#define CODECLIB_FFT_H_INCLUDED
21
22#include <inttypes.h>
23typedef int32_t fixed32;
24typedef int64_t fixed64;
25
26#define FFT_FIXED
27
28#ifdef FFT_FIXED
29typedef fixed32 FFTSample;
30#else /* FFT_FIXED */
31typedef float FFTSample;
32#endif /* FFT_FIXED */
33
/* One complex sample: real and imaginary parts in FFTSample precision. */
34typedef struct FFTComplex {
35    FFTSample re, im;
36} FFTComplex;
37
/* State for an FFT/MDCT instance: sizes, permutation table, rotation
   tables and backend entry points (presumably filled in at init time —
   no initialiser is visible in this file). */
38typedef struct FFTContext {
39    int nbits;           /* transform size as a power of two, cf. mdct_bits below */
40    int inverse;         /* NOTE(review): presumably nonzero for inverse transform — confirm at init site */
41    uint16_t *revtab;    /* bit-reversal reorder table (see revtab use in mdct.c) */
42    int mdct_size; /* size of MDCT (i.e. number of input data * 2) */
43    int mdct_bits; /* n = 2^nbits */
44    /* pre/post rotation tables */
45    FFTSample *tcos;
46    FFTSample *tsin;
47    void (*fft_permute)(struct FFTContext *s, FFTComplex *z);
48    void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
49    void (*imdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
50    void (*imdct_half)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
51    void (*mdct_calc)(struct FFTContext *s, FFTSample *output, const FFTSample *input);
52    int split_radix;
53    int permutation;     /* one of the FF_MDCT_PERM_* values below */
54#define FF_MDCT_PERM_NONE 0
55#define FF_MDCT_PERM_INTERLEAVE 1
56} FFTContext;
57
58// internal api (fft<->mdct)
59//int fft_calc_unscaled(FFTContext *s, FFTComplex *z);
60//void ff_fft_permute_c(FFTContext *s, FFTComplex *z); // internal only?
61void ff_fft_calc_c(int nbits, FFTComplex *z);
62
63#endif // CODECLIB_FFT_H_INCLUDED
64
diff --git a/apps/codecs/lib/fixedpoint.c b/apps/codecs/lib/fixedpoint.c
deleted file mode 100644
index 352e246673..0000000000
--- a/apps/codecs/lib/fixedpoint.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../fixedpoint.c"
diff --git a/apps/codecs/lib/fixedpoint.h b/apps/codecs/lib/fixedpoint.h
deleted file mode 100644
index 1cbd1573bb..0000000000
--- a/apps/codecs/lib/fixedpoint.h
+++ /dev/null
@@ -1,49 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2006 Jens Arnold
11 *
12 * Fixed point library for plugins
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
18 *
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
21 *
22 ****************************************************************************/
23
24 /** CODECS - FIXED POINT MATH ROUTINES - USAGE
25 *
26 * - x and y arguments are fixed point integers
27 * - fracbits is the number of fractional bits in the argument(s)
28 * - functions return long fixed point integers with the specified number
29 * of fractional bits unless otherwise specified
30 *
31 * Calculate sin and cos of an angle:
32 * fp_sincos(phase, *cos)
33 * where phase is a 32 bit unsigned integer with 0 representing 0
34 * and 0xFFFFFFFF representing 2*pi, and *cos is the address to
35 * a long signed integer. Value returned is a long signed integer
36 * from -0x80000000 to 0x7fffffff, representing -1 to 1 respectively.
37 * That is, value is a fixed point integer with 31 fractional bits.
38 *
39 * Take square root of a fixed point number:
40 * fp_sqrt(x, fracbits)
41 *
42 */
43#ifndef _FIXEDPOINT_H_CODECS
44#define _FIXEDPOINT_H_CODECS
45
46long fp_sincos(unsigned long phase, long *cos);
47long fp_sqrt(long a, unsigned int fracbits);
48
49#endif
diff --git a/apps/codecs/lib/libcodec.make b/apps/codecs/lib/libcodec.make
deleted file mode 100644
index 7aef72f2b1..0000000000
--- a/apps/codecs/lib/libcodec.make
+++ /dev/null
@@ -1,37 +0,0 @@
1# __________ __ ___.
2# Open \______ \ ____ ____ | | _\_ |__ _______ ___
3# Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
4# Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
5# Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
6# \/ \/ \/ \/ \/
7# $Id$
8#
9
10CODECLIB := $(CODECDIR)/libcodec.a
11CODECLIB_SRC := $(call preprocess, $(APPSDIR)/codecs/lib/SOURCES)
12CODECLIB_OBJ := $(call c2obj, $(CODECLIB_SRC))
13OTHER_SRC += $(CODECLIB_SRC)
14
15$(CODECLIB): $(CODECLIB_OBJ)
16 $(SILENT)$(shell rm -f $@)
17 $(call PRINTS,AR $(@F))$(AR) rcs $@ $^ >/dev/null
18
19CODECLIBFLAGS = $(filter-out -O%,$(CODECFLAGS))
20
21ifeq ($(MEMORYSIZE),2)
22 CODECLIBFLAGS += -Os
23else ifeq ($(ARCH),arch_m68k)
24 CODECLIBFLAGS += -O2
25else
26 CODECLIBFLAGS += -O1
27endif
28
29# Do not use '-ffunction-sections' when compiling sdl-sim
30ifneq ($(findstring sdl-sim, $(APP_TYPE)), sdl-sim)
31 CODECLIBFLAGS += -ffunction-sections
32endif
33
34$(CODECDIR)/lib/%.o: $(ROOTDIR)/apps/codecs/lib/%.c
35 $(SILENT)mkdir -p $(dir $@)
36 $(call PRINTS,CC $(subst $(ROOTDIR)/,,$<))$(CC) \
37 -I$(dir $<) $(CODECLIBFLAGS) -c $< -o $@
diff --git a/apps/codecs/lib/mdct.c b/apps/codecs/lib/mdct.c
deleted file mode 100644
index 777aec4a55..0000000000
--- a/apps/codecs/lib/mdct.c
+++ /dev/null
@@ -1,644 +0,0 @@
1/*
2 * Fixed Point IMDCT
3 * Copyright (c) 2002 The FFmpeg Project.
4 * Copyright (c) 2010 Dave Hooper, Mohamed Tarek, Michael Giacomelli
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "codeclib.h"
22#include "mdct.h"
23#include "codeclib_misc.h"
24#include "mdct_lookup.h"
25
26#ifndef ICODE_ATTR_TREMOR_MDCT
27#define ICODE_ATTR_TREMOR_MDCT ICODE_ATTR
28#endif
29
30/**
31 * Compute the middle half of the inverse MDCT of size N = 2^nbits
32 * thus excluding the parts that can be derived by symmetry
33 * @param output N/2 samples
34 * @param input N/2 samples
35 *
36 * NOTE - CANNOT CURRENTLY OPERATE IN PLACE (input and output must
37 * not overlap or intersect at all)
38 */
39void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input) ICODE_ATTR_TREMOR_MDCT;
/* Fixed-point half-IMDCT: pre-rotate + bit-reverse the input into 'output',
   run the N/4-point complex FFT in place, then post-rotate.  The ColdFire
   branches are EMAC-pipelined equivalents of the adjacent C loops
   (NOTE(review): asm assumed to mirror the C path — comment-only edits). */
40void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input)
41{
42    int n8, n4, n2, n, j;
43    const fixed32 *in1, *in2;
44    (void)j;    /* j is only used by the non-ColdFire paths */
45    n = 1 << nbits;
46
47    n2 = n >> 1;
48    n4 = n >> 2;
49    n8 = n >> 3;
50
51    FFTComplex *z = (FFTComplex *)output;
52
53    /* pre rotation */
54    in1 = input;
55    in2 = input + n2 - 1;
56
57    /* revtab comes from the fft; revtab table is sized for N=4096 size fft = 2^12.
58       The fft is size N/4 so s->nbits-2, so our shift needs to be (12-(nbits-2)) */
59    const int revtab_shift = (14- nbits);
60
61    /* bitreverse reorder the input and rotate;   result here is in OUTPUT ... */
62    /* (note that when using the current split radix, the bitreverse ordering is
63        complex, meaning that this reordering cannot easily be done in-place) */
64    /* Using the following pdf, you can see that it is possible to rearrange
65       the 'classic' pre/post rotate with an alternative one that enables
66       us to use fewer distinct twiddle factors.
67       http://www.eurasip.org/Proceedings/Eusipco/Eusipco2006/papers/1568980508.pdf
68
69       For prerotation, the factors are just sin,cos(2PI*i/N)
70       For postrotation, the factors are sin,cos(2PI*(i+1/4)/N)
71
72       Therefore, prerotation can immediately reuse the same twiddles as fft
73       (for postrotation it's still a bit complex, we reuse the fft trig tables
74       where we can, or a special table for N=2048, or interpolate between
75       trig tables for N>2048)
76    */
77    const int32_t *T = sincos_lookup0;
78    const int step = 2<<(12-nbits);
79    const uint16_t * p_revtab=revtab;
    /* first quarter of the pre-rotation: T walks forward through the table */
80    {
81        const uint16_t * const p_revtab_end = p_revtab + n8;
82#ifdef CPU_COLDFIRE
        /* EMAC-pipelined version of the XNPROD31 loop in the #else branch;
           two revtab entries are unpacked from one 32-bit load (%[p_revtab])+ */
83        asm volatile ("move.l (%[in2]), %%d0\n\t"
84                      "move.l (%[in1]), %%d1\n\t"
85                      "bra.s 1f\n\t"
86                      "0:\n\t"
87                      "movem.l (%[T]), %%d2-%%d3\n\t"
88
89                      "addq.l #8, %[in1]\n\t"
90                      "subq.l #8, %[in2]\n\t"
91
92                      "lea (%[step]*4, %[T]), %[T]\n\t"
93
94                      "mac.l %%d0, %%d3, (%[T]), %%d4, %%acc0;"
95                      "msac.l %%d1, %%d2, (4, %[T]), %%d5, %%acc0;"
96                      "mac.l %%d1, %%d3, (%[in1]), %%d1, %%acc1;"
97                      "mac.l %%d0, %%d2, (%[in2]), %%d0, %%acc1;"
98
99                      "addq.l #8, %[in1]\n\t"
100                      "subq.l #8, %[in2]\n\t"
101
102                      "mac.l %%d0, %%d5, %%acc2;"
103                      "msac.l %%d1, %%d4, (%[p_revtab])+, %%d2, %%acc2;"
104                      "mac.l %%d1, %%d5, (%[in1]), %%d1, %%acc3;"
105                      "mac.l %%d0, %%d4, (%[in2]), %%d0, %%acc3;"
106
107                      "clr.l %%d3\n\t"
108                      "move.w %%d2, %%d3\n\t"
109                      "eor.l %%d3, %%d2\n\t"
110                      "swap %%d2\n\t"
111                      "lsr.l %[revtab_shift], %%d2\n\t"
112
113                      "movclr.l %%acc0, %%d4;"
114                      "movclr.l %%acc1, %%d5;"
115                      "lsl.l #3, %%d2\n\t"
116                      "lea (%%d2, %[z]), %%a1\n\t"
117                      "movem.l %%d4-%%d5, (%%a1)\n\t"
118
119                      "lsr.l %[revtab_shift], %%d3\n\t"
120
121                      "movclr.l %%acc2, %%d4;"
122                      "movclr.l %%acc3, %%d5;"
123                      "lsl.l #3, %%d3\n\t"
124                      "lea (%%d3, %[z]), %%a1\n\t"
125                      "movem.l %%d4-%%d5, (%%a1)\n\t"
126
127                      "lea (%[step]*4, %[T]), %[T]\n\t"
128
129                      "1:\n\t"
130                      "cmp.l %[p_revtab_end], %[p_revtab]\n\t"
131                      "bcs.s 0b\n\t"
132                      : [in1] "+a" (in1), [in2] "+a" (in2), [T] "+a" (T),
133                      [p_revtab] "+a" (p_revtab)
134                      : [z] "a" (z), [step] "d" (step), [revtab_shift] "d" (revtab_shift),
135                      [p_revtab_end] "r" (p_revtab_end)
136                      : "d0", "d1", "d2", "d3", "d4", "d5", "a1", "cc", "memory");
137#else
138        while(LIKELY(p_revtab < p_revtab_end))
139        {
140            j = (*p_revtab)>>revtab_shift;
141            XNPROD31(*in2, *in1, T[1], T[0], &z[j].re, &z[j].im );
142            T += step;
143            in1 += 2;
144            in2 -= 2;
145            p_revtab++;
146            j = (*p_revtab)>>revtab_shift;
147            XNPROD31(*in2, *in1, T[1], T[0], &z[j].re, &z[j].im );
148            T += step;
149            in1 += 2;
150            in2 -= 2;
151            p_revtab++;
152        }
153#endif
154    }
    /* second quarter: same rotation but T now walks backwards
       (step is negated in the asm constraint / T -= step in C) */
155    {
156        const uint16_t * const p_revtab_end = p_revtab + n8;
157#ifdef CPU_COLDFIRE
158        asm volatile ("move.l (%[in2]), %%d0\n\t"
159                      "move.l (%[in1]), %%d1\n\t"
160                      "bra.s 1f\n\t"
161                      "0:\n\t"
162                      "movem.l (%[T]), %%d2-%%d3\n\t"
163
164                      "addq.l #8, %[in1]\n\t"
165                      "subq.l #8, %[in2]\n\t"
166
167                      "lea (%[step]*4, %[T]), %[T]\n\t"
168
169                      "mac.l %%d0, %%d2, (%[T]), %%d4, %%acc0;"
170                      "msac.l %%d1, %%d3, (4, %[T]), %%d5, %%acc0;"
171                      "mac.l %%d1, %%d2, (%[in1]), %%d1, %%acc1;"
172                      "mac.l %%d0, %%d3, (%[in2]), %%d0, %%acc1;"
173
174                      "addq.l #8, %[in1]\n\t"
175                      "subq.l #8, %[in2]\n\t"
176
177                      "mac.l %%d0, %%d4, %%acc2;"
178                      "msac.l %%d1, %%d5, (%[p_revtab])+, %%d2, %%acc2;"
179                      "mac.l %%d1, %%d4, (%[in1]), %%d1, %%acc3;"
180                      "mac.l %%d0, %%d5, (%[in2]), %%d0, %%acc3;"
181
182                      "clr.l %%d3\n\t"
183                      "move.w %%d2, %%d3\n\t"
184                      "eor.l %%d3, %%d2\n\t"
185                      "swap %%d2\n\t"
186                      "lsr.l %[revtab_shift], %%d2\n\t"
187
188                      "movclr.l %%acc0, %%d4;"
189                      "movclr.l %%acc1, %%d5;"
190                      "lsl.l #3, %%d2\n\t"
191                      "lea (%%d2, %[z]), %%a1\n\t"
192                      "movem.l %%d4-%%d5, (%%a1)\n\t"
193
194                      "lsr.l %[revtab_shift], %%d3\n\t"
195
196                      "movclr.l %%acc2, %%d4;"
197                      "movclr.l %%acc3, %%d5;"
198                      "lsl.l #3, %%d3\n\t"
199                      "lea (%%d3, %[z]), %%a1\n\t"
200                      "movem.l %%d4-%%d5, (%%a1)\n\t"
201
202                      "lea (%[step]*4, %[T]), %[T]\n\t"
203
204                      "1:\n\t"
205                      "cmp.l %[p_revtab_end], %[p_revtab]\n\t"
206                      "bcs.s 0b\n\t"
207                      : [in1] "+a" (in1), [in2] "+a" (in2), [T] "+a" (T),
208                      [p_revtab] "+a" (p_revtab)
209                      : [z] "a" (z), [step] "d" (-step), [revtab_shift] "d" (revtab_shift),
210                      [p_revtab_end] "r" (p_revtab_end)
211                      : "d0", "d1", "d2", "d3", "d4", "d5", "a1", "cc", "memory");
212#else
213        while(LIKELY(p_revtab < p_revtab_end))
214        {
215            j = (*p_revtab)>>revtab_shift;
216            XNPROD31(*in2, *in1, T[0], T[1], &z[j].re, &z[j].im);
217            T -= step;
218            in1 += 2;
219            in2 -= 2;
220            p_revtab++;
221            j = (*p_revtab)>>revtab_shift;
222            XNPROD31(*in2, *in1, T[0], T[1], &z[j].re, &z[j].im);
223            T -= step;
224            in1 += 2;
225            in2 -= 2;
226            p_revtab++;
227        }
228#endif
229    }
230
231
232    /* ... and so fft runs in OUTPUT buffer */
233    ff_fft_calc_c(nbits-2, z);
234
235    /* post rotation + reordering. now keeps the result within the OUTPUT buffer */
236    switch( nbits )
237    {
238        default:
            /* n <= 2048: twiddles come straight out of one lookup table,
               walked from both ends towards the middle */
239        {
240            fixed32 * z1 = (fixed32 *)(&z[0]);
241            int magic_step = step>>2;
242            int newstep;
243            if(n<=1024)
244            {
245                T = sincos_lookup0 + magic_step;
246                newstep = step>>1;
247            }
248            else
249            {
250                T = sincos_lookup1;
251                newstep = 2;
252            }
253
254#ifdef CPU_COLDFIRE
            /* two asm variants: newstep==2 uses (%[T])+ post-increment,
               otherwise the stride is applied with lea */
255            fixed32 * z2 = (fixed32 *)(&z[n4]);
256            int c = n4;
257            if (newstep == 2)
258            {
259                asm volatile ("movem.l (%[z1]), %%d0-%%d1\n\t"
260                              "addq.l #8, %[z1]\n\t"
261                              "movem.l (%[T]), %%d2-%%d3\n\t"
262                              "addq.l #8, %[T]\n\t"
263                              "bra.s 1f\n\t"
264                              "0:\n\t"
265                              "msac.l %%d1, %%d2, (%[T])+, %%a3, %%acc0\n\t"
266                              "mac.l %%d0, %%d3, (%[T])+, %%a4, %%acc0\n\t"
267
268                              "msac.l %%d1, %%d3, -(%[z2]), %%d1, %%acc1\n\t"
269                              "msac.l %%d0, %%d2, -(%[z2]), %%d0, %%acc1\n\t"
270
271                              "msac.l %%d1, %%a4, (%[T])+, %%d2, %%acc2\n\t"
272                              "mac.l %%d0, %%a3, (%[T])+, %%d3, %%acc2\n\t"
273                              "msac.l %%d0, %%a4, (%[z1])+, %%d0, %%acc3\n\t"
274                              "msac.l %%d1, %%a3, (%[z1])+, %%d1, %%acc3\n\t"
275
276                              "movclr.l %%acc0, %%a3\n\t"
277                              "movclr.l %%acc3, %%a4\n\t"
278                              "movem.l %%a3-%%a4, (-16, %[z1])\n\t"
279
280                              "movclr.l %%acc1, %%a4\n\t"
281                              "movclr.l %%acc2, %%a3\n\t"
282                              "movem.l %%a3-%%a4, (%[z2])\n\t"
283
284                              "subq.l #2, %[n]\n\t"
285                              "1:\n\t"
286                              "bhi.s 0b\n\t"
287                              : [z1] "+a" (z1), [z2] "+a" (z2), [T] "+a" (T), [n] "+d" (c)
288                              :
289                              : "d0", "d1", "d2", "d3", "a3", "a4", "cc", "memory");
290            }
291            else
292            {
293                asm volatile ("movem.l (%[z1]), %%d0-%%d1\n\t"
294                              "addq.l #8, %[z1]\n\t"
295                              "movem.l (%[T]), %%d2-%%d3\n\t"
296                              "lea (%[newstep]*4, %[T]), %[T]\n\t"
297                              "bra.s 1f\n\t"
298                              "0:\n\t"
299                              "msac.l %%d1, %%d2, (%[T]), %%a3, %%acc0\n\t"
300                              "mac.l %%d0, %%d3, (4, %[T]), %%a4, %%acc0\n\t"
301                              "msac.l %%d1, %%d3, -(%[z2]), %%d1, %%acc1\n\t"
302                              "msac.l %%d0, %%d2, -(%[z2]), %%d0, %%acc1\n\t"
303
304                              "lea (%[newstep]*4, %[T]), %[T]\n\t"
305                              "msac.l %%d1, %%a4, (%[T]), %%d2, %%acc2\n\t"
306                              "mac.l %%d0, %%a3, (4, %[T]), %%d3, %%acc2\n\t"
307                              "msac.l %%d0, %%a4, (%[z1])+, %%d0, %%acc3\n\t"
308                              "msac.l %%d1, %%a3, (%[z1])+, %%d1, %%acc3\n\t"
309
310                              "lea (%[newstep]*4, %[T]), %[T]\n\t"
311
312                              "movclr.l %%acc0, %%a3\n\t"
313                              "movclr.l %%acc3, %%a4\n\t"
314                              "movem.l %%a3-%%a4, (-16, %[z1])\n\t"
315
316                              "movclr.l %%acc1, %%a4\n\t"
317                              "movclr.l %%acc2, %%a3\n\t"
318                              "movem.l %%a3-%%a4, (%[z2])\n\t"
319
320                              "subq.l #2, %[n]\n\t"
321                              "1:\n\t"
322                              "bhi.s 0b\n\t"
323                              : [z1] "+a" (z1), [z2] "+a" (z2), [T] "+a" (T), [n] "+d" (c)
324                              : [newstep] "d" (newstep)
325                              : "d0", "d1", "d2", "d3", "a3", "a4", "cc", "memory");
326            }
327#else
328            fixed32 * z2 = (fixed32 *)(&z[n4-1]);
329            while(z1<z2)
330            {
331                fixed32 r0,i0,r1,i1;
332                XNPROD31_R(z1[1], z1[0], T[0], T[1], r0, i1 ); T+=newstep;
333                XNPROD31_R(z2[1], z2[0], T[1], T[0], r1, i0 ); T+=newstep;
334                z1[0] = -r0;
335                z1[1] = -i0;
336                z2[0] = -r1;
337                z2[1] = -i1;
338                z1+=2;
339                z2-=2;
340            }
341#endif
342            break;
343        }
344
345        case 12: /* n=4096 */
346        {
347            /* linear interpolation (50:50) between sincos_lookup0 and sincos_lookup1 */
348            const int32_t * V = sincos_lookup1;
349            T = sincos_lookup0;
350            int32_t t0,t1,v0,v1;
351            fixed32 * z1 = (fixed32 *)(&z[0]);
352            fixed32 * z2 = (fixed32 *)(&z[n4-1]);
353
354            t0 = T[0]>>1; t1=T[1]>>1;
355
356            while(z1<z2)
357            {
358                fixed32 r0,i0,r1,i1;
359                t0 += (v0 = (V[0]>>1));
360                t1 += (v1 = (V[1]>>1));
361                XNPROD31_R(z1[1], z1[0], t0, t1, r0, i1 );
362                T+=2;
363                v0 += (t0 = (T[0]>>1));
364                v1 += (t1 = (T[1]>>1));
365                XNPROD31_R(z2[1], z2[0], v1, v0, r1, i0 );
366                z1[0] = -r0;
367                z1[1] = -i0;
368                z2[0] = -r1;
369                z2[1] = -i1;
370                z1+=2;
371                z2-=2;
372                V+=2;
373            }
374
375            break;
376        }
377
378        case 13: /* n = 8192 */
379        {
380            /* weight linear interpolation between sincos_lookup0 and sincos_lookup1
381               specifically: 25:75 for first twiddle and 75:25 for second twiddle */
382            const int32_t * V = sincos_lookup1;
383            T = sincos_lookup0;
384            int32_t t0,t1,v0,v1,q0,q1;
385            fixed32 * z1 = (fixed32 *)(&z[0]);
386            fixed32 * z2 = (fixed32 *)(&z[n4-1]);
387
388            t0 = T[0]; t1=T[1];
389
390            while(z1<z2)
391            {
392                fixed32 r0,i0,r1,i1;
393                v0 = V[0]; v1 = V[1];
394                t0 += (q0 = (v0-t0)>>1);
395                t1 += (q1 = (v1-t1)>>1);
396                XNPROD31_R(z1[1], z1[0], t0, t1, r0, i1 );
397                t0 = v0-q0;
398                t1 = v1-q1;
399                XNPROD31_R(z2[1], z2[0], t1, t0, r1, i0 );
400                z1[0] = -r0;
401                z1[1] = -i0;
402                z2[0] = -r1;
403                z2[1] = -i1;
404                z1+=2;
405                z2-=2;
406                T+=2;
407
408                t0 = T[0]; t1 = T[1];
409                v0 += (q0 = (t0-v0)>>1);
410                v1 += (q1 = (t1-v1)>>1);
411                XNPROD31_R(z1[1], z1[0], v0, v1, r0, i1 );
412                v0 = t0-q0;
413                v1 = t1-q1;
414                XNPROD31_R(z2[1], z2[0], v1, v0, r1, i0 );
415                z1[0] = -r0;
416                z1[1] = -i0;
417                z2[0] = -r1;
418                z2[1] = -i1;
419                z1+=2;
420                z2-=2;
421                V+=2;
422            }
423
424            break;
425        }
426    }
427}
428
429/**
430 * Compute inverse MDCT of size N = 2^nbits
431 * @param output N samples
432 * @param input N/2 samples
433 * "In-place" processing can be achieved provided that:
434 * [0 .. N/2-1 | N/2 .. N-1 ]
435 * <----input---->
436 * <-----------output----------->
437 *
438 * The result of ff_imdct_half is to put the 'half' imdct here
439 *
440 * N/2 N-1
441 * <--half imdct-->
442 *
443 * We want it here for the full imdct:
444 * N/4 3N/4-1
445 * <-------------->
446 *
447 * In addition we need to apply two symmetries to get the full imdct:
448 *
449 * <AAAAAA> <DDDDDD>
450 * <BBBBBB><CCCCCC>
451 *
452 * D is a reflection of C
453 * A is a reflection of B (but with sign flipped)
454 *
455 * We process the symmetries at the same time as we 'move' the half imdct
456 * from [N/2,N-1] to [N/4,3N/4-1]
457 *
458 * TODO: find a way to make ff_imdct_half put the result in [N/4..3N/4-1]
459 * This would require being able to use revtab 'inplace' (since the input
460 * and output of imdct_half would then overlap somewhat)
461 */
/* Full inverse MDCT of size N = 2^nbits; see the layout diagram in the
 * comment above.  output: N samples, input: N/2 samples.  "In-place" use is
 * possible because the input occupies only the first half of the output
 * buffer.  The declaration carries ICODE_ATTR_TREMOR_MDCT, presumably to
 * place the function in fast IRAM on targets that define the attribute. */
void ff_imdct_calc(unsigned int nbits, fixed32 *output, const fixed32 *input) ICODE_ATTR_TREMOR_MDCT;
#ifndef CPU_ARM
/* Canonical C implementation (with a ColdFire asm fast path); the ARM
 * version below mirrors this structure in assembly. */
void ff_imdct_calc(unsigned int nbits, fixed32 *output, const fixed32 *input)
{
    const int n = (1<<nbits);   /* full transform size N */
    const int n2 = (n>>1);      /* N/2 */
    const int n4 = (n>>2);      /* N/4 */

    /* tell imdct_half to put the output in [N/2..3N/4-1] i.e. output+n2 */
    ff_imdct_half(nbits,output+n2,input);

    fixed32 * in_r, * in_r2, * out_r, * out_r2;

    /* Copy BBBB to AAAA, reflected and sign-flipped.
       Also copy BBBB to its correct destination (from [N/2..3N/4-1] to [N/4..N/2-1]) */
    /* NOTE(review): both loops move 8 (resp. 4) words per iteration, which
       assumes n4 is a multiple of 8, i.e. nbits >= 5 -- confirm against the
       smallest transform size used by callers. */
    out_r = output;
    out_r2 = output+n2-8;
    in_r = output+n2+n4-8;
    while(out_r<out_r2)
    {
#if defined CPU_COLDFIRE
        /* Same as the C fallback below: copy 8 words straight to out_r2 and
           write their negations in reversed order at out_r; pointer updates
           happen inside the asm via the lea instructions. */
        asm volatile(
            "movem.l (%[in_r]), %%d0-%%d7\n\t"
            "movem.l %%d0-%%d7, (%[out_r2])\n\t"
            "neg.l %%d7\n\t"
            "move.l %%d7, (%[out_r])+\n\t"
            "neg.l %%d6\n\t"
            "move.l %%d6, (%[out_r])+\n\t"
            "neg.l %%d5\n\t"
            "move.l %%d5, (%[out_r])+\n\t"
            "neg.l %%d4\n\t"
            "move.l %%d4, (%[out_r])+\n\t"
            "neg.l %%d3\n\t"
            "move.l %%d3, (%[out_r])+\n\t"
            "neg.l %%d2\n\t"
            "move.l %%d2, (%[out_r])+\n\t"
            "lea.l (-8*4, %[in_r]), %[in_r]\n\t"
            "neg.l %%d1\n\t"
            "move.l %%d1, (%[out_r])+\n\t"
            "lea.l (-8*4, %[out_r2]), %[out_r2]\n\t"
            "neg.l %%d0\n\t"
            "move.l %%d0, (%[out_r])+\n\t"
            : [in_r] "+a" (in_r), [out_r] "+a" (out_r), [out_r2] "+a" (out_r2)
            :
            : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "cc", "memory" );
#else
        /* A[i] = -B[n4-1-i] (reversed, negated); out_r2 gets the straight
           copy of B into its final position. */
        out_r[0] = -(out_r2[7] = in_r[7]);
        out_r[1] = -(out_r2[6] = in_r[6]);
        out_r[2] = -(out_r2[5] = in_r[5]);
        out_r[3] = -(out_r2[4] = in_r[4]);
        out_r[4] = -(out_r2[3] = in_r[3]);
        out_r[5] = -(out_r2[2] = in_r[2]);
        out_r[6] = -(out_r2[1] = in_r[1]);
        out_r[7] = -(out_r2[0] = in_r[0]);
        in_r -= 8;
        out_r += 8;
        out_r2 -= 8;
#endif
    }
    /* Second phase: produce the D region from C (see comment below). */
    in_r = output + n2+n4;
    in_r2 = output + n-4;
    out_r = output + n2;
    out_r2 = output + n2 + n4 - 4;
    while(in_r<in_r2)
    {
#if defined CPU_COLDFIRE
        /* Same operation as the C fallback: copy ^c to ^a, ^d to ^b, then
           swap-and-reverse ^c/^d in place (see the diagram below). */
        asm volatile(
            "movem.l (%[in_r]), %%d0-%%d3\n\t"
            "movem.l %%d0-%%d3, (%[out_r])\n\t"
            "movem.l (%[in_r2]), %%d4-%%d7\n\t"
            "movem.l %%d4-%%d7, (%[out_r2])\n\t"
            "move.l %%d0, %%a3\n\t"
            "move.l %%d3, %%d0\n\t"
            "move.l %%d1, %%d3\n\t"
            "movem.l %%d0/%%d2-%%d3/%%a3, (%[in_r2])\n\t"
            "move.l %%d7, %%d1\n\t"
            "move.l %%d6, %%d2\n\t"
            "move.l %%d5, %%d3\n\t"
            "movem.l %%d1-%%d4, (%[in_r])\n\t"
            "lea.l (4*4, %[in_r]), %[in_r]\n\t"
            "lea.l (-4*4, %[in_r2]), %[in_r2]\n\t"
            "lea.l (4*4, %[out_r]), %[out_r]\n\t"
            "lea.l (-4*4, %[out_r2]), %[out_r2]\n\t"
            : [in_r] "+a" (in_r), [in_r2] "+a" (in_r2),
              [out_r] "+a" (out_r), [out_r2] "+a" (out_r2)
            :
            : "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "a3", "memory", "cc" );
#else
        register fixed32 t0,t1,t2,t3;
        register fixed32 s0,s1,s2,s3;

        /* Copy and reflect CCCC to DDDD. Because CCCC is already where
           we actually want to put DDDD this is a bit complicated.
         * So simultaneously do the following things:
         * 1. copy range from [n2+n4 .. n-1] to range[n2 .. n2+n4-1]
         * 2. reflect range from [n2+n4 .. n-1] inplace
         *
         *  [      |      ]
         *   ^a -> <- ^b  ^c -> <- ^d
         *
         *  #1: copy from ^c to ^a
         *  #2: copy from ^d to ^b
         *  #3: swap ^c and ^d in place
         */
        /* #1 pt1 : load 4 words from ^c. */
        t0=in_r[0]; t1=in_r[1]; t2=in_r[2]; t3=in_r[3];
        /* #1 pt2 : write to ^a */
        out_r[0]=t0;out_r[1]=t1;out_r[2]=t2;out_r[3]=t3;
        /* #2 pt1 : load 4 words from ^d */
        s0=in_r2[0];s1=in_r2[1];s2=in_r2[2];s3=in_r2[3];
        /* #2 pt2 : write to ^b */
        out_r2[0]=s0;out_r2[1]=s1;out_r2[2]=s2;out_r2[3]=s3;
        /* #3 pt1 : write words from #2 to ^c */
        in_r[0]=s3;in_r[1]=s2;in_r[2]=s1;in_r[3]=s0;
        /* #3 pt2 : write words from #1 to ^d */
        in_r2[0]=t3;in_r2[1]=t2;in_r2[2]=t1;in_r2[3]=t0;

        in_r += 4;
        in_r2 -= 4;
        out_r += 4;
        out_r2 -= 4;
#endif
    }
}
#else
/* Follows the same structure as the canonical version above */
/* ARM inline-asm version of ff_imdct_calc.  Same contract: output is N
 * samples, input N/2 samples, in-place capable.  NOTE(review): like the
 * generic version, the loops transfer 8 (resp. 4) words per iteration and
 * therefore assume n4 is a multiple of 8 -- confirm for the smallest nbits
 * used by callers. */
void ff_imdct_calc(unsigned int nbits, fixed32 *output, const fixed32 *input)
{
    const int n = (1<<nbits);   /* full transform size N */
    const int n2 = (n>>1);      /* N/2 */
    const int n4 = (n>>2);      /* N/4 */

    /* half-imdct lands in [N/2 .. N-1] */
    ff_imdct_half(nbits,output+n2,input);

    fixed32 * in_r, * in_r2, * out_r, * out_r2;

    /* Phase 1: copy B down one quarter and write its reversed negation
       into A (pointers advance inside the asm via the '!' writebacks). */
    out_r = output;
    out_r2 = output+n2;
    in_r = output+n2+n4;
    while(out_r<out_r2)
    {
        asm volatile(
            "ldmdb %[in_r]!, {r0-r7}\n\t"     /* load 8 words, descending */
            "stmdb %[out_r2]!, {r0-r7}\n\t"   /* straight copy to B's slot */
            "rsb r8,r0,#0\n\t"                /* negate and reverse r0-r7 */
            "rsb r0,r7,#0\n\t"
            "rsb r7,r1,#0\n\t"
            "rsb r1,r6,#0\n\t"
            "rsb r6,r2,#0\n\t"
            "rsb r2,r5,#0\n\t"
            "rsb r5,r3,#0\n\t"
            "rsb r3,r4,#0\n\t"
            "stmia %[out_r]!, {r0-r3,r5-r8}\n\t"
            : [in_r] "+r" (in_r), [out_r] "+r" (out_r), [out_r2] "+r" (out_r2)
            :
            : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "memory" );
    }
    /* Phase 2: copy C down to its final slot and swap-reverse C/D in place
       (mirrors the #1/#2/#3 steps documented in the generic version). */
    in_r = output + n2+n4;
    in_r2 = output + n;
    out_r = output + n2;
    out_r2 = output + n2 + n4;
    while(in_r<in_r2)
    {
        asm volatile(
            "ldmia %[in_r], {r0-r3}\n\t"
            "stmia %[out_r]!, {r0-r3}\n\t"
            "ldmdb %[in_r2], {r5-r8}\n\t"
            "stmdb %[out_r2]!, {r5-r8}\n\t"
            "mov r4,r0\n\t"                   /* reverse r0-r3 via r4 */
            "mov r0,r3\n\t"
            "mov r3,r1\n\t"
            "stmdb %[in_r2]!, {r0,r2,r3,r4}\n\t"
            "mov r4,r8\n\t"                   /* reverse r5-r8 via r4 */
            "mov r8,r5\n\t"
            "mov r5,r7\n\t"
            "stmia %[in_r]!, {r4,r5,r6,r8}\n\t"
            :
            [in_r] "+r" (in_r), [in_r2] "+r" (in_r2), [out_r] "+r" (out_r), [out_r2] "+r" (out_r2)
            :
            : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "memory" );
    }
}
#endif
diff --git a/apps/codecs/lib/mdct.h b/apps/codecs/lib/mdct.h
deleted file mode 100644
index 48d1c25a55..0000000000
--- a/apps/codecs/lib/mdct.h
+++ /dev/null
@@ -1,139 +0,0 @@
1/*
2 * WMA compatible decoder
3 * Copyright (c) 2002 The FFmpeg Project.
4 *
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
8 * version 2 of the License, or (at your option) any later version.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef CODECLIB_MDCT_H_INCLUDED
21#define CODECLIB_MDCT_H_INCLUDED
22
23//#include "types.h"
24#include "fft.h"
25
26void ff_imdct_calc(unsigned int nbits, fixed32 *output, const fixed32 *input);
27void ff_imdct_half(unsigned int nbits, fixed32 *output, const fixed32 *input);
28
#ifdef CPU_ARM

/* fixmul32b(x, y): fixed-point multiply returning the top bits of the
 * 64-bit product shifted left by one, i.e. (x*y) >> 31.
 * NOTE(review): the original comment here said "Sign-15.16 format", but
 * the >>31 scaling in all three variants corresponds to Q31 operands
 * (sign + 31 fraction bits) -- confirm against callers. */
#define fixmul32b(x, y) \
    ({ int32_t __hi;  \
       uint32_t __lo;  \
       int32_t __result;  \
       asm ("smull %0, %1, %3, %4\n\t" \
            "mov %2, %1, lsl #1" \
            : "=&r" (__lo), "=&r" (__hi), "=r" (__result) \
            : "%r" (x), "r" (y) \
            : "cc" ); \
       __result; \
    })

#elif defined(CPU_COLDFIRE)

/* ColdFire EMAC variant: mac.l accumulates the product into acc0 and
 * movclr.l reads back (and clears) the accumulator's high half.
 * NOTE(review): matching the (x*y)>>31 result relies on the EMAC being in
 * fractional mode, which must be configured elsewhere -- verify. */
static inline int32_t fixmul32b(int32_t x, int32_t y)
{
    asm (
        "mac.l %[x], %[y], %%acc0 \n" /* multiply */
        "movclr.l %%acc0, %[x] \n" /* get higher half */
        : [x] "+d" (x)
        : [y] "d" (y)
    );
    return x;
}

#else

/* Portable fallback: widen to 64 bits, multiply, take (x*y) >> 31. */
static inline fixed32 fixmul32b(fixed32 x, fixed32 y)
{
    fixed64 temp;

    temp = x;
    temp *= y;

    temp >>= 31; /* keep 31 fractional bits */

    return (fixed32)temp;
}
#endif
71
72
#ifdef CPU_ARM
/* Complex multiply, ARM asm: computes
 *   *x = (a*t - b*v) >> 31   (real part)
 *   *y = (b*t + a*v) >> 31   (imaginary part)
 * i.e. (*x + i * *y) = (a + i*b) * (t + i*v) with Q31 scaling, matching
 * fixmul32b.  Only the high words of the 64-bit products are kept; the
 * final <<1 restores the Q31 scale. */
static inline
void CMUL(fixed32 *x, fixed32 *y,
          fixed32 a, fixed32 b,
          fixed32 t, fixed32 v)
{
    /* This version loses one bit of precision. Could be solved at the cost
     * of 2 extra cycles if it becomes an issue. */
    int x1, y1, l;
    asm(
        "smull %[l], %[y1], %[b], %[t] \n"   /* y1:l = b*t */
        "smlal %[l], %[y1], %[a], %[v] \n"   /* y1:l += a*v */
        "rsb %[b], %[b], #0 \n"              /* b = -b */
        "smull %[l], %[x1], %[a], %[t] \n"   /* x1:l = a*t */
        "smlal %[l], %[x1], %[b], %[v] \n"   /* x1:l += (-b)*v */
        : [l] "=&r" (l), [x1]"=&r" (x1), [y1]"=&r" (y1), [b] "+r" (b)
        : [a] "r" (a), [t] "r" (t), [v] "r" (v)
        : "cc"
    );
    *x = x1 << 1;
    *y = y1 << 1;
}
#elif defined CPU_COLDFIRE
/* Complex multiply, ColdFire EMAC: acc0 = a*t - b*v (real part written to
 * *x), acc1 = b*t + a*v (imaginary part written to *y), same contract as
 * the ARM version above.  NOTE(review): as with fixmul32b, the Q31 result
 * scaling presumes the EMAC is in fractional mode (set up elsewhere) --
 * verify. */
static inline
void CMUL(fixed32 *x, fixed32 *y,
          fixed32 a, fixed32 b,
          fixed32 t, fixed32 v)
{
    asm volatile ("mac.l %[a], %[t], %%acc0;"
                  "msac.l %[b], %[v], %%acc0;"
                  "mac.l %[b], %[t], %%acc1;"
                  "mac.l %[a], %[v], %%acc1;"
                  "movclr.l %%acc0, %[a];"
                  "move.l %[a], (%[x]);"
                  "movclr.l %%acc1, %[a];"
                  "move.l %[a], (%[y]);"
                  : [a] "+&r" (a)
                  : [x] "a" (x), [y] "a" (y),
                    [b] "r" (b), [t] "r" (t), [v] "r" (v)
                  : "cc", "memory");
}
114#else
115static inline
116void CMUL(fixed32 *pre,
117 fixed32 *pim,
118 fixed32 are,
119 fixed32 aim,
120 fixed32 bre,
121 fixed32 bim)
122{
123 //int64_t x,y;
124 fixed32 _aref = are;
125 fixed32 _aimf = aim;
126 fixed32 _bref = bre;
127 fixed32 _bimf = bim;
128 fixed32 _r1 = fixmul32b(_bref, _aref);
129 fixed32 _r2 = fixmul32b(_bimf, _aimf);
130 fixed32 _r3 = fixmul32b(_bref, _aimf);
131 fixed32 _r4 = fixmul32b(_bimf, _aref);
132 *pre = _r1 - _r2;
133 *pim = _r3 + _r4;
134
135}
136#endif
137
138
139#endif // CODECLIB_MDCT_H_INCLUDED
diff --git a/apps/codecs/lib/mdct_lookup.c b/apps/codecs/lib/mdct_lookup.c
deleted file mode 100644
index a8ca748206..0000000000
--- a/apps/codecs/lib/mdct_lookup.c
+++ /dev/null
@@ -1,872 +0,0 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2009 Michael Giacomelli
11 *
12 *
13 *
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version 2
17 * of the License, or (at your option) any later version.
18 *
19 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
20 * KIND, either express or implied.
21 *
22 ****************************************************************************/
23
24#ifdef ROCKBOX
25#include <codecs/lib/codeclib.h>
26#else
27#include <stdlib.h>
28#include <stdint.h>
29#endif /* ROCKBOX */
30
/* {sin(2*i*PI/4096), cos(2*i*PI/4096)} pairs, with i = 0 to 512.
 * Interleaved layout: entry 2*i is the sine, entry 2*i+1 the cosine,
 * in Q31 fixed point (0x7fffffff ~ +1.0; the last pair is
 * sin(pi/4) = cos(pi/4) = 0x5a82799a ~ 0.7071).  ICONST_ATTR presumably
 * places the table in a fast const/IRAM section on targets defining it. */
const int32_t sincos_lookup0[1026] ICONST_ATTR = {
    0x00000000, 0x7fffffff, 0x003243f5, 0x7ffff621,
    0x006487e3, 0x7fffd886, 0x0096cbc1, 0x7fffa72c,
    0x00c90f88, 0x7fff6216, 0x00fb5330, 0x7fff0943,
    0x012d96b1, 0x7ffe9cb2, 0x015fda03, 0x7ffe1c65,
    0x01921d20, 0x7ffd885a, 0x01c45ffe, 0x7ffce093,
    0x01f6a297, 0x7ffc250f, 0x0228e4e2, 0x7ffb55ce,
    0x025b26d7, 0x7ffa72d1, 0x028d6870, 0x7ff97c18,
    0x02bfa9a4, 0x7ff871a2, 0x02f1ea6c, 0x7ff75370,
    0x03242abf, 0x7ff62182, 0x03566a96, 0x7ff4dbd9,
    0x0388a9ea, 0x7ff38274, 0x03bae8b2, 0x7ff21553,
    0x03ed26e6, 0x7ff09478, 0x041f6480, 0x7feeffe1,
    0x0451a177, 0x7fed5791, 0x0483ddc3, 0x7feb9b85,
    0x04b6195d, 0x7fe9cbc0, 0x04e8543e, 0x7fe7e841,
    0x051a8e5c, 0x7fe5f108, 0x054cc7b1, 0x7fe3e616,
    0x057f0035, 0x7fe1c76b, 0x05b137df, 0x7fdf9508,
    0x05e36ea9, 0x7fdd4eec, 0x0615a48b, 0x7fdaf519,
    0x0647d97c, 0x7fd8878e, 0x067a0d76, 0x7fd6064c,
    0x06ac406f, 0x7fd37153, 0x06de7262, 0x7fd0c8a3,
    0x0710a345, 0x7fce0c3e, 0x0742d311, 0x7fcb3c23,
    0x077501be, 0x7fc85854, 0x07a72f45, 0x7fc560cf,
    0x07d95b9e, 0x7fc25596, 0x080b86c2, 0x7fbf36aa,
    0x083db0a7, 0x7fbc040a, 0x086fd947, 0x7fb8bdb8,
    0x08a2009a, 0x7fb563b3, 0x08d42699, 0x7fb1f5fc,
    0x09064b3a, 0x7fae7495, 0x09386e78, 0x7faadf7c,
    0x096a9049, 0x7fa736b4, 0x099cb0a7, 0x7fa37a3c,
    0x09cecf89, 0x7f9faa15, 0x0a00ece8, 0x7f9bc640,
    0x0a3308bd, 0x7f97cebd, 0x0a6522fe, 0x7f93c38c,
    0x0a973ba5, 0x7f8fa4b0, 0x0ac952aa, 0x7f8b7227,
    0x0afb6805, 0x7f872bf3, 0x0b2d7baf, 0x7f82d214,
    0x0b5f8d9f, 0x7f7e648c, 0x0b919dcf, 0x7f79e35a,
    0x0bc3ac35, 0x7f754e80, 0x0bf5b8cb, 0x7f70a5fe,
    0x0c27c389, 0x7f6be9d4, 0x0c59cc68, 0x7f671a05,
    0x0c8bd35e, 0x7f62368f, 0x0cbdd865, 0x7f5d3f75,
    0x0cefdb76, 0x7f5834b7, 0x0d21dc87, 0x7f531655,
    0x0d53db92, 0x7f4de451, 0x0d85d88f, 0x7f489eaa,
    0x0db7d376, 0x7f434563, 0x0de9cc40, 0x7f3dd87c,
    0x0e1bc2e4, 0x7f3857f6, 0x0e4db75b, 0x7f32c3d1,
    0x0e7fa99e, 0x7f2d1c0e, 0x0eb199a4, 0x7f2760af,
    0x0ee38766, 0x7f2191b4, 0x0f1572dc, 0x7f1baf1e,
    0x0f475bff, 0x7f15b8ee, 0x0f7942c7, 0x7f0faf25,
    0x0fab272b, 0x7f0991c4, 0x0fdd0926, 0x7f0360cb,
    0x100ee8ad, 0x7efd1c3c, 0x1040c5bb, 0x7ef6c418,
    0x1072a048, 0x7ef05860, 0x10a4784b, 0x7ee9d914,
    0x10d64dbd, 0x7ee34636, 0x11082096, 0x7edc9fc6,
    0x1139f0cf, 0x7ed5e5c6, 0x116bbe60, 0x7ecf1837,
    0x119d8941, 0x7ec8371a, 0x11cf516a, 0x7ec14270,
    0x120116d5, 0x7eba3a39, 0x1232d979, 0x7eb31e78,
    0x1264994e, 0x7eabef2c, 0x1296564d, 0x7ea4ac58,
    0x12c8106f, 0x7e9d55fc, 0x12f9c7aa, 0x7e95ec1a,
    0x132b7bf9, 0x7e8e6eb2, 0x135d2d53, 0x7e86ddc6,
    0x138edbb1, 0x7e7f3957, 0x13c0870a, 0x7e778166,
    0x13f22f58, 0x7e6fb5f4, 0x1423d492, 0x7e67d703,
    0x145576b1, 0x7e5fe493, 0x148715ae, 0x7e57dea7,
    0x14b8b17f, 0x7e4fc53e, 0x14ea4a1f, 0x7e47985b,
    0x151bdf86, 0x7e3f57ff, 0x154d71aa, 0x7e37042a,
    0x157f0086, 0x7e2e9cdf, 0x15b08c12, 0x7e26221f,
    0x15e21445, 0x7e1d93ea, 0x16139918, 0x7e14f242,
    0x16451a83, 0x7e0c3d29, 0x1676987f, 0x7e0374a0,
    0x16a81305, 0x7dfa98a8, 0x16d98a0c, 0x7df1a942,
    0x170afd8d, 0x7de8a670, 0x173c6d80, 0x7ddf9034,
    0x176dd9de, 0x7dd6668f, 0x179f429f, 0x7dcd2981,
    0x17d0a7bc, 0x7dc3d90d, 0x1802092c, 0x7dba7534,
    0x183366e9, 0x7db0fdf8, 0x1864c0ea, 0x7da77359,
    0x18961728, 0x7d9dd55a, 0x18c7699b, 0x7d9423fc,
    0x18f8b83c, 0x7d8a5f40, 0x192a0304, 0x7d808728,
    0x195b49ea, 0x7d769bb5, 0x198c8ce7, 0x7d6c9ce9,
    0x19bdcbf3, 0x7d628ac6, 0x19ef0707, 0x7d58654d,
    0x1a203e1b, 0x7d4e2c7f, 0x1a517128, 0x7d43e05e,
    0x1a82a026, 0x7d3980ec, 0x1ab3cb0d, 0x7d2f0e2b,
    0x1ae4f1d6, 0x7d24881b, 0x1b161479, 0x7d19eebf,
    0x1b4732ef, 0x7d0f4218, 0x1b784d30, 0x7d048228,
    0x1ba96335, 0x7cf9aef0, 0x1bda74f6, 0x7ceec873,
    0x1c0b826a, 0x7ce3ceb2, 0x1c3c8b8c, 0x7cd8c1ae,
    0x1c6d9053, 0x7ccda169, 0x1c9e90b8, 0x7cc26de5,
    0x1ccf8cb3, 0x7cb72724, 0x1d00843d, 0x7cabcd28,
    0x1d31774d, 0x7ca05ff1, 0x1d6265dd, 0x7c94df83,
    0x1d934fe5, 0x7c894bde, 0x1dc4355e, 0x7c7da505,
    0x1df5163f, 0x7c71eaf9, 0x1e25f282, 0x7c661dbc,
    0x1e56ca1e, 0x7c5a3d50, 0x1e879d0d, 0x7c4e49b7,
    0x1eb86b46, 0x7c4242f2, 0x1ee934c3, 0x7c362904,
    0x1f19f97b, 0x7c29fbee, 0x1f4ab968, 0x7c1dbbb3,
    0x1f7b7481, 0x7c116853, 0x1fac2abf, 0x7c0501d2,
    0x1fdcdc1b, 0x7bf88830, 0x200d888d, 0x7bebfb70,
    0x203e300d, 0x7bdf5b94, 0x206ed295, 0x7bd2a89e,
    0x209f701c, 0x7bc5e290, 0x20d0089c, 0x7bb9096b,
    0x21009c0c, 0x7bac1d31, 0x21312a65, 0x7b9f1de6,
    0x2161b3a0, 0x7b920b89, 0x219237b5, 0x7b84e61f,
    0x21c2b69c, 0x7b77ada8, 0x21f3304f, 0x7b6a6227,
    0x2223a4c5, 0x7b5d039e, 0x225413f8, 0x7b4f920e,
    0x22847de0, 0x7b420d7a, 0x22b4e274, 0x7b3475e5,
    0x22e541af, 0x7b26cb4f, 0x23159b88, 0x7b190dbc,
    0x2345eff8, 0x7b0b3d2c, 0x23763ef7, 0x7afd59a4,
    0x23a6887f, 0x7aef6323, 0x23d6cc87, 0x7ae159ae,
    0x24070b08, 0x7ad33d45, 0x243743fa, 0x7ac50dec,
    0x24677758, 0x7ab6cba4, 0x2497a517, 0x7aa8766f,
    0x24c7cd33, 0x7a9a0e50, 0x24f7efa2, 0x7a8b9348,
    0x25280c5e, 0x7a7d055b, 0x2558235f, 0x7a6e648a,
    0x2588349d, 0x7a5fb0d8, 0x25b84012, 0x7a50ea47,
    0x25e845b6, 0x7a4210d8, 0x26184581, 0x7a332490,
    0x26483f6c, 0x7a24256f, 0x26783370, 0x7a151378,
    0x26a82186, 0x7a05eead, 0x26d809a5, 0x79f6b711,
    0x2707ebc7, 0x79e76ca7, 0x2737c7e3, 0x79d80f6f,
    0x27679df4, 0x79c89f6e, 0x27976df1, 0x79b91ca4,
    0x27c737d3, 0x79a98715, 0x27f6fb92, 0x7999dec4,
    0x2826b928, 0x798a23b1, 0x2856708d, 0x797a55e0,
    0x288621b9, 0x796a7554, 0x28b5cca5, 0x795a820e,
    0x28e5714b, 0x794a7c12, 0x29150fa1, 0x793a6361,
    0x2944a7a2, 0x792a37fe, 0x29743946, 0x7919f9ec,
    0x29a3c485, 0x7909a92d, 0x29d34958, 0x78f945c3,
    0x2a02c7b8, 0x78e8cfb2, 0x2a323f9e, 0x78d846fb,
    0x2a61b101, 0x78c7aba2, 0x2a911bdc, 0x78b6fda8,
    0x2ac08026, 0x78a63d11, 0x2aefddd8, 0x789569df,
    0x2b1f34eb, 0x78848414, 0x2b4e8558, 0x78738bb3,
    0x2b7dcf17, 0x786280bf, 0x2bad1221, 0x7851633b,
    0x2bdc4e6f, 0x78403329, 0x2c0b83fa, 0x782ef08b,
    0x2c3ab2b9, 0x781d9b65, 0x2c69daa6, 0x780c33b8,
    0x2c98fbba, 0x77fab989, 0x2cc815ee, 0x77e92cd9,
    0x2cf72939, 0x77d78daa, 0x2d263596, 0x77c5dc01,
    0x2d553afc, 0x77b417df, 0x2d843964, 0x77a24148,
    0x2db330c7, 0x7790583e, 0x2de2211e, 0x777e5cc3,
    0x2e110a62, 0x776c4edb, 0x2e3fec8b, 0x775a2e89,
    0x2e6ec792, 0x7747fbce, 0x2e9d9b70, 0x7735b6af,
    0x2ecc681e, 0x77235f2d, 0x2efb2d95, 0x7710f54c,
    0x2f29ebcc, 0x76fe790e, 0x2f58a2be, 0x76ebea77,
    0x2f875262, 0x76d94989, 0x2fb5fab2, 0x76c69647,
    0x2fe49ba7, 0x76b3d0b4, 0x30133539, 0x76a0f8d2,
    0x3041c761, 0x768e0ea6, 0x30705217, 0x767b1231,
    0x309ed556, 0x76680376, 0x30cd5115, 0x7654e279,
    0x30fbc54d, 0x7641af3d, 0x312a31f8, 0x762e69c4,
    0x3158970e, 0x761b1211, 0x3186f487, 0x7607a828,
    0x31b54a5e, 0x75f42c0b, 0x31e39889, 0x75e09dbd,
    0x3211df04, 0x75ccfd42, 0x32401dc6, 0x75b94a9c,
    0x326e54c7, 0x75a585cf, 0x329c8402, 0x7591aedd,
    0x32caab6f, 0x757dc5ca, 0x32f8cb07, 0x7569ca99,
    0x3326e2c3, 0x7555bd4c, 0x3354f29b, 0x75419de7,
    0x3382fa88, 0x752d6c6c, 0x33b0fa84, 0x751928e0,
    0x33def287, 0x7504d345, 0x340ce28b, 0x74f06b9e,
    0x343aca87, 0x74dbf1ef, 0x3468aa76, 0x74c7663a,
    0x34968250, 0x74b2c884, 0x34c4520d, 0x749e18cd,
    0x34f219a8, 0x7489571c, 0x351fd918, 0x74748371,
    0x354d9057, 0x745f9dd1, 0x357b3f5d, 0x744aa63f,
    0x35a8e625, 0x74359cbd, 0x35d684a6, 0x74208150,
    0x36041ad9, 0x740b53fb, 0x3631a8b8, 0x73f614c0,
    0x365f2e3b, 0x73e0c3a3, 0x368cab5c, 0x73cb60a8,
    0x36ba2014, 0x73b5ebd1, 0x36e78c5b, 0x73a06522,
    0x3714f02a, 0x738acc9e, 0x37424b7b, 0x73752249,
    0x376f9e46, 0x735f6626, 0x379ce885, 0x73499838,
    0x37ca2a30, 0x7333b883, 0x37f76341, 0x731dc70a,
    0x382493b0, 0x7307c3d0, 0x3851bb77, 0x72f1aed9,
    0x387eda8e, 0x72db8828, 0x38abf0ef, 0x72c54fc1,
    0x38d8fe93, 0x72af05a7, 0x39060373, 0x7298a9dd,
    0x3932ff87, 0x72823c67, 0x395ff2c9, 0x726bbd48,
    0x398cdd32, 0x72552c85, 0x39b9bebc, 0x723e8a20,
    0x39e6975e, 0x7227d61c, 0x3a136712, 0x7211107e,
    0x3a402dd2, 0x71fa3949, 0x3a6ceb96, 0x71e35080,
    0x3a99a057, 0x71cc5626, 0x3ac64c0f, 0x71b54a41,
    0x3af2eeb7, 0x719e2cd2, 0x3b1f8848, 0x7186fdde,
    0x3b4c18ba, 0x716fbd68, 0x3b78a007, 0x71586b74,
    0x3ba51e29, 0x71410805, 0x3bd19318, 0x7129931f,
    0x3bfdfecd, 0x71120cc5, 0x3c2a6142, 0x70fa74fc,
    0x3c56ba70, 0x70e2cbc6, 0x3c830a50, 0x70cb1128,
    0x3caf50da, 0x70b34525, 0x3cdb8e09, 0x709b67c0,
    0x3d07c1d6, 0x708378ff, 0x3d33ec39, 0x706b78e3,
    0x3d600d2c, 0x70536771, 0x3d8c24a8, 0x703b44ad,
    0x3db832a6, 0x7023109a, 0x3de4371f, 0x700acb3c,
    0x3e10320d, 0x6ff27497, 0x3e3c2369, 0x6fda0cae,
    0x3e680b2c, 0x6fc19385, 0x3e93e950, 0x6fa90921,
    0x3ebfbdcd, 0x6f906d84, 0x3eeb889c, 0x6f77c0b3,
    0x3f1749b8, 0x6f5f02b2, 0x3f430119, 0x6f463383,
    0x3f6eaeb8, 0x6f2d532c, 0x3f9a5290, 0x6f1461b0,
    0x3fc5ec98, 0x6efb5f12, 0x3ff17cca, 0x6ee24b57,
    0x401d0321, 0x6ec92683, 0x40487f94, 0x6eaff099,
    0x4073f21d, 0x6e96a99d, 0x409f5ab6, 0x6e7d5193,
    0x40cab958, 0x6e63e87f, 0x40f60dfb, 0x6e4a6e66,
    0x4121589b, 0x6e30e34a, 0x414c992f, 0x6e174730,
    0x4177cfb1, 0x6dfd9a1c, 0x41a2fc1a, 0x6de3dc11,
    0x41ce1e65, 0x6dca0d14, 0x41f93689, 0x6db02d29,
    0x42244481, 0x6d963c54, 0x424f4845, 0x6d7c3a98,
    0x427a41d0, 0x6d6227fa, 0x42a5311b, 0x6d48047e,
    0x42d0161e, 0x6d2dd027, 0x42faf0d4, 0x6d138afb,
    0x4325c135, 0x6cf934fc, 0x4350873c, 0x6cdece2f,
    0x437b42e1, 0x6cc45698, 0x43a5f41e, 0x6ca9ce3b,
    0x43d09aed, 0x6c8f351c, 0x43fb3746, 0x6c748b3f,
    0x4425c923, 0x6c59d0a9, 0x4450507e, 0x6c3f055d,
    0x447acd50, 0x6c242960, 0x44a53f93, 0x6c093cb6,
    0x44cfa740, 0x6bee3f62, 0x44fa0450, 0x6bd3316a,
    0x452456bd, 0x6bb812d1, 0x454e9e80, 0x6b9ce39b,
    0x4578db93, 0x6b81a3cd, 0x45a30df0, 0x6b66536b,
    0x45cd358f, 0x6b4af279, 0x45f7526b, 0x6b2f80fb,
    0x4621647d, 0x6b13fef5, 0x464b6bbe, 0x6af86c6c,
    0x46756828, 0x6adcc964, 0x469f59b4, 0x6ac115e2,
    0x46c9405c, 0x6aa551e9, 0x46f31c1a, 0x6a897d7d,
    0x471cece7, 0x6a6d98a4, 0x4746b2bc, 0x6a51a361,
    0x47706d93, 0x6a359db9, 0x479a1d67, 0x6a1987b0,
    0x47c3c22f, 0x69fd614a, 0x47ed5be6, 0x69e12a8c,
    0x4816ea86, 0x69c4e37a, 0x48406e08, 0x69a88c19,
    0x4869e665, 0x698c246c, 0x48935397, 0x696fac78,
    0x48bcb599, 0x69532442, 0x48e60c62, 0x69368bce,
    0x490f57ee, 0x6919e320, 0x49389836, 0x68fd2a3d,
    0x4961cd33, 0x68e06129, 0x498af6df, 0x68c387e9,
    0x49b41533, 0x68a69e81, 0x49dd282a, 0x6889a4f6,
    0x4a062fbd, 0x686c9b4b, 0x4a2f2be6, 0x684f8186,
    0x4a581c9e, 0x683257ab, 0x4a8101de, 0x68151dbe,
    0x4aa9dba2, 0x67f7d3c5, 0x4ad2a9e2, 0x67da79c3,
    0x4afb6c98, 0x67bd0fbd, 0x4b2423be, 0x679f95b7,
    0x4b4ccf4d, 0x67820bb7, 0x4b756f40, 0x676471c0,
    0x4b9e0390, 0x6746c7d8, 0x4bc68c36, 0x67290e02,
    0x4bef092d, 0x670b4444, 0x4c177a6e, 0x66ed6aa1,
    0x4c3fdff4, 0x66cf8120, 0x4c6839b7, 0x66b187c3,
    0x4c9087b1, 0x66937e91, 0x4cb8c9dd, 0x6675658c,
    0x4ce10034, 0x66573cbb, 0x4d092ab0, 0x66390422,
    0x4d31494b, 0x661abbc5, 0x4d595bfe, 0x65fc63a9,
    0x4d8162c4, 0x65ddfbd3, 0x4da95d96, 0x65bf8447,
    0x4dd14c6e, 0x65a0fd0b, 0x4df92f46, 0x65826622,
    0x4e210617, 0x6563bf92, 0x4e48d0dd, 0x6545095f,
    0x4e708f8f, 0x6526438f, 0x4e984229, 0x65076e25,
    0x4ebfe8a5, 0x64e88926, 0x4ee782fb, 0x64c99498,
    0x4f0f1126, 0x64aa907f, 0x4f369320, 0x648b7ce0,
    0x4f5e08e3, 0x646c59bf, 0x4f857269, 0x644d2722,
    0x4faccfab, 0x642de50d, 0x4fd420a4, 0x640e9386,
    0x4ffb654d, 0x63ef3290, 0x50229da1, 0x63cfc231,
    0x5049c999, 0x63b0426d, 0x5070e92f, 0x6390b34a,
    0x5097fc5e, 0x637114cc, 0x50bf031f, 0x635166f9,
    0x50e5fd6d, 0x6331a9d4, 0x510ceb40, 0x6311dd64,
    0x5133cc94, 0x62f201ac, 0x515aa162, 0x62d216b3,
    0x518169a5, 0x62b21c7b, 0x51a82555, 0x6292130c,
    0x51ced46e, 0x6271fa69, 0x51f576ea, 0x6251d298,
    0x521c0cc2, 0x62319b9d, 0x524295f0, 0x6211557e,
    0x5269126e, 0x61f1003f, 0x528f8238, 0x61d09be5,
    0x52b5e546, 0x61b02876, 0x52dc3b92, 0x618fa5f7,
    0x53028518, 0x616f146c, 0x5328c1d0, 0x614e73da,
    0x534ef1b5, 0x612dc447, 0x537514c2, 0x610d05b7,
    0x539b2af0, 0x60ec3830, 0x53c13439, 0x60cb5bb7,
    0x53e73097, 0x60aa7050, 0x540d2005, 0x60897601,
    0x5433027d, 0x60686ccf, 0x5458d7f9, 0x604754bf,
    0x547ea073, 0x60262dd6, 0x54a45be6, 0x6004f819,
    0x54ca0a4b, 0x5fe3b38d, 0x54efab9c, 0x5fc26038,
    0x55153fd4, 0x5fa0fe1f, 0x553ac6ee, 0x5f7f8d46,
    0x556040e2, 0x5f5e0db3, 0x5585adad, 0x5f3c7f6b,
    0x55ab0d46, 0x5f1ae274, 0x55d05faa, 0x5ef936d1,
    0x55f5a4d2, 0x5ed77c8a, 0x561adcb9, 0x5eb5b3a2,
    0x56400758, 0x5e93dc1f, 0x566524aa, 0x5e71f606,
    0x568a34a9, 0x5e50015d, 0x56af3750, 0x5e2dfe29,
    0x56d42c99, 0x5e0bec6e, 0x56f9147e, 0x5de9cc33,
    0x571deefa, 0x5dc79d7c, 0x5742bc06, 0x5da5604f,
    0x57677b9d, 0x5d8314b1, 0x578c2dba, 0x5d60baa7,
    0x57b0d256, 0x5d3e5237, 0x57d5696d, 0x5d1bdb65,
    0x57f9f2f8, 0x5cf95638, 0x581e6ef1, 0x5cd6c2b5,
    0x5842dd54, 0x5cb420e0, 0x58673e1b, 0x5c9170bf,
    0x588b9140, 0x5c6eb258, 0x58afd6bd, 0x5c4be5b0,
    0x58d40e8c, 0x5c290acc, 0x58f838a9, 0x5c0621b2,
    0x591c550e, 0x5be32a67, 0x594063b5, 0x5bc024f0,
    0x59646498, 0x5b9d1154, 0x598857b2, 0x5b79ef96,
    0x59ac3cfd, 0x5b56bfbd, 0x59d01475, 0x5b3381ce,
    0x59f3de12, 0x5b1035cf, 0x5a1799d1, 0x5aecdbc5,
    0x5a3b47ab, 0x5ac973b5, 0x5a5ee79a, 0x5aa5fda5,
    0x5a82799a, 0x5a82799a
};
291
/* {sin((2*i+1)*PI/4096), cos((2*i+1)*PI/4096)}, with i = 0 to 511 */
293const int32_t sincos_lookup1[1024] ICONST_ATTR = {
294 0x001921fb, 0x7ffffd88, 0x004b65ee, 0x7fffe9cb,
295 0x007da9d4, 0x7fffc251, 0x00afeda8, 0x7fff8719,
296 0x00e23160, 0x7fff3824, 0x011474f6, 0x7ffed572,
297 0x0146b860, 0x7ffe5f03, 0x0178fb99, 0x7ffdd4d7,
298 0x01ab3e97, 0x7ffd36ee, 0x01dd8154, 0x7ffc8549,
299 0x020fc3c6, 0x7ffbbfe6, 0x024205e8, 0x7ffae6c7,
300 0x027447b0, 0x7ff9f9ec, 0x02a68917, 0x7ff8f954,
301 0x02d8ca16, 0x7ff7e500, 0x030b0aa4, 0x7ff6bcf0,
302 0x033d4abb, 0x7ff58125, 0x036f8a51, 0x7ff4319d,
303 0x03a1c960, 0x7ff2ce5b, 0x03d407df, 0x7ff1575d,
304 0x040645c7, 0x7fefcca4, 0x04388310, 0x7fee2e30,
305 0x046abfb3, 0x7fec7c02, 0x049cfba7, 0x7feab61a,
306 0x04cf36e5, 0x7fe8dc78, 0x05017165, 0x7fe6ef1c,
307 0x0533ab20, 0x7fe4ee06, 0x0565e40d, 0x7fe2d938,
308 0x05981c26, 0x7fe0b0b1, 0x05ca5361, 0x7fde7471,
309 0x05fc89b8, 0x7fdc247a, 0x062ebf22, 0x7fd9c0ca,
310 0x0660f398, 0x7fd74964, 0x06932713, 0x7fd4be46,
311 0x06c5598a, 0x7fd21f72, 0x06f78af6, 0x7fcf6ce8,
312 0x0729bb4e, 0x7fcca6a7, 0x075bea8c, 0x7fc9ccb2,
313 0x078e18a7, 0x7fc6df08, 0x07c04598, 0x7fc3dda9,
314 0x07f27157, 0x7fc0c896, 0x08249bdd, 0x7fbd9fd0,
315 0x0856c520, 0x7fba6357, 0x0888ed1b, 0x7fb7132b,
316 0x08bb13c5, 0x7fb3af4e, 0x08ed3916, 0x7fb037bf,
317 0x091f5d06, 0x7facac7f, 0x09517f8f, 0x7fa90d8e,
318 0x0983a0a7, 0x7fa55aee, 0x09b5c048, 0x7fa1949e,
319 0x09e7de6a, 0x7f9dbaa0, 0x0a19fb04, 0x7f99ccf4,
320 0x0a4c1610, 0x7f95cb9a, 0x0a7e2f85, 0x7f91b694,
321 0x0ab0475c, 0x7f8d8de1, 0x0ae25d8d, 0x7f895182,
322 0x0b147211, 0x7f850179, 0x0b4684df, 0x7f809dc5,
323 0x0b7895f0, 0x7f7c2668, 0x0baaa53b, 0x7f779b62,
324 0x0bdcb2bb, 0x7f72fcb4, 0x0c0ebe66, 0x7f6e4a5e,
325 0x0c40c835, 0x7f698461, 0x0c72d020, 0x7f64aabf,
326 0x0ca4d620, 0x7f5fbd77, 0x0cd6da2d, 0x7f5abc8a,
327 0x0d08dc3f, 0x7f55a7fa, 0x0d3adc4e, 0x7f507fc7,
328 0x0d6cda53, 0x7f4b43f2, 0x0d9ed646, 0x7f45f47b,
329 0x0dd0d01f, 0x7f409164, 0x0e02c7d7, 0x7f3b1aad,
330 0x0e34bd66, 0x7f359057, 0x0e66b0c3, 0x7f2ff263,
331 0x0e98a1e9, 0x7f2a40d2, 0x0eca90ce, 0x7f247ba5,
332 0x0efc7d6b, 0x7f1ea2dc, 0x0f2e67b8, 0x7f18b679,
333 0x0f604faf, 0x7f12b67c, 0x0f923546, 0x7f0ca2e7,
334 0x0fc41876, 0x7f067bba, 0x0ff5f938, 0x7f0040f6,
335 0x1027d784, 0x7ef9f29d, 0x1059b352, 0x7ef390ae,
336 0x108b8c9b, 0x7eed1b2c, 0x10bd6356, 0x7ee69217,
337 0x10ef377d, 0x7edff570, 0x11210907, 0x7ed94538,
338 0x1152d7ed, 0x7ed28171, 0x1184a427, 0x7ecbaa1a,
339 0x11b66dad, 0x7ec4bf36, 0x11e83478, 0x7ebdc0c6,
340 0x1219f880, 0x7eb6aeca, 0x124bb9be, 0x7eaf8943,
341 0x127d7829, 0x7ea85033, 0x12af33ba, 0x7ea1039b,
342 0x12e0ec6a, 0x7e99a37c, 0x1312a230, 0x7e922fd6,
343 0x13445505, 0x7e8aa8ac, 0x137604e2, 0x7e830dff,
344 0x13a7b1bf, 0x7e7b5fce, 0x13d95b93, 0x7e739e1d,
345 0x140b0258, 0x7e6bc8eb, 0x143ca605, 0x7e63e03b,
346 0x146e4694, 0x7e5be40c, 0x149fe3fc, 0x7e53d462,
347 0x14d17e36, 0x7e4bb13c, 0x1503153a, 0x7e437a9c,
348 0x1534a901, 0x7e3b3083, 0x15663982, 0x7e32d2f4,
349 0x1597c6b7, 0x7e2a61ed, 0x15c95097, 0x7e21dd73,
350 0x15fad71b, 0x7e194584, 0x162c5a3b, 0x7e109a24,
351 0x165dd9f0, 0x7e07db52, 0x168f5632, 0x7dff0911,
352 0x16c0cef9, 0x7df62362, 0x16f2443e, 0x7ded2a47,
353 0x1723b5f9, 0x7de41dc0, 0x17552422, 0x7ddafdce,
354 0x17868eb3, 0x7dd1ca75, 0x17b7f5a3, 0x7dc883b4,
355 0x17e958ea, 0x7dbf298d, 0x181ab881, 0x7db5bc02,
356 0x184c1461, 0x7dac3b15, 0x187d6c82, 0x7da2a6c6,
357 0x18aec0db, 0x7d98ff17, 0x18e01167, 0x7d8f4409,
358 0x19115e1c, 0x7d85759f, 0x1942a6f3, 0x7d7b93da,
359 0x1973ebe6, 0x7d719eba, 0x19a52ceb, 0x7d679642,
360 0x19d669fc, 0x7d5d7a74, 0x1a07a311, 0x7d534b50,
361 0x1a38d823, 0x7d4908d9, 0x1a6a0929, 0x7d3eb30f,
362 0x1a9b361d, 0x7d3449f5, 0x1acc5ef6, 0x7d29cd8c,
363 0x1afd83ad, 0x7d1f3dd6, 0x1b2ea43a, 0x7d149ad5,
364 0x1b5fc097, 0x7d09e489, 0x1b90d8bb, 0x7cff1af5,
365 0x1bc1ec9e, 0x7cf43e1a, 0x1bf2fc3a, 0x7ce94dfb,
366 0x1c240786, 0x7cde4a98, 0x1c550e7c, 0x7cd333f3,
367 0x1c861113, 0x7cc80a0f, 0x1cb70f43, 0x7cbcccec,
368 0x1ce80906, 0x7cb17c8d, 0x1d18fe54, 0x7ca618f3,
369 0x1d49ef26, 0x7c9aa221, 0x1d7adb73, 0x7c8f1817,
370 0x1dabc334, 0x7c837ad8, 0x1ddca662, 0x7c77ca65,
371 0x1e0d84f5, 0x7c6c06c0, 0x1e3e5ee5, 0x7c602fec,
372 0x1e6f342c, 0x7c5445e9, 0x1ea004c1, 0x7c4848ba,
373 0x1ed0d09d, 0x7c3c3860, 0x1f0197b8, 0x7c3014de,
374 0x1f325a0b, 0x7c23de35, 0x1f63178f, 0x7c179467,
375 0x1f93d03c, 0x7c0b3777, 0x1fc4840a, 0x7bfec765,
376 0x1ff532f2, 0x7bf24434, 0x2025dcec, 0x7be5ade6,
377 0x205681f1, 0x7bd9047c, 0x208721f9, 0x7bcc47fa,
378 0x20b7bcfe, 0x7bbf7860, 0x20e852f6, 0x7bb295b0,
379 0x2118e3dc, 0x7ba59fee, 0x21496fa7, 0x7b989719,
380 0x2179f64f, 0x7b8b7b36, 0x21aa77cf, 0x7b7e4c45,
381 0x21daf41d, 0x7b710a49, 0x220b6b32, 0x7b63b543,
382 0x223bdd08, 0x7b564d36, 0x226c4996, 0x7b48d225,
383 0x229cb0d5, 0x7b3b4410, 0x22cd12bd, 0x7b2da2fa,
384 0x22fd6f48, 0x7b1feee5, 0x232dc66d, 0x7b1227d3,
385 0x235e1826, 0x7b044dc7, 0x238e646a, 0x7af660c2,
386 0x23beab33, 0x7ae860c7, 0x23eeec78, 0x7ada4dd8,
387 0x241f2833, 0x7acc27f7, 0x244f5e5c, 0x7abdef25,
388 0x247f8eec, 0x7aafa367, 0x24afb9da, 0x7aa144bc,
389 0x24dfdf20, 0x7a92d329, 0x250ffeb7, 0x7a844eae,
390 0x25401896, 0x7a75b74f, 0x25702cb7, 0x7a670d0d,
391 0x25a03b11, 0x7a584feb, 0x25d0439f, 0x7a497feb,
392 0x26004657, 0x7a3a9d0f, 0x26304333, 0x7a2ba75a,
393 0x26603a2c, 0x7a1c9ece, 0x26902b39, 0x7a0d836d,
394 0x26c01655, 0x79fe5539, 0x26effb76, 0x79ef1436,
395 0x271fda96, 0x79dfc064, 0x274fb3ae, 0x79d059c8,
396 0x277f86b5, 0x79c0e062, 0x27af53a6, 0x79b15435,
397 0x27df1a77, 0x79a1b545, 0x280edb23, 0x79920392,
398 0x283e95a1, 0x79823f20, 0x286e49ea, 0x797267f2,
399 0x289df7f8, 0x79627e08, 0x28cd9fc1, 0x79528167,
400 0x28fd4140, 0x79427210, 0x292cdc6d, 0x79325006,
401 0x295c7140, 0x79221b4b, 0x298bffb2, 0x7911d3e2,
402 0x29bb87bc, 0x790179cd, 0x29eb0957, 0x78f10d0f,
403 0x2a1a847b, 0x78e08dab, 0x2a49f920, 0x78cffba3,
404 0x2a796740, 0x78bf56f9, 0x2aa8ced3, 0x78ae9fb0,
405 0x2ad82fd2, 0x789dd5cb, 0x2b078a36, 0x788cf94c,
406 0x2b36ddf7, 0x787c0a36, 0x2b662b0e, 0x786b088c,
407 0x2b957173, 0x7859f44f, 0x2bc4b120, 0x7848cd83,
408 0x2bf3ea0d, 0x7837942b, 0x2c231c33, 0x78264849,
409 0x2c52478a, 0x7814e9df, 0x2c816c0c, 0x780378f1,
410 0x2cb089b1, 0x77f1f581, 0x2cdfa071, 0x77e05f91,
411 0x2d0eb046, 0x77ceb725, 0x2d3db928, 0x77bcfc3f,
412 0x2d6cbb10, 0x77ab2ee2, 0x2d9bb5f6, 0x77994f11,
413 0x2dcaa9d5, 0x77875cce, 0x2df996a3, 0x7775581d,
414 0x2e287c5a, 0x776340ff, 0x2e575af3, 0x77511778,
415 0x2e863267, 0x773edb8b, 0x2eb502ae, 0x772c8d3a,
416 0x2ee3cbc1, 0x771a2c88, 0x2f128d99, 0x7707b979,
417 0x2f41482e, 0x76f5340e, 0x2f6ffb7a, 0x76e29c4b,
418 0x2f9ea775, 0x76cff232, 0x2fcd4c19, 0x76bd35c7,
419 0x2ffbe95d, 0x76aa670d, 0x302a7f3a, 0x76978605,
420 0x30590dab, 0x768492b4, 0x308794a6, 0x76718d1c,
421 0x30b61426, 0x765e7540, 0x30e48c22, 0x764b4b23,
422 0x3112fc95, 0x76380ec8, 0x31416576, 0x7624c031,
423 0x316fc6be, 0x76115f63, 0x319e2067, 0x75fdec60,
424 0x31cc7269, 0x75ea672a, 0x31fabcbd, 0x75d6cfc5,
425 0x3228ff5c, 0x75c32634, 0x32573a3f, 0x75af6a7b,
426 0x32856d5e, 0x759b9c9b, 0x32b398b3, 0x7587bc98,
427 0x32e1bc36, 0x7573ca75, 0x330fd7e1, 0x755fc635,
428 0x333debab, 0x754bafdc, 0x336bf78f, 0x7537876c,
429 0x3399fb85, 0x75234ce8, 0x33c7f785, 0x750f0054,
430 0x33f5eb89, 0x74faa1b3, 0x3423d78a, 0x74e63108,
431 0x3451bb81, 0x74d1ae55, 0x347f9766, 0x74bd199f,
432 0x34ad6b32, 0x74a872e8, 0x34db36df, 0x7493ba34,
433 0x3508fa66, 0x747eef85, 0x3536b5be, 0x746a12df,
434 0x356468e2, 0x74552446, 0x359213c9, 0x744023bc,
435 0x35bfb66e, 0x742b1144, 0x35ed50c9, 0x7415ece2,
436 0x361ae2d3, 0x7400b69a, 0x36486c86, 0x73eb6e6e,
437 0x3675edd9, 0x73d61461, 0x36a366c6, 0x73c0a878,
438 0x36d0d746, 0x73ab2ab4, 0x36fe3f52, 0x73959b1b,
439 0x372b9ee3, 0x737ff9ae, 0x3758f5f2, 0x736a4671,
440 0x37864477, 0x73548168, 0x37b38a6d, 0x733eaa96,
441 0x37e0c7cc, 0x7328c1ff, 0x380dfc8d, 0x7312c7a5,
442 0x383b28a9, 0x72fcbb8c, 0x38684c19, 0x72e69db7,
443 0x389566d6, 0x72d06e2b, 0x38c278d9, 0x72ba2cea,
444 0x38ef821c, 0x72a3d9f7, 0x391c8297, 0x728d7557,
445 0x39497a43, 0x7276ff0d, 0x39766919, 0x7260771b,
446 0x39a34f13, 0x7249dd86, 0x39d02c2a, 0x72333251,
447 0x39fd0056, 0x721c7580, 0x3a29cb91, 0x7205a716,
448 0x3a568dd4, 0x71eec716, 0x3a834717, 0x71d7d585,
449 0x3aaff755, 0x71c0d265, 0x3adc9e86, 0x71a9bdba,
450 0x3b093ca3, 0x71929789, 0x3b35d1a5, 0x717b5fd3,
451 0x3b625d86, 0x7164169d, 0x3b8ee03e, 0x714cbbeb,
452 0x3bbb59c7, 0x71354fc0, 0x3be7ca1a, 0x711dd220,
453 0x3c143130, 0x7106430e, 0x3c408f03, 0x70eea28e,
454 0x3c6ce38a, 0x70d6f0a4, 0x3c992ec0, 0x70bf2d53,
455 0x3cc5709e, 0x70a7589f, 0x3cf1a91c, 0x708f728b,
456 0x3d1dd835, 0x70777b1c, 0x3d49fde1, 0x705f7255,
457 0x3d761a19, 0x70475839, 0x3da22cd7, 0x702f2ccd,
458 0x3dce3614, 0x7016f014, 0x3dfa35c8, 0x6ffea212,
459 0x3e262bee, 0x6fe642ca, 0x3e52187f, 0x6fcdd241,
460 0x3e7dfb73, 0x6fb5507a, 0x3ea9d4c3, 0x6f9cbd79,
461 0x3ed5a46b, 0x6f841942, 0x3f016a61, 0x6f6b63d8,
462 0x3f2d26a0, 0x6f529d40, 0x3f58d921, 0x6f39c57d,
463 0x3f8481dd, 0x6f20dc92, 0x3fb020ce, 0x6f07e285,
464 0x3fdbb5ec, 0x6eeed758, 0x40074132, 0x6ed5bb10,
465 0x4032c297, 0x6ebc8db0, 0x405e3a16, 0x6ea34f3d,
466 0x4089a7a8, 0x6e89ffb9, 0x40b50b46, 0x6e709f2a,
467 0x40e064ea, 0x6e572d93, 0x410bb48c, 0x6e3daaf8,
468 0x4136fa27, 0x6e24175c, 0x416235b2, 0x6e0a72c5,
469 0x418d6729, 0x6df0bd35, 0x41b88e84, 0x6dd6f6b1,
470 0x41e3abbc, 0x6dbd1f3c, 0x420ebecb, 0x6da336dc,
471 0x4239c7aa, 0x6d893d93, 0x4264c653, 0x6d6f3365,
472 0x428fbabe, 0x6d551858, 0x42baa4e6, 0x6d3aec6e,
473 0x42e584c3, 0x6d20afac, 0x43105a50, 0x6d066215,
474 0x433b2585, 0x6cec03af, 0x4365e65b, 0x6cd1947c,
475 0x43909ccd, 0x6cb71482, 0x43bb48d4, 0x6c9c83c3,
476 0x43e5ea68, 0x6c81e245, 0x44108184, 0x6c67300b,
477 0x443b0e21, 0x6c4c6d1a, 0x44659039, 0x6c319975,
478 0x449007c4, 0x6c16b521, 0x44ba74bd, 0x6bfbc021,
479 0x44e4d71c, 0x6be0ba7b, 0x450f2edb, 0x6bc5a431,
480 0x45397bf4, 0x6baa7d49, 0x4563be60, 0x6b8f45c7,
481 0x458df619, 0x6b73fdae, 0x45b82318, 0x6b58a503,
482 0x45e24556, 0x6b3d3bcb, 0x460c5cce, 0x6b21c208,
483 0x46366978, 0x6b0637c1, 0x46606b4e, 0x6aea9cf8,
484 0x468a624a, 0x6acef1b2, 0x46b44e65, 0x6ab335f4,
485 0x46de2f99, 0x6a9769c1, 0x470805df, 0x6a7b8d1e,
486 0x4731d131, 0x6a5fa010, 0x475b9188, 0x6a43a29a,
487 0x478546de, 0x6a2794c1, 0x47aef12c, 0x6a0b7689,
488 0x47d8906d, 0x69ef47f6, 0x48022499, 0x69d3090e,
489 0x482badab, 0x69b6b9d3, 0x48552b9b, 0x699a5a4c,
490 0x487e9e64, 0x697dea7b, 0x48a805ff, 0x69616a65,
491 0x48d16265, 0x6944da10, 0x48fab391, 0x6928397e,
492 0x4923f97b, 0x690b88b5, 0x494d341e, 0x68eec7b9,
493 0x49766373, 0x68d1f68f, 0x499f8774, 0x68b5153a,
494 0x49c8a01b, 0x689823bf, 0x49f1ad61, 0x687b2224,
495 0x4a1aaf3f, 0x685e106c, 0x4a43a5b0, 0x6840ee9b,
496 0x4a6c90ad, 0x6823bcb7, 0x4a957030, 0x68067ac3,
497 0x4abe4433, 0x67e928c5, 0x4ae70caf, 0x67cbc6c0,
498 0x4b0fc99d, 0x67ae54ba, 0x4b387af9, 0x6790d2b6,
499 0x4b6120bb, 0x677340ba, 0x4b89badd, 0x67559eca,
500 0x4bb24958, 0x6737ecea, 0x4bdacc28, 0x671a2b20,
501 0x4c034345, 0x66fc596f, 0x4c2baea9, 0x66de77dc,
502 0x4c540e4e, 0x66c0866d, 0x4c7c622d, 0x66a28524,
503 0x4ca4aa41, 0x66847408, 0x4ccce684, 0x6666531d,
504 0x4cf516ee, 0x66482267, 0x4d1d3b7a, 0x6629e1ec,
505 0x4d455422, 0x660b91af, 0x4d6d60df, 0x65ed31b5,
506 0x4d9561ac, 0x65cec204, 0x4dbd5682, 0x65b0429f,
507 0x4de53f5a, 0x6591b38c, 0x4e0d1c30, 0x657314cf,
508 0x4e34ecfc, 0x6554666d, 0x4e5cb1b9, 0x6535a86b,
509 0x4e846a60, 0x6516dacd, 0x4eac16eb, 0x64f7fd98,
510 0x4ed3b755, 0x64d910d1, 0x4efb4b96, 0x64ba147d,
511 0x4f22d3aa, 0x649b08a0, 0x4f4a4f89, 0x647bed3f,
512 0x4f71bf2e, 0x645cc260, 0x4f992293, 0x643d8806,
513 0x4fc079b1, 0x641e3e38, 0x4fe7c483, 0x63fee4f8,
514 0x500f0302, 0x63df7c4d, 0x50363529, 0x63c0043b,
515 0x505d5af1, 0x63a07cc7, 0x50847454, 0x6380e5f6,
516 0x50ab814d, 0x63613fcd, 0x50d281d5, 0x63418a50,
517 0x50f975e6, 0x6321c585, 0x51205d7b, 0x6301f171,
518 0x5147388c, 0x62e20e17, 0x516e0715, 0x62c21b7e,
519 0x5194c910, 0x62a219aa, 0x51bb7e75, 0x628208a1,
520 0x51e22740, 0x6261e866, 0x5208c36a, 0x6241b8ff,
521 0x522f52ee, 0x62217a72, 0x5255d5c5, 0x62012cc2,
522 0x527c4bea, 0x61e0cff5, 0x52a2b556, 0x61c06410,
523 0x52c91204, 0x619fe918, 0x52ef61ee, 0x617f5f12,
524 0x5315a50e, 0x615ec603, 0x533bdb5d, 0x613e1df0,
525 0x536204d7, 0x611d66de, 0x53882175, 0x60fca0d2,
526 0x53ae3131, 0x60dbcbd1, 0x53d43406, 0x60bae7e1,
527 0x53fa29ed, 0x6099f505, 0x542012e1, 0x6078f344,
528 0x5445eedb, 0x6057e2a2, 0x546bbdd7, 0x6036c325,
529 0x54917fce, 0x601594d1, 0x54b734ba, 0x5ff457ad,
530 0x54dcdc96, 0x5fd30bbc, 0x5502775c, 0x5fb1b104,
531 0x55280505, 0x5f90478a, 0x554d858d, 0x5f6ecf53,
532 0x5572f8ed, 0x5f4d4865, 0x55985f20, 0x5f2bb2c5,
533 0x55bdb81f, 0x5f0a0e77, 0x55e303e6, 0x5ee85b82,
534 0x5608426e, 0x5ec699e9, 0x562d73b2, 0x5ea4c9b3,
535 0x565297ab, 0x5e82eae5, 0x5677ae54, 0x5e60fd84,
536 0x569cb7a8, 0x5e3f0194, 0x56c1b3a1, 0x5e1cf71c,
537 0x56e6a239, 0x5dfade20, 0x570b8369, 0x5dd8b6a7,
538 0x5730572e, 0x5db680b4, 0x57551d80, 0x5d943c4e,
539 0x5779d65b, 0x5d71e979, 0x579e81b8, 0x5d4f883b,
540 0x57c31f92, 0x5d2d189a, 0x57e7afe4, 0x5d0a9a9a,
541 0x580c32a7, 0x5ce80e41, 0x5830a7d6, 0x5cc57394,
542 0x58550f6c, 0x5ca2ca99, 0x58796962, 0x5c801354,
543 0x589db5b3, 0x5c5d4dcc, 0x58c1f45b, 0x5c3a7a05,
544 0x58e62552, 0x5c179806, 0x590a4893, 0x5bf4a7d2,
545 0x592e5e19, 0x5bd1a971, 0x595265df, 0x5bae9ce7,
546 0x59765fde, 0x5b8b8239, 0x599a4c12, 0x5b68596d,
547 0x59be2a74, 0x5b452288, 0x59e1faff, 0x5b21dd90,
548 0x5a05bdae, 0x5afe8a8b, 0x5a29727b, 0x5adb297d,
549 0x5a4d1960, 0x5ab7ba6c, 0x5a70b258, 0x5a943d5e,
550};
551
552/*split radix bit reverse table for FFT of size up to 2048*/
553
554const uint16_t revtab[1<<12] = {
5550, 3072, 1536, 2816, 768, 3840, 1408, 2432, 384, 3456, 1920, 2752, 704,
5563776, 1216, 2240, 192, 3264, 1728, 3008, 960, 4032, 1376, 2400, 352, 3424,
5571888, 2656, 608, 3680, 1120, 2144, 96, 3168, 1632, 2912, 864, 3936, 1504,
5582528, 480, 3552, 2016, 2736, 688, 3760, 1200, 2224, 176, 3248, 1712, 2992,
559944, 4016, 1328, 2352, 304, 3376, 1840, 2608, 560, 3632, 1072, 2096, 48,
5603120, 1584, 2864, 816, 3888, 1456, 2480, 432, 3504, 1968, 2800, 752, 3824,
5611264, 2288, 240, 3312, 1776, 3056, 1008, 4080, 1368, 2392, 344, 3416, 1880,
5622648, 600, 3672, 1112, 2136, 88, 3160, 1624, 2904, 856, 3928, 1496, 2520,
563472, 3544, 2008, 2712, 664, 3736, 1176, 2200, 152, 3224, 1688, 2968, 920,
5643992, 1304, 2328, 280, 3352, 1816, 2584, 536, 3608, 1048, 2072, 24, 3096,
5651560, 2840, 792, 3864, 1432, 2456, 408, 3480, 1944, 2776, 728, 3800, 1240,
5662264, 216, 3288, 1752, 3032, 984, 4056, 1400, 2424, 376, 3448, 1912, 2680,
567632, 3704, 1144, 2168, 120, 3192, 1656, 2936, 888, 3960, 1528, 2552, 504,
5683576, 2040, 2732, 684, 3756, 1196, 2220, 172, 3244, 1708, 2988, 940, 4012,
5691324, 2348, 300, 3372, 1836, 2604, 556, 3628, 1068, 2092, 44, 3116, 1580,
5702860, 812, 3884, 1452, 2476, 428, 3500, 1964, 2796, 748, 3820, 1260, 2284,
571236, 3308, 1772, 3052, 1004, 4076, 1356, 2380, 332, 3404, 1868, 2636, 588,
5723660, 1100, 2124, 76, 3148, 1612, 2892, 844, 3916, 1484, 2508, 460, 3532,
5731996, 2700, 652, 3724, 1164, 2188, 140, 3212, 1676, 2956, 908, 3980, 1292,
5742316, 268, 3340, 1804, 2572, 524, 3596, 1036, 2060, 12, 3084, 1548, 2828,
575780, 3852, 1420, 2444, 396, 3468, 1932, 2764, 716, 3788, 1228, 2252, 204,
5763276, 1740, 3020, 972, 4044, 1388, 2412, 364, 3436, 1900, 2668, 620, 3692,
5771132, 2156, 108, 3180, 1644, 2924, 876, 3948, 1516, 2540, 492, 3564, 2028,
5782748, 700, 3772, 1212, 2236, 188, 3260, 1724, 3004, 956, 4028, 1340, 2364,
579316, 3388, 1852, 2620, 572, 3644, 1084, 2108, 60, 3132, 1596, 2876, 828,
5803900, 1468, 2492, 444, 3516, 1980, 2812, 764, 3836, 1276, 2300, 252, 3324,
5811788, 3068, 1020, 4092, 1366, 2390, 342, 3414, 1878, 2646, 598, 3670, 1110,
5822134, 86, 3158, 1622, 2902, 854, 3926, 1494, 2518, 470, 3542, 2006, 2710,
583662, 3734, 1174, 2198, 150, 3222, 1686, 2966, 918, 3990, 1302, 2326, 278,
5843350, 1814, 2582, 534, 3606, 1046, 2070, 22, 3094, 1558, 2838, 790, 3862,
5851430, 2454, 406, 3478, 1942, 2774, 726, 3798, 1238, 2262, 214, 3286, 1750,
5863030, 982, 4054, 1398, 2422, 374, 3446, 1910, 2678, 630, 3702, 1142, 2166,
587118, 3190, 1654, 2934, 886, 3958, 1526, 2550, 502, 3574, 2038, 2726, 678,
5883750, 1190, 2214, 166, 3238, 1702, 2982, 934, 4006, 1318, 2342, 294, 3366,
5891830, 2598, 550, 3622, 1062, 2086, 38, 3110, 1574, 2854, 806, 3878, 1446,
5902470, 422, 3494, 1958, 2790, 742, 3814, 1254, 2278, 230, 3302, 1766, 3046,
591998, 4070, 1350, 2374, 326, 3398, 1862, 2630, 582, 3654, 1094, 2118, 70,
5923142, 1606, 2886, 838, 3910, 1478, 2502, 454, 3526, 1990, 2694, 646, 3718,
5931158, 2182, 134, 3206, 1670, 2950, 902, 3974, 1286, 2310, 262, 3334, 1798,
5942566, 518, 3590, 1030, 2054, 6, 3078, 1542, 2822, 774, 3846, 1414, 2438,
595390, 3462, 1926, 2758, 710, 3782, 1222, 2246, 198, 3270, 1734, 3014, 966,
5964038, 1382, 2406, 358, 3430, 1894, 2662, 614, 3686, 1126, 2150, 102, 3174,
5971638, 2918, 870, 3942, 1510, 2534, 486, 3558, 2022, 2742, 694, 3766, 1206,
5982230, 182, 3254, 1718, 2998, 950, 4022, 1334, 2358, 310, 3382, 1846, 2614,
599566, 3638, 1078, 2102, 54, 3126, 1590, 2870, 822, 3894, 1462, 2486, 438,
6003510, 1974, 2806, 758, 3830, 1270, 2294, 246, 3318, 1782, 3062, 1014, 4086,
6011374, 2398, 350, 3422, 1886, 2654, 606, 3678, 1118, 2142, 94, 3166, 1630,
6022910, 862, 3934, 1502, 2526, 478, 3550, 2014, 2718, 670, 3742, 1182, 2206,
603158, 3230, 1694, 2974, 926, 3998, 1310, 2334, 286, 3358, 1822, 2590, 542,
6043614, 1054, 2078, 30, 3102, 1566, 2846, 798, 3870, 1438, 2462, 414, 3486,
6051950, 2782, 734, 3806, 1246, 2270, 222, 3294, 1758, 3038, 990, 4062, 1406,
6062430, 382, 3454, 1918, 2686, 638, 3710, 1150, 2174, 126, 3198, 1662, 2942,
607894, 3966, 1534, 2558, 510, 3582, 2046, 2731, 683, 3755, 1195, 2219, 171,
6083243, 1707, 2987, 939, 4011, 1323, 2347, 299, 3371, 1835, 2603, 555, 3627,
6091067, 2091, 43, 3115, 1579, 2859, 811, 3883, 1451, 2475, 427, 3499, 1963,
6102795, 747, 3819, 1259, 2283, 235, 3307, 1771, 3051, 1003, 4075, 1355, 2379,
611331, 3403, 1867, 2635, 587, 3659, 1099, 2123, 75, 3147, 1611, 2891, 843,
6123915, 1483, 2507, 459, 3531, 1995, 2699, 651, 3723, 1163, 2187, 139, 3211,
6131675, 2955, 907, 3979, 1291, 2315, 267, 3339, 1803, 2571, 523, 3595, 1035,
6142059, 11, 3083, 1547, 2827, 779, 3851, 1419, 2443, 395, 3467, 1931, 2763,
615715, 3787, 1227, 2251, 203, 3275, 1739, 3019, 971, 4043, 1387, 2411, 363,
6163435, 1899, 2667, 619, 3691, 1131, 2155, 107, 3179, 1643, 2923, 875, 3947,
6171515, 2539, 491, 3563, 2027, 2747, 699, 3771, 1211, 2235, 187, 3259, 1723,
6183003, 955, 4027, 1339, 2363, 315, 3387, 1851, 2619, 571, 3643, 1083, 2107,
61959, 3131, 1595, 2875, 827, 3899, 1467, 2491, 443, 3515, 1979, 2811, 763,
6203835, 1275, 2299, 251, 3323, 1787, 3067, 1019, 4091, 1363, 2387, 339, 3411,
6211875, 2643, 595, 3667, 1107, 2131, 83, 3155, 1619, 2899, 851, 3923, 1491,
6222515, 467, 3539, 2003, 2707, 659, 3731, 1171, 2195, 147, 3219, 1683, 2963,
623915, 3987, 1299, 2323, 275, 3347, 1811, 2579, 531, 3603, 1043, 2067, 19,
6243091, 1555, 2835, 787, 3859, 1427, 2451, 403, 3475, 1939, 2771, 723, 3795,
6251235, 2259, 211, 3283, 1747, 3027, 979, 4051, 1395, 2419, 371, 3443, 1907,
6262675, 627, 3699, 1139, 2163, 115, 3187, 1651, 2931, 883, 3955, 1523, 2547,
627499, 3571, 2035, 2723, 675, 3747, 1187, 2211, 163, 3235, 1699, 2979, 931,
6284003, 1315, 2339, 291, 3363, 1827, 2595, 547, 3619, 1059, 2083, 35, 3107,
6291571, 2851, 803, 3875, 1443, 2467, 419, 3491, 1955, 2787, 739, 3811, 1251,
6302275, 227, 3299, 1763, 3043, 995, 4067, 1347, 2371, 323, 3395, 1859, 2627,
631579, 3651, 1091, 2115, 67, 3139, 1603, 2883, 835, 3907, 1475, 2499, 451,
6323523, 1987, 2691, 643, 3715, 1155, 2179, 131, 3203, 1667, 2947, 899, 3971,
6331283, 2307, 259, 3331, 1795, 2563, 515, 3587, 1027, 2051, 3, 3075, 1539,
6342819, 771, 3843, 1411, 2435, 387, 3459, 1923, 2755, 707, 3779, 1219, 2243,
635195, 3267, 1731, 3011, 963, 4035, 1379, 2403, 355, 3427, 1891, 2659, 611,
6363683, 1123, 2147, 99, 3171, 1635, 2915, 867, 3939, 1507, 2531, 483, 3555,
6372019, 2739, 691, 3763, 1203, 2227, 179, 3251, 1715, 2995, 947, 4019, 1331,
6382355, 307, 3379, 1843, 2611, 563, 3635, 1075, 2099, 51, 3123, 1587, 2867,
639819, 3891, 1459, 2483, 435, 3507, 1971, 2803, 755, 3827, 1267, 2291, 243,
6403315, 1779, 3059, 1011, 4083, 1371, 2395, 347, 3419, 1883, 2651, 603, 3675,
6411115, 2139, 91, 3163, 1627, 2907, 859, 3931, 1499, 2523, 475, 3547, 2011,
6422715, 667, 3739, 1179, 2203, 155, 3227, 1691, 2971, 923, 3995, 1307, 2331,
643283, 3355, 1819, 2587, 539, 3611, 1051, 2075, 27, 3099, 1563, 2843, 795,
6443867, 1435, 2459, 411, 3483, 1947, 2779, 731, 3803, 1243, 2267, 219, 3291,
6451755, 3035, 987, 4059, 1403, 2427, 379, 3451, 1915, 2683, 635, 3707, 1147,
6462171, 123, 3195, 1659, 2939, 891, 3963, 1531, 2555, 507, 3579, 2043, 2735,
647687, 3759, 1199, 2223, 175, 3247, 1711, 2991, 943, 4015, 1327, 2351, 303,
6483375, 1839, 2607, 559, 3631, 1071, 2095, 47, 3119, 1583, 2863, 815, 3887,
6491455, 2479, 431, 3503, 1967, 2799, 751, 3823, 1263, 2287, 239, 3311, 1775,
6503055, 1007, 4079, 1359, 2383, 335, 3407, 1871, 2639, 591, 3663, 1103, 2127,
65179, 3151, 1615, 2895, 847, 3919, 1487, 2511, 463, 3535, 1999, 2703, 655,
6523727, 1167, 2191, 143, 3215, 1679, 2959, 911, 3983, 1295, 2319, 271, 3343,
6531807, 2575, 527, 3599, 1039, 2063, 15, 3087, 1551, 2831, 783, 3855, 1423,
6542447, 399, 3471, 1935, 2767, 719, 3791, 1231, 2255, 207, 3279, 1743, 3023,
655975, 4047, 1391, 2415, 367, 3439, 1903, 2671, 623, 3695, 1135, 2159, 111,
6563183, 1647, 2927, 879, 3951, 1519, 2543, 495, 3567, 2031, 2751, 703, 3775,
6571215, 2239, 191, 3263, 1727, 3007, 959, 4031, 1343, 2367, 319, 3391, 1855,
6582623, 575, 3647, 1087, 2111, 63, 3135, 1599, 2879, 831, 3903, 1471, 2495,
659447, 3519, 1983, 2815, 767, 3839, 1279, 2303, 255, 3327, 1791, 3071, 1023,
6604095, 1365, 2389, 341, 3413, 1877, 2645, 597, 3669, 1109, 2133, 85, 3157,
6611621, 2901, 853, 3925, 1493, 2517, 469, 3541, 2005, 2709, 661, 3733, 1173,
6622197, 149, 3221, 1685, 2965, 917, 3989, 1301, 2325, 277, 3349, 1813, 2581,
663533, 3605, 1045, 2069, 21, 3093, 1557, 2837, 789, 3861, 1429, 2453, 405,
6643477, 1941, 2773, 725, 3797, 1237, 2261, 213, 3285, 1749, 3029, 981, 4053,
6651397, 2421, 373, 3445, 1909, 2677, 629, 3701, 1141, 2165, 117, 3189, 1653,
6662933, 885, 3957, 1525, 2549, 501, 3573, 2037, 2725, 677, 3749, 1189, 2213,
667165, 3237, 1701, 2981, 933, 4005, 1317, 2341, 293, 3365, 1829, 2597, 549,
6683621, 1061, 2085, 37, 3109, 1573, 2853, 805, 3877, 1445, 2469, 421, 3493,
6691957, 2789, 741, 3813, 1253, 2277, 229, 3301, 1765, 3045, 997, 4069, 1349,
6702373, 325, 3397, 1861, 2629, 581, 3653, 1093, 2117, 69, 3141, 1605, 2885,
671837, 3909, 1477, 2501, 453, 3525, 1989, 2693, 645, 3717, 1157, 2181, 133,
6723205, 1669, 2949, 901, 3973, 1285, 2309, 261, 3333, 1797, 2565, 517, 3589,
6731029, 2053, 5, 3077, 1541, 2821, 773, 3845, 1413, 2437, 389, 3461, 1925,
6742757, 709, 3781, 1221, 2245, 197, 3269, 1733, 3013, 965, 4037, 1381, 2405,
675357, 3429, 1893, 2661, 613, 3685, 1125, 2149, 101, 3173, 1637, 2917, 869,
6763941, 1509, 2533, 485, 3557, 2021, 2741, 693, 3765, 1205, 2229, 181, 3253,
6771717, 2997, 949, 4021, 1333, 2357, 309, 3381, 1845, 2613, 565, 3637, 1077,
6782101, 53, 3125, 1589, 2869, 821, 3893, 1461, 2485, 437, 3509, 1973, 2805,
679757, 3829, 1269, 2293, 245, 3317, 1781, 3061, 1013, 4085, 1373, 2397, 349,
6803421, 1885, 2653, 605, 3677, 1117, 2141, 93, 3165, 1629, 2909, 861, 3933,
6811501, 2525, 477, 3549, 2013, 2717, 669, 3741, 1181, 2205, 157, 3229, 1693,
6822973, 925, 3997, 1309, 2333, 285, 3357, 1821, 2589, 541, 3613, 1053, 2077,
68329, 3101, 1565, 2845, 797, 3869, 1437, 2461, 413, 3485, 1949, 2781, 733,
6843805, 1245, 2269, 221, 3293, 1757, 3037, 989, 4061, 1405, 2429, 381, 3453,
6851917, 2685, 637, 3709, 1149, 2173, 125, 3197, 1661, 2941, 893, 3965, 1533,
6862557, 509, 3581, 2045, 2729, 681, 3753, 1193, 2217, 169, 3241, 1705, 2985,
687937, 4009, 1321, 2345, 297, 3369, 1833, 2601, 553, 3625, 1065, 2089, 41,
6883113, 1577, 2857, 809, 3881, 1449, 2473, 425, 3497, 1961, 2793, 745, 3817,
6891257, 2281, 233, 3305, 1769, 3049, 1001, 4073, 1353, 2377, 329, 3401, 1865,
6902633, 585, 3657, 1097, 2121, 73, 3145, 1609, 2889, 841, 3913, 1481, 2505,
691457, 3529, 1993, 2697, 649, 3721, 1161, 2185, 137, 3209, 1673, 2953, 905,
6923977, 1289, 2313, 265, 3337, 1801, 2569, 521, 3593, 1033, 2057, 9, 3081,
6931545, 2825, 777, 3849, 1417, 2441, 393, 3465, 1929, 2761, 713, 3785, 1225,
6942249, 201, 3273, 1737, 3017, 969, 4041, 1385, 2409, 361, 3433, 1897, 2665,
695617, 3689, 1129, 2153, 105, 3177, 1641, 2921, 873, 3945, 1513, 2537, 489,
6963561, 2025, 2745, 697, 3769, 1209, 2233, 185, 3257, 1721, 3001, 953, 4025,
6971337, 2361, 313, 3385, 1849, 2617, 569, 3641, 1081, 2105, 57, 3129, 1593,
6982873, 825, 3897, 1465, 2489, 441, 3513, 1977, 2809, 761, 3833, 1273, 2297,
699249, 3321, 1785, 3065, 1017, 4089, 1361, 2385, 337, 3409, 1873, 2641, 593,
7003665, 1105, 2129, 81, 3153, 1617, 2897, 849, 3921, 1489, 2513, 465, 3537,
7012001, 2705, 657, 3729, 1169, 2193, 145, 3217, 1681, 2961, 913, 3985, 1297,
7022321, 273, 3345, 1809, 2577, 529, 3601, 1041, 2065, 17, 3089, 1553, 2833,
703785, 3857, 1425, 2449, 401, 3473, 1937, 2769, 721, 3793, 1233, 2257, 209,
7043281, 1745, 3025, 977, 4049, 1393, 2417, 369, 3441, 1905, 2673, 625, 3697,
7051137, 2161, 113, 3185, 1649, 2929, 881, 3953, 1521, 2545, 497, 3569, 2033,
7062721, 673, 3745, 1185, 2209, 161, 3233, 1697, 2977, 929, 4001, 1313, 2337,
707289, 3361, 1825, 2593, 545, 3617, 1057, 2081, 33, 3105, 1569, 2849, 801,
7083873, 1441, 2465, 417, 3489, 1953, 2785, 737, 3809, 1249, 2273, 225, 3297,
7091761, 3041, 993, 4065, 1345, 2369, 321, 3393, 1857, 2625, 577, 3649, 1089,
7102113, 65, 3137, 1601, 2881, 833, 3905, 1473, 2497, 449, 3521, 1985, 2689,
711641, 3713, 1153, 2177, 129, 3201, 1665, 2945, 897, 3969, 1281, 2305, 257,
7123329, 1793, 2561, 513, 3585, 1025, 2049, 1, 3073, 1537, 2817, 769, 3841,
7131409, 2433, 385, 3457, 1921, 2753, 705, 3777, 1217, 2241, 193, 3265, 1729,
7143009, 961, 4033, 1377, 2401, 353, 3425, 1889, 2657, 609, 3681, 1121, 2145,
71597, 3169, 1633, 2913, 865, 3937, 1505, 2529, 481, 3553, 2017, 2737, 689,
7163761, 1201, 2225, 177, 3249, 1713, 2993, 945, 4017, 1329, 2353, 305, 3377,
7171841, 2609, 561, 3633, 1073, 2097, 49, 3121, 1585, 2865, 817, 3889, 1457,
7182481, 433, 3505, 1969, 2801, 753, 3825, 1265, 2289, 241, 3313, 1777, 3057,
7191009, 4081, 1369, 2393, 345, 3417, 1881, 2649, 601, 3673, 1113, 2137, 89,
7203161, 1625, 2905, 857, 3929, 1497, 2521, 473, 3545, 2009, 2713, 665, 3737,
7211177, 2201, 153, 3225, 1689, 2969, 921, 3993, 1305, 2329, 281, 3353, 1817,
7222585, 537, 3609, 1049, 2073, 25, 3097, 1561, 2841, 793, 3865, 1433, 2457,
723409, 3481, 1945, 2777, 729, 3801, 1241, 2265, 217, 3289, 1753, 3033, 985,
7244057, 1401, 2425, 377, 3449, 1913, 2681, 633, 3705, 1145, 2169, 121, 3193,
7251657, 2937, 889, 3961, 1529, 2553, 505, 3577, 2041, 2733, 685, 3757, 1197,
7262221, 173, 3245, 1709, 2989, 941, 4013, 1325, 2349, 301, 3373, 1837, 2605,
727557, 3629, 1069, 2093, 45, 3117, 1581, 2861, 813, 3885, 1453, 2477, 429,
7283501, 1965, 2797, 749, 3821, 1261, 2285, 237, 3309, 1773, 3053, 1005, 4077,
7291357, 2381, 333, 3405, 1869, 2637, 589, 3661, 1101, 2125, 77, 3149, 1613,
7302893, 845, 3917, 1485, 2509, 461, 3533, 1997, 2701, 653, 3725, 1165, 2189,
731141, 3213, 1677, 2957, 909, 3981, 1293, 2317, 269, 3341, 1805, 2573, 525,
7323597, 1037, 2061, 13, 3085, 1549, 2829, 781, 3853, 1421, 2445, 397, 3469,
7331933, 2765, 717, 3789, 1229, 2253, 205, 3277, 1741, 3021, 973, 4045, 1389,
7342413, 365, 3437, 1901, 2669, 621, 3693, 1133, 2157, 109, 3181, 1645, 2925,
735877, 3949, 1517, 2541, 493, 3565, 2029, 2749, 701, 3773, 1213, 2237, 189,
7363261, 1725, 3005, 957, 4029, 1341, 2365, 317, 3389, 1853, 2621, 573, 3645,
7371085, 2109, 61, 3133, 1597, 2877, 829, 3901, 1469, 2493, 445, 3517, 1981,
7382813, 765, 3837, 1277, 2301, 253, 3325, 1789, 3069, 1021, 4093, 1367, 2391,
739343, 3415, 1879, 2647, 599, 3671, 1111, 2135, 87, 3159, 1623, 2903, 855,
7403927, 1495, 2519, 471, 3543, 2007, 2711, 663, 3735, 1175, 2199, 151, 3223,
7411687, 2967, 919, 3991, 1303, 2327, 279, 3351, 1815, 2583, 535, 3607, 1047,
7422071, 23, 3095, 1559, 2839, 791, 3863, 1431, 2455, 407, 3479, 1943, 2775,
743727, 3799, 1239, 2263, 215, 3287, 1751, 3031, 983, 4055, 1399, 2423, 375,
7443447, 1911, 2679, 631, 3703, 1143, 2167, 119, 3191, 1655, 2935, 887, 3959,
7451527, 2551, 503, 3575, 2039, 2727, 679, 3751, 1191, 2215, 167, 3239, 1703,
7462983, 935, 4007, 1319, 2343, 295, 3367, 1831, 2599, 551, 3623, 1063, 2087,
74739, 3111, 1575, 2855, 807, 3879, 1447, 2471, 423, 3495, 1959, 2791, 743,
7483815, 1255, 2279, 231, 3303, 1767, 3047, 999, 4071, 1351, 2375, 327, 3399,
7491863, 2631, 583, 3655, 1095, 2119, 71, 3143, 1607, 2887, 839, 3911, 1479,
7502503, 455, 3527, 1991, 2695, 647, 3719, 1159, 2183, 135, 3207, 1671, 2951,
751903, 3975, 1287, 2311, 263, 3335, 1799, 2567, 519, 3591, 1031, 2055, 7,
7523079, 1543, 2823, 775, 3847, 1415, 2439, 391, 3463, 1927, 2759, 711, 3783,
7531223, 2247, 199, 3271, 1735, 3015, 967, 4039, 1383, 2407, 359, 3431, 1895,
7542663, 615, 3687, 1127, 2151, 103, 3175, 1639, 2919, 871, 3943, 1511, 2535,
755487, 3559, 2023, 2743, 695, 3767, 1207, 2231, 183, 3255, 1719, 2999, 951,
7564023, 1335, 2359, 311, 3383, 1847, 2615, 567, 3639, 1079, 2103, 55, 3127,
7571591, 2871, 823, 3895, 1463, 2487, 439, 3511, 1975, 2807, 759, 3831, 1271,
7582295, 247, 3319, 1783, 3063, 1015, 4087, 1375, 2399, 351, 3423, 1887, 2655,
759607, 3679, 1119, 2143, 95, 3167, 1631, 2911, 863, 3935, 1503, 2527, 479,
7603551, 2015, 2719, 671, 3743, 1183, 2207, 159, 3231, 1695, 2975, 927, 3999,
7611311, 2335, 287, 3359, 1823, 2591, 543, 3615, 1055, 2079, 31, 3103, 1567,
7622847, 799, 3871, 1439, 2463, 415, 3487, 1951, 2783, 735, 3807, 1247, 2271,
763223, 3295, 1759, 3039, 991, 4063, 1407, 2431, 383, 3455, 1919, 2687, 639,
7643711, 1151, 2175, 127, 3199, 1663, 2943, 895, 3967, 1535, 2559, 511, 3583,
7652047, 2730, 682, 3754, 1194, 2218, 170, 3242, 1706, 2986, 938, 4010, 1322,
7662346, 298, 3370, 1834, 2602, 554, 3626, 1066, 2090, 42, 3114, 1578, 2858,
767810, 3882, 1450, 2474, 426, 3498, 1962, 2794, 746, 3818, 1258, 2282, 234,
7683306, 1770, 3050, 1002, 4074, 1354, 2378, 330, 3402, 1866, 2634, 586, 3658,
7691098, 2122, 74, 3146, 1610, 2890, 842, 3914, 1482, 2506, 458, 3530, 1994,
7702698, 650, 3722, 1162, 2186, 138, 3210, 1674, 2954, 906, 3978, 1290, 2314,
771266, 3338, 1802, 2570, 522, 3594, 1034, 2058, 10, 3082, 1546, 2826, 778,
7723850, 1418, 2442, 394, 3466, 1930, 2762, 714, 3786, 1226, 2250, 202, 3274,
7731738, 3018, 970, 4042, 1386, 2410, 362, 3434, 1898, 2666, 618, 3690, 1130,
7742154, 106, 3178, 1642, 2922, 874, 3946, 1514, 2538, 490, 3562, 2026, 2746,
775698, 3770, 1210, 2234, 186, 3258, 1722, 3002, 954, 4026, 1338, 2362, 314,
7763386, 1850, 2618, 570, 3642, 1082, 2106, 58, 3130, 1594, 2874, 826, 3898,
7771466, 2490, 442, 3514, 1978, 2810, 762, 3834, 1274, 2298, 250, 3322, 1786,
7783066, 1018, 4090, 1362, 2386, 338, 3410, 1874, 2642, 594, 3666, 1106, 2130,
77982, 3154, 1618, 2898, 850, 3922, 1490, 2514, 466, 3538, 2002, 2706, 658,
7803730, 1170, 2194, 146, 3218, 1682, 2962, 914, 3986, 1298, 2322, 274, 3346,
7811810, 2578, 530, 3602, 1042, 2066, 18, 3090, 1554, 2834, 786, 3858, 1426,
7822450, 402, 3474, 1938, 2770, 722, 3794, 1234, 2258, 210, 3282, 1746, 3026,
783978, 4050, 1394, 2418, 370, 3442, 1906, 2674, 626, 3698, 1138, 2162, 114,
7843186, 1650, 2930, 882, 3954, 1522, 2546, 498, 3570, 2034, 2722, 674, 3746,
7851186, 2210, 162, 3234, 1698, 2978, 930, 4002, 1314, 2338, 290, 3362, 1826,
7862594, 546, 3618, 1058, 2082, 34, 3106, 1570, 2850, 802, 3874, 1442, 2466,
787418, 3490, 1954, 2786, 738, 3810, 1250, 2274, 226, 3298, 1762, 3042, 994,
7884066, 1346, 2370, 322, 3394, 1858, 2626, 578, 3650, 1090, 2114, 66, 3138,
7891602, 2882, 834, 3906, 1474, 2498, 450, 3522, 1986, 2690, 642, 3714, 1154,
7902178, 130, 3202, 1666, 2946, 898, 3970, 1282, 2306, 258, 3330, 1794, 2562,
791514, 3586, 1026, 2050, 2, 3074, 1538, 2818, 770, 3842, 1410, 2434, 386,
7923458, 1922, 2754, 706, 3778, 1218, 2242, 194, 3266, 1730, 3010, 962, 4034,
7931378, 2402, 354, 3426, 1890, 2658, 610, 3682, 1122, 2146, 98, 3170, 1634,
7942914, 866, 3938, 1506, 2530, 482, 3554, 2018, 2738, 690, 3762, 1202, 2226,
795178, 3250, 1714, 2994, 946, 4018, 1330, 2354, 306, 3378, 1842, 2610, 562,
7963634, 1074, 2098, 50, 3122, 1586, 2866, 818, 3890, 1458, 2482, 434, 3506,
7971970, 2802, 754, 3826, 1266, 2290, 242, 3314, 1778, 3058, 1010, 4082, 1370,
7982394, 346, 3418, 1882, 2650, 602, 3674, 1114, 2138, 90, 3162, 1626, 2906,
799858, 3930, 1498, 2522, 474, 3546, 2010, 2714, 666, 3738, 1178, 2202, 154,
8003226, 1690, 2970, 922, 3994, 1306, 2330, 282, 3354, 1818, 2586, 538, 3610,
8011050, 2074, 26, 3098, 1562, 2842, 794, 3866, 1434, 2458, 410, 3482, 1946,
8022778, 730, 3802, 1242, 2266, 218, 3290, 1754, 3034, 986, 4058, 1402, 2426,
803378, 3450, 1914, 2682, 634, 3706, 1146, 2170, 122, 3194, 1658, 2938, 890,
8043962, 1530, 2554, 506, 3578, 2042, 2734, 686, 3758, 1198, 2222, 174, 3246,
8051710, 2990, 942, 4014, 1326, 2350, 302, 3374, 1838, 2606, 558, 3630, 1070,
8062094, 46, 3118, 1582, 2862, 814, 3886, 1454, 2478, 430, 3502, 1966, 2798,
807750, 3822, 1262, 2286, 238, 3310, 1774, 3054, 1006, 4078, 1358, 2382, 334,
8083406, 1870, 2638, 590, 3662, 1102, 2126, 78, 3150, 1614, 2894, 846, 3918,
8091486, 2510, 462, 3534, 1998, 2702, 654, 3726, 1166, 2190, 142, 3214, 1678,
8102958, 910, 3982, 1294, 2318, 270, 3342, 1806, 2574, 526, 3598, 1038, 2062,
81114, 3086, 1550, 2830, 782, 3854, 1422, 2446, 398, 3470, 1934, 2766, 718,
8123790, 1230, 2254, 206, 3278, 1742, 3022, 974, 4046, 1390, 2414, 366, 3438,
8131902, 2670, 622, 3694, 1134, 2158, 110, 3182, 1646, 2926, 878, 3950, 1518,
8142542, 494, 3566, 2030, 2750, 702, 3774, 1214, 2238, 190, 3262, 1726, 3006,
815958, 4030, 1342, 2366, 318, 3390, 1854, 2622, 574, 3646, 1086, 2110, 62,
8163134, 1598, 2878, 830, 3902, 1470, 2494, 446, 3518, 1982, 2814, 766, 3838,
8171278, 2302, 254, 3326, 1790, 3070, 1022, 4094, 1364, 2388, 340, 3412, 1876,
8182644, 596, 3668, 1108, 2132, 84, 3156, 1620, 2900, 852, 3924, 1492, 2516,
819468, 3540, 2004, 2708, 660, 3732, 1172, 2196, 148, 3220, 1684, 2964, 916,
8203988, 1300, 2324, 276, 3348, 1812, 2580, 532, 3604, 1044, 2068, 20, 3092,
8211556, 2836, 788, 3860, 1428, 2452, 404, 3476, 1940, 2772, 724, 3796, 1236,
8222260, 212, 3284, 1748, 3028, 980, 4052, 1396, 2420, 372, 3444, 1908, 2676,
823628, 3700, 1140, 2164, 116, 3188, 1652, 2932, 884, 3956, 1524, 2548, 500,
8243572, 2036, 2724, 676, 3748, 1188, 2212, 164, 3236, 1700, 2980, 932, 4004,
8251316, 2340, 292, 3364, 1828, 2596, 548, 3620, 1060, 2084, 36, 3108, 1572,
8262852, 804, 3876, 1444, 2468, 420, 3492, 1956, 2788, 740, 3812, 1252, 2276,
827228, 3300, 1764, 3044, 996, 4068, 1348, 2372, 324, 3396, 1860, 2628, 580,
8283652, 1092, 2116, 68, 3140, 1604, 2884, 836, 3908, 1476, 2500, 452, 3524,
8291988, 2692, 644, 3716, 1156, 2180, 132, 3204, 1668, 2948, 900, 3972, 1284,
8302308, 260, 3332, 1796, 2564, 516, 3588, 1028, 2052, 4, 3076, 1540, 2820,
831772, 3844, 1412, 2436, 388, 3460, 1924, 2756, 708, 3780, 1220, 2244, 196,
8323268, 1732, 3012, 964, 4036, 1380, 2404, 356, 3428, 1892, 2660, 612, 3684,
8331124, 2148, 100, 3172, 1636, 2916, 868, 3940, 1508, 2532, 484, 3556, 2020,
8342740, 692, 3764, 1204, 2228, 180, 3252, 1716, 2996, 948, 4020, 1332, 2356,
835308, 3380, 1844, 2612, 564, 3636, 1076, 2100, 52, 3124, 1588, 2868, 820,
8363892, 1460, 2484, 436, 3508, 1972, 2804, 756, 3828, 1268, 2292, 244, 3316,
8371780, 3060, 1012, 4084, 1372, 2396, 348, 3420, 1884, 2652, 604, 3676, 1116,
8382140, 92, 3164, 1628, 2908, 860, 3932, 1500, 2524, 476, 3548, 2012, 2716,
839668, 3740, 1180, 2204, 156, 3228, 1692, 2972, 924, 3996, 1308, 2332, 284,
8403356, 1820, 2588, 540, 3612, 1052, 2076, 28, 3100, 1564, 2844, 796, 3868,
8411436, 2460, 412, 3484, 1948, 2780, 732, 3804, 1244, 2268, 220, 3292, 1756,
8423036, 988, 4060, 1404, 2428, 380, 3452, 1916, 2684, 636, 3708, 1148, 2172,
843124, 3196, 1660, 2940, 892, 3964, 1532, 2556, 508, 3580, 2044, 2728, 680,
8443752, 1192, 2216, 168, 3240, 1704, 2984, 936, 4008, 1320, 2344, 296, 3368,
8451832, 2600, 552, 3624, 1064, 2088, 40, 3112, 1576, 2856, 808, 3880, 1448,
8462472, 424, 3496, 1960, 2792, 744, 3816, 1256, 2280, 232, 3304, 1768, 3048,
8471000, 4072, 1352, 2376, 328, 3400, 1864, 2632, 584, 3656, 1096, 2120, 72,
8483144, 1608, 2888, 840, 3912, 1480, 2504, 456, 3528, 1992, 2696, 648, 3720,
8491160, 2184, 136, 3208, 1672, 2952, 904, 3976, 1288, 2312, 264, 3336, 1800,
8502568, 520, 3592, 1032, 2056, 8, 3080, 1544, 2824, 776, 3848, 1416, 2440,
851392, 3464, 1928, 2760, 712, 3784, 1224, 2248, 200, 3272, 1736, 3016, 968,
8524040, 1384, 2408, 360, 3432, 1896, 2664, 616, 3688, 1128, 2152, 104, 3176,
8531640, 2920, 872, 3944, 1512, 2536, 488, 3560, 2024, 2744, 696, 3768, 1208,
8542232, 184, 3256, 1720, 3000, 952, 4024, 1336, 2360, 312, 3384, 1848, 2616,
855568, 3640, 1080, 2104, 56, 3128, 1592, 2872, 824, 3896, 1464, 2488, 440,
8563512, 1976, 2808, 760, 3832, 1272, 2296, 248, 3320, 1784, 3064, 1016, 4088,
8571360, 2384, 336, 3408, 1872, 2640, 592, 3664, 1104, 2128, 80, 3152, 1616,
8582896, 848, 3920, 1488, 2512, 464, 3536, 2000, 2704, 656, 3728, 1168, 2192,
859144, 3216, 1680, 2960, 912, 3984, 1296, 2320, 272, 3344, 1808, 2576, 528,
8603600, 1040, 2064, 16, 3088, 1552, 2832, 784, 3856, 1424, 2448, 400, 3472,
8611936, 2768, 720, 3792, 1232, 2256, 208, 3280, 1744, 3024, 976, 4048, 1392,
8622416, 368, 3440, 1904, 2672, 624, 3696, 1136, 2160, 112, 3184, 1648, 2928,
863880, 3952, 1520, 2544, 496, 3568, 2032, 2720, 672, 3744, 1184, 2208, 160,
8643232, 1696, 2976, 928, 4000, 1312, 2336, 288, 3360, 1824, 2592, 544, 3616,
8651056, 2080, 32, 3104, 1568, 2848, 800, 3872, 1440, 2464, 416, 3488, 1952,
8662784, 736, 3808, 1248, 2272, 224, 3296, 1760, 3040, 992, 4064, 1344, 2368,
867320, 3392, 1856, 2624, 576, 3648, 1088, 2112, 64, 3136, 1600, 2880, 832,
8683904, 1472, 2496, 448, 3520, 1984, 2688, 640, 3712, 1152, 2176, 128, 3200,
8691664, 2944, 896, 3968, 1280, 2304, 256, 3328, 1792, 2560, 512, 3584, 1024,
8702048};
871
872
diff --git a/apps/codecs/lib/mdct_lookup.h b/apps/codecs/lib/mdct_lookup.h
deleted file mode 100644
index 909b95ddbb..0000000000
--- a/apps/codecs/lib/mdct_lookup.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/********************************************************************
2 * *
3 * THIS FILE IS PART OF THE OggVorbis 'TREMOR' CODEC SOURCE CODE. *
4 * *
5 * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS *
6 * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE *
7 * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. *
8 * *
9 * THE OggVorbis 'TREMOR' SOURCE CODE IS (C) COPYRIGHT 1994-2002 *
10 * BY THE Xiph.Org FOUNDATION http://www.xiph.org/ *
11 * *
12 ********************************************************************
13
14 function: sin,cos lookup tables
15
16 ********************************************************************/
17
18
19extern const int32_t sincos_lookup0[1026];
20extern const int32_t sincos_lookup1[1024];
21extern const uint16_t revtab[1<<12];
22
23
24
diff --git a/apps/codecs/lib/osx.dummy.c b/apps/codecs/lib/osx.dummy.c
deleted file mode 100644
index e69de29bb2..0000000000
--- a/apps/codecs/lib/osx.dummy.c
+++ /dev/null