summaryrefslogtreecommitdiff
path: root/lib/rbcodec/codecs/libspc/cpu/spc_dsp_armv6.c
diff options
context:
space:
mode:
authorMichael Sevakis <jethead71@rockbox.org>2013-05-18 01:45:03 -0400
committerMichael Sevakis <jethead71@rockbox.org>2013-05-21 00:02:14 -0400
commit87021f7c0ac4620eafd185ff11905ee643f72b6c (patch)
tree03ae48f3d999cd8743af40cc5df933f64f6df2d2 /lib/rbcodec/codecs/libspc/cpu/spc_dsp_armv6.c
parenta17d6de5bc727b0bb55764ecef2605ae689e8dab (diff)
downloadrockbox-87021f7c0ac4620eafd185ff11905ee643f72b6c.tar.gz
rockbox-87021f7c0ac4620eafd185ff11905ee643f72b6c.zip
SPC Codec: Refactor for CPU and clean up some things.
CPU optimization gets its own files in which to fill-in optimizable routines. Some pointless #if 0's for profiling need removal. Those macros are empty if not profiling. Force some functions that are undesirable to be force-inlined by the compiler to be not inlined. Change-Id: Ia7b7e45380d7efb20c9b1a4d52e05db3ef6bbaab
Diffstat (limited to 'lib/rbcodec/codecs/libspc/cpu/spc_dsp_armv6.c')
-rw-r--r--lib/rbcodec/codecs/libspc/cpu/spc_dsp_armv6.c244
1 files changed, 244 insertions, 0 deletions
diff --git a/lib/rbcodec/codecs/libspc/cpu/spc_dsp_armv6.c b/lib/rbcodec/codecs/libspc/cpu/spc_dsp_armv6.c
new file mode 100644
index 0000000000..2e3de87613
--- /dev/null
+++ b/lib/rbcodec/codecs/libspc/cpu/spc_dsp_armv6.c
@@ -0,0 +1,244 @@
1/***************************************************************************
2 * __________ __ ___.
3 * Open \______ \ ____ ____ | | _\_ |__ _______ ___
4 * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
5 * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
6 * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
7 * \/ \/ \/ \/ \/
8 * $Id$
9 *
10 * Copyright (C) 2010 Michael Sevakis (jhMikeS)
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version 2
15 * of the License, or (at your option) any later version.
16 *
17 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
18 * KIND, either express or implied.
19 *
20 ****************************************************************************/
21#if !SPC_NOINTERP
22
#define SPC_GAUSSIAN_FAST_INTERP
/* Fast-path 4-tap Gaussian interpolation.
 *
 * samples : source sample buffer; the integer part of 'position'
 *           (>> 12, i.e. 20.12 fixed point) selects the first tap
 * fwd     : two forward Gaussian coefficients packed as halfwords (f0,f1)
 * rev     : two reverse Gaussian coefficients packed as halfwords (r0,r1)
 *
 * Returns the raw, unscaled dot product f0*i0 + f1*i1 + r1*i2 + r0*i3.
 * Scaling/clamping is left to the *_amp step.
 *
 * Uses the ARMv6 dual 16-bit MACs: SMUAD multiplies both halfword pairs
 * and adds the products; SMLADX does the same with the second operand's
 * halfwords swapped (hence r1*i2 + r0*i3), accumulating into 'out'.
 */
static inline int gaussian_fast_interp( int16_t const* samples,
                                        int32_t position,
                                        int16_t const* fwd,
                                        int16_t const* rev )
{
    int output;
    int t0, t1, t2, t3;

    asm volatile (
    /* NOTE: often-unaligned accesses; each 32-bit LDR fetches two
     * packed int16 values */
    "ldr    %[t0], [%[samp]]             \n" /* t0=i0i1 */
    "ldr    %[t2], [%[fwd]]              \n" /* t2=f0f1 */
    "ldr    %[t1], [%[samp], #4]         \n" /* t1=i2i3 */
    "ldr    %[t3], [%[rev]]              \n" /* t3=r0r1 */
    "smuad  %[out], %[t0], %[t2]         \n" /* out=f0*i0+f1*i1 */
    "smladx %[out], %[t1], %[t3], %[out] \n" /* out+=r1*i2+r0*i3 */
    : [out]"=r"(output),
      [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=r"(t3)
    : [fwd]"r"(fwd), [rev]"r"(rev),
      [samp]"r"(samples + (position >> 12)));

    return output;
}
47
#define SPC_GAUSSIAN_FAST_AMP
/* Fast-path envelope/volume scaling of the interpolator output.
 *
 * voice   : provides envx (envelope) and volume[0/1] (L/R channel volumes)
 * output  : raw dot product from gaussian_fast_interp
 * amp_0/1 : out-params receiving the left/right scaled amplitudes
 *
 * Returns the envelope-scaled sample (>> 5 to drop the extra headroom).
 *
 * The first shift is asr #(11-5) rather than asr #11: the value is kept
 * 5 bits "too big" so that SMULWB's implicit >> 16 still leaves the
 * desired precision when multiplying by the channel volumes below.
 */
static inline int gaussian_fast_amp( struct voice_t* voice, int output,
                                     int* amp_0, int* amp_1 )
{
    int t0;

    asm volatile (
    "mov    %[t0], %[out], asr #(11-5)   \n" /* To do >> 16 below */
    "mul    %[out], %[t0], %[envx]       \n"
    : [out]"+r"(output), [t0]"=&r"(t0)
    : [envx]"r"((int) voice->envx));

    asm volatile (
    /* SMULWB: 32x16 multiply, top 32 bits of the 48-bit product */
    "smulwb %[a0], %[out], %[v0]         \n" /* amp * vol >> 16 */
    "smulwb %[a1], %[out], %[v1]         \n"
    : [a0]"=&r"(*amp_0), [a1]"=r"(*amp_1)
    : [out]"r"(output),
      [v0]"r"(voice->volume [0]),
      [v1]"r"(voice->volume [1]));

    return output >> 5; /* 'output' still 5 bits too big */
}
70
#define SPC_GAUSSIAN_SLOW_INTERP
/* Slow-path 4-tap Gaussian interpolation.
 *
 * Same inputs as gaussian_fast_interp, but each 16x16 product is formed
 * individually (SMULBB/SMULTT/SMULBT/SMULTB pick bottom/top halfwords)
 * and shifted down by 12 per term before summing, with the final add
 * saturated to 16 bits — matching the reference DSP's overflow behavior
 * that the fast path ignores.
 *
 * Returns the interpolated value in the low halfword; the caller is
 * expected to sign-extend it (see trailing comment).
 */
static inline int gaussian_slow_interp( int16_t const* samples,
                                        int32_t position,
                                        int16_t const* fwd,
                                        int16_t const* rev )
{
    int output;
    int t0, t1, t2, t3;

    asm volatile (
    /* NOTE: often-unaligned accesses */
    "ldr    %[t0], [%[samp]]             \n" /* t0=i0i1 */
    "ldr    %[t2], [%[fwd]]              \n" /* t2=f0f1 */
    "ldr    %[t1], [%[samp], #4]         \n" /* t1=i2i3 */
    "ldr    %[t3], [%[rev]]              \n" /* t3=f2f3 */
    "smulbb %[out], %[t0], %[t2]         \n" /* out=f0*i0 */
    "smultt %[t0], %[t0], %[t2]          \n" /* t0=f1*i1 */
    "smulbt %[t2], %[t1], %[t3]          \n" /* t2=r1*i2 */
    "smultb %[t3], %[t1], %[t3]          \n" /* t3=r0*i3 */
    : [out]"=r"(output),
      [t0]"=&r"(t0), [t1]"=&r"(t1), [t2]"=&r"(t2), [t3]"=r"(t3)
    : [fwd]"r"(fwd), [rev]"r"(rev),
      [samp]"r"(samples + (position >> 12)));

    asm volatile (
    /* Sum the first three terms with per-term >> 12, then add the last
     * term with 16-bit saturation:
     *   PKHBT packs (t2 partial sum, t3 term) into one register,
     *   SADD16 doubles both halves, and QSUBADDX's cross add produces
     *   sat16(t3 + t2) in the low halfword. */
    "mov    %[out], %[out], asr #12      \n"
    "add    %[t0], %[out], %[t0], asr #12 \n"
    "add    %[t2], %[t0], %[t2], asr #12 \n"
    "pkhbt  %[t0], %[t2], %[t3], asl #4  \n" /* t3[31:16], t2[15:0] */
    "sadd16 %[t0], %[t0], %[t0]          \n" /* t3[31:16]*2, t2[15:0]*2 */
    "qsubaddx %[out], %[t0], %[t0]       \n" /* out[15:0]=
                                              * sat16(t3[31:16]+t2[15:0]) */
    : [out]"+r"(output),
      [t0]"+r"(t0), [t2]"+r"(t2), [t3]"+r"(t3));

    /* output will be sign-extended in next step */
    return output;
}
109
#define SPC_GAUSSIAN_SLOW_AMP
/* Slow-path envelope/volume scaling.
 *
 * voice   : provides envx and volume[0/1]
 * output  : interpolated value (low halfword significant — SMULBB below
 *           also sign-extends it as a side effect of using halfwords)
 * amp_0/1 : out-params for the left/right channel amplitudes
 *
 * Returns the envelope-scaled sample. The BIC clears bit 0 after the
 * >> 11, reproducing the reference DSP's truncation of the LSB before
 * the per-channel volume multiplies.
 */
static inline int gaussian_slow_amp( struct voice_t* voice, int output,
                                     int* amp_0, int* amp_1 )
{
    asm volatile (
    "smulbb %[out], %[out], %[envx]"
    : [out]"+r"(output)
    : [envx]"r"(voice->envx));

    asm volatile (
    "mov    %[out], %[out], asr #11      \n"
    "bic    %[out], %[out], #0x1         \n" /* drop LSB like the real DSP */
    "smulbb %[amp_0], %[out], %[v0]      \n"
    "smulbb %[amp_1], %[out], %[v1]      \n"
    : [out]"+r"(output),
      [amp_0]"=&r"(*amp_0), [amp_1]"=r"(*amp_1)
    : [v0]"r"(voice->volume[0]), [v1]"r"(voice->volume[1]));

    return output;
}
130
131#endif /* !SPC_NOINTERP */
132
133#if !SPC_NOECHO
134
#define SPC_DSP_ECHO_APPLY

/* Echo filter history: ring buffer of packed stereo samples consumed by
 * echo_apply(). The alignment allows the write pointer to be wrapped
 * with a single BIC of ~FIR_BUF_MASK instead of a compare-and-reset. */
static int32_t fir_buf[FIR_BUF_CNT] IBSS_ATTR_SPC
    __attribute__(( aligned(FIR_BUF_ALIGN*1) ));
140
141static inline void echo_init( struct Spc_Dsp* this )
142{
143 this->fir.ptr = fir_buf;
144 ci->memset( fir_buf, 0, sizeof fir_buf );
145}
146
/* Run one stereo sample through the 8-tap echo FIR filter.
 *
 * this     : DSP state (fir.ptr = history write pointer, fir.coeff = the
 *            8 packed filter coefficients)
 * echo_ptr : points at the newest packed L/R sample pair in the echo RAM
 * out_0/1  : out-params receiving the left/right filter accumulators
 *
 * NOTE(review): both asm statements load/store through pointers without a
 * "memory" clobber; 'volatile' keeps them from being deleted but not from
 * being reordered against other memory ops — confirm this is intentional.
 */
static inline void echo_apply(struct Spc_Dsp* this,
                              uint8_t* const echo_ptr, int* out_0, int* out_1)
{
    /* Keep last 8 samples: push the new sample into the ring, advancing
     * and wrapping fir.ptr via a single BIC (see fir_buf alignment). */
    int32_t* fir_ptr;
    int t0;
    asm volatile (
    "ldr    %[t0], [%[ep]]               \n"
    "add    %[p], %[t_p], #4             \n"
    "bic    %[t_p], %[p], %[mask]        \n"
    "str    %[t0], [%[p], #-4]           \n"
    /* duplicate at +8 eliminates wrap checking below */
    "str    %[t0], [%[p], #28]           \n"
    : [p]"=&r"(fir_ptr), [t_p]"+r"(this->fir.ptr),
      [t0]"=&r"(t0)
    : [ep]"r"(echo_ptr), [mask]"i"(~FIR_BUF_MASK));

    int32_t* fir_coeff = (int32_t *)this->fir.coeff;

    /* Fully unrolled 8-tap MAC. Each history word holds a packed L/R
     * pair; PKHBT/PKHTB regroup pairs of them into all-L and all-R
     * registers so SMUAD/SMLAD can do two taps per multiply. */
    asm volatile (                           /* L0R0 = acc0 */
    "ldmia  %[p]!, { r2-r5 }             \n" /* L1R1-L4R4 = r2-r5 */
    "ldmia  %[c]!, { r0-r1 }             \n" /* C0C1-C2C3 = r0-r1 */
    "pkhbt  %[acc0], %[t0], r2, asl #16  \n" /* L0R0,L1R1->L0L1,R0R1 */
    "pkhtb  r2, r2, %[t0], asr #16       \n"
    "smuad  %[acc0], %[acc0], r0         \n" /* acc0=L0*C0+L1*C1 */
    "smuad  %[acc1], r2, r0              \n" /* acc1=R0*C0+R1*C1 */
    "pkhbt  %[t0], r3, r4, asl #16       \n" /* L2R2,L3R3->L2L3,R2R3 */
    "pkhtb  r4, r4, r3, asr #16          \n"
    "smlad  %[acc0], %[t0], r1, %[acc0]  \n" /* acc0+=L2*C2+L3*C3 */
    "smlad  %[acc1], r4, r1, %[acc1]     \n" /* acc1+=R2*C2+R3*C3 */
    "ldmia  %[p], { r2-r4 }              \n" /* L5R5-L7R7 = r2-r4 */
    "ldmia  %[c], { r0-r1 }              \n" /* C4C5-C6C7 = r0-r1 */
    "pkhbt  %[t0], r5, r2, asl #16       \n" /* L4R4,L5R5->L4L5,R4R5 */
    "pkhtb  r2, r2, r5, asr #16          \n"
    "smlad  %[acc0], %[t0], r0, %[acc0]  \n" /* acc0+=L4*C4+L5*C5 */
    "smlad  %[acc1], r2, r0, %[acc1]     \n" /* acc1+=R4*C4+R5*C5 */
    "pkhbt  %[t0], r3, r4, asl #16       \n" /* L6R6,L7R7->L6L7,R6R7 */
    "pkhtb  r4, r4, r3, asr #16          \n"
    "smlad  %[acc0], %[t0], r1, %[acc0]  \n" /* acc0+=L6*C6+L7*C7 */
    "smlad  %[acc1], r4, r1, %[acc1]     \n" /* acc1+=R6*C6+R7*C7 */
    : [t0]"+r"(t0), [acc0]"=&r"(*out_0), [acc1]"=&r"(*out_1),
      [p]"+r"(fir_ptr), [c]"+r"(fir_coeff)
    :
    : "r0", "r1", "r2", "r3", "r4", "r5");
}
192
#define SPC_DSP_ECHO_FEEDBACK
/* Mix feedback into the echo buffer sample.
 *
 * echo_ptr  : destination in echo RAM for the packed L/R result
 * echo_0/1  : incoming echo samples (scaled up by << 7 for headroom)
 * fb_0/1    : filter outputs fed back, weighted by r.g.echo_feedback
 *
 * Each channel computes (echo << 7) + fb * echo_feedback, then is
 * saturated to 16 bits after >> 14 (SSAT) and the two halves are packed
 * into one 32-bit store.
 *
 * NOTE(review): the second asm writes *echo_ptr without a "memory"
 * clobber — volatile only, see echo_apply note.
 */
static inline void echo_feedback(struct Spc_Dsp* this, uint8_t* echo_ptr,
                                 int echo_0, int echo_1, int fb_0, int fb_1)
{
    int e0, e1;
    asm volatile (
    "mov    %[e0], %[ei0], asl #7        \n"
    "mov    %[e1], %[ei1], asl #7        \n"
    "mla    %[e0], %[fb0], %[ef], %[e0]  \n"
    "mla    %[e1], %[fb1], %[ef], %[e1]  \n"
    : [e0]"=&r"(e0), [e1]"=&r"(e1)
    : [ei0]"r"(echo_0), [ei1]"r"(echo_1),
      [fb0]"r"(fb_0), [fb1]"r"(fb_1),
      [ef]"r"((int)this->r.g.echo_feedback));
    asm volatile (
    "ssat   %[e0], #16, %[e0], asr #14   \n" /* clamp to [-32768,32767] */
    "ssat   %[e1], #16, %[e1], asr #14   \n"
    "pkhbt  %[e0], %[e0], %[e1], lsl #16 \n" /* pack L low, R high */
    "str    %[e0], [%[ep]]               \n"
    : [e0]"+r"(e0), [e1]"+r"(e1)
    : [ep]"r"((int32_t *)echo_ptr));
}
215
#define SPC_DSP_GENERATE_OUTPUT
/* Produce the final stereo output sample.
 *
 * Per channel: out = (global_vol * chans + echo_volume * fb) >> global_muting
 *
 * global_muting : right-shift applied to the final mix (register-count
 *                 shift, so it may be any runtime value)
 * global_vol_*  : main volume per channel
 * chans_*       : summed voice output per channel
 * fb_*          : echo filter output per channel, weighted by
 *                 r.g.echo_volume_0/1
 * out_0/1      : out-params receiving the left/right results
 */
static inline void echo_output( struct Spc_Dsp* this, int global_muting,
    int global_vol_0, int global_vol_1, int chans_0, int chans_1,
    int fb_0, int fb_1, int* out_0, int* out_1 )
{
    int t0, t1;

    asm volatile (
    "mul    %[t0], %[gv0], %[ch0]        \n" /* t = main vol * voices */
    "mul    %[t1], %[gv1], %[ch1]        \n"
    : [t0]"=&r"(t0), [t1]"=r"(t1)
    : [gv0]"r"(global_vol_0), [gv1]"r"(global_vol_1),
      [ch0]"r"(chans_0), [ch1]"r"(chans_1));
    asm volatile (
    "mla    %[t0], %[i0], %[ev0], %[t0]  \n" /* t += echo vol * echo */
    "mla    %[t1], %[i1], %[ev1], %[t1]  \n"
    : [t0]"+r"(t0), [t1]"+r"(t1)
    : [i0]"r"(fb_0), [i1]"r"(fb_1),
      [ev0]"r"((int)this->r.g.echo_volume_0),
      [ev1]"r"((int)this->r.g.echo_volume_1));
    asm volatile (
    "mov    %[o0], %[t0], asr %[gm]      \n" /* scale down / mute */
    "mov    %[o1], %[t1], asr %[gm]      \n"
    : [o0]"=&r"(*out_0), [o1]"=r"(*out_1)
    : [t0]"r"(t0), [t1]"r"(t1),
      [gm]"r"(global_muting));
}
243
244#endif /* SPC_NOECHO */