summaryrefslogtreecommitdiff
path: root/apps/dsp.h
diff options
context:
space:
mode:
Diffstat (limited to 'apps/dsp.h')
-rw-r--r--apps/dsp.h80
1 file changed, 0 insertions, 80 deletions
diff --git a/apps/dsp.h b/apps/dsp.h
index 8c23c3053d..3d24b24245 100644
--- a/apps/dsp.h
+++ b/apps/dsp.h
@@ -64,86 +64,6 @@ enum {
64 DSP_CALLBACK_SET_STEREO_WIDTH 64 DSP_CALLBACK_SET_STEREO_WIDTH
65}; 65};
66 66
/* A bunch of fixed point assembler helper macros.
 *
 * All operands are S.31 fractional integers: the sign bit plus 31
 * fractional bits, i.e. the value represented is (int32 value) / 2^31.
 */
#if defined(CPU_COLDFIRE)
/* These macros use the Coldfire EMAC extension and need the MACSR flags set
 * to fractional mode with no rounding.
 */

/* Multiply two S.31 fractional integers and return the sign bit and the
 * 31 most significant bits of the result.
 */
#define FRACMUL(x, y) \
({ \
    long t; \
    asm ("mac.l %[a], %[b], %%acc0\n\t" \
         "movclr.l %%acc0, %[t]\n\t" \
         : [t] "=r" (t) : [a] "r" (x), [b] "r" (y)); \
    t; \
})

/* Multiply two S.31 fractional integers, and return the 32 most significant
 * bits after a shift left by the constant z. NOTE: Only works for shifts of
 * 1 to 8 on Coldfire! (The extension bits live in %accext01, so only 8 of
 * them can be recovered; the 0xff mask and the "moveq.l 8-z" shift count
 * below depend on that limit.)
 */
#define FRACMUL_SHL(x, y, z) \
({ \
    long t, t2; \
    asm ("mac.l %[a], %[b], %%acc0\n\t" \
         "moveq.l %[d], %[t]\n\t" \
         "move.l %%accext01, %[t2]\n\t" \
         "and.l %[mask], %[t2]\n\t" \
         "lsr.l %[t], %[t2]\n\t" \
         "movclr.l %%acc0, %[t]\n\t" \
         "asl.l %[c], %[t]\n\t" \
         "or.l %[t2], %[t]\n\t" \
         : [t] "=&d" (t), [t2] "=&d" (t2) \
         : [a] "r" (x), [b] "r" (y), [mask] "d" (0xff), \
           [c] "i" ((z)), [d] "i" (8 - (z))); \
    t; \
})

#elif defined(CPU_ARM)

/* Multiply two S.31 fractional integers and return the sign bit and the
 * 31 most significant bits of the result.
 */
#define FRACMUL(x, y) \
({ \
    long t, t2; \
    asm ("smull %[t], %[t2], %[a], %[b]\n\t" \
         "mov %[t2], %[t2], asl #1\n\t" \
         "orr %[t], %[t2], %[t], lsr #31\n\t" \
         : [t] "=&r" (t), [t2] "=&r" (t2) \
         : [a] "r" (x), [b] "r" (y)); \
    t; \
})

/* Multiply two S.31 fractional integers, and return the 32 most significant
 * bits after a shift left by the constant z.
 */
#define FRACMUL_SHL(x, y, z) \
({ \
    long t, t2; \
    asm ("smull %[t], %[t2], %[a], %[b]\n\t" \
         "mov %[t2], %[t2], asl %[c]\n\t" \
         "orr %[t], %[t2], %[t], lsr %[d]\n\t" \
         : [t] "=&r" (t), [t2] "=&r" (t2) \
         : [a] "r" (x), [b] "r" (y), \
           [c] "M" ((z) + 1), [d] "M" (31 - (z))); \
    t; \
})

#else

/* Portable C fallbacks: widen to 64 bits, multiply, then shift the
 * product back down. Equivalent to the assembler versions above.
 */
#define FRACMUL(x, y) (long) (((((long long) (x)) * ((long long) (y))) >> 31))
#define FRACMUL_SHL(x, y, z) \
((long)(((((long long) (x)) * ((long long) (y))) >> (31 - (z)))))

#endif

/* Fixed point divide: returns (x / y) as fixed point with z fractional
 * bits, computed in 64-bit precision. y must be nonzero; the caller is
 * responsible for ensuring the result fits in a long.
 */
#define DIV64(x, y, z) (long)(((long long)(x) << (z))/(y))
146
147struct dsp_config; 67struct dsp_config;
148 68
149int dsp_process(struct dsp_config *dsp, char *dest, 69int dsp_process(struct dsp_config *dsp, char *dest,