Diffstat (limited to 'lib/rbcodec/codecs/libmusepack/mpcdec_math.h')
-rw-r--r--  lib/rbcodec/codecs/libmusepack/mpcdec_math.h  231
1 file changed, 231 insertions, 0 deletions
diff --git a/lib/rbcodec/codecs/libmusepack/mpcdec_math.h b/lib/rbcodec/codecs/libmusepack/mpcdec_math.h
new file mode 100644
index 0000000000..955681f4e5
--- /dev/null
+++ b/lib/rbcodec/codecs/libmusepack/mpcdec_math.h
@@ -0,0 +1,231 @@
/*
  Copyright (c) 2005, The Musepack Development Team
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are
  met:

  * Redistributions of source code must retain the above copyright
    notice, this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above
    copyright notice, this list of conditions and the following
    disclaimer in the documentation and/or other materials provided
    with the distribution.

  * Neither the name of the The Musepack Development Team nor the
    names of its contributors may be used to endorse or promote
    products derived from this software without specific prior
    written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/// \file mpcdec_math.h
/// Libmpcdec internal math routines.

#ifndef _mpcdec_math_h_
#define _mpcdec_math_h_

#include "mpc_types.h"

#define MPC_FIXED_POINT_SHIFT 16

#ifdef MPC_FIXED_POINT

    #ifdef _WIN32_WCE
        #include <cmnintrin.h>
        #define MPC_HAVE_MULHIGH
    #endif

    typedef mpc_int64_t MPC_SAMPLE_FORMAT_MULTIPLY;

    #define MAKE_MPC_SAMPLE(X) (MPC_SAMPLE_FORMAT)((double)(X) * (double)(((mpc_int64_t)1)<<MPC_FIXED_POINT_FRACTPART))
    #define MAKE_MPC_SAMPLE_EX(X,Y) (MPC_SAMPLE_FORMAT)((double)(X) * (double)(((mpc_int64_t)1)<<(Y)))

    #define MPC_SHR_RND(X, Y) ((X+(1<<(Y-1)))>>Y)
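
    /* Worked example (informal sketch): MPC_FIXED_POINT_FRACTPART is defined
     * outside this header; assuming it is 14, consistent with the ">>14" in
     * the CPU-specific multiply routines below, MAKE_MPC_SAMPLE(0.5)
     * evaluates to 0.5 * (1 << 14) == 8192.  MPC_SHR_RND shifts right with
     * rounding to nearest instead of truncating: MPC_SHR_RND(7, 2) is
     * (7 + 2) >> 2 == 2, whereas a plain 7 >> 2 would give 1. */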

#if defined(CPU_COLDFIRE)

    #define MPC_MULTIPLY(X,Y)      mpc_multiply((X), (Y))
    #define MPC_MULTIPLY_EX(X,Y,Z) mpc_multiply_ex((X), (Y), (Z))

    static inline MPC_SAMPLE_FORMAT mpc_multiply(MPC_SAMPLE_FORMAT x,
                                                 MPC_SAMPLE_FORMAT y)
    {
        MPC_SAMPLE_FORMAT t1, t2;
        asm volatile (
            "mac.l %[x],%[y],%%acc0\n" /* multiply */
            "mulu.l %[y],%[x]      \n" /* get lower half, avoid emac stall */
            "movclr.l %%acc0,%[t1] \n" /* get higher half */
            "moveq.l #17,%[t2]     \n"
            "asl.l %[t2],%[t1]     \n" /* hi <<= 17, plus one free */
            "moveq.l #14,%[t2]     \n"
            "lsr.l %[t2],%[x]      \n" /* (unsigned)lo >>= 14 */
            "or.l %[x],%[t1]       \n" /* combine result */
            : /* outputs */
            [t1]"=&d"(t1),
            [t2]"=&d"(t2),
            [x] "+d" (x)
            : /* inputs */
            [y] "d" (y)
        );
        return t1;
    }

    static inline MPC_SAMPLE_FORMAT mpc_multiply_ex(MPC_SAMPLE_FORMAT x,
                                                    MPC_SAMPLE_FORMAT y,
                                                    unsigned shift)
    {
        MPC_SAMPLE_FORMAT t1, t2;
        asm volatile (
            "mac.l %[x],%[y],%%acc0\n" /* multiply */
            "mulu.l %[y],%[x]      \n" /* get lower half, avoid emac stall */
            "movclr.l %%acc0,%[t1] \n" /* get higher half */
            "moveq.l #31,%[t2]     \n"
            "sub.l %[sh],%[t2]     \n" /* t2 = 31 - shift */
            "ble.s 1f              \n"
            "asl.l %[t2],%[t1]     \n" /* hi <<= 31 - shift */
            "lsr.l %[sh],%[x]      \n" /* (unsigned)lo >>= shift */
            "or.l %[x],%[t1]       \n" /* combine result */
            "bra.s 2f              \n"
            "1:                    \n"
            "neg.l %[t2]           \n" /* t2 = shift - 31 */
            "asr.l %[t2],%[t1]     \n" /* hi >>= t2 */
            "2:                    \n"
            : /* outputs */
            [t1]"=&d"(t1),
            [t2]"=&d"(t2),
            [x] "+d" (x)
            : /* inputs */
            [y] "d" (y),
            [sh]"d" (shift)
        );
        return t1;
    }
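
    /* Portable sketch of what the two helpers above compute, assuming the
     * ColdFire EMAC is running in fractional mode (which is what the
     * "plus one free" shift comment relies on):
     *
     *     mpc_multiply(x, y)       ~ (MPC_SAMPLE_FORMAT)(((mpc_int64_t)x * y) >> 14)
     *     mpc_multiply_ex(x, y, s) ~ (MPC_SAMPLE_FORMAT)(((mpc_int64_t)x * y) >> s)
     *
     * Only the low 32 bits of the shifted 64-bit product are returned. */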
#elif defined(CPU_ARM)
    /* Calculate: result = (X*Y)>>14 */
    #define MPC_MULTIPLY(X,Y) \
    ({ \
        MPC_SAMPLE_FORMAT lo; \
        MPC_SAMPLE_FORMAT hi; \
        asm volatile ( \
            "smull %[lo], %[hi], %[x], %[y] \n\t"  /* multiply */ \
            "mov   %[lo], %[lo], lsr #14    \n\t"  /* lo >>= 14 */ \
            "orr   %[lo], %[lo], %[hi], lsl #18"   /* lo |= (hi << 18) */ \
            : [lo]"=&r"(lo), [hi]"=&r"(hi) \
            : [x]"r"(X), [y]"r"(Y)); \
        lo; \
    })

    /* Calculate: result = (X*Y)>>Z */
    #define MPC_MULTIPLY_EX(X,Y,Z) \
    ({ \
        MPC_SAMPLE_FORMAT lo; \
        MPC_SAMPLE_FORMAT hi; \
        asm volatile ( \
            "smull %[lo], %[hi], %[x], %[y]  \n\t"  /* multiply */ \
            "mov   %[lo], %[lo], lsr %[shr]  \n\t"  /* lo >>= Z */ \
            "orr   %[lo], %[lo], %[hi], lsl %[shl]" /* lo |= (hi << (32-Z)) */ \
            : [lo]"=&r"(lo), [hi]"=&r"(hi) \
            : [x]"r"(X), [y]"r"(Y), [shr]"r"(Z), [shl]"r"(32-Z)); \
        lo; \
    })
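
    /* On ARM, smull leaves the full 64-bit product in hi:lo, and the result
     * (X*Y)>>Z is reassembled in 32 bits as (lo >> Z) | (hi << (32-Z)).
     * With Z == 14, for instance, the low word supplies product bits 14..31
     * and the high word supplies product bits 32..45. */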
#else /* libmusepack standard */

    #define MPC_MULTIPLY_NOTRUNCATE(X,Y) \
        (((MPC_SAMPLE_FORMAT_MULTIPLY)(X) * (MPC_SAMPLE_FORMAT_MULTIPLY)(Y)) >> MPC_FIXED_POINT_FRACTPART)

    #define MPC_MULTIPLY_EX_NOTRUNCATE(X,Y,Z) \
        (((MPC_SAMPLE_FORMAT_MULTIPLY)(X) * (MPC_SAMPLE_FORMAT_MULTIPLY)(Y)) >> (Z))

    #ifdef _DEBUG
    static inline MPC_SAMPLE_FORMAT MPC_MULTIPLY(MPC_SAMPLE_FORMAT item1, MPC_SAMPLE_FORMAT item2)
    {
        MPC_SAMPLE_FORMAT_MULTIPLY temp = MPC_MULTIPLY_NOTRUNCATE(item1, item2);
        assert(temp == (MPC_SAMPLE_FORMAT_MULTIPLY)(MPC_SAMPLE_FORMAT)temp);
        return (MPC_SAMPLE_FORMAT)temp;
    }

    static inline MPC_SAMPLE_FORMAT MPC_MULTIPLY_EX(MPC_SAMPLE_FORMAT item1, MPC_SAMPLE_FORMAT item2, unsigned shift)
    {
        MPC_SAMPLE_FORMAT_MULTIPLY temp = MPC_MULTIPLY_EX_NOTRUNCATE(item1, item2, shift);
        assert(temp == (MPC_SAMPLE_FORMAT_MULTIPLY)(MPC_SAMPLE_FORMAT)temp);
        return (MPC_SAMPLE_FORMAT)temp;
    }
    #else
    #define MPC_MULTIPLY(X,Y)      ((MPC_SAMPLE_FORMAT)MPC_MULTIPLY_NOTRUNCATE(X,Y))
    #define MPC_MULTIPLY_EX(X,Y,Z) ((MPC_SAMPLE_FORMAT)MPC_MULTIPLY_EX_NOTRUNCATE(X,Y,Z))
    #endif
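
    /* The _DEBUG wrappers only add a range check: the assert fires when the
     * shifted 64-bit product no longer fits in MPC_SAMPLE_FORMAT.  Sketch,
     * assuming a 32-bit MPC_SAMPLE_FORMAT and MPC_FIXED_POINT_FRACTPART == 14:
     * MPC_MULTIPLY(1 << 20, 1 << 20) is (1LL << 40) >> 14 == 1 << 26 and
     * passes, while MPC_MULTIPLY(1 << 23, 1 << 23) would need 1LL << 32 and
     * trips the assert. */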

#endif

    #ifdef MPC_HAVE_MULHIGH
        #define MPC_MULTIPLY_FRACT(X,Y) _MulHigh(X,Y)
    #else
        #if defined(CPU_COLDFIRE)
        /* loses one bit of accuracy. The rest of the macros won't be as easy as this... */
        #define MPC_MULTIPLY_FRACT(X,Y) \
        ({ \
            MPC_SAMPLE_FORMAT t; \
            asm volatile ( \
                "mac.l    %[A], %[B], %%acc0\n\t" \
                "movclr.l %%acc0, %[t]      \n\t" \
                "asr.l    #1, %[t]          \n\t" \
                : [t] "=d" (t) \
                : [A] "r" ((X)), [B] "r" ((Y))); \
            t; \
        })
        #elif defined(CPU_ARM)
        /* Calculate: result = (X*Y)>>32, without need for >>32 */
        #define MPC_MULTIPLY_FRACT(X,Y) \
        ({ \
            MPC_SAMPLE_FORMAT lo; \
            MPC_SAMPLE_FORMAT hi; \
            asm volatile ( \
                "smull %[lo], %[hi], %[x], %[y]"  /* hi = result */ \
                : [lo]"=&r"(lo), [hi]"=&r"(hi) \
                : [x]"r"(X), [y]"r"(Y)); \
            hi; \
        })
        #else
        #define MPC_MULTIPLY_FRACT(X,Y) MPC_MULTIPLY_EX(X,Y,32)
        #endif
    #endif

    #define MPC_MAKE_FRACT_CONST(X) (MPC_SAMPLE_FORMAT)((X) * (double)(((mpc_int64_t)1)<<32))
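
    /* MPC_MAKE_FRACT_CONST pairs with MPC_MULTIPLY_FRACT above: the constant
     * is pre-scaled by 2^32 and the multiply keeps (roughly, on ColdFire) the
     * top 32 bits of the 64-bit product, so the scaling cancels.  Sketch:
     * with c = MPC_MAKE_FRACT_CONST(0.25) == 1 << 30,
     * MPC_MULTIPLY_FRACT(x, c) is about ((mpc_int64_t)x * (1 << 30)) >> 32,
     * i.e. x / 4. */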

    #define MPC_MULTIPLY_FLOAT_INT(X,Y) ((X)*(Y))

#else
    // in floating-point mode, decoded samples are in -1...1 range

    typedef float MPC_SAMPLE_FORMAT;

    #define MAKE_MPC_SAMPLE(X)      ((MPC_SAMPLE_FORMAT)(X))
    #define MAKE_MPC_SAMPLE_EX(X,Y) ((MPC_SAMPLE_FORMAT)(X))

    #define MPC_MULTIPLY_FRACT(X,Y) ((X)*(Y))
    #define MPC_MAKE_FRACT_CONST(X) (X)

    #define MPC_MULTIPLY_FLOAT_INT(X,Y) ((X)*(Y))
    #define MPC_MULTIPLY(X,Y)           ((X)*(Y))
    #define MPC_MULTIPLY_EX(X,Y,Z)      ((X)*(Y))

    #define MPC_SHR_RND(X, Y) (X)

#endif

#endif // _mpcdec_math_h_