author | Michael Giacomelli <giac2000@hotmail.com> | 2008-03-08 21:26:03 +0000
---|---|---
committer | Michael Giacomelli <giac2000@hotmail.com> | 2008-03-08 21:26:03 +0000
commit | 75c7ac80e8540be0f581c3ad4d92b748f9649618 |
tree | ce4a202bdc521ce9dd4c9638447724d86df08a03 /apps |
parent | ae31160c1a6ff74378b7066317eadc23c0137e63 |
Commit FS#8670 by Andree Buschmann. Fixes a potential overflow issue in musepack files.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@16563 a1c6a512-1295-4272-9138-f99709370657
Diffstat (limited to 'apps')
-rw-r--r-- | apps/codecs/libmusepack/synth_filter.c | 82
1 file changed, 49 insertions(+), 33 deletions(-)
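The patch below gives the fixed-point synthesis filter one extra bit of headroom: samples entering the butterfly network are pre-shifted by one additional bit (OVERFLOW_FIX), the intermediate sums are carried at that reduced scale, and the results are shifted back up at the end of the last butterfly stage. As a rough illustration of the idea, here is a minimal standalone sketch, not the Rockbox code; `shr_rnd` only approximates what MPC_SHR_RND is assumed to do, and the sample/coefficient magnitudes are hypothetical:

```c
/* Illustration of the headroom trick applied by this patch (not the codec's
 * implementation).  Summing sixteen 32-bit product terms at full scale can
 * wrap a 32-bit accumulator; pre-shifting the inputs by one extra bit keeps
 * the intermediate sum in range at the cost of one bit of precision. */
#include <stdint.h>
#include <stdio.h>

#define OVERFLOW_FIX 1   /* same name as in the patch: one bit of headroom */

/* Rounding right shift; MPC_SHR_RND in the codec is assumed to behave
 * similarly, but this helper exists only for the demonstration. */
static int32_t shr_rnd(int32_t x, int n)
{
    return (x + (1 << (n - 1))) >> n;
}

/* Accumulate products at full scale, as MPC_MULTIPLY_V would.  The sum is
 * carried in unsigned arithmetic to avoid signed-overflow UB; converting the
 * out-of-range total back to int32_t shows the wrapped value a 32-bit
 * accumulator ends up with on a two's-complement target. */
static int32_t sum_full_scale(const int32_t *s, const int32_t *coef, int n)
{
    uint32_t acc = 0;
    for (int i = 0; i < n; i++)
        acc += (uint32_t)(shr_rnd(s[i], 12) * coef[i]);
    return (int32_t)acc;
}

/* Accumulate with one extra bit of pre-shift, as MPC_MULTIPLY_V_PRESCALE
 * does; the result still needs <<OVERFLOW_FIX (MPC_V_POSTSCALE) later. */
static int32_t sum_prescaled(const int32_t *s, const int32_t *coef, int n)
{
    int32_t acc = 0;
    for (int i = 0; i < n; i++)
        acc += shr_rnd(s[i], 12 + OVERFLOW_FIX) * coef[i];
    return acc;
}

int main(void)
{
    /* Hypothetical near-worst-case magnitudes, not taken from a real stream. */
    int32_t s[16], c[16];
    for (int i = 0; i < 16; i++) { s[i] = 1300000000; c[i] = 600; }

    int64_t exact = 0;
    for (int i = 0; i < 16; i++)
        exact += (int64_t)shr_rnd(s[i], 12) * c[i];

    printf("exact 64-bit sum        : %lld\n", (long long)exact);
    printf("32-bit sum, full scale  : %d  (wrapped)\n",   (int)sum_full_scale(s, c, 16));
    printf("32-bit sum, prescaled   : %d  (in range)\n",  (int)sum_prescaled(s, c, 16));
    return 0;
}
```

In the patch itself, restoring the scale (MPC_V_POSTSCALE / MPC_MULTIPLY_V_POSTSCALE) only happens after the following butterfly stage, where the INVCOS multiplications have already brought the magnitudes back down, so the restored values fit in 32 bits and the net cost is roughly one bit of precision in the fractional part, matching the comments added below.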
diff --git a/apps/codecs/libmusepack/synth_filter.c b/apps/codecs/libmusepack/synth_filter.c
index 978e48521b..aed5d75fb1 100644
--- a/apps/codecs/libmusepack/synth_filter.c
+++ b/apps/codecs/libmusepack/synth_filter.c
@@ -120,6 +120,9 @@ static const MPC_SAMPLE_FORMAT Di_opt [32] [16] ICONST_ATTR = {
 
 #undef _
 
+// needed to prevent from internal overflow in calculate_V
+#define OVERFLOW_FIX 1
+
 // V-coefficients were expanded (<<) by V_COEFFICIENT_EXPAND
 #define V_COEFFICIENT_EXPAND 27
 
@@ -129,6 +132,11 @@ static const MPC_SAMPLE_FORMAT Di_opt [32] [16] ICONST_ATTR = {
 // samples are rounded to +/- 2^19 as pre-shift before 32=32x32-multiply
 #define MPC_MULTIPLY_V(sample, vcoef) ( MPC_SHR_RND(sample, 12) * vcoef )
 
+// pre- and postscale are used to avoid internal overflow in synthesis calculation
+#define MPC_MULTIPLY_V_PRESCALE(sample, vcoef) ( MPC_SHR_RND(sample, (12+OVERFLOW_FIX)) * vcoef )
+#define MPC_MULTIPLY_V_POSTSCALE(sample, vcoef) ( MPC_SHR_RND(sample, (12-OVERFLOW_FIX)) * vcoef )
+#define MPC_V_POSTSCALE(sample) (sample<<OVERFLOW_FIX)
+
 // round to +/- 2^16 as pre-shift before 32=32x32-multiply
 #define MPC_MAKE_INVCOS(value) (MPC_SHR_RND(value, 15))
 #else
@@ -137,12 +145,20 @@ static const MPC_SAMPLE_FORMAT Di_opt [32] [16] ICONST_ATTR = {
 // Will loose 5bit accuracy on result in fract part without effect on final audio result
 #define MPC_MULTIPLY_V(sample, vcoef) ( (MPC_MULTIPLY_FRACT(sample, vcoef)) << (32-V_COEFFICIENT_EXPAND) )
 
+// pre- and postscale are used to avoid internal overflow in synthesis calculation
+#define MPC_MULTIPLY_V_PRESCALE(sample, vcoef) ( (MPC_MULTIPLY_FRACT(sample, vcoef)) << (32-V_COEFFICIENT_EXPAND-OVERFLOW_FIX) )
+#define MPC_MULTIPLY_V_POSTSCALE(sample, vcoef) ( (MPC_MULTIPLY_FRACT(sample, vcoef)) << (32-V_COEFFICIENT_EXPAND+OVERFLOW_FIX) )
+#define MPC_V_POSTSCALE(sample) (sample<<OVERFLOW_FIX)
+
 // directly use accurate 32bit-coefficients
 #define MPC_MAKE_INVCOS(value) (value)
 #endif
 #else
 // for floating point use the standard multiplication macro
 #define MPC_MULTIPLY_V(sample, vcoef) ( MPC_MULTIPLY(sample, vcoef) )
+#define MPC_MULTIPLY_V_PRESCALE(sample, vcoef) ( MPC_MULTIPLY(sample, vcoef) )
+#define MPC_MULTIPLY_V_POSTSCALE(sample, vcoef) ( MPC_MULTIPLY(sample, vcoef) )
+#define MPC_V_POSTSCALE(sample) (sample)
 
 // downscale the accurate 32bit-coefficients and convert to float
 #define MPC_MAKE_INVCOS(value) MAKE_MPC_SAMPLE((double)value/(double)(1<<V_COEFFICIENT_EXPAND))
@@ -294,22 +310,22 @@ mpc_calculate_new_V ( const MPC_SAMPLE_FORMAT * Sample, MPC_SAMPLE_FORMAT * V )
 V[42] = tmp - A[10] - A[11];
 // 9 adds, 9 subs
 
-A[ 0] = MPC_MULTIPLY_V((Sample[ 0] - Sample[31]), INVCOS01);
-A[ 1] = MPC_MULTIPLY_V((Sample[ 1] - Sample[30]), INVCOS03);
-A[ 2] = MPC_MULTIPLY_V((Sample[ 2] - Sample[29]), INVCOS05);
-A[ 3] = MPC_MULTIPLY_V((Sample[ 3] - Sample[28]), INVCOS07);
-A[ 4] = MPC_MULTIPLY_V((Sample[ 4] - Sample[27]), INVCOS09);
-A[ 5] = MPC_MULTIPLY_V((Sample[ 5] - Sample[26]), INVCOS11);
-A[ 6] = MPC_MULTIPLY_V((Sample[ 6] - Sample[25]), INVCOS13);
-A[ 7] = MPC_MULTIPLY_V((Sample[ 7] - Sample[24]), INVCOS15);
-A[ 8] = MPC_MULTIPLY_V((Sample[ 8] - Sample[23]), INVCOS17);
-A[ 9] = MPC_MULTIPLY_V((Sample[ 9] - Sample[22]), INVCOS19);
-A[10] = MPC_MULTIPLY_V((Sample[10] - Sample[21]), INVCOS21);
-A[11] = MPC_MULTIPLY_V((Sample[11] - Sample[20]), INVCOS23);
-A[12] = MPC_MULTIPLY_V((Sample[12] - Sample[19]), INVCOS25);
-A[13] = MPC_MULTIPLY_V((Sample[13] - Sample[18]), INVCOS27);
-A[14] = MPC_MULTIPLY_V((Sample[14] - Sample[17]), INVCOS29);
-A[15] = MPC_MULTIPLY_V((Sample[15] - Sample[16]), INVCOS31);
+A[ 0] = MPC_MULTIPLY_V_PRESCALE((Sample[ 0] - Sample[31]), INVCOS01);
+A[ 1] = MPC_MULTIPLY_V_PRESCALE((Sample[ 1] - Sample[30]), INVCOS03);
+A[ 2] = MPC_MULTIPLY_V_PRESCALE((Sample[ 2] - Sample[29]), INVCOS05);
+A[ 3] = MPC_MULTIPLY_V_PRESCALE((Sample[ 3] - Sample[28]), INVCOS07);
+A[ 4] = MPC_MULTIPLY_V_PRESCALE((Sample[ 4] - Sample[27]), INVCOS09);
+A[ 5] = MPC_MULTIPLY_V_PRESCALE((Sample[ 5] - Sample[26]), INVCOS11);
+A[ 6] = MPC_MULTIPLY_V_PRESCALE((Sample[ 6] - Sample[25]), INVCOS13);
+A[ 7] = MPC_MULTIPLY_V_PRESCALE((Sample[ 7] - Sample[24]), INVCOS15);
+A[ 8] = MPC_MULTIPLY_V_PRESCALE((Sample[ 8] - Sample[23]), INVCOS17);
+A[ 9] = MPC_MULTIPLY_V_PRESCALE((Sample[ 9] - Sample[22]), INVCOS19);
+A[10] = MPC_MULTIPLY_V_PRESCALE((Sample[10] - Sample[21]), INVCOS21);
+A[11] = MPC_MULTIPLY_V_PRESCALE((Sample[11] - Sample[20]), INVCOS23);
+A[12] = MPC_MULTIPLY_V_PRESCALE((Sample[12] - Sample[19]), INVCOS25);
+A[13] = MPC_MULTIPLY_V_PRESCALE((Sample[13] - Sample[18]), INVCOS27);
+A[14] = MPC_MULTIPLY_V_PRESCALE((Sample[14] - Sample[17]), INVCOS29);
+A[15] = MPC_MULTIPLY_V_PRESCALE((Sample[15] - Sample[16]), INVCOS31);
 // 16 subs, 16 muls, 16 shifts
 
 B[ 0] = A[ 0] + A[15];
@@ -366,22 +382,22 @@ mpc_calculate_new_V ( const MPC_SAMPLE_FORMAT * Sample, MPC_SAMPLE_FORMAT * V )
 B[15] = MPC_MULTIPLY_V((A[13] - A[14]), INVCOS24);
 // 8 adds, 8 subs, 8 muls, 8 shift
 
-A[ 0] = B[ 0] + B[ 1];
-A[ 1] = MPC_MULTIPLY_V((B[ 0] - B[ 1]), INVCOS16);
-A[ 2] = B[ 2] + B[ 3];
-A[ 3] = MPC_MULTIPLY_V((B[ 2] - B[ 3]), INVCOS16);
-A[ 4] = B[ 4] + B[ 5];
-A[ 5] = MPC_MULTIPLY_V((B[ 4] - B[ 5]), INVCOS16);
-A[ 6] = B[ 6] + B[ 7];
-A[ 7] = MPC_MULTIPLY_V((B[ 6] - B[ 7]), INVCOS16);
-A[ 8] = B[ 8] + B[ 9];
-A[ 9] = MPC_MULTIPLY_V((B[ 8] - B[ 9]), INVCOS16);
-A[10] = B[10] + B[11];
-A[11] = MPC_MULTIPLY_V((B[10] - B[11]), INVCOS16);
-A[12] = B[12] + B[13];
-A[13] = MPC_MULTIPLY_V((B[12] - B[13]), INVCOS16);
-A[14] = B[14] + B[15];
-A[15] = MPC_MULTIPLY_V((B[14] - B[15]), INVCOS16);
+A[ 0] = MPC_V_POSTSCALE((B[ 0] + B[ 1]));
+A[ 1] = MPC_MULTIPLY_V_POSTSCALE((B[ 0] - B[ 1]), INVCOS16);
+A[ 2] = MPC_V_POSTSCALE((B[ 2] + B[ 3]));
+A[ 3] = MPC_MULTIPLY_V_POSTSCALE((B[ 2] - B[ 3]), INVCOS16);
+A[ 4] = MPC_V_POSTSCALE((B[ 4] + B[ 5]));
+A[ 5] = MPC_MULTIPLY_V_POSTSCALE((B[ 4] - B[ 5]), INVCOS16);
+A[ 6] = MPC_V_POSTSCALE((B[ 6] + B[ 7]));
+A[ 7] = MPC_MULTIPLY_V_POSTSCALE((B[ 6] - B[ 7]), INVCOS16);
+A[ 8] = MPC_V_POSTSCALE((B[ 8] + B[ 9]));
+A[ 9] = MPC_MULTIPLY_V_POSTSCALE((B[ 8] - B[ 9]), INVCOS16);
+A[10] = MPC_V_POSTSCALE((B[10] + B[11]));
+A[11] = MPC_MULTIPLY_V_POSTSCALE((B[10] - B[11]), INVCOS16);
+A[12] = MPC_V_POSTSCALE((B[12] + B[13]));
+A[13] = MPC_MULTIPLY_V_POSTSCALE((B[12] - B[13]), INVCOS16);
+A[14] = MPC_V_POSTSCALE((B[14] + B[15]));
+A[15] = MPC_MULTIPLY_V_POSTSCALE((B[14] - B[15]), INVCOS16);
 // 8 adds, 8 subs, 8 muls, 8 shift
 
 // multiple used expressions: A[ 4]+A[ 6]+A[ 7], A[ 9]+A[13]+A[15]
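A note on the two fixed-point branches: the MPC_SHR_RND variant builds the headroom into the pre-shift of the sample (12 becomes 12+OVERFLOW_FIX for the prescale and 12-OVERFLOW_FIX for the postscale multiply), while the MPC_MULTIPLY_FRACT variant instead adjusts the left shift applied to the product by -/+OVERFLOW_FIX. Both amount to the same net factor-of-two scaling relative to MPC_MULTIPLY_V, as the standalone check below suggests (hypothetical numbers, not codec data; the product-side adjustment is modelled here as a plain shift of the result):

```c
/* Quick check (illustration only) that shifting the input one bit further
 * right before the multiply, or shifting the product one bit less far left
 * after it, both halve the result relative to the unmodified macro. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int64_t sample = 5000000;   /* hypothetical values */
    const int64_t vcoef  = 3 << 20;

    int64_t v_normal        = (sample >> 12) * vcoef;        /* MPC_MULTIPLY_V-style      */
    int64_t v_prescale_in   = (sample >> 13) * vcoef;        /* extra shift on the input  */
    int64_t v_prescale_out  = ((sample >> 12) * vcoef) >> 1; /* extra shift on the output */

    printf("normal             : %lld\n", (long long)v_normal);
    printf("prescaled (input)  : %lld\n", (long long)v_prescale_in);
    printf("prescaled (output) : %lld\n", (long long)v_prescale_out);
    return 0;
}
```

The floating-point build defines MPC_MULTIPLY_V_PRESCALE, MPC_MULTIPLY_V_POSTSCALE and MPC_V_POSTSCALE as pass-throughs, so the shared filter code in mpc_calculate_new_V compiles unchanged there and the headroom bit only affects the fixed-point targets.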