diff options
Diffstat (limited to 'apps/dsp.c')
-rw-r--r-- | apps/dsp.c | 34 |
1 file changed, 13 insertions, 21 deletions
diff --git a/apps/dsp.c b/apps/dsp.c index d27df0500c..cca94ae075 100644 --- a/apps/dsp.c +++ b/apps/dsp.c | |||
@@ -112,7 +112,7 @@ struct crossfeed_data | |||
112 | int32_t coefs[3]; /* 04h - Coefficients for the shelving filter */ | 112 | int32_t coefs[3]; /* 04h - Coefficients for the shelving filter */ |
113 | int32_t history[4]; /* 10h - Format is x[n - 1], y[n - 1] for both channels */ | 113 | int32_t history[4]; /* 10h - Format is x[n - 1], y[n - 1] for both channels */ |
114 | int32_t delay[13][2]; /* 20h */ | 114 | int32_t delay[13][2]; /* 20h */ |
115 | int index; /* 88h - Current index/pointer into the delay line */ | 115 | int32_t *index; /* 88h - Current pointer into the delay line */ |
116 | /* 8ch */ | 116 | /* 8ch */ |
117 | }; | 117 | }; |
118 | 118 | ||
@@ -138,10 +138,6 @@ typedef int (*resample_fn_type)(int count, struct dsp_data *data, | |||
138 | int32_t *src[], int32_t *dst[]); | 138 | int32_t *src[], int32_t *dst[]); |
139 | typedef void (*sample_output_fn_type)(int count, struct dsp_data *data, | 139 | typedef void (*sample_output_fn_type)(int count, struct dsp_data *data, |
140 | int32_t *src[], int16_t *dst); | 140 | int32_t *src[], int16_t *dst); |
141 | /* If ACF_SWITCHPARAM is no longer needed, make apply_crossfeed of type | ||
142 | channels_process_fn_type since it is really just that */ | ||
143 | typedef void (*apply_crossfeed_fn_type)(ACF_SWITCHPARAM(int count, | ||
144 | int32_t *buf[])); | ||
145 | typedef void (*channels_process_fn_type)(int count, int32_t *buf[]); | 141 | typedef void (*channels_process_fn_type)(int count, int32_t *buf[]); |
146 | 142 | ||
147 | /* | 143 | /* |
@@ -164,7 +160,7 @@ struct dsp_config | |||
164 | sample_output_fn_type output_samples; | 160 | sample_output_fn_type output_samples; |
165 | /* These will be NULL for the voice codec and is more economical that | 161 | /* These will be NULL for the voice codec and is more economical that |
166 | way */ | 162 | way */ |
167 | apply_crossfeed_fn_type apply_crossfeed; | 163 | channels_process_fn_type apply_crossfeed; |
168 | channels_process_fn_type channels_process; | 164 | channels_process_fn_type channels_process; |
169 | }; | 165 | }; |
170 | 166 | ||
@@ -175,14 +171,10 @@ static struct dither_data dither_data[2] IBSS_ATTR; /* 0=left, 1=right */ | |||
175 | static long dither_mask IBSS_ATTR; | 171 | static long dither_mask IBSS_ATTR; |
176 | static long dither_bias IBSS_ATTR; | 172 | static long dither_bias IBSS_ATTR; |
177 | /* Crossfeed */ | 173 | /* Crossfeed */ |
178 | #ifdef DSP_CROSSFEED_DELAY_PTR | ||
179 | struct crossfeed_data crossfeed_data IDATA_ATTR = /* A */ | 174 | struct crossfeed_data crossfeed_data IDATA_ATTR = /* A */ |
180 | { | 175 | { |
181 | .index = (intptr_t)crossfeed_data.delay | 176 | .index = (int32_t *)crossfeed_data.delay |
182 | }; | 177 | }; |
183 | #else | ||
184 | struct crossfeed_data crossfeed_data IBSS_ATTR; /* A */ | ||
185 | #endif | ||
186 | 178 | ||
187 | /* Equalizer */ | 179 | /* Equalizer */ |
188 | static struct eq_state eq_data; /* A/V */ | 180 | static struct eq_state eq_data; /* A/V */ |
@@ -719,7 +711,7 @@ static void apply_crossfeed(int count, int32_t *buf[]) | |||
719 | int32_t *delay = &crossfeed_data.delay[0][0]; | 711 | int32_t *delay = &crossfeed_data.delay[0][0]; |
720 | int32_t *coefs = &crossfeed_data.coefs[0]; | 712 | int32_t *coefs = &crossfeed_data.coefs[0]; |
721 | int32_t gain = crossfeed_data.gain; | 713 | int32_t gain = crossfeed_data.gain; |
722 | int di = crossfeed_data.index; | 714 | int32_t *di = crossfeed_data.index; |
723 | 715 | ||
724 | int32_t acc; | 716 | int32_t acc; |
725 | int32_t left, right; | 717 | int32_t left, right; |
@@ -731,28 +723,28 @@ static void apply_crossfeed(int count, int32_t *buf[]) | |||
731 | right = buf[1][i]; | 723 | right = buf[1][i]; |
732 | 724 | ||
733 | /* Filter delayed sample from left speaker */ | 725 | /* Filter delayed sample from left speaker */ |
734 | ACC_INIT(acc, delay[di*2], coefs[0]); | 726 | ACC_INIT(acc, *di, coefs[0]); |
735 | ACC(acc, hist_l[0], coefs[1]); | 727 | ACC(acc, hist_l[0], coefs[1]); |
736 | ACC(acc, hist_l[1], coefs[2]); | 728 | ACC(acc, hist_l[1], coefs[2]); |
737 | /* Save filter history for left speaker */ | 729 | /* Save filter history for left speaker */ |
738 | hist_l[1] = GET_ACC(acc); | 730 | hist_l[1] = GET_ACC(acc); |
739 | hist_l[0] = delay[di*2]; | 731 | hist_l[0] = *di; |
732 | *di++ = left; | ||
740 | /* Filter delayed sample from right speaker */ | 733 | /* Filter delayed sample from right speaker */ |
741 | ACC_INIT(acc, delay[di*2 + 1], coefs[0]); | 734 | ACC_INIT(acc, *di, coefs[0]); |
742 | ACC(acc, hist_r[0], coefs[1]); | 735 | ACC(acc, hist_r[0], coefs[1]); |
743 | ACC(acc, hist_r[1], coefs[2]); | 736 | ACC(acc, hist_r[1], coefs[2]); |
744 | /* Save filter history for right speaker */ | 737 | /* Save filter history for right speaker */ |
745 | hist_r[1] = GET_ACC(acc); | 738 | hist_r[1] = GET_ACC(acc); |
746 | hist_r[0] = delay[di*2 + 1]; | 739 | hist_r[0] = *di; |
747 | delay[di*2] = left; | 740 | *di++ = right; |
748 | delay[di*2 + 1] = right; | ||
749 | /* Now add the attenuated direct sound and write to outputs */ | 741 | /* Now add the attenuated direct sound and write to outputs */ |
750 | buf[0][i] = FRACMUL(left, gain) + hist_r[1]; | 742 | buf[0][i] = FRACMUL(left, gain) + hist_r[1]; |
751 | buf[1][i] = FRACMUL(right, gain) + hist_l[1]; | 743 | buf[1][i] = FRACMUL(right, gain) + hist_l[1]; |
752 | 744 | ||
753 | /* Wrap delay line index if bigger than delay line size */ | 745 | /* Wrap delay line index if bigger than delay line size */ |
754 | if (++di > 12) | 746 | if (di >= delay + 13*2) |
755 | di = 0; | 747 | di = delay; |
756 | } | 748 | } |
757 | /* Write back local copies of data we've modified */ | 749 | /* Write back local copies of data we've modified */ |
758 | crossfeed_data.index = di; | 750 | crossfeed_data.index = di; |
@@ -1127,7 +1119,7 @@ int dsp_process(char *dst, const char *src[], int count) | |||
1127 | if ((samples = resample(samples, tmp)) <= 0) | 1119 | if ((samples = resample(samples, tmp)) <= 0) |
1128 | break; /* I'm pretty sure we're downsampling here */ | 1120 | break; /* I'm pretty sure we're downsampling here */ |
1129 | if (dsp->apply_crossfeed) | 1121 | if (dsp->apply_crossfeed) |
1130 | dsp->apply_crossfeed(ACF_SWITCHPARAM(samples, tmp)); | 1122 | dsp->apply_crossfeed(samples, tmp); |
1131 | /* TODO: EQ and tone controls need separate structs for audio and voice | 1123 | /* TODO: EQ and tone controls need separate structs for audio and voice |
1132 | * DSP processing thanks to filter history. isn't really audible now, but | 1124 | * DSP processing thanks to filter history. isn't really audible now, but |
1133 | * might be the day we start handling voice more delicately. | 1125 | * might be the day we start handling voice more delicately. |