Line data Source code
1 : /*
2 : * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 : *
4 : * Use of this source code is governed by a BSD-style license
5 : * that can be found in the LICENSE file in the root of the source
6 : * tree. An additional intellectual property rights grant can be found
7 : * in the file PATENTS. All contributing project authors may
8 : * be found in the AUTHORS file in the root of the source tree.
9 : */
10 :
#include "webrtc/modules/audio_coding/neteq/merge.h"

#include <assert.h>
#include <string.h>  // memmove, memcpy, memset, size_t

#include <algorithm>  // min, max
#include <limits>     // numeric_limits
#include <memory>

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
#include "webrtc/modules/audio_coding/neteq/cross_correlation.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
25 :
26 : namespace webrtc {
27 :
28 0 : Merge::Merge(int fs_hz,
29 : size_t num_channels,
30 : Expand* expand,
31 0 : SyncBuffer* sync_buffer)
32 : : fs_hz_(fs_hz),
33 : num_channels_(num_channels),
34 0 : fs_mult_(fs_hz_ / 8000),
35 0 : timestamps_per_call_(static_cast<size_t>(fs_hz_ / 100)),
36 : expand_(expand),
37 : sync_buffer_(sync_buffer),
38 0 : expanded_(num_channels_) {
39 0 : assert(num_channels_ > 0);
40 0 : }
41 :
42 : Merge::~Merge() = default;
43 :
44 0 : size_t Merge::Process(int16_t* input, size_t input_length,
45 : int16_t* external_mute_factor_array,
46 : AudioMultiVector* output) {
47 : // TODO(hlundin): Change to an enumerator and skip assert.
48 0 : assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
49 : fs_hz_ == 48000);
50 0 : assert(fs_hz_ <= kMaxSampleRate); // Should not be possible.
51 :
52 : size_t old_length;
53 : size_t expand_period;
54 : // Get expansion data to overlap and mix with.
55 0 : size_t expanded_length = GetExpandedSignal(&old_length, &expand_period);
56 :
57 : // Transfer input signal to an AudioMultiVector.
58 0 : AudioMultiVector input_vector(num_channels_);
59 0 : input_vector.PushBackInterleaved(input, input_length);
60 0 : size_t input_length_per_channel = input_vector.Size();
61 0 : assert(input_length_per_channel == input_length / num_channels_);
62 :
63 0 : size_t best_correlation_index = 0;
64 0 : size_t output_length = 0;
65 :
66 : std::unique_ptr<int16_t[]> input_channel(
67 0 : new int16_t[input_length_per_channel]);
68 0 : std::unique_ptr<int16_t[]> expanded_channel(new int16_t[expanded_length]);
69 0 : for (size_t channel = 0; channel < num_channels_; ++channel) {
70 0 : input_vector[channel].CopyTo(
71 0 : input_length_per_channel, 0, input_channel.get());
72 0 : expanded_[channel].CopyTo(expanded_length, 0, expanded_channel.get());
73 :
74 0 : int16_t new_mute_factor = SignalScaling(
75 0 : input_channel.get(), input_length_per_channel, expanded_channel.get());
76 :
77 : // Adjust muting factor (product of "main" muting factor and expand muting
78 : // factor).
79 0 : int16_t* external_mute_factor = &external_mute_factor_array[channel];
80 0 : *external_mute_factor =
81 0 : (*external_mute_factor * expand_->MuteFactor(channel)) >> 14;
82 :
83 : // Update |external_mute_factor| if it is lower than |new_mute_factor|.
84 0 : if (new_mute_factor > *external_mute_factor) {
85 0 : *external_mute_factor = std::min(new_mute_factor,
86 0 : static_cast<int16_t>(16384));
87 : }
88 :
89 0 : if (channel == 0) {
90 : // Downsample, correlate, and find strongest correlation period for the
91 : // master (i.e., first) channel only.
92 : // Downsample to 4kHz sample rate.
93 0 : Downsample(input_channel.get(), input_length_per_channel,
94 0 : expanded_channel.get(), expanded_length);
95 :
96 : // Calculate the lag of the strongest correlation period.
97 0 : best_correlation_index = CorrelateAndPeakSearch(
98 0 : old_length, input_length_per_channel, expand_period);
99 : }
100 :
101 0 : temp_data_.resize(input_length_per_channel + best_correlation_index);
102 0 : int16_t* decoded_output = temp_data_.data() + best_correlation_index;
103 :
104 : // Mute the new decoded data if needed (and unmute it linearly).
105 : // This is the overlapping part of expanded_signal.
106 0 : size_t interpolation_length = std::min(
107 0 : kMaxCorrelationLength * fs_mult_,
108 0 : expanded_length - best_correlation_index);
109 0 : interpolation_length = std::min(interpolation_length,
110 0 : input_length_per_channel);
111 0 : if (*external_mute_factor < 16384) {
112 : // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
113 : // and so on.
114 0 : int increment = 4194 / fs_mult_;
115 0 : *external_mute_factor =
116 0 : static_cast<int16_t>(DspHelper::RampSignal(input_channel.get(),
117 : interpolation_length,
118 0 : *external_mute_factor,
119 0 : increment));
120 0 : DspHelper::UnmuteSignal(&input_channel[interpolation_length],
121 : input_length_per_channel - interpolation_length,
122 : external_mute_factor, increment,
123 0 : &decoded_output[interpolation_length]);
124 : } else {
125 : // No muting needed.
126 0 : memmove(
127 0 : &decoded_output[interpolation_length],
128 0 : &input_channel[interpolation_length],
129 0 : sizeof(int16_t) * (input_length_per_channel - interpolation_length));
130 : }
131 :
132 : // Do overlap and mix linearly.
133 : int16_t increment =
134 0 : static_cast<int16_t>(16384 / (interpolation_length + 1)); // In Q14.
135 0 : int16_t mute_factor = 16384 - increment;
136 0 : memmove(temp_data_.data(), expanded_channel.get(),
137 0 : sizeof(int16_t) * best_correlation_index);
138 0 : DspHelper::CrossFade(&expanded_channel[best_correlation_index],
139 0 : input_channel.get(), interpolation_length,
140 0 : &mute_factor, increment, decoded_output);
141 :
142 0 : output_length = best_correlation_index + input_length_per_channel;
143 0 : if (channel == 0) {
144 0 : assert(output->Empty()); // Output should be empty at this point.
145 0 : output->AssertSize(output_length);
146 : } else {
147 0 : assert(output->Size() == output_length);
148 : }
149 0 : (*output)[channel].OverwriteAt(temp_data_.data(), output_length, 0);
150 : }
151 :
152 : // Copy back the first part of the data to |sync_buffer_| and remove it from
153 : // |output|.
154 0 : sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
155 0 : output->PopFront(old_length);
156 :
157 : // Return new added length. |old_length| samples were borrowed from
158 : // |sync_buffer_|.
159 0 : return output_length - old_length;
160 : }
161 :
162 0 : size_t Merge::GetExpandedSignal(size_t* old_length, size_t* expand_period) {
163 : // Check how much data that is left since earlier.
164 0 : *old_length = sync_buffer_->FutureLength();
165 : // Should never be less than overlap_length.
166 0 : assert(*old_length >= expand_->overlap_length());
167 : // Generate data to merge the overlap with using expand.
168 0 : expand_->SetParametersForMergeAfterExpand();
169 :
170 0 : if (*old_length >= 210 * kMaxSampleRate / 8000) {
171 : // TODO(hlundin): Write test case for this.
172 : // The number of samples available in the sync buffer is more than what fits
173 : // in expanded_signal. Keep the first 210 * kMaxSampleRate / 8000 samples,
174 : // but shift them towards the end of the buffer. This is ok, since all of
175 : // the buffer will be expand data anyway, so as long as the beginning is
176 : // left untouched, we're fine.
177 0 : size_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
178 0 : sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
179 0 : *old_length = 210 * kMaxSampleRate / 8000;
180 : // This is the truncated length.
181 : }
182 : // This assert should always be true thanks to the if statement above.
183 0 : assert(210 * kMaxSampleRate / 8000 >= *old_length);
184 :
185 0 : AudioMultiVector expanded_temp(num_channels_);
186 0 : expand_->Process(&expanded_temp);
187 0 : *expand_period = expanded_temp.Size(); // Samples per channel.
188 :
189 0 : expanded_.Clear();
190 : // Copy what is left since earlier into the expanded vector.
191 0 : expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
192 0 : assert(expanded_.Size() == *old_length);
193 0 : assert(expanded_temp.Size() > 0);
194 : // Do "ugly" copy and paste from the expanded in order to generate more data
195 : // to correlate (but not interpolate) with.
196 0 : const size_t required_length = static_cast<size_t>((120 + 80 + 2) * fs_mult_);
197 0 : if (expanded_.Size() < required_length) {
198 0 : while (expanded_.Size() < required_length) {
199 : // Append one more pitch period each time.
200 0 : expanded_.PushBack(expanded_temp);
201 : }
202 : // Trim the length to exactly |required_length|.
203 0 : expanded_.PopBack(expanded_.Size() - required_length);
204 : }
205 0 : assert(expanded_.Size() >= required_length);
206 0 : return required_length;
207 : }
208 :
209 0 : int16_t Merge::SignalScaling(const int16_t* input, size_t input_length,
210 : const int16_t* expanded_signal) const {
211 : // Adjust muting factor if new vector is more or less of the BGN energy.
212 : const size_t mod_input_length =
213 0 : std::min(static_cast<size_t>(64 * fs_mult_), input_length);
214 : const int16_t expanded_max =
215 0 : WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
216 0 : int32_t factor = (expanded_max * expanded_max) /
217 0 : (std::numeric_limits<int32_t>::max() /
218 0 : static_cast<int32_t>(mod_input_length));
219 0 : const int expanded_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
220 : int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
221 : expanded_signal,
222 : mod_input_length,
223 0 : expanded_shift);
224 :
225 : // Calculate energy of input signal.
226 0 : const int16_t input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);
227 0 : factor = (input_max * input_max) / (std::numeric_limits<int32_t>::max() /
228 : static_cast<int32_t>(mod_input_length));
229 0 : const int input_shift = factor == 0 ? 0 : 31 - WebRtcSpl_NormW32(factor);
230 : int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input,
231 : mod_input_length,
232 0 : input_shift);
233 :
234 : // Align to the same Q-domain.
235 0 : if (input_shift > expanded_shift) {
236 0 : energy_expanded = energy_expanded >> (input_shift - expanded_shift);
237 : } else {
238 0 : energy_input = energy_input >> (expanded_shift - input_shift);
239 : }
240 :
241 : // Calculate muting factor to use for new frame.
242 : int16_t mute_factor;
243 0 : if (energy_input > energy_expanded) {
244 : // Normalize |energy_input| to 14 bits.
245 0 : int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
246 0 : energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
247 : // Put |energy_expanded| in a domain 14 higher, so that
248 : // energy_expanded / energy_input is in Q14.
249 0 : energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
250 : // Calculate sqrt(energy_expanded / energy_input) in Q14.
251 0 : mute_factor = static_cast<int16_t>(
252 0 : WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14));
253 : } else {
254 : // Set to 1 (in Q14) when |expanded| has higher energy than |input|.
255 0 : mute_factor = 16384;
256 : }
257 :
258 0 : return mute_factor;
259 : }
260 :
261 : // TODO(hlundin): There are some parameter values in this method that seem
262 : // strange. Compare with Expand::Correlation.
263 0 : void Merge::Downsample(const int16_t* input, size_t input_length,
264 : const int16_t* expanded_signal, size_t expanded_length) {
265 : const int16_t* filter_coefficients;
266 : size_t num_coefficients;
267 0 : int decimation_factor = fs_hz_ / 4000;
268 : static const size_t kCompensateDelay = 0;
269 0 : size_t length_limit = static_cast<size_t>(fs_hz_ / 100); // 10 ms in samples.
270 0 : if (fs_hz_ == 8000) {
271 0 : filter_coefficients = DspHelper::kDownsample8kHzTbl;
272 0 : num_coefficients = 3;
273 0 : } else if (fs_hz_ == 16000) {
274 0 : filter_coefficients = DspHelper::kDownsample16kHzTbl;
275 0 : num_coefficients = 5;
276 0 : } else if (fs_hz_ == 32000) {
277 0 : filter_coefficients = DspHelper::kDownsample32kHzTbl;
278 0 : num_coefficients = 7;
279 : } else { // fs_hz_ == 48000
280 0 : filter_coefficients = DspHelper::kDownsample48kHzTbl;
281 0 : num_coefficients = 7;
282 : }
283 0 : size_t signal_offset = num_coefficients - 1;
284 0 : WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
285 : expanded_length - signal_offset,
286 : expanded_downsampled_, kExpandDownsampLength,
287 : filter_coefficients, num_coefficients,
288 0 : decimation_factor, kCompensateDelay);
289 0 : if (input_length <= length_limit) {
290 : // Not quite long enough, so we have to cheat a bit.
291 0 : size_t temp_len = input_length - signal_offset;
292 : // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
293 : // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
294 0 : size_t downsamp_temp_len = temp_len / decimation_factor;
295 0 : WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
296 : input_downsampled_, downsamp_temp_len,
297 : filter_coefficients, num_coefficients,
298 0 : decimation_factor, kCompensateDelay);
299 0 : memset(&input_downsampled_[downsamp_temp_len], 0,
300 0 : sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len));
301 : } else {
302 0 : WebRtcSpl_DownsampleFast(&input[signal_offset],
303 : input_length - signal_offset, input_downsampled_,
304 : kInputDownsampLength, filter_coefficients,
305 : num_coefficients, decimation_factor,
306 0 : kCompensateDelay);
307 : }
308 0 : }
309 :
310 0 : size_t Merge::CorrelateAndPeakSearch(size_t start_position, size_t input_length,
311 : size_t expand_period) const {
312 : // Calculate correlation without any normalization.
313 0 : const size_t max_corr_length = kMaxCorrelationLength;
314 : size_t stop_position_downsamp =
315 0 : std::min(max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
316 :
317 : int32_t correlation[kMaxCorrelationLength];
318 0 : CrossCorrelationWithAutoShift(input_downsampled_, expanded_downsampled_,
319 : kInputDownsampLength, stop_position_downsamp, 1,
320 0 : correlation);
321 :
322 : // Normalize correlation to 14 bits and copy to a 16-bit array.
323 0 : const size_t pad_length = expand_->overlap_length() - 1;
324 0 : const size_t correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
325 : std::unique_ptr<int16_t[]> correlation16(
326 0 : new int16_t[correlation_buffer_size]);
327 0 : memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
328 0 : int16_t* correlation_ptr = &correlation16[pad_length];
329 0 : int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
330 0 : stop_position_downsamp);
331 0 : int norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
332 : WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
333 0 : correlation, norm_shift);
334 :
335 : // Calculate allowed starting point for peak finding.
336 : // The peak location bestIndex must fulfill two criteria:
337 : // (1) w16_bestIndex + input_length <
338 : // timestamps_per_call_ + expand_->overlap_length();
339 : // (2) w16_bestIndex + input_length < start_position.
340 0 : size_t start_index = timestamps_per_call_ + expand_->overlap_length();
341 0 : start_index = std::max(start_position, start_index);
342 0 : start_index = (input_length > start_index) ? 0 : (start_index - input_length);
343 : // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
344 0 : size_t start_index_downsamp = start_index / (fs_mult_ * 2);
345 :
346 : // Calculate a modified |stop_position_downsamp| to account for the increased
347 : // start index |start_index_downsamp| and the effective array length.
348 : size_t modified_stop_pos =
349 : std::min(stop_position_downsamp,
350 0 : kMaxCorrelationLength + pad_length - start_index_downsamp);
351 : size_t best_correlation_index;
352 : int16_t best_correlation;
353 : static const size_t kNumCorrelationCandidates = 1;
354 0 : DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
355 : modified_stop_pos, kNumCorrelationCandidates,
356 0 : fs_mult_, &best_correlation_index,
357 0 : &best_correlation);
358 : // Compensate for modified start index.
359 0 : best_correlation_index += start_index;
360 :
361 : // Ensure that underrun does not occur for 10ms case => we have to get at
362 : // least 10ms + overlap . (This should never happen thanks to the above
363 : // modification of peak-finding starting point.)
364 0 : while (((best_correlation_index + input_length) <
365 0 : (timestamps_per_call_ + expand_->overlap_length())) ||
366 0 : ((best_correlation_index + input_length) < start_position)) {
367 0 : assert(false); // Should never happen.
368 : best_correlation_index += expand_period; // Jump one lag ahead.
369 : }
370 0 : return best_correlation_index;
371 : }
372 :
373 0 : size_t Merge::RequiredFutureSamples() {
374 0 : return fs_hz_ / 100 * num_channels_; // 10 ms.
375 : }
376 :
377 :
378 : } // namespace webrtc
|