Line data Source code
1 : /*
2 : * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 : *
4 : * Use of this source code is governed by a BSD-style license
5 : * that can be found in the LICENSE file in the root of the source
6 : * tree. An additional intellectual property rights grant can be found
7 : * in the file PATENTS. All contributing project authors may
8 : * be found in the AUTHORS file in the root of the source tree.
9 : */
10 :
11 : #include "webrtc/voice_engine/transmit_mixer.h"
12 :
13 : #include <memory>
14 :
15 : #include "webrtc/audio/utility/audio_frame_operations.h"
16 : #include "webrtc/base/format_macros.h"
17 : #include "webrtc/base/logging.h"
18 : #include "webrtc/system_wrappers/include/event_wrapper.h"
19 : #include "webrtc/system_wrappers/include/trace.h"
20 : #include "webrtc/voice_engine/channel.h"
21 : #include "webrtc/voice_engine/channel_manager.h"
22 : #include "webrtc/voice_engine/include/voe_external_media.h"
23 : #include "webrtc/voice_engine/statistics.h"
24 : #include "webrtc/voice_engine/utility.h"
25 : #include "webrtc/voice_engine/voe_base_impl.h"
26 :
27 : namespace webrtc {
28 : namespace voe {
29 :
30 : // TODO(ajm): The thread safety of this is dubious...
31 : void
32 0 : TransmitMixer::OnPeriodicProcess()
33 : {
34 : WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
35 : "TransmitMixer::OnPeriodicProcess()");
36 :
37 : #if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
38 0 : bool send_typing_noise_warning = false;
39 0 : bool typing_noise_detected = false;
40 : {
41 0 : rtc::CritScope cs(&_critSect);
42 0 : if (_typingNoiseWarningPending) {
43 0 : send_typing_noise_warning = true;
44 0 : typing_noise_detected = _typingNoiseDetected;
45 0 : _typingNoiseWarningPending = false;
46 : }
47 : }
48 0 : if (send_typing_noise_warning) {
49 0 : rtc::CritScope cs(&_callbackCritSect);
50 0 : if (_voiceEngineObserverPtr) {
51 0 : if (typing_noise_detected) {
52 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
53 : "TransmitMixer::OnPeriodicProcess() => "
54 : "CallbackOnError(VE_TYPING_NOISE_WARNING)");
55 0 : _voiceEngineObserverPtr->CallbackOnError(
56 : -1,
57 0 : VE_TYPING_NOISE_WARNING);
58 : } else {
59 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
60 : "TransmitMixer::OnPeriodicProcess() => "
61 : "CallbackOnError(VE_TYPING_NOISE_OFF_WARNING)");
62 0 : _voiceEngineObserverPtr->CallbackOnError(
63 : -1,
64 0 : VE_TYPING_NOISE_OFF_WARNING);
65 : }
66 : }
67 : }
68 : #endif
69 :
70 0 : bool saturationWarning = false;
71 : {
72 : // Modify |_saturationWarning| under lock to avoid conflict with write op
73 : // in ProcessAudio and also ensure that we don't hold the lock during the
74 : // callback.
75 0 : rtc::CritScope cs(&_critSect);
76 0 : saturationWarning = _saturationWarning;
77 0 : if (_saturationWarning)
78 0 : _saturationWarning = false;
79 : }
80 :
81 0 : if (saturationWarning)
82 : {
83 0 : rtc::CritScope cs(&_callbackCritSect);
84 0 : if (_voiceEngineObserverPtr)
85 : {
86 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
87 : "TransmitMixer::OnPeriodicProcess() =>"
88 : " CallbackOnError(VE_SATURATION_WARNING)");
89 0 : _voiceEngineObserverPtr->CallbackOnError(-1, VE_SATURATION_WARNING);
90 : }
91 : }
92 0 : }
93 :
94 :
95 0 : void TransmitMixer::PlayNotification(int32_t id,
96 : uint32_t durationMs)
97 : {
98 : WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
99 : "TransmitMixer::PlayNotification(id=%d, durationMs=%d)",
100 : id, durationMs);
101 :
102 : // Not implement yet
103 0 : }
104 :
105 0 : void TransmitMixer::RecordNotification(int32_t id,
106 : uint32_t durationMs)
107 : {
108 : WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,-1),
109 : "TransmitMixer::RecordNotification(id=%d, durationMs=%d)",
110 : id, durationMs);
111 :
112 : // Not implement yet
113 0 : }
114 :
115 0 : void TransmitMixer::PlayFileEnded(int32_t id)
116 : {
117 : WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
118 : "TransmitMixer::PlayFileEnded(id=%d)", id);
119 :
120 0 : assert(id == _filePlayerId);
121 :
122 0 : rtc::CritScope cs(&_critSect);
123 :
124 0 : _filePlaying = false;
125 : WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
126 : "TransmitMixer::PlayFileEnded() =>"
127 : "file player module is shutdown");
128 0 : }
129 :
130 : void
131 0 : TransmitMixer::RecordFileEnded(int32_t id)
132 : {
133 : WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
134 : "TransmitMixer::RecordFileEnded(id=%d)", id);
135 :
136 0 : if (id == _fileRecorderId)
137 : {
138 0 : rtc::CritScope cs(&_critSect);
139 0 : _fileRecording = false;
140 : WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
141 : "TransmitMixer::RecordFileEnded() => fileRecorder module"
142 : "is shutdown");
143 0 : } else if (id == _fileCallRecorderId)
144 : {
145 0 : rtc::CritScope cs(&_critSect);
146 0 : _fileCallRecording = false;
147 : WEBRTC_TRACE(kTraceStateInfo, kTraceVoice, VoEId(_instanceId, -1),
148 : "TransmitMixer::RecordFileEnded() => fileCallRecorder"
149 : "module is shutdown");
150 : }
151 0 : }
152 :
153 : int32_t
154 0 : TransmitMixer::Create(TransmitMixer*& mixer, uint32_t instanceId)
155 : {
156 : WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
157 : "TransmitMixer::Create(instanceId=%d)", instanceId);
158 0 : mixer = new TransmitMixer(instanceId);
159 0 : if (mixer == NULL)
160 : {
161 : WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(instanceId, -1),
162 : "TransmitMixer::Create() unable to allocate memory"
163 : "for mixer");
164 0 : return -1;
165 : }
166 0 : return 0;
167 : }
168 :
169 : void
170 0 : TransmitMixer::Destroy(TransmitMixer*& mixer)
171 : {
172 0 : if (mixer)
173 : {
174 0 : delete mixer;
175 0 : mixer = NULL;
176 : }
177 0 : }
178 :
// Constructor: initializes every member to its inactive default. The
// engine-wide collaborators (process thread, statistics, channel manager,
// APM) are injected later via SetEngineInformation() /
// SetAudioProcessingModule().
TransmitMixer::TransmitMixer(uint32_t instanceId) :
    _engineStatisticsPtr(NULL),
    _channelManagerPtr(NULL),
    audioproc_(NULL),
    _voiceEngineObserverPtr(NULL),
    _processThreadPtr(NULL),
    // Avoid conflict with other channels by adding 1024 - 1026,
    // won't use as much as 1024 channels.
    _filePlayerId(instanceId + 1024),
    _fileRecorderId(instanceId + 1025),
    _fileCallRecorderId(instanceId + 1026),
    _filePlaying(false),
    _fileRecording(false),
    _fileCallRecording(false),
    _audioLevel(),
#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    _typingNoiseWarningPending(false),
    _typingNoiseDetected(false),
#endif
    _saturationWarning(false),
    _instanceId(instanceId),
    _mixFileWithMicrophone(false),
    _captureLevel(0),
    external_postproc_ptr_(NULL),
    external_preproc_ptr_(NULL),
    _mute(false),
    stereo_codec_(false),
    swap_stereo_channels_(false)
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::TransmitMixer() - ctor");
}
211 :
// Destructor: tears down in dependency order — detach from the monitor
// module / process thread first, drop external media callbacks, then stop
// any still-running file recorder/player under the lock.
TransmitMixer::~TransmitMixer()
{
    WEBRTC_TRACE(kTraceMemory, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::~TransmitMixer() - dtor");
    _monitorModule.DeRegisterObserver();
    if (_processThreadPtr)
    {
        _processThreadPtr->DeRegisterModule(&_monitorModule);
    }
    DeRegisterExternalMediaProcessing(kRecordingAllChannelsMixed);
    DeRegisterExternalMediaProcessing(kRecordingPreprocessing);
    {
        rtc::CritScope cs(&_critSect);
        // Clear each module's file callback before stopping it so no
        // end-of-file notification fires into this dying object.
        if (file_recorder_) {
            file_recorder_->RegisterModuleFileCallback(NULL);
            file_recorder_->StopRecording();
        }
        if (file_call_recorder_) {
            file_call_recorder_->RegisterModuleFileCallback(NULL);
            file_call_recorder_->StopRecording();
        }
        if (file_player_) {
            file_player_->RegisterModuleFileCallback(NULL);
            file_player_->StopPlayingFile();
        }
    }
}
239 :
240 : int32_t
241 0 : TransmitMixer::SetEngineInformation(ProcessThread& processThread,
242 : Statistics& engineStatistics,
243 : ChannelManager& channelManager)
244 : {
245 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
246 : "TransmitMixer::SetEngineInformation()");
247 :
248 0 : _processThreadPtr = &processThread;
249 0 : _engineStatisticsPtr = &engineStatistics;
250 0 : _channelManagerPtr = &channelManager;
251 :
252 0 : _processThreadPtr->RegisterModule(&_monitorModule);
253 0 : _monitorModule.RegisterObserver(*this);
254 :
255 0 : return 0;
256 : }
257 :
258 : int32_t
259 0 : TransmitMixer::RegisterVoiceEngineObserver(VoiceEngineObserver& observer)
260 : {
261 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
262 : "TransmitMixer::RegisterVoiceEngineObserver()");
263 0 : rtc::CritScope cs(&_callbackCritSect);
264 :
265 0 : if (_voiceEngineObserverPtr)
266 : {
267 0 : _engineStatisticsPtr->SetLastError(
268 : VE_INVALID_OPERATION, kTraceError,
269 0 : "RegisterVoiceEngineObserver() observer already enabled");
270 0 : return -1;
271 : }
272 0 : _voiceEngineObserverPtr = &observer;
273 0 : return 0;
274 : }
275 :
276 : int32_t
277 0 : TransmitMixer::SetAudioProcessingModule(AudioProcessing* audioProcessingModule)
278 : {
279 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
280 : "TransmitMixer::SetAudioProcessingModule("
281 : "audioProcessingModule=0x%x)",
282 : audioProcessingModule);
283 0 : audioproc_ = audioProcessingModule;
284 0 : return 0;
285 : }
286 :
287 0 : void TransmitMixer::GetSendCodecInfo(int* max_sample_rate,
288 : size_t* max_channels) {
289 0 : *max_sample_rate = 8000;
290 0 : *max_channels = 1;
291 0 : for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
292 0 : it.Increment()) {
293 0 : Channel* channel = it.GetChannel();
294 0 : if (channel->Sending()) {
295 : CodecInst codec;
296 0 : channel->GetSendCodec(codec);
297 0 : *max_sample_rate = std::max(*max_sample_rate, codec.plfreq);
298 0 : *max_channels = std::max(*max_channels, codec.channels);
299 : }
300 : }
301 0 : }
302 :
// Converts the raw capture buffer into |_audioFrame| and runs the whole
// near-end chain on it, in this fixed order: resample into the frame,
// external pre-process hook, APM processing, optional stereo swap, typing
// detection, mute, file mix-in, file recording, external post-process hook,
// and finally level measurement. Always returns 0.
int32_t
TransmitMixer::PrepareDemux(const void* audioSamples,
                            size_t nSamples,
                            size_t nChannels,
                            uint32_t samplesPerSec,
                            uint16_t totalDelayMS,
                            int32_t clockDrift,
                            uint16_t currentMicLevel,
                            bool keyPressed)
{
    WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::PrepareDemux(nSamples=%" PRIuS ", "
                 "nChannels=%" PRIuS ", samplesPerSec=%u, totalDelayMS=%u, "
                 "clockDrift=%d, currentMicLevel=%u)",
                 nSamples, nChannels, samplesPerSec, totalDelayMS, clockDrift,
                 currentMicLevel);

    // --- Resample input audio and create/store the initial audio frame
    GenerateAudioFrame(static_cast<const int16_t*>(audioSamples),
                       nSamples,
                       nChannels,
                       samplesPerSec);

    {
        // External pre-processing callback runs before APM.
        rtc::CritScope cs(&_callbackCritSect);
        if (external_preproc_ptr_) {
            external_preproc_ptr_->Process(-1, kRecordingPreprocessing,
                                           _audioFrame.data_,
                                           _audioFrame.samples_per_channel_,
                                           _audioFrame.sample_rate_hz_,
                                           _audioFrame.num_channels_ == 2);
        }
    }

    // --- Near-end audio processing.
    ProcessAudio(totalDelayMS, clockDrift, currentMicLevel, keyPressed);

    if (swap_stereo_channels_ && stereo_codec_)
        // Only bother swapping if we're using a stereo codec.
        AudioFrameOperations::SwapStereoChannels(&_audioFrame);

    // --- Annoying typing detection (utilizes the APM/VAD decision)
#if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
    TypingDetection(keyPressed);
#endif

    // --- Mute signal
    AudioFrameOperations::Mute(&_audioFrame, _mute, _mute);

    // --- Mix with file (does not affect the mixing frequency)
    if (_filePlaying)
    {
        MixOrReplaceAudioWithFile(_audioFrame.sample_rate_hz_);
    }

    // --- Record to file
    // Snapshot |_fileRecording| under the lock; do the actual recording
    // outside of it.
    bool file_recording = false;
    {
        rtc::CritScope cs(&_critSect);
        file_recording =  _fileRecording;
    }
    if (file_recording)
    {
        RecordAudioToFile(_audioFrame.sample_rate_hz_);
    }

    {
        // External post-processing callback sees the fully processed frame.
        rtc::CritScope cs(&_callbackCritSect);
        if (external_postproc_ptr_) {
            external_postproc_ptr_->Process(-1, kRecordingAllChannelsMixed,
                                            _audioFrame.data_,
                                            _audioFrame.samples_per_channel_,
                                            _audioFrame.sample_rate_hz_,
                                            _audioFrame.num_channels_ == 2);
        }
    }

    // --- Measure audio level of speech after all processing.
    _audioLevel.ComputeLevel(_audioFrame);
    return 0;
}
384 :
385 : int32_t
386 0 : TransmitMixer::DemuxAndMix()
387 : {
388 : WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
389 : "TransmitMixer::DemuxAndMix()");
390 :
391 0 : for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
392 0 : it.Increment())
393 : {
394 0 : Channel* channelPtr = it.GetChannel();
395 0 : if (channelPtr->Sending())
396 : {
397 : // Demultiplex makes a copy of its input.
398 0 : channelPtr->Demultiplex(_audioFrame);
399 0 : channelPtr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
400 : }
401 : }
402 0 : return 0;
403 : }
404 :
405 0 : void TransmitMixer::DemuxAndMix(const int voe_channels[],
406 : size_t number_of_voe_channels) {
407 0 : for (size_t i = 0; i < number_of_voe_channels; ++i) {
408 0 : voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
409 0 : voe::Channel* channel_ptr = ch.channel();
410 0 : if (channel_ptr) {
411 0 : if (channel_ptr->Sending()) {
412 : // Demultiplex makes a copy of its input.
413 0 : channel_ptr->Demultiplex(_audioFrame);
414 0 : channel_ptr->PrepareEncodeAndSend(_audioFrame.sample_rate_hz_);
415 : }
416 : }
417 : }
418 0 : }
419 :
420 : int32_t
421 0 : TransmitMixer::EncodeAndSend()
422 : {
423 : WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId, -1),
424 : "TransmitMixer::EncodeAndSend()");
425 :
426 0 : for (ChannelManager::Iterator it(_channelManagerPtr); it.IsValid();
427 0 : it.Increment())
428 : {
429 0 : Channel* channelPtr = it.GetChannel();
430 0 : if (channelPtr->Sending())
431 : {
432 0 : channelPtr->EncodeAndSend();
433 : }
434 : }
435 0 : return 0;
436 : }
437 :
438 0 : void TransmitMixer::EncodeAndSend(const int voe_channels[],
439 : size_t number_of_voe_channels) {
440 0 : for (size_t i = 0; i < number_of_voe_channels; ++i) {
441 0 : voe::ChannelOwner ch = _channelManagerPtr->GetChannel(voe_channels[i]);
442 0 : voe::Channel* channel_ptr = ch.channel();
443 0 : if (channel_ptr && channel_ptr->Sending())
444 0 : channel_ptr->EncodeAndSend();
445 : }
446 0 : }
447 :
// Returns the most recently stored capture (microphone) level.
uint32_t TransmitMixer::CaptureLevel() const
{
    return _captureLevel;
}
452 :
// Resets the accumulated audio-level measurement when sending stops.
// Always returns 0.
int32_t
TransmitMixer::StopSend()
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StopSend()");
    _audioLevel.Clear();
    return 0;
}
461 :
462 0 : int TransmitMixer::StartPlayingFileAsMicrophone(const char* fileName,
463 : bool loop,
464 : FileFormats format,
465 : int startPosition,
466 : float volumeScaling,
467 : int stopPosition,
468 : const CodecInst* codecInst)
469 : {
470 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
471 : "TransmitMixer::StartPlayingFileAsMicrophone("
472 : "fileNameUTF8[]=%s,loop=%d, format=%d, volumeScaling=%5.3f,"
473 : " startPosition=%d, stopPosition=%d)", fileName, loop,
474 : format, volumeScaling, startPosition, stopPosition);
475 :
476 0 : if (_filePlaying)
477 : {
478 0 : _engineStatisticsPtr->SetLastError(
479 : VE_ALREADY_PLAYING, kTraceWarning,
480 0 : "StartPlayingFileAsMicrophone() is already playing");
481 0 : return 0;
482 : }
483 :
484 0 : rtc::CritScope cs(&_critSect);
485 :
486 : // Destroy the old instance
487 0 : if (file_player_) {
488 0 : file_player_->RegisterModuleFileCallback(NULL);
489 0 : file_player_.reset();
490 : }
491 :
492 : // Dynamically create the instance
493 : file_player_ =
494 0 : FilePlayer::CreateFilePlayer(_filePlayerId, (const FileFormats)format);
495 :
496 0 : if (!file_player_) {
497 0 : _engineStatisticsPtr->SetLastError(
498 : VE_INVALID_ARGUMENT, kTraceError,
499 0 : "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
500 0 : return -1;
501 : }
502 :
503 0 : const uint32_t notificationTime(0);
504 :
505 0 : if (file_player_->StartPlayingFile(
506 : fileName, loop, startPosition, volumeScaling, notificationTime,
507 0 : stopPosition, (const CodecInst*)codecInst) != 0) {
508 0 : _engineStatisticsPtr->SetLastError(
509 : VE_BAD_FILE, kTraceError,
510 0 : "StartPlayingFile() failed to start file playout");
511 0 : file_player_->StopPlayingFile();
512 0 : file_player_.reset();
513 0 : return -1;
514 : }
515 :
516 0 : file_player_->RegisterModuleFileCallback(this);
517 0 : _filePlaying = true;
518 :
519 0 : return 0;
520 : }
521 :
522 0 : int TransmitMixer::StartPlayingFileAsMicrophone(InStream* stream,
523 : FileFormats format,
524 : int startPosition,
525 : float volumeScaling,
526 : int stopPosition,
527 : const CodecInst* codecInst)
528 : {
529 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
530 : "TransmitMixer::StartPlayingFileAsMicrophone(format=%d,"
531 : " volumeScaling=%5.3f, startPosition=%d, stopPosition=%d)",
532 : format, volumeScaling, startPosition, stopPosition);
533 :
534 0 : if (stream == NULL)
535 : {
536 0 : _engineStatisticsPtr->SetLastError(
537 : VE_BAD_FILE, kTraceError,
538 0 : "StartPlayingFileAsMicrophone() NULL as input stream");
539 0 : return -1;
540 : }
541 :
542 0 : if (_filePlaying)
543 : {
544 0 : _engineStatisticsPtr->SetLastError(
545 : VE_ALREADY_PLAYING, kTraceWarning,
546 0 : "StartPlayingFileAsMicrophone() is already playing");
547 0 : return 0;
548 : }
549 :
550 0 : rtc::CritScope cs(&_critSect);
551 :
552 : // Destroy the old instance
553 0 : if (file_player_) {
554 0 : file_player_->RegisterModuleFileCallback(NULL);
555 0 : file_player_.reset();
556 : }
557 :
558 : // Dynamically create the instance
559 : file_player_ =
560 0 : FilePlayer::CreateFilePlayer(_filePlayerId, (const FileFormats)format);
561 :
562 0 : if (!file_player_) {
563 0 : _engineStatisticsPtr->SetLastError(
564 : VE_INVALID_ARGUMENT, kTraceWarning,
565 0 : "StartPlayingFileAsMicrophone() filePlayer format isnot correct");
566 0 : return -1;
567 : }
568 :
569 0 : const uint32_t notificationTime(0);
570 :
571 0 : if (file_player_->StartPlayingFile(stream, startPosition, volumeScaling,
572 : notificationTime, stopPosition,
573 0 : (const CodecInst*)codecInst) != 0) {
574 0 : _engineStatisticsPtr->SetLastError(
575 : VE_BAD_FILE, kTraceError,
576 0 : "StartPlayingFile() failed to start file playout");
577 0 : file_player_->StopPlayingFile();
578 0 : file_player_.reset();
579 0 : return -1;
580 : }
581 0 : file_player_->RegisterModuleFileCallback(this);
582 0 : _filePlaying = true;
583 :
584 0 : return 0;
585 : }
586 :
587 0 : int TransmitMixer::StopPlayingFileAsMicrophone()
588 : {
589 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId,-1),
590 : "TransmitMixer::StopPlayingFileAsMicrophone()");
591 :
592 0 : if (!_filePlaying)
593 : {
594 0 : return 0;
595 : }
596 :
597 0 : rtc::CritScope cs(&_critSect);
598 :
599 0 : if (file_player_->StopPlayingFile() != 0) {
600 0 : _engineStatisticsPtr->SetLastError(
601 : VE_CANNOT_STOP_PLAYOUT, kTraceError,
602 0 : "StopPlayingFile() couldnot stop playing file");
603 0 : return -1;
604 : }
605 :
606 0 : file_player_->RegisterModuleFileCallback(NULL);
607 0 : file_player_.reset();
608 0 : _filePlaying = false;
609 :
610 0 : return 0;
611 : }
612 :
// Returns whether a file is currently being played as microphone input
// (non-zero = playing).
// NOTE(review): |_filePlaying| is read here without |_critSect|, unlike
// IsRecordingMic(); looks benign for a boolean status query, but confirm.
int TransmitMixer::IsPlayingFileAsMicrophone() const
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::IsPlayingFileAsMicrophone()");
    return _filePlaying;
}
619 :
// Starts recording the (processed) microphone signal to |fileName|.
// A NULL |codecInst| selects 16 kHz L16 PCM; L16/PCMU/PCMA map to WAV,
// anything else to a compressed file. Returns 0 on success (or if already
// recording), -1 on error.
// NOTE(review): this overload rejects codecs with channels > 2, while the
// stream overload and StartRecordingCall() reject channels != 1 — confirm
// whether stereo microphone recording is intended here.
int TransmitMixer::StartRecordingMicrophone(const char* fileName,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
                 "TransmitMixer::StartRecordingMicrophone(fileName=%s)",
                 fileName);

    rtc::CritScope cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                     "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels > 2)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (file_recorder_) {
        file_recorder_->RegisterModuleFileCallback(NULL);
        file_recorder_.reset();
    }

    file_recorder_ = FileRecorder::CreateFileRecorder(
        _fileRecorderId, (const FileFormats)format);
    if (!file_recorder_) {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format isnot correct");
        return -1;
    }

    if (file_recorder_->StartRecordingAudioFile(
            fileName, (const CodecInst&)*codecInst, notificationTime) != 0) {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        file_recorder_->StopRecording();
        file_recorder_.reset();
        return -1;
    }
    file_recorder_->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}
690 :
// Stream variant of StartRecordingMicrophone(). A NULL |codecInst| selects
// 16 kHz L16 PCM; L16/PCMU/PCMA map to WAV, anything else to a compressed
// file. Only mono codecs are accepted. Returns 0 on success (or if already
// recording), -1 on error.
int TransmitMixer::StartRecordingMicrophone(OutStream* stream,
                                            const CodecInst* codecInst)
{
    WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
               "TransmitMixer::StartRecordingMicrophone()");

    rtc::CritScope cs(&_critSect);

    if (_fileRecording)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                   "StartRecordingMicrophone() is already recording");
        return 0;
    }

    FileFormats format;
    const uint32_t notificationTime(0); // Not supported in VoE
    CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };

    if (codecInst != NULL && codecInst->channels != 1)
    {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() invalid compression");
        return (-1);
    }
    if (codecInst == NULL)
    {
        format = kFileFormatPcm16kHzFile;
        codecInst = &dummyCodec;
    } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
        (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
    {
        format = kFileFormatWavFile;
    } else
    {
        format = kFileFormatCompressedFile;
    }

    // Destroy the old instance
    if (file_recorder_) {
        file_recorder_->RegisterModuleFileCallback(NULL);
        file_recorder_.reset();
    }

    file_recorder_ = FileRecorder::CreateFileRecorder(
        _fileRecorderId, (const FileFormats)format);
    if (!file_recorder_) {
        _engineStatisticsPtr->SetLastError(
            VE_INVALID_ARGUMENT, kTraceError,
            "StartRecordingMicrophone() fileRecorder format isnot correct");
        return -1;
    }

    if (file_recorder_->StartRecordingAudioFile(stream, *codecInst,
                                                notificationTime) != 0) {
        _engineStatisticsPtr->SetLastError(
            VE_BAD_FILE, kTraceError,
            "StartRecordingAudioFile() failed to start file recording");
        file_recorder_->StopRecording();
        file_recorder_.reset();
        return -1;
    }

    file_recorder_->RegisterModuleFileCallback(this);
    _fileRecording = true;

    return 0;
}
761 :
762 :
763 0 : int TransmitMixer::StopRecordingMicrophone()
764 : {
765 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
766 : "TransmitMixer::StopRecordingMicrophone()");
767 :
768 0 : rtc::CritScope cs(&_critSect);
769 :
770 0 : if (!_fileRecording)
771 : {
772 : WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
773 : "StopRecordingMicrophone() isnot recording");
774 0 : return 0;
775 : }
776 :
777 0 : if (file_recorder_->StopRecording() != 0) {
778 0 : _engineStatisticsPtr->SetLastError(
779 : VE_STOP_RECORDING_FAILED, kTraceError,
780 0 : "StopRecording(), could not stop recording");
781 0 : return -1;
782 : }
783 0 : file_recorder_->RegisterModuleFileCallback(NULL);
784 0 : file_recorder_.reset();
785 0 : _fileRecording = false;
786 :
787 0 : return 0;
788 : }
789 :
790 0 : int TransmitMixer::StartRecordingCall(const char* fileName,
791 : const CodecInst* codecInst)
792 : {
793 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
794 : "TransmitMixer::StartRecordingCall(fileName=%s)", fileName);
795 :
796 0 : if (_fileCallRecording)
797 : {
798 : WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
799 : "StartRecordingCall() is already recording");
800 0 : return 0;
801 : }
802 :
803 : FileFormats format;
804 0 : const uint32_t notificationTime(0); // Not supported in VoE
805 0 : CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
806 :
807 0 : if (codecInst != NULL && codecInst->channels != 1)
808 : {
809 0 : _engineStatisticsPtr->SetLastError(
810 : VE_BAD_ARGUMENT, kTraceError,
811 0 : "StartRecordingCall() invalid compression");
812 0 : return (-1);
813 : }
814 0 : if (codecInst == NULL)
815 : {
816 0 : format = kFileFormatPcm16kHzFile;
817 0 : codecInst = &dummyCodec;
818 0 : } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
819 0 : (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
820 0 : (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
821 : {
822 0 : format = kFileFormatWavFile;
823 : } else
824 : {
825 0 : format = kFileFormatCompressedFile;
826 : }
827 :
828 0 : rtc::CritScope cs(&_critSect);
829 :
830 : // Destroy the old instance
831 0 : if (file_call_recorder_) {
832 0 : file_call_recorder_->RegisterModuleFileCallback(NULL);
833 0 : file_call_recorder_.reset();
834 : }
835 :
836 0 : file_call_recorder_ = FileRecorder::CreateFileRecorder(
837 0 : _fileCallRecorderId, (const FileFormats)format);
838 0 : if (!file_call_recorder_) {
839 0 : _engineStatisticsPtr->SetLastError(
840 : VE_INVALID_ARGUMENT, kTraceError,
841 0 : "StartRecordingCall() fileRecorder format isnot correct");
842 0 : return -1;
843 : }
844 :
845 0 : if (file_call_recorder_->StartRecordingAudioFile(
846 0 : fileName, (const CodecInst&)*codecInst, notificationTime) != 0) {
847 0 : _engineStatisticsPtr->SetLastError(
848 : VE_BAD_FILE, kTraceError,
849 0 : "StartRecordingAudioFile() failed to start file recording");
850 0 : file_call_recorder_->StopRecording();
851 0 : file_call_recorder_.reset();
852 0 : return -1;
853 : }
854 0 : file_call_recorder_->RegisterModuleFileCallback(this);
855 0 : _fileCallRecording = true;
856 :
857 0 : return 0;
858 : }
859 :
860 0 : int TransmitMixer::StartRecordingCall(OutStream* stream,
861 : const CodecInst* codecInst)
862 : {
863 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
864 : "TransmitMixer::StartRecordingCall()");
865 :
866 0 : if (_fileCallRecording)
867 : {
868 : WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
869 : "StartRecordingCall() is already recording");
870 0 : return 0;
871 : }
872 :
873 : FileFormats format;
874 0 : const uint32_t notificationTime(0); // Not supported in VoE
875 0 : CodecInst dummyCodec = { 100, "L16", 16000, 320, 1, 320000 };
876 :
877 0 : if (codecInst != NULL && codecInst->channels != 1)
878 : {
879 0 : _engineStatisticsPtr->SetLastError(
880 : VE_BAD_ARGUMENT, kTraceError,
881 0 : "StartRecordingCall() invalid compression");
882 0 : return (-1);
883 : }
884 0 : if (codecInst == NULL)
885 : {
886 0 : format = kFileFormatPcm16kHzFile;
887 0 : codecInst = &dummyCodec;
888 0 : } else if ((STR_CASE_CMP(codecInst->plname,"L16") == 0) ||
889 0 : (STR_CASE_CMP(codecInst->plname,"PCMU") == 0) ||
890 0 : (STR_CASE_CMP(codecInst->plname,"PCMA") == 0))
891 : {
892 0 : format = kFileFormatWavFile;
893 : } else
894 : {
895 0 : format = kFileFormatCompressedFile;
896 : }
897 :
898 0 : rtc::CritScope cs(&_critSect);
899 :
900 : // Destroy the old instance
901 0 : if (file_call_recorder_) {
902 0 : file_call_recorder_->RegisterModuleFileCallback(NULL);
903 0 : file_call_recorder_.reset();
904 : }
905 :
906 0 : file_call_recorder_ = FileRecorder::CreateFileRecorder(
907 0 : _fileCallRecorderId, (const FileFormats)format);
908 0 : if (!file_call_recorder_) {
909 0 : _engineStatisticsPtr->SetLastError(
910 : VE_INVALID_ARGUMENT, kTraceError,
911 0 : "StartRecordingCall() fileRecorder format isnot correct");
912 0 : return -1;
913 : }
914 :
915 0 : if (file_call_recorder_->StartRecordingAudioFile(stream, *codecInst,
916 0 : notificationTime) != 0) {
917 0 : _engineStatisticsPtr->SetLastError(
918 : VE_BAD_FILE, kTraceError,
919 0 : "StartRecordingAudioFile() failed to start file recording");
920 0 : file_call_recorder_->StopRecording();
921 0 : file_call_recorder_.reset();
922 0 : return -1;
923 : }
924 :
925 0 : file_call_recorder_->RegisterModuleFileCallback(this);
926 0 : _fileCallRecording = true;
927 :
928 0 : return 0;
929 : }
930 :
931 0 : int TransmitMixer::StopRecordingCall()
932 : {
933 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
934 : "TransmitMixer::StopRecordingCall()");
935 :
936 0 : if (!_fileCallRecording)
937 : {
938 : WEBRTC_TRACE(kTraceError, kTraceVoice, VoEId(_instanceId, -1),
939 : "StopRecordingCall() file isnot recording");
940 0 : return -1;
941 : }
942 :
943 0 : rtc::CritScope cs(&_critSect);
944 :
945 0 : if (file_call_recorder_->StopRecording() != 0) {
946 0 : _engineStatisticsPtr->SetLastError(
947 : VE_STOP_RECORDING_FAILED, kTraceError,
948 0 : "StopRecording(), could not stop recording");
949 0 : return -1;
950 : }
951 :
952 0 : file_call_recorder_->RegisterModuleFileCallback(NULL);
953 0 : file_call_recorder_.reset();
954 0 : _fileCallRecording = false;
955 :
956 0 : return 0;
957 : }
958 :
// Selects whether the played file is mixed with (true) or replaces (false)
// the microphone signal.
// NOTE(review): |_mixFileWithMicrophone| is written without a lock here —
// looks like a plain bool toggle; confirm it is only read best-effort.
void
TransmitMixer::SetMixWithMicStatus(bool mix)
{
    _mixFileWithMicrophone = mix;
}
964 :
965 0 : int TransmitMixer::RegisterExternalMediaProcessing(
966 : VoEMediaProcess* object,
967 : ProcessingTypes type) {
968 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
969 : "TransmitMixer::RegisterExternalMediaProcessing()");
970 :
971 0 : rtc::CritScope cs(&_callbackCritSect);
972 0 : if (!object) {
973 0 : return -1;
974 : }
975 :
976 : // Store the callback object according to the processing type.
977 0 : if (type == kRecordingAllChannelsMixed) {
978 0 : external_postproc_ptr_ = object;
979 0 : } else if (type == kRecordingPreprocessing) {
980 0 : external_preproc_ptr_ = object;
981 : } else {
982 0 : return -1;
983 : }
984 0 : return 0;
985 : }
986 :
987 0 : int TransmitMixer::DeRegisterExternalMediaProcessing(ProcessingTypes type) {
988 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
989 : "TransmitMixer::DeRegisterExternalMediaProcessing()");
990 :
991 0 : rtc::CritScope cs(&_callbackCritSect);
992 0 : if (type == kRecordingAllChannelsMixed) {
993 0 : external_postproc_ptr_ = NULL;
994 0 : } else if (type == kRecordingPreprocessing) {
995 0 : external_preproc_ptr_ = NULL;
996 : } else {
997 0 : return -1;
998 : }
999 0 : return 0;
1000 : }
1001 :
1002 : int
1003 0 : TransmitMixer::SetMute(bool enable)
1004 : {
1005 : WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, -1),
1006 : "TransmitMixer::SetMute(enable=%d)", enable);
1007 0 : _mute = enable;
1008 0 : return 0;
1009 : }
1010 :
1011 : bool
1012 0 : TransmitMixer::Mute() const
1013 : {
1014 0 : return _mute;
1015 : }
1016 :
1017 0 : int8_t TransmitMixer::AudioLevel() const
1018 : {
1019 : // Speech + file level [0,9]
1020 0 : return _audioLevel.Level();
1021 : }
1022 :
1023 0 : int16_t TransmitMixer::AudioLevelFullRange() const
1024 : {
1025 : // Speech + file level [0,32767]
1026 0 : return _audioLevel.LevelFullRange();
1027 : }
1028 :
// Returns whether a call recording is currently active.
// NOTE(review): _fileCallRecording is read here without _critSect, unlike
// IsRecordingMic() below which locks for its read. Presumably tolerated as
// a racy-but-benign bool read — confirm the intended threading model before
// adding a lock (a caller already holding _critSect would deadlock).
bool TransmitMixer::IsRecordingCall()
{
    return _fileCallRecording;
}
1033 :
1034 0 : bool TransmitMixer::IsRecordingMic()
1035 : {
1036 0 : rtc::CritScope cs(&_critSect);
1037 0 : return _fileRecording;
1038 : }
1039 :
1040 : // Note that if drift compensation is done here, a buffering stage will be
1041 : // needed and this will need to switch to non-fixed resamples.
1042 0 : void TransmitMixer::GenerateAudioFrame(const int16_t* audio,
1043 : size_t samples_per_channel,
1044 : size_t num_channels,
1045 : int sample_rate_hz) {
1046 : int codec_rate;
1047 : size_t num_codec_channels;
1048 0 : GetSendCodecInfo(&codec_rate, &num_codec_channels);
1049 0 : stereo_codec_ = num_codec_channels == 2;
1050 :
1051 : // We want to process at the lowest rate possible without losing information.
1052 : // Choose the lowest native rate at least equal to the input and codec rates.
1053 0 : const int min_processing_rate = std::min(sample_rate_hz, codec_rate);
1054 0 : for (size_t i = 0; i < AudioProcessing::kNumNativeSampleRates; ++i) {
1055 0 : _audioFrame.sample_rate_hz_ = AudioProcessing::kNativeSampleRatesHz[i];
1056 0 : if (_audioFrame.sample_rate_hz_ >= min_processing_rate) {
1057 0 : break;
1058 : }
1059 : }
1060 0 : _audioFrame.num_channels_ = std::min(num_channels, num_codec_channels);
1061 0 : RemixAndResample(audio, samples_per_channel, num_channels, sample_rate_hz,
1062 0 : &resampler_, &_audioFrame);
1063 0 : }
1064 :
1065 0 : int32_t TransmitMixer::RecordAudioToFile(
1066 : uint32_t mixingFrequency)
1067 : {
1068 0 : rtc::CritScope cs(&_critSect);
1069 0 : if (!file_recorder_) {
1070 : WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1071 : "TransmitMixer::RecordAudioToFile() filerecorder doesnot"
1072 : "exist");
1073 0 : return -1;
1074 : }
1075 :
1076 0 : if (file_recorder_->RecordAudioToFile(_audioFrame) != 0) {
1077 : WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
1078 : "TransmitMixer::RecordAudioToFile() file recording"
1079 : "failed");
1080 0 : return -1;
1081 : }
1082 :
1083 0 : return 0;
1084 : }
1085 :
// Pulls 10 ms of file audio from the active file player and either mixes it
// into _audioFrame (saturating add) or replaces the frame contents with it,
// depending on _mixFileWithMicrophone. Returns 0 on success, -1 if no file
// player exists or the file read fails.
int32_t TransmitMixer::MixOrReplaceAudioWithFile(
    int mixingFrequency)
{
    // 640 samples of scratch space for one 10 ms mono chunk — presumably
    // sized for the highest supported file rate; TODO confirm against
    // FilePlayer's maximum output rate.
    std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);

    size_t fileSamples(0);
    {
        // Hold the lock only while touching file_player_; the frame
        // manipulation below runs unlocked.
        rtc::CritScope cs(&_critSect);
        if (!file_player_) {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile()"
                         "fileplayer doesnot exist");
            return -1;
        }

        if (file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
                                               mixingFrequency) == -1) {
            WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, -1),
                         "TransmitMixer::MixOrReplaceAudioWithFile() file"
                         " mixing failed");
            return -1;
        }
    }

    // Both sources must cover the same 10 ms window at mixingFrequency.
    assert(_audioFrame.samples_per_channel_ == fileSamples);

    if (_mixFileWithMicrophone)
    {
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        MixWithSat(_audioFrame.data_,
                   _audioFrame.num_channels_,
                   fileBuffer.get(),
                   1,
                   fileSamples);
    } else
    {
        // Replace ACM audio with file.
        // Currently file stream is always mono.
        // TODO(xians): Change the code when FilePlayer supports real stereo.
        // -1 id and 0xFFFFFFFF timestamp look like "no channel / no RTP
        // timestamp" sentinels — confirm against AudioFrame::UpdateFrame.
        _audioFrame.UpdateFrame(-1,
                                0xFFFFFFFF,
                                fileBuffer.get(),
                                fileSamples,
                                mixingFrequency,
                                AudioFrame::kNormalSpeech,
                                AudioFrame::kVadUnknown,
                                1);
    }
    return 0;
}
1137 :
// Runs the capture frame through the audio processing module (APM).
// The call order matters: stream delay, analog AGC level, drift, and the
// key-press flag must all be set on audioproc_ before ProcessStream(), and
// the AGC level / saturation readbacks are only valid afterwards.
void TransmitMixer::ProcessAudio(int delay_ms, int clock_drift,
                                 int current_mic_level, bool key_pressed) {
  if (audioproc_->set_stream_delay_ms(delay_ms) != 0) {
    // Silently ignore this failure to avoid flooding the logs.
  }

  // Hand the current analog mic level to the AGC before processing.
  GainControl* agc = audioproc_->gain_control();
  if (agc->set_stream_analog_level(current_mic_level) != 0) {
    LOG(LS_ERROR) << "set_stream_analog_level failed: current_mic_level = "
                  << current_mic_level;
    assert(false);
  }

  // Drift samples are only meaningful when AEC drift compensation is on.
  EchoCancellation* aec = audioproc_->echo_cancellation();
  if (aec->is_drift_compensation_enabled()) {
    aec->set_stream_drift_samples(clock_drift);
  }

  audioproc_->set_stream_key_pressed(key_pressed);

  int err = audioproc_->ProcessStream(&_audioFrame);
  if (err != 0) {
    LOG(LS_ERROR) << "ProcessStream() error: " << err;
    assert(false);
  }

  // Store new capture level. Only updated when analog AGC is enabled.
  _captureLevel = agc->stream_analog_level();

  rtc::CritScope cs(&_critSect);
  // Triggers a callback in OnPeriodicProcess(). OR-accumulate so a
  // saturation event is not lost before the periodic thread consumes it.
  _saturationWarning |= agc->stream_is_saturated();
}
1171 :
1172 : #if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
// Feeds one frame's key-press/VAD observation into the typing detector and
// maintains the two flags consumed by OnPeriodicProcess():
//   _typingNoiseWarningPending — a callback should be fired;
//   _typingNoiseDetected       — the state (noise started vs. stopped) that
//                                the pending callback should report.
void TransmitMixer::TypingDetection(bool keyPressed)
{
    // We let the VAD determine if we're using this feature or not.
    if (_audioFrame.vad_activity_ == AudioFrame::kVadUnknown) {
        return;
    }

    bool vadActive = _audioFrame.vad_activity_ == AudioFrame::kVadActive;
    if (_typingDetection.Process(keyPressed, vadActive)) {
        // Typing noise detected: always (re)arm a "detected" callback.
        rtc::CritScope cs(&_critSect);
        _typingNoiseWarningPending = true;
        _typingNoiseDetected = true;
    } else {
        rtc::CritScope cs(&_critSect);
        // If there is already a warning pending, do not change the state.
        // Otherwise set a warning pending if last callback was for noise detected.
        if (!_typingNoiseWarningPending && _typingNoiseDetected) {
            _typingNoiseWarningPending = true;
            _typingNoiseDetected = false;
        }
    }
}
1195 : #endif
1196 :
1197 0 : int TransmitMixer::GetMixingFrequency()
1198 : {
1199 0 : assert(_audioFrame.sample_rate_hz_ != 0);
1200 0 : return _audioFrame.sample_rate_hz_;
1201 : }
1202 :
1203 : #if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
1204 0 : int TransmitMixer::TimeSinceLastTyping(int &seconds)
1205 : {
1206 : // We check in VoEAudioProcessingImpl that this is only called when
1207 : // typing detection is active.
1208 0 : seconds = _typingDetection.TimeSinceLastDetectionInSeconds();
1209 0 : return 0;
1210 : }
1211 : #endif
1212 :
1213 : #if WEBRTC_VOICE_ENGINE_TYPING_DETECTION
1214 0 : int TransmitMixer::SetTypingDetectionParameters(int timeWindow,
1215 : int costPerTyping,
1216 : int reportingThreshold,
1217 : int penaltyDecay,
1218 : int typeEventDelay)
1219 : {
1220 0 : _typingDetection.SetParameters(timeWindow,
1221 : costPerTyping,
1222 : reportingThreshold,
1223 : penaltyDecay,
1224 : typeEventDelay,
1225 0 : 0);
1226 0 : return 0;
1227 : }
1228 : #endif
1229 :
1230 0 : void TransmitMixer::EnableStereoChannelSwapping(bool enable) {
1231 0 : swap_stereo_channels_ = enable;
1232 0 : }
1233 :
1234 0 : bool TransmitMixer::IsStereoChannelSwappingEnabled() {
1235 0 : return swap_stereo_channels_;
1236 : }
1237 :
1238 : } // namespace voe
1239 : } // namespace webrtc
|