Line data Source code
1 : /*
2 : * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 : *
4 : * Use of this source code is governed by a BSD-style license
5 : * that can be found in the LICENSE file in the root of the source
6 : * tree. An additional intellectual property rights grant can be found
7 : * in the file PATENTS. All contributing project authors may
8 : * be found in the AUTHORS file in the root of the source tree.
9 : */
10 :
11 : #include <assert.h>
12 :
13 : #include "webrtc/base/checks.h"
14 : #include "webrtc/base/logging.h"
15 : #include "webrtc/modules/audio_device/audio_device_config.h"
16 : #include "webrtc/modules/audio_device/linux/audio_device_pulse_linux.h"
17 : #include "webrtc/system_wrappers/include/event_wrapper.h"
18 : #include "webrtc/system_wrappers/include/trace.h"
19 :
// Process-wide late-binding symbol table for libpulse; resolved through the
// LATE() macro below instead of linking against libpulse directly.
webrtc_adm_linux_pulse::PulseAudioSymbolTable PaSymbolTable;

// Accesses Pulse functions through our late-binding symbol table instead of
// directly. This way we don't have to link to libpulse, which means our binary
// will work on systems that don't have it.
#define LATE(sym) \
LATESYM_GET(webrtc_adm_linux_pulse::PulseAudioSymbolTable, &PaSymbolTable, sym)
27 :
28 : namespace webrtc
29 : {
30 :
// Constructor. Only allocates synchronization primitives and zeroes state;
// no PulseAudio interaction happens until Init() is called.
// NOTE: the initializer list order must match the member declaration order
// in the header; do not reorder.
AudioDeviceLinuxPulse::AudioDeviceLinuxPulse(const int32_t id) :
    _ptrAudioBuffer(NULL),
    // Owned synchronization objects; released via `delete &member` in the
    // destructor.
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _timeEventRec(*EventWrapper::Create()),
    _timeEventPlay(*EventWrapper::Create()),
    _recStartEvent(*EventWrapper::Create()),
    _playStartEvent(*EventWrapper::Create()),
    _id(id),
    _mixerManager(id),
    // Device selection state; index 0 is the "default" device.
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _inputDeviceIsSpecified(false),
    _outputDeviceIsSpecified(false),
    _recChannels(1),
    _playChannels(1),
    _playBufType(AudioDeviceModule::kFixedBufferSize),
    // Lifecycle / streaming flags.
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _startRec(false),
    _stopRec(false),
    _startPlay(false),
    _stopPlay(false),
    _AGC(false),
    update_speaker_volume_at_startup_(false),
    _playBufDelayFixed(20),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writeErrors(0),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    // Scratch values used while PulseAudio enumeration callbacks run;
    // -1 means "not looking up a device right now".
    _deviceIndex(-1),
    _numPlayDevices(0),
    _numRecDevices(0),
    _playDeviceName(NULL),
    _recDeviceName(NULL),
    _playDisplayDeviceName(NULL),
    _recDisplayDeviceName(NULL),
    // Playback/record sample buffers (heap arrays owned by this object).
    _playBuffer(NULL),
    _playbackBufferSize(0),
    _playbackBufferUnused(0),
    _tempBufferSpace(0),
    _recBuffer(NULL),
    _recordBufferSize(0),
    _recordBufferUsed(0),
    _tempSampleData(NULL),
    _tempSampleDataSize(0),
    _configuredLatencyPlay(0),
    _configuredLatencyRec(0),
    // PulseAudio handles; populated by InitPulseAudio().
    _paDeviceIndex(-1),
    _paStateChanged(false),
    _paMainloop(NULL),
    _paMainloopApi(NULL),
    _paContext(NULL),
    _recStream(NULL),
    _playStream(NULL),
    _recStreamFlags(0),
    _playStreamFlags(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id,
                 "%s created", __FUNCTION__);

    // Zero the plain-old-data members that cannot be set in the
    // initializer list.
    memset(_paServerVersion, 0, sizeof(_paServerVersion));
    memset(&_playBufferAttr, 0, sizeof(_playBufferAttr));
    memset(&_recBufferAttr, 0, sizeof(_recBufferAttr));
    memset(_oldKeyState, 0, sizeof(_oldKeyState));
}
103 :
104 0 : AudioDeviceLinuxPulse::~AudioDeviceLinuxPulse()
105 : {
106 : WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
107 : "%s destroyed", __FUNCTION__);
108 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
109 0 : Terminate();
110 :
111 0 : if (_recBuffer)
112 : {
113 0 : delete [] _recBuffer;
114 0 : _recBuffer = NULL;
115 : }
116 0 : if (_playBuffer)
117 : {
118 0 : delete [] _playBuffer;
119 0 : _playBuffer = NULL;
120 : }
121 0 : if (_playDeviceName)
122 : {
123 0 : delete [] _playDeviceName;
124 0 : _playDeviceName = NULL;
125 : }
126 0 : if (_recDeviceName)
127 : {
128 0 : delete [] _recDeviceName;
129 0 : _recDeviceName = NULL;
130 : }
131 :
132 0 : delete &_recStartEvent;
133 0 : delete &_playStartEvent;
134 0 : delete &_timeEventRec;
135 0 : delete &_timeEventPlay;
136 0 : delete &_critSect;
137 0 : }
138 :
139 0 : void AudioDeviceLinuxPulse::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
140 : {
141 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
142 :
143 0 : _ptrAudioBuffer = audioBuffer;
144 :
145 : // Inform the AudioBuffer about default settings for this implementation.
146 : // Set all values to zero here since the actual settings will be done by
147 : // InitPlayout and InitRecording later.
148 0 : _ptrAudioBuffer->SetRecordingSampleRate(0);
149 0 : _ptrAudioBuffer->SetPlayoutSampleRate(0);
150 0 : _ptrAudioBuffer->SetRecordingChannels(0);
151 0 : _ptrAudioBuffer->SetPlayoutChannels(0);
152 0 : }
153 :
154 : // ----------------------------------------------------------------------------
155 : // ActiveAudioLayer
156 : // ----------------------------------------------------------------------------
157 :
158 0 : int32_t AudioDeviceLinuxPulse::ActiveAudioLayer(
159 : AudioDeviceModule::AudioLayer& audioLayer) const
160 : {
161 0 : audioLayer = AudioDeviceModule::kLinuxPulseAudio;
162 0 : return 0;
163 : }
164 :
// One-time module initialization: brings up the PulseAudio connection,
// optionally opens an X display for typing detection, and starts the
// realtime record/playout worker threads. Idempotent — returns OK if
// already initialized.
AudioDeviceGeneric::InitStatus AudioDeviceLinuxPulse::Init() {
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (_initialized) {
    return InitStatus::OK;
  }

  // Initialize PulseAudio
  if (InitPulseAudio() < 0) {
    LOG(LS_ERROR) << "failed to initialize PulseAudio";
    // Best-effort cleanup of any partially created PA state.
    if (TerminatePulseAudio() < 0) {
      LOG(LS_ERROR) << "failed to terminate PulseAudio";
    }
    return InitStatus::OTHER_ERROR;
  }

  // Reset the warning/error counters reported through the module API.
  _playWarning = 0;
  _playError = 0;
  _recWarning = 0;
  _recError = 0;

#ifdef USE_X11
  // Get X display handle for typing detection
  _XDisplay = XOpenDisplay(NULL);
  if (!_XDisplay) {
    // Non-fatal: the module works without typing detection.
    LOG(LS_WARNING)
        << "failed to open X display, typing detection will not work";
  }
#endif

  // RECORDING
  // Worker thread that feeds captured audio; runs RecThreadFunc with
  // `this` as context at realtime priority.
  _ptrThreadRec.reset(new rtc::PlatformThread(
      RecThreadFunc, this, "webrtc_audio_module_rec_thread"));

  _ptrThreadRec->Start();
  _ptrThreadRec->SetPriority(rtc::kRealtimePriority);

  // PLAYOUT
  // Worker thread that pulls audio for rendering, same setup as above.
  _ptrThreadPlay.reset(new rtc::PlatformThread(
      PlayThreadFunc, this, "webrtc_audio_module_play_thread"));
  _ptrThreadPlay->Start();
  _ptrThreadPlay->SetPriority(rtc::kRealtimePriority);

  _initialized = true;

  return InitStatus::OK;
}
211 :
// Tears down everything Init() set up, in reverse dependency order:
// mixer, worker threads, PulseAudio connection, X display. Safe to call
// when not initialized (returns 0 immediately).
int32_t AudioDeviceLinuxPulse::Terminate()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    if (!_initialized)
    {
        return 0;
    }

    _mixerManager.Close();

    // RECORDING
    if (_ptrThreadRec)
    {
        rtc::PlatformThread* tmpThread = _ptrThreadRec.release();

        // Wake the thread so it can observe the stop request, then join.
        _timeEventRec.Set();
        tmpThread->Stop();
        delete tmpThread;
    }

    // PLAYOUT
    if (_ptrThreadPlay)
    {
        rtc::PlatformThread* tmpThread = _ptrThreadPlay.release();

        // Wake the thread so it can observe the stop request, then join.
        _timeEventPlay.Set();
        tmpThread->Stop();
        delete tmpThread;
    }

    // Terminate PulseAudio
    if (TerminatePulseAudio() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to terminate PulseAudio");
        return -1;
    }

#ifdef USE_X11
    if (_XDisplay)
    {
      XCloseDisplay(_XDisplay);
      _XDisplay = NULL;
    }
#endif

    _initialized = false;
    _outputDeviceIsSpecified = false;
    _inputDeviceIsSpecified = false;

    return 0;
}

// Returns true once Init() has completed successfully and Terminate() has
// not been called since.
bool AudioDeviceLinuxPulse::Initialized() const
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    return (_initialized);
}
270 :
271 0 : int32_t AudioDeviceLinuxPulse::InitSpeaker()
272 : {
273 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
274 :
275 0 : if (_playing)
276 : {
277 0 : return -1;
278 : }
279 :
280 0 : if (!_outputDeviceIsSpecified)
281 : {
282 0 : return -1;
283 : }
284 :
285 : // check if default device
286 0 : if (_outputDeviceIndex == 0)
287 : {
288 0 : uint16_t deviceIndex = 0;
289 0 : GetDefaultDeviceInfo(false, NULL, deviceIndex);
290 0 : _paDeviceIndex = deviceIndex;
291 : } else
292 : {
293 : // get the PA device index from
294 : // the callback
295 0 : _deviceIndex = _outputDeviceIndex;
296 :
297 : // get playout devices
298 0 : PlayoutDevices();
299 : }
300 :
301 : // the callback has now set the _paDeviceIndex to
302 : // the PulseAudio index of the device
303 0 : if (_mixerManager.OpenSpeaker(_paDeviceIndex) == -1)
304 : {
305 0 : return -1;
306 : }
307 :
308 : // clear _deviceIndex
309 0 : _deviceIndex = -1;
310 0 : _paDeviceIndex = -1;
311 :
312 0 : return 0;
313 : }
314 :
315 0 : int32_t AudioDeviceLinuxPulse::InitMicrophone()
316 : {
317 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
318 0 : if (_recording)
319 : {
320 0 : return -1;
321 : }
322 :
323 0 : if (!_inputDeviceIsSpecified)
324 : {
325 0 : return -1;
326 : }
327 :
328 : // Check if default device
329 0 : if (_inputDeviceIndex == 0)
330 : {
331 0 : uint16_t deviceIndex = 0;
332 0 : GetDefaultDeviceInfo(true, NULL, deviceIndex);
333 0 : _paDeviceIndex = deviceIndex;
334 : } else
335 : {
336 : // Get the PA device index from
337 : // the callback
338 0 : _deviceIndex = _inputDeviceIndex;
339 :
340 : // get recording devices
341 0 : RecordingDevices();
342 : }
343 :
344 : // The callback has now set the _paDeviceIndex to
345 : // the PulseAudio index of the device
346 0 : if (_mixerManager.OpenMicrophone(_paDeviceIndex) == -1)
347 : {
348 0 : return -1;
349 : }
350 :
351 : // Clear _deviceIndex
352 0 : _deviceIndex = -1;
353 0 : _paDeviceIndex = -1;
354 :
355 0 : return 0;
356 : }
357 :
358 0 : bool AudioDeviceLinuxPulse::SpeakerIsInitialized() const
359 : {
360 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
361 0 : return (_mixerManager.SpeakerIsInitialized());
362 : }
363 :
364 0 : bool AudioDeviceLinuxPulse::MicrophoneIsInitialized() const
365 : {
366 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
367 0 : return (_mixerManager.MicrophoneIsInitialized());
368 : }
369 :
370 0 : int32_t AudioDeviceLinuxPulse::SpeakerVolumeIsAvailable(bool& available)
371 : {
372 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
373 0 : bool wasInitialized = _mixerManager.SpeakerIsInitialized();
374 :
375 : // Make an attempt to open up the
376 : // output mixer corresponding to the currently selected output device.
377 0 : if (!wasInitialized && InitSpeaker() == -1)
378 : {
379 : // If we end up here it means that the selected speaker has no volume
380 : // control.
381 0 : available = false;
382 0 : return 0;
383 : }
384 :
385 : // Given that InitSpeaker was successful, we know volume control exists.
386 0 : available = true;
387 :
388 : // Close the initialized output mixer
389 0 : if (!wasInitialized)
390 : {
391 0 : _mixerManager.CloseSpeaker();
392 : }
393 :
394 0 : return 0;
395 : }
396 :
397 0 : int32_t AudioDeviceLinuxPulse::SetSpeakerVolume(uint32_t volume)
398 : {
399 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
400 0 : if (!_playing) {
401 : // Only update the volume if it's been set while we weren't playing.
402 0 : update_speaker_volume_at_startup_ = true;
403 : }
404 0 : return (_mixerManager.SetSpeakerVolume(volume));
405 : }
406 :
407 0 : int32_t AudioDeviceLinuxPulse::SpeakerVolume(uint32_t& volume) const
408 : {
409 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
410 0 : uint32_t level(0);
411 :
412 0 : if (_mixerManager.SpeakerVolume(level) == -1)
413 : {
414 0 : return -1;
415 : }
416 :
417 0 : volume = level;
418 :
419 0 : return 0;
420 : }
421 :
422 0 : int32_t AudioDeviceLinuxPulse::SetWaveOutVolume(
423 : uint16_t volumeLeft,
424 : uint16_t volumeRight)
425 : {
426 :
427 : WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
428 : " API call not supported on this platform");
429 0 : return -1;
430 : }
431 :
432 0 : int32_t AudioDeviceLinuxPulse::WaveOutVolume(
433 : uint16_t& /*volumeLeft*/,
434 : uint16_t& /*volumeRight*/) const
435 : {
436 :
437 : WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
438 : " API call not supported on this platform");
439 0 : return -1;
440 : }
441 :
442 0 : int32_t AudioDeviceLinuxPulse::MaxSpeakerVolume(
443 : uint32_t& maxVolume) const
444 : {
445 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
446 0 : uint32_t maxVol(0);
447 :
448 0 : if (_mixerManager.MaxSpeakerVolume(maxVol) == -1)
449 : {
450 0 : return -1;
451 : }
452 :
453 0 : maxVolume = maxVol;
454 :
455 0 : return 0;
456 : }
457 :
458 0 : int32_t AudioDeviceLinuxPulse::MinSpeakerVolume(
459 : uint32_t& minVolume) const
460 : {
461 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
462 0 : uint32_t minVol(0);
463 :
464 0 : if (_mixerManager.MinSpeakerVolume(minVol) == -1)
465 : {
466 0 : return -1;
467 : }
468 :
469 0 : minVolume = minVol;
470 :
471 0 : return 0;
472 : }
473 :
474 0 : int32_t AudioDeviceLinuxPulse::SpeakerVolumeStepSize(
475 : uint16_t& stepSize) const
476 : {
477 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
478 0 : uint16_t delta(0);
479 :
480 0 : if (_mixerManager.SpeakerVolumeStepSize(delta) == -1)
481 : {
482 0 : return -1;
483 : }
484 :
485 0 : stepSize = delta;
486 :
487 0 : return 0;
488 : }
489 :
490 0 : int32_t AudioDeviceLinuxPulse::SpeakerMuteIsAvailable(bool& available)
491 : {
492 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
493 0 : bool isAvailable(false);
494 0 : bool wasInitialized = _mixerManager.SpeakerIsInitialized();
495 :
496 : // Make an attempt to open up the
497 : // output mixer corresponding to the currently selected output device.
498 : //
499 0 : if (!wasInitialized && InitSpeaker() == -1)
500 : {
501 : // If we end up here it means that the selected speaker has no volume
502 : // control, hence it is safe to state that there is no mute control
503 : // already at this stage.
504 0 : available = false;
505 0 : return 0;
506 : }
507 :
508 : // Check if the selected speaker has a mute control
509 0 : _mixerManager.SpeakerMuteIsAvailable(isAvailable);
510 :
511 0 : available = isAvailable;
512 :
513 : // Close the initialized output mixer
514 0 : if (!wasInitialized)
515 : {
516 0 : _mixerManager.CloseSpeaker();
517 : }
518 :
519 0 : return 0;
520 : }
521 :
522 0 : int32_t AudioDeviceLinuxPulse::SetSpeakerMute(bool enable)
523 : {
524 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
525 0 : return (_mixerManager.SetSpeakerMute(enable));
526 : }
527 :
528 0 : int32_t AudioDeviceLinuxPulse::SpeakerMute(bool& enabled) const
529 : {
530 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
531 0 : bool muted(0);
532 0 : if (_mixerManager.SpeakerMute(muted) == -1)
533 : {
534 0 : return -1;
535 : }
536 :
537 0 : enabled = muted;
538 0 : return 0;
539 : }
540 :
541 0 : int32_t AudioDeviceLinuxPulse::MicrophoneMuteIsAvailable(bool& available)
542 : {
543 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
544 0 : bool isAvailable(false);
545 0 : bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
546 :
547 : // Make an attempt to open up the
548 : // input mixer corresponding to the currently selected input device.
549 : //
550 0 : if (!wasInitialized && InitMicrophone() == -1)
551 : {
552 : // If we end up here it means that the selected microphone has no
553 : // volume control, hence it is safe to state that there is no
554 : // boost control already at this stage.
555 0 : available = false;
556 0 : return 0;
557 : }
558 :
559 : // Check if the selected microphone has a mute control
560 : //
561 0 : _mixerManager.MicrophoneMuteIsAvailable(isAvailable);
562 0 : available = isAvailable;
563 :
564 : // Close the initialized input mixer
565 : //
566 0 : if (!wasInitialized)
567 : {
568 0 : _mixerManager.CloseMicrophone();
569 : }
570 :
571 0 : return 0;
572 : }
573 :
574 0 : int32_t AudioDeviceLinuxPulse::SetMicrophoneMute(bool enable)
575 : {
576 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
577 0 : return (_mixerManager.SetMicrophoneMute(enable));
578 : }
579 :
580 0 : int32_t AudioDeviceLinuxPulse::MicrophoneMute(bool& enabled) const
581 : {
582 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
583 0 : bool muted(0);
584 0 : if (_mixerManager.MicrophoneMute(muted) == -1)
585 : {
586 0 : return -1;
587 : }
588 :
589 0 : enabled = muted;
590 0 : return 0;
591 : }
592 :
593 0 : int32_t AudioDeviceLinuxPulse::MicrophoneBoostIsAvailable(bool& available)
594 : {
595 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
596 0 : bool isAvailable(false);
597 0 : bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
598 :
599 : // Enumerate all avaliable microphone and make an attempt to open up the
600 : // input mixer corresponding to the currently selected input device.
601 : //
602 0 : if (!wasInitialized && InitMicrophone() == -1)
603 : {
604 : // If we end up here it means that the selected microphone has no
605 : // volume control, hence it is safe to state that there is no
606 : // boost control already at this stage.
607 0 : available = false;
608 0 : return 0;
609 : }
610 :
611 : // Check if the selected microphone has a boost control
612 0 : _mixerManager.MicrophoneBoostIsAvailable(isAvailable);
613 0 : available = isAvailable;
614 :
615 : // Close the initialized input mixer
616 0 : if (!wasInitialized)
617 : {
618 0 : _mixerManager.CloseMicrophone();
619 : }
620 :
621 0 : return 0;
622 : }
623 :
624 0 : int32_t AudioDeviceLinuxPulse::SetMicrophoneBoost(bool enable)
625 : {
626 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
627 0 : return (_mixerManager.SetMicrophoneBoost(enable));
628 : }
629 :
630 0 : int32_t AudioDeviceLinuxPulse::MicrophoneBoost(bool& enabled) const
631 : {
632 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
633 0 : bool onOff(0);
634 :
635 0 : if (_mixerManager.MicrophoneBoost(onOff) == -1)
636 : {
637 0 : return -1;
638 : }
639 :
640 0 : enabled = onOff;
641 :
642 0 : return 0;
643 : }
644 :
645 0 : int32_t AudioDeviceLinuxPulse::StereoRecordingIsAvailable(bool& available)
646 : {
647 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
648 0 : if (_recChannels == 2 && _recording) {
649 0 : available = true;
650 0 : return 0;
651 : }
652 :
653 0 : available = false;
654 0 : bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
655 0 : int error = 0;
656 :
657 0 : if (!wasInitialized && InitMicrophone() == -1)
658 : {
659 : // Cannot open the specified device
660 0 : available = false;
661 0 : return 0;
662 : }
663 :
664 : // Check if the selected microphone can record stereo.
665 0 : bool isAvailable(false);
666 0 : error = _mixerManager.StereoRecordingIsAvailable(isAvailable);
667 0 : if (!error)
668 0 : available = isAvailable;
669 :
670 : // Close the initialized input mixer
671 0 : if (!wasInitialized)
672 : {
673 0 : _mixerManager.CloseMicrophone();
674 : }
675 :
676 0 : return error;
677 : }
678 :
679 0 : int32_t AudioDeviceLinuxPulse::SetStereoRecording(bool enable)
680 : {
681 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
682 0 : if (enable)
683 0 : _recChannels = 2;
684 : else
685 0 : _recChannels = 1;
686 :
687 0 : return 0;
688 : }
689 :
690 0 : int32_t AudioDeviceLinuxPulse::StereoRecording(bool& enabled) const
691 : {
692 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
693 0 : if (_recChannels == 2)
694 0 : enabled = true;
695 : else
696 0 : enabled = false;
697 :
698 0 : return 0;
699 : }
700 :
701 0 : int32_t AudioDeviceLinuxPulse::StereoPlayoutIsAvailable(bool& available)
702 : {
703 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
704 0 : if (_playChannels == 2 && _playing) {
705 0 : available = true;
706 0 : return 0;
707 : }
708 :
709 0 : available = false;
710 0 : bool wasInitialized = _mixerManager.SpeakerIsInitialized();
711 0 : int error = 0;
712 :
713 0 : if (!wasInitialized && InitSpeaker() == -1)
714 : {
715 : // Cannot open the specified device.
716 0 : return -1;
717 : }
718 :
719 : // Check if the selected speaker can play stereo.
720 0 : bool isAvailable(false);
721 0 : error = _mixerManager.StereoPlayoutIsAvailable(isAvailable);
722 0 : if (!error)
723 0 : available = isAvailable;
724 :
725 : // Close the initialized input mixer
726 0 : if (!wasInitialized)
727 : {
728 0 : _mixerManager.CloseSpeaker();
729 : }
730 :
731 0 : return error;
732 : }
733 :
734 0 : int32_t AudioDeviceLinuxPulse::SetStereoPlayout(bool enable)
735 : {
736 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
737 0 : if (enable)
738 0 : _playChannels = 2;
739 : else
740 0 : _playChannels = 1;
741 :
742 0 : return 0;
743 : }
744 :
745 0 : int32_t AudioDeviceLinuxPulse::StereoPlayout(bool& enabled) const
746 : {
747 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
748 0 : if (_playChannels == 2)
749 0 : enabled = true;
750 : else
751 0 : enabled = false;
752 :
753 0 : return 0;
754 : }
755 :
756 0 : int32_t AudioDeviceLinuxPulse::SetAGC(bool enable)
757 : {
758 0 : CriticalSectionScoped lock(&_critSect);
759 0 : _AGC = enable;
760 :
761 0 : return 0;
762 : }
763 :
764 0 : bool AudioDeviceLinuxPulse::AGC() const
765 : {
766 0 : CriticalSectionScoped lock(&_critSect);
767 0 : return _AGC;
768 : }
769 :
770 0 : int32_t AudioDeviceLinuxPulse::MicrophoneVolumeIsAvailable(
771 : bool& available)
772 : {
773 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
774 0 : bool wasInitialized = _mixerManager.MicrophoneIsInitialized();
775 :
776 : // Make an attempt to open up the
777 : // input mixer corresponding to the currently selected output device.
778 0 : if (!wasInitialized && InitMicrophone() == -1)
779 : {
780 : // If we end up here it means that the selected microphone has no
781 : // volume control.
782 0 : available = false;
783 0 : return 0;
784 : }
785 :
786 : // Given that InitMicrophone was successful, we know that a volume control
787 : // exists.
788 0 : available = true;
789 :
790 : // Close the initialized input mixer
791 0 : if (!wasInitialized)
792 : {
793 0 : _mixerManager.CloseMicrophone();
794 : }
795 :
796 0 : return 0;
797 : }
798 :
799 0 : int32_t AudioDeviceLinuxPulse::SetMicrophoneVolume(uint32_t volume)
800 : {
801 0 : return (_mixerManager.SetMicrophoneVolume(volume));
802 : }
803 :
804 0 : int32_t AudioDeviceLinuxPulse::MicrophoneVolume(
805 : uint32_t& volume) const
806 : {
807 :
808 0 : uint32_t level(0);
809 :
810 0 : if (_mixerManager.MicrophoneVolume(level) == -1)
811 : {
812 : WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
813 : " failed to retrive current microphone level");
814 0 : return -1;
815 : }
816 :
817 0 : volume = level;
818 :
819 0 : return 0;
820 : }
821 :
822 0 : int32_t AudioDeviceLinuxPulse::MaxMicrophoneVolume(
823 : uint32_t& maxVolume) const
824 : {
825 :
826 0 : uint32_t maxVol(0);
827 :
828 0 : if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1)
829 : {
830 0 : return -1;
831 : }
832 :
833 0 : maxVolume = maxVol;
834 :
835 0 : return 0;
836 : }
837 :
838 0 : int32_t AudioDeviceLinuxPulse::MinMicrophoneVolume(
839 : uint32_t& minVolume) const
840 : {
841 :
842 0 : uint32_t minVol(0);
843 :
844 0 : if (_mixerManager.MinMicrophoneVolume(minVol) == -1)
845 : {
846 0 : return -1;
847 : }
848 :
849 0 : minVolume = minVol;
850 :
851 0 : return 0;
852 : }
853 :
854 0 : int32_t AudioDeviceLinuxPulse::MicrophoneVolumeStepSize(
855 : uint16_t& stepSize) const
856 : {
857 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
858 0 : uint16_t delta(0);
859 :
860 0 : if (_mixerManager.MicrophoneVolumeStepSize(delta) == -1)
861 : {
862 0 : return -1;
863 : }
864 :
865 0 : stepSize = delta;
866 :
867 0 : return 0;
868 : }
869 :
870 0 : int16_t AudioDeviceLinuxPulse::PlayoutDevices()
871 : {
872 0 : PaLock();
873 :
874 0 : pa_operation* paOperation = NULL;
875 0 : _numPlayDevices = 1; // init to 1 to account for "default"
876 :
877 : // get the whole list of devices and update _numPlayDevices
878 0 : paOperation = LATE(pa_context_get_sink_info_list)(_paContext,
879 : PaSinkInfoCallback,
880 0 : this);
881 :
882 0 : WaitForOperationCompletion(paOperation);
883 :
884 0 : PaUnLock();
885 :
886 0 : return _numPlayDevices;
887 : }
888 :
889 0 : int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(uint16_t index)
890 : {
891 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
892 0 : if (_playIsInitialized)
893 : {
894 0 : return -1;
895 : }
896 :
897 0 : const uint16_t nDevices = PlayoutDevices();
898 :
899 : WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
900 : " number of availiable output devices is %u", nDevices);
901 :
902 0 : if (index > (nDevices - 1))
903 : {
904 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
905 : " device index is out of range [0,%u]", (nDevices - 1));
906 0 : return -1;
907 : }
908 :
909 0 : _outputDeviceIndex = index;
910 0 : _outputDeviceIsSpecified = true;
911 :
912 0 : return 0;
913 : }
914 :
915 0 : int32_t AudioDeviceLinuxPulse::SetPlayoutDevice(
916 : AudioDeviceModule::WindowsDeviceType /*device*/)
917 : {
918 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
919 : "WindowsDeviceType not supported");
920 0 : return -1;
921 : }
922 :
923 0 : int32_t AudioDeviceLinuxPulse::PlayoutDeviceName(
924 : uint16_t index,
925 : char name[kAdmMaxDeviceNameSize],
926 : char guid[kAdmMaxGuidSize])
927 : {
928 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
929 0 : const uint16_t nDevices = PlayoutDevices();
930 :
931 0 : if ((index > (nDevices - 1)) || (name == NULL))
932 : {
933 0 : return -1;
934 : }
935 :
936 0 : memset(name, 0, kAdmMaxDeviceNameSize);
937 :
938 0 : if (guid != NULL)
939 : {
940 0 : memset(guid, 0, kAdmMaxGuidSize);
941 : }
942 :
943 : // Check if default device
944 0 : if (index == 0)
945 : {
946 0 : uint16_t deviceIndex = 0;
947 0 : return GetDefaultDeviceInfo(false, name, deviceIndex);
948 : }
949 :
950 : // Tell the callback that we want
951 : // The name for this device
952 0 : _playDisplayDeviceName = name;
953 0 : _deviceIndex = index;
954 :
955 : // get playout devices
956 0 : PlayoutDevices();
957 :
958 : // clear device name and index
959 0 : _playDisplayDeviceName = NULL;
960 0 : _deviceIndex = -1;
961 :
962 0 : return 0;
963 : }
964 :
965 0 : int32_t AudioDeviceLinuxPulse::RecordingDeviceName(
966 : uint16_t index,
967 : char name[kAdmMaxDeviceNameSize],
968 : char guid[kAdmMaxGuidSize])
969 : {
970 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
971 0 : const uint16_t nDevices(RecordingDevices());
972 :
973 0 : if ((index > (nDevices - 1)) || (name == NULL))
974 : {
975 0 : return -1;
976 : }
977 :
978 0 : memset(name, 0, kAdmMaxDeviceNameSize);
979 :
980 0 : if (guid != NULL)
981 : {
982 0 : memset(guid, 0, kAdmMaxGuidSize);
983 : }
984 :
985 : // Check if default device
986 0 : if (index == 0)
987 : {
988 0 : uint16_t deviceIndex = 0;
989 0 : return GetDefaultDeviceInfo(true, name, deviceIndex);
990 : }
991 :
992 : // Tell the callback that we want
993 : // the name for this device
994 0 : _recDisplayDeviceName = name;
995 0 : _deviceIndex = index;
996 :
997 : // Get recording devices
998 0 : RecordingDevices();
999 :
1000 : // Clear device name and index
1001 0 : _recDisplayDeviceName = NULL;
1002 0 : _deviceIndex = -1;
1003 :
1004 0 : return 0;
1005 : }
1006 :
1007 0 : int16_t AudioDeviceLinuxPulse::RecordingDevices()
1008 : {
1009 0 : PaLock();
1010 :
1011 0 : pa_operation* paOperation = NULL;
1012 0 : _numRecDevices = 1; // Init to 1 to account for "default"
1013 :
1014 : // Get the whole list of devices and update _numRecDevices
1015 0 : paOperation = LATE(pa_context_get_source_info_list)(_paContext,
1016 : PaSourceInfoCallback,
1017 0 : this);
1018 :
1019 0 : WaitForOperationCompletion(paOperation);
1020 :
1021 0 : PaUnLock();
1022 :
1023 0 : return _numRecDevices;
1024 : }
1025 :
1026 0 : int32_t AudioDeviceLinuxPulse::SetRecordingDevice(uint16_t index)
1027 : {
1028 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1029 0 : if (_recIsInitialized)
1030 : {
1031 0 : return -1;
1032 : }
1033 :
1034 0 : const uint16_t nDevices(RecordingDevices());
1035 :
1036 : WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
1037 : " number of availiable input devices is %u", nDevices);
1038 :
1039 0 : if (index > (nDevices - 1))
1040 : {
1041 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1042 : " device index is out of range [0,%u]", (nDevices - 1));
1043 0 : return -1;
1044 : }
1045 :
1046 0 : _inputDeviceIndex = index;
1047 0 : _inputDeviceIsSpecified = true;
1048 :
1049 0 : return 0;
1050 : }
1051 :
1052 0 : int32_t AudioDeviceLinuxPulse::SetRecordingDevice(
1053 : AudioDeviceModule::WindowsDeviceType /*device*/)
1054 : {
1055 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1056 : "WindowsDeviceType not supported");
1057 0 : return -1;
1058 : }
1059 :
1060 0 : int32_t AudioDeviceLinuxPulse::PlayoutIsAvailable(bool& available)
1061 : {
1062 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1063 0 : available = false;
1064 :
1065 : // Try to initialize the playout side
1066 0 : int32_t res = InitPlayout();
1067 :
1068 : // Cancel effect of initialization
1069 0 : StopPlayout();
1070 :
1071 0 : if (res != -1)
1072 : {
1073 0 : available = true;
1074 : }
1075 :
1076 0 : return res;
1077 : }
1078 :
1079 0 : int32_t AudioDeviceLinuxPulse::RecordingIsAvailable(bool& available)
1080 : {
1081 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1082 0 : available = false;
1083 :
1084 : // Try to initialize the playout side
1085 0 : int32_t res = InitRecording();
1086 :
1087 : // Cancel effect of initialization
1088 0 : StopRecording();
1089 :
1090 0 : if (res != -1)
1091 : {
1092 0 : available = true;
1093 : }
1094 :
1095 0 : return res;
1096 : }
1097 :
1098 0 : int32_t AudioDeviceLinuxPulse::InitPlayout()
1099 : {
1100 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1101 :
1102 0 : if (_playing)
1103 : {
1104 0 : return -1;
1105 : }
1106 :
1107 0 : if (!_outputDeviceIsSpecified)
1108 : {
1109 0 : return -1;
1110 : }
1111 :
1112 0 : if (_playIsInitialized)
1113 : {
1114 0 : return 0;
1115 : }
1116 :
1117 : // Initialize the speaker (devices might have been added or removed)
1118 0 : if (InitSpeaker() == -1)
1119 : {
1120 : WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1121 : " InitSpeaker() failed");
1122 : }
1123 :
1124 : // Set the play sample specification
1125 : pa_sample_spec playSampleSpec;
1126 0 : playSampleSpec.channels = _playChannels;
1127 0 : playSampleSpec.format = PA_SAMPLE_S16LE;
1128 0 : playSampleSpec.rate = sample_rate_hz_;
1129 :
1130 : // Create a new play stream
1131 0 : _playStream = LATE(pa_stream_new)(_paContext, "playStream",
1132 : &playSampleSpec, NULL);
1133 :
1134 0 : if (!_playStream)
1135 : {
1136 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1137 : " failed to create play stream, err=%d",
1138 : LATE(pa_context_errno)(_paContext));
1139 0 : return -1;
1140 : }
1141 :
1142 : // Provide the playStream to the mixer
1143 0 : _mixerManager.SetPlayStream(_playStream);
1144 :
1145 0 : if (_ptrAudioBuffer)
1146 : {
1147 : // Update audio buffer with the selected parameters
1148 0 : _ptrAudioBuffer->SetPlayoutSampleRate(sample_rate_hz_);
1149 0 : _ptrAudioBuffer->SetPlayoutChannels((uint8_t) _playChannels);
1150 : }
1151 :
1152 : WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
1153 : " stream state %d\n",
1154 : LATE(pa_stream_get_state)(_playStream));
1155 :
1156 : // Set stream flags
1157 0 : _playStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
1158 : | PA_STREAM_INTERPOLATE_TIMING);
1159 :
1160 0 : if (_configuredLatencyPlay != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
1161 : {
1162 : // If configuring a specific latency then we want to specify
1163 : // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
1164 : // automatically to reach that target latency. However, that flag
1165 : // doesn't exist in Ubuntu 8.04 and many people still use that,
1166 : // so we have to check the protocol version of libpulse.
1167 0 : if (LATE(pa_context_get_protocol_version)(_paContext)
1168 : >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
1169 : {
1170 0 : _playStreamFlags |= PA_STREAM_ADJUST_LATENCY;
1171 : }
1172 :
1173 : const pa_sample_spec *spec =
1174 0 : LATE(pa_stream_get_sample_spec)(_playStream);
1175 0 : if (!spec)
1176 : {
1177 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1178 : " pa_stream_get_sample_spec()");
1179 0 : return -1;
1180 : }
1181 :
1182 0 : size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
1183 0 : uint32_t latency = bytesPerSec *
1184 0 : WEBRTC_PA_PLAYBACK_LATENCY_MINIMUM_MSECS /
1185 0 : WEBRTC_PA_MSECS_PER_SEC;
1186 :
1187 : // Set the play buffer attributes
1188 0 : _playBufferAttr.maxlength = latency; // num bytes stored in the buffer
1189 0 : _playBufferAttr.tlength = latency; // target fill level of play buffer
1190 : // minimum free num bytes before server request more data
1191 0 : _playBufferAttr.minreq = latency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
1192 : // prebuffer tlength before starting playout
1193 0 : _playBufferAttr.prebuf = _playBufferAttr.tlength -
1194 0 : _playBufferAttr.minreq;
1195 :
1196 0 : _configuredLatencyPlay = latency;
1197 : }
1198 :
1199 : // num samples in bytes * num channels
1200 0 : _playbackBufferSize = sample_rate_hz_ / 100 * 2 * _playChannels;
1201 0 : _playbackBufferUnused = _playbackBufferSize;
1202 0 : _playBuffer = new int8_t[_playbackBufferSize];
1203 :
1204 : // Enable underflow callback
1205 0 : LATE(pa_stream_set_underflow_callback)(_playStream,
1206 0 : PaStreamUnderflowCallback, this);
1207 :
1208 : // Set the state callback function for the stream
1209 0 : LATE(pa_stream_set_state_callback)(_playStream,
1210 0 : PaStreamStateCallback, this);
1211 :
1212 : // Mark playout side as initialized
1213 0 : _playIsInitialized = true;
1214 0 : _sndCardPlayDelay = 0;
1215 0 : _sndCardRecDelay = 0;
1216 :
1217 0 : return 0;
1218 : }
1219 :
// Creates and configures the PulseAudio capture stream (16-bit signed LE PCM
// at the server's native rate). Must be called on the construction thread;
// actual capture is started later by StartRecording().
// Returns 0 on success or if already initialized, -1 on failure.
int32_t AudioDeviceLinuxPulse::InitRecording()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    // Cannot (re)initialize while capture is active.
    if (_recording)
    {
        return -1;
    }

    // An input device must have been selected first.
    if (!_inputDeviceIsSpecified)
    {
        return -1;
    }

    // Already initialized; nothing more to do.
    if (_recIsInitialized)
    {
        return 0;
    }

    // Initialize the microphone (devices might have been added or removed)
    if (InitMicrophone() == -1)
    {
        // Non-fatal: continue and let stream creation decide.
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                     " InitMicrophone() failed");
    }

    // Set the rec sample specification
    pa_sample_spec recSampleSpec;
    recSampleSpec.channels = _recChannels;
    recSampleSpec.format = PA_SAMPLE_S16LE;
    recSampleSpec.rate = sample_rate_hz_;

    // Create a new rec stream
    _recStream = LATE(pa_stream_new)(_paContext, "recStream", &recSampleSpec,
                                     NULL);
    if (!_recStream)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to create rec stream, err=%d",
                     LATE(pa_context_errno)(_paContext));
        return -1;
    }

    // Provide the recStream to the mixer
    _mixerManager.SetRecStream(_recStream);

    if (_ptrAudioBuffer)
    {
        // Update audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(sample_rate_hz_);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t) _recChannels);
    }

    if (_configuredLatencyRec != WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        _recStreamFlags = (pa_stream_flags_t) (PA_STREAM_AUTO_TIMING_UPDATE
            | PA_STREAM_INTERPOLATE_TIMING);

        // If configuring a specific latency then we want to specify
        // PA_STREAM_ADJUST_LATENCY to make the server adjust parameters
        // automatically to reach that target latency. However, that flag
        // doesn't exist in Ubuntu 8.04 and many people still use that,
        // so we have to check the protocol version of libpulse.
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            _recStreamFlags |= PA_STREAM_ADJUST_LATENCY;
        }

        const pa_sample_spec *spec =
            LATE(pa_stream_get_sample_spec)(_recStream);
        if (!spec)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " pa_stream_get_sample_spec(rec)");
            return -1;
        }

        // Target capture latency expressed in bytes.
        size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
        uint32_t latency = bytesPerSec
            * WEBRTC_PA_LOW_CAPTURE_LATENCY_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        // Set the rec buffer attributes
        // Note: fragsize specifies a maximum transfer size, not a minimum, so
        // it is not possible to force a high latency setting, only a low one.
        _recBufferAttr.fragsize = latency; // size of fragment
        _recBufferAttr.maxlength = latency + bytesPerSec
            * WEBRTC_PA_CAPTURE_BUFFER_EXTRA_MSECS / WEBRTC_PA_MSECS_PER_SEC;

        _configuredLatencyRec = latency;
    }

    // 10 ms worth of 2-byte samples per channel.
    _recordBufferSize = sample_rate_hz_ / 100 * 2 * _recChannels;
    _recordBufferUsed = 0;
    _recBuffer = new int8_t[_recordBufferSize];

    // Enable overflow callback
    LATE(pa_stream_set_overflow_callback)(_recStream,
                                          PaStreamOverflowCallback,
                                          this);

    // Set the state callback function for the stream
    LATE(pa_stream_set_state_callback)(_recStream,
                                       PaStreamStateCallback,
                                       this);

    // Mark recording side as initialized
    _recIsInitialized = true;

    return 0;
}
1331 :
1332 0 : int32_t AudioDeviceLinuxPulse::StartRecording()
1333 : {
1334 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1335 0 : if (!_recIsInitialized)
1336 : {
1337 0 : return -1;
1338 : }
1339 :
1340 0 : if (_recording)
1341 : {
1342 0 : return 0;
1343 : }
1344 :
1345 : // Set state to ensure that the recording starts from the audio thread.
1346 0 : _startRec = true;
1347 :
1348 : // The audio thread will signal when recording has started.
1349 0 : _timeEventRec.Set();
1350 0 : if (kEventTimeout == _recStartEvent.Wait(10000))
1351 : {
1352 : {
1353 0 : CriticalSectionScoped lock(&_critSect);
1354 0 : _startRec = false;
1355 : }
1356 0 : StopRecording();
1357 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1358 : " failed to activate recording");
1359 0 : return -1;
1360 : }
1361 :
1362 : {
1363 0 : CriticalSectionScoped lock(&_critSect);
1364 0 : if (_recording)
1365 : {
1366 : // The recording state is set by the audio thread after recording
1367 : // has started.
1368 : } else
1369 : {
1370 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1371 : " failed to activate recording");
1372 0 : return -1;
1373 : }
1374 : }
1375 :
1376 0 : return 0;
1377 : }
1378 :
// Stops capture, disconnects and releases the rec stream and its buffer.
// Safe to call when recording was never initialized (returns 0).
// Returns -1 only if initialized but the stream is missing or cannot
// be disconnected.
int32_t AudioDeviceLinuxPulse::StopRecording()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    CriticalSectionScoped lock(&_critSect);

    if (!_recIsInitialized)
    {
        return 0;
    }

    if (_recStream == NULL)
    {
        return -1;
    }

    // Clear state flags before touching the stream so the audio thread
    // stops using it.
    _recIsInitialized = false;
    _recording = false;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stopping recording");

    // Stop Recording
    PaLock();

    DisableReadCallback();
    LATE(pa_stream_set_overflow_callback)(_recStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_recStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_recStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to disconnect rec stream, err=%d\n",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " disconnected recording");
    }

    // Drop our reference; PulseAudio frees the stream object.
    LATE(pa_stream_unref)(_recStream);
    _recStream = NULL;

    PaUnLock();

    // Provide the recStream to the mixer (now NULL, i.e. detach it)
    _mixerManager.SetRecStream(_recStream);

    if (_recBuffer)
    {
        delete [] _recBuffer;
        _recBuffer = NULL;
    }

    return 0;
}
1441 :
1442 0 : bool AudioDeviceLinuxPulse::RecordingIsInitialized() const
1443 : {
1444 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1445 0 : return (_recIsInitialized);
1446 : }
1447 :
1448 0 : bool AudioDeviceLinuxPulse::Recording() const
1449 : {
1450 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1451 0 : return (_recording);
1452 : }
1453 :
1454 0 : bool AudioDeviceLinuxPulse::PlayoutIsInitialized() const
1455 : {
1456 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1457 0 : return (_playIsInitialized);
1458 : }
1459 :
// Requests the audio thread to start playout and blocks (up to 10 s) until
// it confirms. InitPlayout() must have succeeded first.
// Returns 0 on success (or if already playing), -1 on failure.
int32_t AudioDeviceLinuxPulse::StartPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());

    if (!_playIsInitialized)
    {
        return -1;
    }

    if (_playing)
    {
        return 0;
    }

    // Set state to ensure that playout starts from the audio thread.
    {
        CriticalSectionScoped lock(&_critSect);
        _startPlay = true;
    }

    // Both |_startPlay| and |_playing| need protection since they are also
    // accessed on the playout thread.

    // The audio thread will signal when playout has started.
    _timeEventPlay.Set();
    if (kEventTimeout == _playStartEvent.Wait(10000))
    {
        {
            CriticalSectionScoped lock(&_critSect);
            _startPlay = false;
        }
        // Roll back any partial start before reporting failure.
        StopPlayout();
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to activate playout");
        return -1;
    }

    {
        CriticalSectionScoped lock(&_critSect);
        if (_playing)
        {
            // The playing state is set by the audio thread after playout
            // has started.
        } else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to activate playing");
            return -1;
        }
    }

    return 0;
}
1513 :
// Stops playout, disconnects and releases the play stream and its buffer,
// and resets the cached delay estimates. Safe to call when playout was never
// initialized (returns 0). Returns -1 only if initialized but the stream is
// missing or cannot be disconnected.
int32_t AudioDeviceLinuxPulse::StopPlayout()
{
    RTC_DCHECK(thread_checker_.CalledOnValidThread());
    CriticalSectionScoped lock(&_critSect);

    if (!_playIsInitialized)
    {
        return 0;
    }

    if (_playStream == NULL)
    {
        return -1;
    }

    // Clear state flags before touching the stream so the audio thread
    // stops using it.
    _playIsInitialized = false;
    _playing = false;
    _sndCardPlayDelay = 0;
    _sndCardRecDelay = 0;

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stopping playback");

    // Stop Playout
    PaLock();

    DisableWriteCallback();
    LATE(pa_stream_set_underflow_callback)(_playStream, NULL, NULL);

    // Unset this here so that we don't get a TERMINATED callback
    LATE(pa_stream_set_state_callback)(_playStream, NULL, NULL);

    if (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_UNCONNECTED)
    {
        // Disconnect the stream
        if (LATE(pa_stream_disconnect)(_playStream) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to disconnect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
            PaUnLock();
            return -1;
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     " disconnected playback");
    }

    // Drop our reference; PulseAudio frees the stream object.
    LATE(pa_stream_unref)(_playStream);
    _playStream = NULL;

    PaUnLock();

    // Provide the playStream to the mixer (now NULL, i.e. detach it)
    _mixerManager.SetPlayStream(_playStream);

    if (_playBuffer)
    {
        delete [] _playBuffer;
        _playBuffer = NULL;
    }

    return 0;
}
1578 :
1579 0 : int32_t AudioDeviceLinuxPulse::PlayoutDelay(uint16_t& delayMS) const
1580 : {
1581 0 : CriticalSectionScoped lock(&_critSect);
1582 0 : delayMS = (uint16_t) _sndCardPlayDelay;
1583 0 : return 0;
1584 : }
1585 :
1586 0 : int32_t AudioDeviceLinuxPulse::RecordingDelay(uint16_t& delayMS) const
1587 : {
1588 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1589 0 : delayMS = (uint16_t) _sndCardRecDelay;
1590 0 : return 0;
1591 : }
1592 :
1593 0 : bool AudioDeviceLinuxPulse::Playing() const
1594 : {
1595 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1596 0 : return (_playing);
1597 : }
1598 :
1599 0 : int32_t AudioDeviceLinuxPulse::SetPlayoutBuffer(
1600 : const AudioDeviceModule::BufferType type,
1601 : uint16_t sizeMS)
1602 : {
1603 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1604 0 : if (type != AudioDeviceModule::kFixedBufferSize)
1605 : {
1606 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
1607 : " Adaptive buffer size not supported on this platform");
1608 0 : return -1;
1609 : }
1610 :
1611 0 : _playBufType = type;
1612 0 : _playBufDelayFixed = sizeMS;
1613 :
1614 0 : return 0;
1615 : }
1616 :
1617 0 : int32_t AudioDeviceLinuxPulse::PlayoutBuffer(
1618 : AudioDeviceModule::BufferType& type,
1619 : uint16_t& sizeMS) const
1620 : {
1621 0 : RTC_DCHECK(thread_checker_.CalledOnValidThread());
1622 0 : type = _playBufType;
1623 0 : sizeMS = _playBufDelayFixed;
1624 :
1625 0 : return 0;
1626 : }
1627 :
1628 0 : int32_t AudioDeviceLinuxPulse::CPULoad(uint16_t& /*load*/) const
1629 : {
1630 :
1631 : WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
1632 : " API call not supported on this platform");
1633 0 : return -1;
1634 : }
1635 :
1636 0 : bool AudioDeviceLinuxPulse::PlayoutWarning() const
1637 : {
1638 0 : CriticalSectionScoped lock(&_critSect);
1639 0 : return (_playWarning > 0);
1640 : }
1641 :
1642 0 : bool AudioDeviceLinuxPulse::PlayoutError() const
1643 : {
1644 0 : CriticalSectionScoped lock(&_critSect);
1645 0 : return (_playError > 0);
1646 : }
1647 :
1648 0 : bool AudioDeviceLinuxPulse::RecordingWarning() const
1649 : {
1650 0 : CriticalSectionScoped lock(&_critSect);
1651 0 : return (_recWarning > 0);
1652 : }
1653 :
1654 0 : bool AudioDeviceLinuxPulse::RecordingError() const
1655 : {
1656 0 : CriticalSectionScoped lock(&_critSect);
1657 0 : return (_recError > 0);
1658 : }
1659 :
1660 0 : void AudioDeviceLinuxPulse::ClearPlayoutWarning()
1661 : {
1662 0 : CriticalSectionScoped lock(&_critSect);
1663 0 : _playWarning = 0;
1664 0 : }
1665 :
1666 0 : void AudioDeviceLinuxPulse::ClearPlayoutError()
1667 : {
1668 0 : CriticalSectionScoped lock(&_critSect);
1669 0 : _playError = 0;
1670 0 : }
1671 :
1672 0 : void AudioDeviceLinuxPulse::ClearRecordingWarning()
1673 : {
1674 0 : CriticalSectionScoped lock(&_critSect);
1675 0 : _recWarning = 0;
1676 0 : }
1677 :
1678 0 : void AudioDeviceLinuxPulse::ClearRecordingError()
1679 : {
1680 0 : CriticalSectionScoped lock(&_critSect);
1681 0 : _recError = 0;
1682 0 : }
1683 :
1684 : // ============================================================================
1685 : // Private Methods
1686 : // ============================================================================
1687 :
1688 0 : void AudioDeviceLinuxPulse::PaContextStateCallback(pa_context *c, void *pThis)
1689 : {
1690 : static_cast<AudioDeviceLinuxPulse*> (pThis)->
1691 0 : PaContextStateCallbackHandler(c);
1692 0 : }
1693 :
1694 : // ----------------------------------------------------------------------------
1695 : // PaSinkInfoCallback
1696 : // ----------------------------------------------------------------------------
1697 :
1698 0 : void AudioDeviceLinuxPulse::PaSinkInfoCallback(pa_context */*c*/,
1699 : const pa_sink_info *i, int eol,
1700 : void *pThis)
1701 : {
1702 : static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSinkInfoCallbackHandler(
1703 0 : i, eol);
1704 0 : }
1705 :
1706 0 : void AudioDeviceLinuxPulse::PaSourceInfoCallback(pa_context */*c*/,
1707 : const pa_source_info *i,
1708 : int eol, void *pThis)
1709 : {
1710 : static_cast<AudioDeviceLinuxPulse*> (pThis)->PaSourceInfoCallbackHandler(
1711 0 : i, eol);
1712 0 : }
1713 :
1714 0 : void AudioDeviceLinuxPulse::PaServerInfoCallback(pa_context */*c*/,
1715 : const pa_server_info *i,
1716 : void *pThis)
1717 : {
1718 : static_cast<AudioDeviceLinuxPulse*> (pThis)->
1719 0 : PaServerInfoCallbackHandler(i);
1720 0 : }
1721 :
1722 0 : void AudioDeviceLinuxPulse::PaStreamStateCallback(pa_stream *p, void *pThis)
1723 : {
1724 : static_cast<AudioDeviceLinuxPulse*> (pThis)->
1725 0 : PaStreamStateCallbackHandler(p);
1726 0 : }
1727 :
// Handles context state transitions on the PA mainloop thread. On any
// terminal state (READY, FAILED, TERMINATED) it sets |_paStateChanged| and
// signals the mainloop so InitPulseAudio()'s wait loop can proceed.
void AudioDeviceLinuxPulse::PaContextStateCallbackHandler(pa_context *c)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " context state cb");

    pa_context_state_t state = LATE(pa_context_get_state)(c);
    switch (state)
    {
        case PA_CONTEXT_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " unconnected");
            break;
        case PA_CONTEXT_CONNECTING:
        case PA_CONTEXT_AUTHORIZING:
        case PA_CONTEXT_SETTING_NAME:
            // Intermediate states: keep waiting, do not signal.
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " no state");
            break;
        case PA_CONTEXT_FAILED:
        case PA_CONTEXT_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " failed");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
        case PA_CONTEXT_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " ready");
            _paStateChanged = true;
            LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
            break;
    }
}
1761 :
// Receives one sink (playout device) per invocation during enumeration.
// When the running counter |_numPlayDevices| matches the requested
// |_deviceIndex|, records the sink's PA index and (optionally) copies its
// name/description into the caller-provided buffers. On end-of-list (eol)
// it signals the mainloop to wake the waiting thread.
void AudioDeviceLinuxPulse::PaSinkInfoCallbackHandler(const pa_sink_info *i,
                                                      int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    if (_numPlayDevices == _deviceIndex)
    {
        // Convert the device index to the one of the sink
        _paDeviceIndex = i->index;

        if (_playDeviceName)
        {
            // Copy the sink name (truncate and force termination)
            strncpy(_playDeviceName, i->name, kAdmMaxDeviceNameSize);
            _playDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
        if (_playDisplayDeviceName)
        {
            // Copy the sink display name (truncate and force termination)
            strncpy(_playDisplayDeviceName, i->description,
                    kAdmMaxDeviceNameSize);
            _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
        }
    }

    _numPlayDevices++;
}
1794 :
// Receives one source (capture device) per invocation during enumeration.
// Sinks' monitor sources are skipped so only real inputs are counted. When
// the running counter matches |_deviceIndex|, records the source's PA index
// and (optionally) copies its name/description. On end-of-list (eol) it
// signals the mainloop to wake the waiting thread.
void AudioDeviceLinuxPulse::PaSourceInfoCallbackHandler(
    const pa_source_info *i,
    int eol)
{
    if (eol)
    {
        // Signal that we are done
        LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
        return;
    }

    // We don't want to list output devices
    if (i->monitor_of_sink == PA_INVALID_INDEX)
    {
        if (_numRecDevices == _deviceIndex)
        {
            // Convert the device index to the one of the source
            _paDeviceIndex = i->index;

            if (_recDeviceName)
            {
                // copy the source name (truncate and force termination)
                strncpy(_recDeviceName, i->name, kAdmMaxDeviceNameSize);
                _recDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
            if (_recDisplayDeviceName)
            {
                // Copy the source display name (truncate and force
                // termination)
                strncpy(_recDisplayDeviceName, i->description,
                        kAdmMaxDeviceNameSize);
                _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
            }
        }

        _numRecDevices++;
    }
}
1832 :
// Handles the reply to a server-info query: caches the server's native
// sample rate and version string, and copies the default source/sink names
// into whichever display-name buffers the requester set up. Signals the
// mainloop when done.
void AudioDeviceLinuxPulse::PaServerInfoCallbackHandler(
    const pa_server_info *i)
{
    // Use PA native sampling rate
    sample_rate_hz_ = i->sample_spec.rate;

    // Copy the PA server version
    // NOTE(review): the hardcoded 31/[31] implies _paServerVersion holds at
    // least 32 bytes -- confirm against the declaration in the header.
    strncpy(_paServerVersion, i->server_version, 31);
    _paServerVersion[31] = '\0';

    if (_recDisplayDeviceName)
    {
        // Copy the default source name (truncate and force termination)
        strncpy(_recDisplayDeviceName, i->default_source_name,
                kAdmMaxDeviceNameSize);
        _recDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    if (_playDisplayDeviceName)
    {
        // Copy the default sink name (truncate and force termination)
        strncpy(_playDisplayDeviceName, i->default_sink_name,
                kAdmMaxDeviceNameSize);
        _playDisplayDeviceName[kAdmMaxDeviceNameSize - 1] = '\0';
    }

    // Wake the thread blocked in WaitForOperationCompletion().
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
1861 :
// Logs stream state transitions and signals the mainloop on every change so
// threads waiting for a stream state advance can re-check it.
void AudioDeviceLinuxPulse::PaStreamStateCallbackHandler(pa_stream *p)
{
    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " stream state cb");

    pa_stream_state_t state = LATE(pa_stream_get_state)(p);
    switch (state)
    {
        case PA_STREAM_UNCONNECTED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " unconnected");
            break;
        case PA_STREAM_CREATING:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " creating");
            break;
        case PA_STREAM_FAILED:
        case PA_STREAM_TERMINATED:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " failed");
            break;
        case PA_STREAM_READY:
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         " ready");
            break;
    }

    // Signal unconditionally; waiters re-read the state themselves.
    LATE(pa_threaded_mainloop_signal)(_paMainloop, 0);
}
1891 :
1892 0 : int32_t AudioDeviceLinuxPulse::CheckPulseAudioVersion()
1893 : {
1894 0 : PaLock();
1895 :
1896 0 : pa_operation* paOperation = NULL;
1897 :
1898 : // get the server info and update deviceName
1899 0 : paOperation = LATE(pa_context_get_server_info)(_paContext,
1900 : PaServerInfoCallback,
1901 0 : this);
1902 :
1903 0 : WaitForOperationCompletion(paOperation);
1904 :
1905 0 : PaUnLock();
1906 :
1907 : WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
1908 : " checking PulseAudio version: %s", _paServerVersion);
1909 :
1910 0 : return 0;
1911 : }
1912 :
1913 0 : int32_t AudioDeviceLinuxPulse::InitSamplingFrequency()
1914 : {
1915 0 : PaLock();
1916 :
1917 0 : pa_operation* paOperation = NULL;
1918 :
1919 : // Get the server info and update sample_rate_hz_
1920 0 : paOperation = LATE(pa_context_get_server_info)(_paContext,
1921 : PaServerInfoCallback,
1922 0 : this);
1923 :
1924 0 : WaitForOperationCompletion(paOperation);
1925 :
1926 0 : PaUnLock();
1927 :
1928 0 : return 0;
1929 : }
1930 :
1931 0 : int32_t AudioDeviceLinuxPulse::GetDefaultDeviceInfo(bool recDevice,
1932 : char* name,
1933 : uint16_t& index)
1934 : {
1935 0 : char tmpName[kAdmMaxDeviceNameSize] = {0};
1936 : // subtract length of "default: "
1937 0 : uint16_t nameLen = kAdmMaxDeviceNameSize - 9;
1938 0 : char* pName = NULL;
1939 :
1940 0 : if (name)
1941 : {
1942 : // Add "default: "
1943 0 : strcpy(name, "default: ");
1944 0 : pName = &name[9];
1945 : }
1946 :
1947 : // Tell the callback that we want
1948 : // the name for this device
1949 0 : if (recDevice)
1950 : {
1951 0 : _recDisplayDeviceName = tmpName;
1952 : } else
1953 : {
1954 0 : _playDisplayDeviceName = tmpName;
1955 : }
1956 :
1957 : // Set members
1958 0 : _paDeviceIndex = -1;
1959 0 : _deviceIndex = 0;
1960 0 : _numPlayDevices = 0;
1961 0 : _numRecDevices = 0;
1962 :
1963 0 : PaLock();
1964 :
1965 0 : pa_operation* paOperation = NULL;
1966 :
1967 : // Get the server info and update deviceName
1968 0 : paOperation = LATE(pa_context_get_server_info)(_paContext,
1969 : PaServerInfoCallback,
1970 0 : this);
1971 :
1972 0 : WaitForOperationCompletion(paOperation);
1973 :
1974 : // Get the device index
1975 0 : if (recDevice)
1976 : {
1977 : paOperation
1978 0 : = LATE(pa_context_get_source_info_by_name)(_paContext,
1979 : (char *) tmpName,
1980 : PaSourceInfoCallback,
1981 0 : this);
1982 : } else
1983 : {
1984 : paOperation
1985 0 : = LATE(pa_context_get_sink_info_by_name)(_paContext,
1986 : (char *) tmpName,
1987 : PaSinkInfoCallback,
1988 0 : this);
1989 : }
1990 :
1991 0 : WaitForOperationCompletion(paOperation);
1992 :
1993 0 : PaUnLock();
1994 :
1995 : // Set the index
1996 0 : index = _paDeviceIndex;
1997 :
1998 0 : if (name)
1999 : {
2000 : // Copy to name string
2001 0 : strncpy(pName, tmpName, nameLen);
2002 : }
2003 :
2004 : // Clear members
2005 0 : _playDisplayDeviceName = NULL;
2006 0 : _recDisplayDeviceName = NULL;
2007 0 : _paDeviceIndex = -1;
2008 0 : _deviceIndex = -1;
2009 0 : _numPlayDevices = 0;
2010 0 : _numRecDevices = 0;
2011 :
2012 0 : return 0;
2013 : }
2014 :
// Loads libpulse via the late-binding symbol table, creates and starts the
// threaded mainloop, connects a context to the default server and blocks
// until the connection reaches a terminal state. On success it also hands
// the mainloop/context to the mixer manager and initializes the native
// sampling frequency. Returns 0 on success, -1 on any failure.
int32_t AudioDeviceLinuxPulse::InitPulseAudio()
{
    int retVal = 0;

    // Load libpulse
    if (!PaSymbolTable.Load())
    {
        // Most likely the Pulse library and sound server are not installed on
        // this system
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to load symbol table");
        return -1;
    }

    // Create a mainloop API and connection to the default server
    // the mainloop is the internal asynchronous API event loop
    if (_paMainloop) {
        // Double-initialization guard.
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PA mainloop has already existed");
        return -1;
    }
    _paMainloop = LATE(pa_threaded_mainloop_new)();
    if (!_paMainloop)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create mainloop");
        return -1;
    }

    // Start the threaded main loop
    retVal = LATE(pa_threaded_mainloop_start)(_paMainloop);
    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to start main loop, error=%d", retVal);
        return -1;
    }

    WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                 " mainloop running!");

    PaLock();

    _paMainloopApi = LATE(pa_threaded_mainloop_get_api)(_paMainloop);
    if (!_paMainloopApi)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create mainloop API");
        PaUnLock();
        return -1;
    }

    // Create a new PulseAudio context
    if (_paContext){
        // Double-initialization guard.
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PA context has already existed");
        PaUnLock();
        return -1;
    }
    _paContext = LATE(pa_context_new)(_paMainloopApi, "WEBRTC VoiceEngine");

    if (!_paContext)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " could not create context");
        PaUnLock();
        return -1;
    }

    // Set state callback function
    LATE(pa_context_set_state_callback)(_paContext, PaContextStateCallback,
                                        this);

    // Connect the context to a server (default)
    _paStateChanged = false;
    retVal = LATE(pa_context_connect)(_paContext,
                                      NULL,
                                      PA_CONTEXT_NOAUTOSPAWN,
                                      NULL);

    if (retVal != PA_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to connect context, error=%d", retVal);
        PaUnLock();
        return -1;
    }

    // Wait for state change; PaContextStateCallbackHandler sets
    // |_paStateChanged| and signals the mainloop on READY/FAILED/TERMINATED.
    while (!_paStateChanged)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    // Now check to see what final state we reached.
    pa_context_state_t state = LATE(pa_context_get_state)(_paContext);

    if (state != PA_CONTEXT_READY)
    {
        if (state == PA_CONTEXT_FAILED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " failed to connect to PulseAudio sound server");
        } else if (state == PA_CONTEXT_TERMINATED)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " PulseAudio connection terminated early");
        } else
        {
            // Shouldn't happen, because we only signal on one of those three
            // states
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         " unknown problem connecting to PulseAudio");
        }
        PaUnLock();
        return -1;
    }

    PaUnLock();

    // Give the objects to the mixer manager
    _mixerManager.SetPulseAudioObjects(_paMainloop, _paContext);

    // Check the version
    if (CheckPulseAudioVersion() < 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " PulseAudio version %s not supported",
                     _paServerVersion);
        return -1;
    }

    // Initialize sampling frequency
    if (InitSamplingFrequency() < 0 || sample_rate_hz_ == 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     " failed to initialize sampling frequency,"
                     " set to %d Hz",
                     sample_rate_hz_);
        return -1;
    }

    return 0;
}
2159 :
2160 0 : int32_t AudioDeviceLinuxPulse::TerminatePulseAudio()
2161 : {
2162 : // Do nothing if the instance doesn't exist
2163 : // likely PaSymbolTable.Load() fails
2164 0 : if (!_paMainloop) {
2165 0 : return 0;
2166 : }
2167 :
2168 0 : PaLock();
2169 :
2170 : // Disconnect the context
2171 0 : if (_paContext)
2172 : {
2173 0 : LATE(pa_context_disconnect)(_paContext);
2174 : }
2175 :
2176 : // Unreference the context
2177 0 : if (_paContext)
2178 : {
2179 0 : LATE(pa_context_unref)(_paContext);
2180 : }
2181 :
2182 0 : PaUnLock();
2183 0 : _paContext = NULL;
2184 :
2185 : // Stop the threaded main loop
2186 0 : if (_paMainloop)
2187 : {
2188 0 : LATE(pa_threaded_mainloop_stop)(_paMainloop);
2189 : }
2190 :
2191 : // Free the mainloop
2192 0 : if (_paMainloop)
2193 : {
2194 0 : LATE(pa_threaded_mainloop_free)(_paMainloop);
2195 : }
2196 :
2197 0 : _paMainloop = NULL;
2198 :
2199 : WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2200 : " PulseAudio terminated");
2201 :
2202 0 : return 0;
2203 : }
2204 :
// Acquires the PulseAudio threaded-mainloop lock; must be held around all
// pa_* calls made from outside the mainloop thread.
void AudioDeviceLinuxPulse::PaLock()
{
    LATE(pa_threaded_mainloop_lock)(_paMainloop);
}
2209 :
// Releases the PulseAudio threaded-mainloop lock taken by PaLock().
void AudioDeviceLinuxPulse::PaUnLock()
{
    LATE(pa_threaded_mainloop_unlock)(_paMainloop);
}
2214 :
// Blocks (with the mainloop lock held by the caller) until |paOperation|
// leaves the RUNNING state, then drops our reference to it. A NULL
// operation is logged and ignored.
void AudioDeviceLinuxPulse::WaitForOperationCompletion(
    pa_operation* paOperation) const
{
    if (!paOperation)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "paOperation NULL in WaitForOperationCompletion");
        return;
    }

    // pa_threaded_mainloop_wait releases the lock while sleeping; the
    // completion callback signals the mainloop to wake us.
    while (LATE(pa_operation_get_state)(paOperation) == PA_OPERATION_RUNNING)
    {
        LATE(pa_threaded_mainloop_wait)(_paMainloop);
    }

    LATE(pa_operation_unref)(paOperation);
}
2232 :
2233 : // ============================================================================
2234 : // Thread Methods
2235 : // ============================================================================
2236 :
// (Re)arms the playout write path. If the stream is READY and already has
// writable space, registering the callback would never fire, so we dispatch
// the play event ourselves instead of installing the callback.
void AudioDeviceLinuxPulse::EnableWriteCallback()
{
    if (LATE(pa_stream_get_state)(_playStream) == PA_STREAM_READY)
    {
        // May already have available space. Must check.
        _tempBufferSpace = LATE(pa_stream_writable_size)(_playStream);
        if (_tempBufferSpace > 0)
        {
            // Yup, there is already space available, so if we register a
            // write callback then it will not receive any event. So dispatch
            // one ourself instead.
            _timeEventPlay.Set();
            return;
        }
    }

    // No space yet: let PulseAudio notify us when the stream is writable.
    LATE(pa_stream_set_write_callback)(_playStream, &PaStreamWriteCallback,
                                       this);
}
2256 :
// Unregisters the PA write callback so the mainloop stops notifying us
// about writable space until EnableWriteCallback() is called again.
void AudioDeviceLinuxPulse::DisableWriteCallback()
{
    LATE(pa_stream_set_write_callback)(_playStream, NULL, NULL);
}
2261 :
2262 0 : void AudioDeviceLinuxPulse::PaStreamWriteCallback(pa_stream */*unused*/,
2263 : size_t buffer_space,
2264 : void *pThis)
2265 : {
2266 : static_cast<AudioDeviceLinuxPulse*> (pThis)->PaStreamWriteCallbackHandler(
2267 0 : buffer_space);
2268 0 : }
2269 :
// Invoked from the PA mainloop when the play stream can accept data.
// Records the writable size and wakes the play worker thread.
void AudioDeviceLinuxPulse::PaStreamWriteCallbackHandler(size_t bufferSpace)
{
    _tempBufferSpace = bufferSpace;

    // Since we write the data asynchronously on a different thread, we have
    // to temporarily disable the write callback or else Pulse will call it
    // continuously until we write the data. It is re-enabled from the play
    // worker thread once the data has been written.
    DisableWriteCallback();
    _timeEventPlay.Set();
}
2280 :
2281 0 : void AudioDeviceLinuxPulse::PaStreamUnderflowCallback(pa_stream */*unused*/,
2282 : void *pThis)
2283 : {
2284 : static_cast<AudioDeviceLinuxPulse*> (pThis)->
2285 0 : PaStreamUnderflowCallbackHandler();
2286 0 : }
2287 :
// Handles a playout underflow report from PulseAudio. If we originally
// configured a latency target, grow it by a fixed increment and push the new
// buffer attributes to the stream so underflows become less likely.
void AudioDeviceLinuxPulse::PaStreamUnderflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  Playout underflow");

    if (_configuredLatencyPlay == WEBRTC_PA_NO_LATENCY_REQUIREMENTS)
    {
        // We didn't configure a pa_buffer_attr before, so switching to
        // one now would be questionable.
        return;
    }

    // Otherwise reconfigure the stream with a higher target latency.

    // Need the sample spec to convert the millisecond increment into bytes.
    const pa_sample_spec *spec = LATE(pa_stream_get_sample_spec)(_playStream);
    if (!spec)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  pa_stream_get_sample_spec()");
        return;
    }

    // New latency (in bytes) = old latency + increment-msecs worth of audio.
    size_t bytesPerSec = LATE(pa_bytes_per_second)(spec);
    uint32_t newLatency = _configuredLatencyPlay + bytesPerSec *
        WEBRTC_PA_PLAYBACK_LATENCY_INCREMENT_MSECS /
        WEBRTC_PA_MSECS_PER_SEC;

    // Set the play buffer attributes
    _playBufferAttr.maxlength = newLatency;
    _playBufferAttr.tlength = newLatency;
    _playBufferAttr.minreq = newLatency / WEBRTC_PA_PLAYBACK_REQUEST_FACTOR;
    _playBufferAttr.prebuf = _playBufferAttr.tlength - _playBufferAttr.minreq;

    pa_operation *op = LATE(pa_stream_set_buffer_attr)(_playStream,
                                                       &_playBufferAttr, NULL,
                                                       NULL);
    if (!op)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  pa_stream_set_buffer_attr()");
        return;
    }

    // Don't need to wait for this to complete.
    LATE(pa_operation_unref)(op);

    // Save the new latency in case we underflow again.
    _configuredLatencyPlay = newLatency;
}
2337 :
// Registers the PA read callback so the mainloop notifies us when recorded
// data is available on the capture stream.
void AudioDeviceLinuxPulse::EnableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream,
                                      &PaStreamReadCallback,
                                      this);
}
2344 :
// Unregisters the PA read callback until EnableReadCallback() re-arms it.
void AudioDeviceLinuxPulse::DisableReadCallback()
{
    LATE(pa_stream_set_read_callback)(_recStream, NULL, NULL);
}
2349 :
2350 0 : void AudioDeviceLinuxPulse::PaStreamReadCallback(pa_stream */*unused1*/,
2351 : size_t /*unused2*/,
2352 : void *pThis)
2353 : {
2354 : static_cast<AudioDeviceLinuxPulse*> (pThis)->
2355 0 : PaStreamReadCallbackHandler();
2356 0 : }
2357 :
// Invoked from the PA mainloop when capture data is available. Peeks the
// data pointer/size into members for the record worker thread, skips stream
// holes, then wakes the worker.
void AudioDeviceLinuxPulse::PaStreamReadCallbackHandler()
{
    // We get the data pointer and size now in order to save one Lock/Unlock
    // in the worker thread.
    if (LATE(pa_stream_peek)(_recStream,
                             &_tempSampleData,
                             &_tempSampleDataSize) != 0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  Can't read data!");
        return;
    }

    // PulseAudio record streams can have holes (for reasons not entirely clear
    // to the PA developers themselves). Since version 4 of PA, these are passed
    // over to the application (us), signaled by a non-zero sample data size
    // (the size of the hole) and a NULL sample data.
    // We handle stream holes as recommended by PulseAudio, i.e. by skipping
    // it, which is done with a stream drop.
    if (_tempSampleDataSize && !_tempSampleData) {
        LATE(pa_stream_drop)(_recStream);
        _tempSampleDataSize = 0; // reset
        return;
    }

    // Since we consume the data asynchronously on a different thread, we have
    // to temporarily disable the read callback or else Pulse will call it
    // continuously until we consume the data. It is re-enabled from the
    // record worker thread once the data has been consumed.
    DisableReadCallback();
    _timeEventRec.Set();
}
2389 :
2390 0 : void AudioDeviceLinuxPulse::PaStreamOverflowCallback(pa_stream */*unused*/,
2391 : void *pThis)
2392 : {
2393 : static_cast<AudioDeviceLinuxPulse*> (pThis)->
2394 0 : PaStreamOverflowCallbackHandler();
2395 0 : }
2396 :
// A capture overflow means PA dropped recorded data before we consumed it;
// we only log a warning — recovery happens naturally as the worker thread
// drains the stream.
void AudioDeviceLinuxPulse::PaStreamOverflowCallbackHandler()
{
    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                 "  Recording overflow");
}
2402 :
2403 0 : int32_t AudioDeviceLinuxPulse::LatencyUsecs(pa_stream *stream)
2404 : {
2405 : if (!WEBRTC_PA_REPORT_LATENCY)
2406 : {
2407 : return 0;
2408 : }
2409 :
2410 0 : if (!stream)
2411 : {
2412 0 : return 0;
2413 : }
2414 :
2415 : pa_usec_t latency;
2416 : int negative;
2417 0 : if (LATE(pa_stream_get_latency)(stream, &latency, &negative) != 0)
2418 : {
2419 : WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
2420 : " Can't query latency");
2421 : // We'd rather continue playout/capture with an incorrect delay than
2422 : // stop it altogether, so return a valid value.
2423 0 : return 0;
2424 : }
2425 :
2426 0 : if (negative)
2427 : {
2428 : WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
2429 : " warning: pa_stream_get_latency reported negative "
2430 : "delay");
2431 :
2432 : // The delay can be negative for monitoring streams if the captured
2433 : // samples haven't been played yet. In such a case, "latency"
2434 : // contains the magnitude, so we must negate it to get the real value.
2435 0 : int32_t tmpLatency = (int32_t) -latency;
2436 0 : if (tmpLatency < 0)
2437 : {
2438 : // Make sure that we don't use a negative delay.
2439 0 : tmpLatency = 0;
2440 : }
2441 :
2442 0 : return tmpLatency;
2443 : } else
2444 : {
2445 0 : return (int32_t) latency;
2446 : }
2447 : }
2448 :
// Consumes a chunk of peeked capture data: tops up any partially filled
// 10 ms buffer, delivers every complete 10 ms frame to VoiceEngine via
// ProcessRecordedData(), and stashes any remainder for the next call.
// Returns -1 when recording stopped while the lock was dropped, else 0.
int32_t AudioDeviceLinuxPulse::ReadRecordedData(
    const void* bufferData,
    size_t bufferSize) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    size_t size = bufferSize;
    // 2 bytes per sample per channel (16-bit PCM assumed by the /2 here —
    // NOTE(review): confirm against the stream's sample spec).
    uint32_t numRecSamples = _recordBufferSize / (2 * _recChannels);

    // Account for the peeked data and the used data.
    // 10 ms per buffered 10 ms frame still waiting to be delivered.
    uint32_t recDelay = (uint32_t) ((LatencyUsecs(_recStream)
        / 1000) + 10 * ((size + _recordBufferUsed) / _recordBufferSize));

    _sndCardRecDelay = recDelay;

    if (_playStream)
    {
        // Get the playout delay.
        _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream) / 1000);
    }

    if (_recordBufferUsed > 0)
    {
        // Have to copy to the buffer until it is full.
        size_t copy = _recordBufferSize - _recordBufferUsed;
        if (size < copy)
        {
            copy = size;
        }

        memcpy(&_recBuffer[_recordBufferUsed], bufferData, copy);
        _recordBufferUsed += copy;
        bufferData = static_cast<const char *> (bufferData) + copy;
        size -= copy;

        if (_recordBufferUsed != _recordBufferSize)
        {
            // Not enough data yet to pass to VoE.
            return 0;
        }

        // Provide data to VoiceEngine.
        if (ProcessRecordedData(_recBuffer, numRecSamples, recDelay) == -1)
        {
            // We have stopped recording.
            return -1;
        }

        _recordBufferUsed = 0;
    }

    // Now process full 10ms sample sets directly from the input.
    while (size >= _recordBufferSize)
    {
        // Provide data to VoiceEngine.
        if (ProcessRecordedData(
            static_cast<int8_t *> (const_cast<void *> (bufferData)),
            numRecSamples, recDelay) == -1)
        {
            // We have stopped recording.
            return -1;
        }

        bufferData = static_cast<const char *> (bufferData) +
            _recordBufferSize;
        size -= _recordBufferSize;

        // We have consumed 10ms of data.
        recDelay -= 10;
    }

    // Now save any leftovers for later.
    if (size > 0)
    {
        memcpy(_recBuffer, bufferData, size);
        _recordBufferUsed = size;
    }

    return 0;
}
2527 :
// Delivers one 10 ms frame of recorded audio to VoiceEngine together with
// delay/AGC metadata. Drops _critSect around the delivery callback, so the
// caller must re-check state afterwards; returns -1 if recording was stopped
// while unlocked, else 0.
int32_t AudioDeviceLinuxPulse::ProcessRecordedData(
    int8_t *bufferData,
    uint32_t bufferSizeInSamples,
    uint32_t recDelay) EXCLUSIVE_LOCKS_REQUIRED(_critSect)
{
    uint32_t currentMicLevel(0);
    uint32_t newMicLevel(0);

    _ptrAudioBuffer->SetRecordedBuffer(bufferData, bufferSizeInSamples);

    if (AGC())
    {
        // Store current mic level in the audio buffer if AGC is enabled
        if (MicrophoneVolume(currentMicLevel) == 0)
        {
            // This call does not affect the actual microphone volume
            _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
        }
    }

    const uint32_t clockDrift(0);
    // TODO(andrew): this is a temporary hack, to avoid non-causal far- and
    // near-end signals at the AEC for PulseAudio. I think the system delay is
    // being correctly calculated here, but for legacy reasons we add +10 ms
    // to the value in the AEC. The real fix will be part of a larger
    // investigation into managing system delay in the AEC.
    if (recDelay > 10)
        recDelay -= 10;
    else
        recDelay = 0;
    _ptrAudioBuffer->SetVQEData(_sndCardPlayDelay, recDelay, clockDrift);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());
    // Deliver recorded samples at specified sample rate,
    // mic level etc. to the observer using callback.
    // Unlock so the delivery callback cannot deadlock against other
    // methods taking _critSect.
    UnLock();
    _ptrAudioBuffer->DeliverRecordedData();
    Lock();

    // We have been unlocked - check the flag again.
    if (!_recording)
    {
        return -1;
    }

    if (AGC())
    {
        newMicLevel = _ptrAudioBuffer->NewMicLevel();
        if (newMicLevel != 0)
        {
            // The VQE will only deliver non-zero microphone levels when a
            // change is needed.
            // Set this new mic level (received from the observer as return
            // value in the callback).
            WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id,
                         "  AGC change of volume: old=%u => new=%u",
                         currentMicLevel, newMicLevel);
            if (SetMicrophoneVolume(newMicLevel) == -1)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id,
                             "  the required modification of the microphone "
                             "volume failed");
            }
        }
    }

    return 0;
}
2596 :
2597 0 : bool AudioDeviceLinuxPulse::PlayThreadFunc(void* pThis)
2598 : {
2599 0 : return (static_cast<AudioDeviceLinuxPulse*> (pThis)->PlayThreadProcess());
2600 : }
2601 :
2602 0 : bool AudioDeviceLinuxPulse::RecThreadFunc(void* pThis)
2603 : {
2604 0 : return (static_cast<AudioDeviceLinuxPulse*> (pThis)->RecThreadProcess());
2605 : }
2606 :
// One iteration of the playout worker thread. Waits for a wakeup from the
// write callback (or EnableWriteCallback), performs the one-time stream
// connection on first start, then writes buffered/freshly-requested PCM data
// into the play stream. Always returns true so the thread keeps running.
bool AudioDeviceLinuxPulse::PlayThreadProcess()
{
    switch (_timeEventPlay.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    CriticalSectionScoped lock(&_critSect);

    if (_startPlay)
    {
        // First wakeup after StartPlayout: connect the stream, then flag
        // _playing and signal _playStartEvent.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startPlay true, performing initial actions");

        _startPlay = false;
        _playDeviceName = NULL;

        // Set if not default device
        if (_outputDeviceIndex > 0)
        {
            // Get the playout device name
            _playDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _outputDeviceIndex;
            PlayoutDevices();
        }

        // Start muted only supported on 0.9.11 and up
        if (LATE(pa_context_get_protocol_version)(_paContext)
            >= WEBRTC_PA_ADJUST_LATENCY_PROTOCOL_VERSION)
        {
            // Get the currently saved speaker mute status
            // and set the initial mute status accordingly
            bool enabled(false);
            _mixerManager.SpeakerMute(enabled);
            if (enabled)
            {
                _playStreamFlags |= PA_STREAM_START_MUTED;
            }
        }

        // Get the currently saved speaker volume
        uint32_t volume = 0;
        if (update_speaker_volume_at_startup_)
            _mixerManager.SpeakerVolume(volume);

        PaLock();

        // NULL gives PA the choice of startup volume.
        pa_cvolume* ptr_cvolume = NULL;
        if (update_speaker_volume_at_startup_) {
            pa_cvolume cVolumes;
            ptr_cvolume = &cVolumes;

            // Set the same volume for all channels
            const pa_sample_spec *spec =
                LATE(pa_stream_get_sample_spec)(_playStream);
            LATE(pa_cvolume_set)(&cVolumes, spec->channels, volume);
            update_speaker_volume_at_startup_ = false;
        }

        // Connect the stream to a sink
        if (LATE(pa_stream_connect_playback)(
            _playStream,
            _playDeviceName,
            &_playBufferAttr,
            (pa_stream_flags_t) _playStreamFlags,
            ptr_cvolume, NULL) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to connect play stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  play stream connected");

        // Wait for state change
        // NOTE(review): a stream entering PA_STREAM_FAILED would never
        // reach READY; this loop relies on the state callback signaling
        // the mainloop — confirm failure handling upstream.
        while (LATE(pa_stream_get_state)(_playStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  play stream ready");

        // We can now handle write callbacks
        EnableWriteCallback();

        PaUnLock();

        // Clear device name
        if (_playDeviceName)
        {
            delete [] _playDeviceName;
            _playDeviceName = NULL;
        }

        _playing = true;
        _playStartEvent.Set();

        return true;
    }

    if (_playing)
    {
        if (!_recording)
        {
            // Update the playout delay (otherwise the record path updates
            // it in ReadRecordedData()).
            _sndCardPlayDelay = (uint32_t) (LatencyUsecs(_playStream)
                / 1000);
        }

        // First drain any leftover samples from the previous request.
        if (_playbackBufferUnused < _playbackBufferSize)
        {

            size_t write = _playbackBufferSize - _playbackBufferUnused;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            PaLock();
            if (LATE(pa_stream_write)(
                _playStream,
                (void *) &_playBuffer[_playbackBufferUnused],
                write, NULL, (int64_t) 0,
                PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     "  pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                                 kTraceError,
                                 kTraceUtility,
                                 _id,
                                 "  kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            _playbackBufferUnused += write;
            _tempBufferSpace -= write;
        }

        uint32_t numPlaySamples = _playbackBufferSize / (2 * _playChannels);
        // Might have been reduced to zero by the above.
        if (_tempBufferSpace > 0)
        {
            // Ask for new PCM data to be played out using the
            // AudioDeviceBuffer ensure that this callback is executed
            // without taking the audio-thread lock.
            UnLock();
            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  requesting data");
            uint32_t nSamples =
                _ptrAudioBuffer->RequestPlayoutData(numPlaySamples);
            Lock();

            // We have been unlocked - check the flag again.
            if (!_playing)
            {
                return true;
            }

            nSamples = _ptrAudioBuffer->GetPlayoutData(_playBuffer);
            if (nSamples != numPlaySamples)
            {
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, "  invalid number of output samples(%d)",
                             nSamples);
            }

            size_t write = _playbackBufferSize;
            if (_tempBufferSpace < write)
            {
                write = _tempBufferSpace;
            }

            WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                         "  will write");
            PaLock();
            if (LATE(pa_stream_write)(_playStream, (void *) &_playBuffer[0],
                                      write, NULL, (int64_t) 0,
                                      PA_SEEK_RELATIVE) != PA_OK)
            {
                _writeErrors++;
                if (_writeErrors > 10)
                {
                    if (_playError == 1)
                    {
                        WEBRTC_TRACE(kTraceWarning,
                                     kTraceUtility, _id,
                                     "  pending playout error exists");
                    }
                    // Triggers callback from module process thread.
                    _playError = 1;
                    WEBRTC_TRACE(
                                 kTraceError,
                                 kTraceUtility,
                                 _id,
                                 "  kPlayoutError message posted: "
                                 "_writeErrors=%u, error=%d",
                                 _writeErrors,
                                 LATE(pa_context_errno)(_paContext));
                    _writeErrors = 0;
                }
            }
            PaUnLock();

            // Anything not written this round is drained first next round.
            _playbackBufferUnused = write;
        }

        _tempBufferSpace = 0;
        PaLock();
        EnableWriteCallback();
        PaUnLock();

    } // _playing

    return true;
}
2847 :
// One iteration of the capture worker thread. Waits for a wakeup from the
// read callback, performs the one-time stream connection on first start,
// then consumes the peeked sample chunk plus anything else readable from the
// record stream. Always returns true so the thread keeps running.
bool AudioDeviceLinuxPulse::RecThreadProcess()
{
    switch (_timeEventRec.Wait(1000))
    {
        case kEventSignaled:
            break;
        case kEventError:
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                         "EventWrapper::Wait() failed");
            return true;
        case kEventTimeout:
            return true;
    }

    CriticalSectionScoped lock(&_critSect);

    if (_startRec)
    {
        // First wakeup after StartRecording: connect the stream, then flag
        // _recording and signal _recStartEvent.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                     "_startRec true, performing initial actions");

        _recDeviceName = NULL;

        // Set if not default device
        if (_inputDeviceIndex > 0)
        {
            // Get the recording device name
            _recDeviceName = new char[kAdmMaxDeviceNameSize];
            _deviceIndex = _inputDeviceIndex;
            RecordingDevices();
        }

        PaLock();

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  connecting stream");

        // Connect the stream to a source
        if (LATE(pa_stream_connect_record)(_recStream,
                                           _recDeviceName,
                                           &_recBufferAttr,
                                           (pa_stream_flags_t) _recStreamFlags) != PA_OK)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to connect rec stream, err=%d",
                         LATE(pa_context_errno)(_paContext));
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  connected");

        // Wait for state change
        // NOTE(review): same caveat as playout — a FAILED stream never
        // reaches READY; confirm the state callback breaks this wait.
        while (LATE(pa_stream_get_state)(_recStream) != PA_STREAM_READY)
        {
            LATE(pa_threaded_mainloop_wait)(_paMainloop);
        }

        WEBRTC_TRACE(kTraceDebug, kTraceAudioDevice, _id,
                     "  done");

        // We can now handle read callbacks
        EnableReadCallback();

        PaUnLock();

        // Clear device name
        if (_recDeviceName)
        {
            delete [] _recDeviceName;
            _recDeviceName = NULL;
        }

        _startRec = false;
        _recording = true;
        _recStartEvent.Set();

        return true;
    }

    if (_recording)
    {
        // Read data and provide it to VoiceEngine. _tempSampleData was
        // peeked by PaStreamReadCallbackHandler on the mainloop thread.
        if (ReadRecordedData(_tempSampleData, _tempSampleDataSize) == -1)
        {
            return true;
        }

        _tempSampleData = NULL;
        _tempSampleDataSize = 0;

        PaLock();
        while (true)
        {
            // Ack the last thing we read
            if (LATE(pa_stream_drop)(_recStream) != 0)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice,
                             _id, "  failed to drop, err=%d\n",
                             LATE(pa_context_errno)(_paContext));
            }

            if (LATE(pa_stream_readable_size)(_recStream) <= 0)
            {
                // Then that was all the data
                break;
            }

            // Else more data.
            const void *sampleData;
            size_t sampleDataSize;

            if (LATE(pa_stream_peek)(_recStream, &sampleData, &sampleDataSize)
                != 0)
            {
                _recError = 1; // triggers callback from module process thread
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
                             _id, "  RECORD_ERROR message posted, error = %d",
                             LATE(pa_context_errno)(_paContext));
                break;
            }

            _sndCardRecDelay = (uint32_t) (LatencyUsecs(_recStream)
                / 1000);

            // Drop lock for sigslot dispatch, which could take a while.
            PaUnLock();
            // Read data and provide it to VoiceEngine
            if (ReadRecordedData(sampleData, sampleDataSize) == -1)
            {
                return true;
            }
            PaLock();

            // Return to top of loop for the ack and the check for more data.
        }

        EnableReadCallback();
        PaUnLock();

    } // _recording

    return true;
}
2991 :
// Returns true if any keyboard key went down since the previous call, by
// diffing the X11 keymap bit vector against the saved snapshot. Used for
// typing detection in ProcessRecordedData(). Without X11 support this
// always reports false.
bool AudioDeviceLinuxPulse::KeyPressed() const{

#ifdef USE_X11
    char szKey[32];
    unsigned int i = 0;
    char state = 0;

    if (!_XDisplay)
        return false;

    // Check key map status
    XQueryKeymap(_XDisplay, szKey);

    // A bit change in keymap means a key is pressed: keep only bits that
    // are newly set (down now, not down before).
    for (i = 0; i < sizeof(szKey); i++)
        state |= (szKey[i] ^ _oldKeyState[i]) & szKey[i];

    // Save old state
    memcpy((char*)_oldKeyState, (char*)szKey, sizeof(_oldKeyState));
    return (state != 0);
#else
    return false;
#endif
}
3016 9 : }
|