/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

// Original author: ekr@rtfm.com

#include "MediaPipeline.h"

#include "MediaStreamGraphImpl.h"

#include <math.h>

#include "nspr.h"
#include "srtp.h"

#include "VideoSegment.h"
#include "Layers.h"
#include "LayersLogging.h"
#include "ImageTypes.h"
#include "ImageContainer.h"
#include "DOMMediaStream.h"
#include "MediaStreamTrack.h"
#include "MediaStreamListener.h"
#include "MediaStreamVideoSink.h"
#include "VideoUtils.h"
#include "VideoStreamTrack.h"
#ifdef WEBRTC_GONK
#include "GrallocImages.h"
#include "mozilla/layers/GrallocTextureClient.h"
#endif

#include "nsError.h"
#include "AudioSegment.h"
#include "MediaSegment.h"
#include "MediaPipelineFilter.h"
#include "RtpLogger.h"
#include "databuffer.h"
#include "transportflow.h"
#include "transportlayer.h"
#include "transportlayerdtls.h"
#include "transportlayerice.h"
#include "runnable_utils.h"
#include "libyuv/convert.h"
#include "mozilla/dom/RTCStatsReportBinding.h"
#include "mozilla/SharedThreadPool.h"
#include "mozilla/PeerIdentity.h"
#include "mozilla/Preferences.h"
#include "mozilla/TaskQueue.h"
#include "mozilla/gfx/Point.h"
#include "mozilla/gfx/Types.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/UniquePtrExtensions.h"
#include "mozilla/Sprintf.h"
#include "mozilla/SizePrintfMacros.h"

#include "webrtc/common_types.h"
#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
#include "webrtc/common_video/include/video_frame_buffer.h"
#include "webrtc/base/bind.h"

#include "nsThreadUtils.h"

#include "logging.h"

// Max size given stereo is 480*2*2 = 1920 (10 ms of 16-bit stereo audio at
// 48 kHz)
#define AUDIO_SAMPLE_BUFFER_MAX_BYTES (480*2*2)
static_assert((WEBRTC_DEFAULT_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
                <= AUDIO_SAMPLE_BUFFER_MAX_BYTES,
              "AUDIO_SAMPLE_BUFFER_MAX_BYTES is not large enough");

using namespace mozilla;
using namespace mozilla::dom;
using namespace mozilla::gfx;
using namespace mozilla::layers;

// Logging context
MOZ_MTLOG_MODULE("mediapipeline")

namespace mozilla {
extern mozilla::LogModule* AudioLogModule();

class VideoConverterListener
{
public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoConverterListener)

  virtual void OnVideoFrameConverted(unsigned char* aVideoFrame,
                                     unsigned int aVideoFrameLength,
                                     unsigned short aWidth,
                                     unsigned short aHeight,
                                     VideoType aVideoType,
                                     uint64_t aCaptureTime) = 0;

  virtual void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame) = 0;

protected:
  virtual ~VideoConverterListener() {}
};

// I420 buffer size macros
#define YSIZE(x,y) (CheckedInt<int>(x)*(y))
#define CRSIZE(x,y) ((((x)+1) >> 1) * (((y)+1) >> 1))
#define I420SIZE(x,y) (YSIZE((x),(y)) + 2 * CRSIZE((x),(y)))
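// For a WxH I420 frame the Y plane holds W*H samples and each chroma plane
// holds ceil(W/2)*ceil(H/2) samples, hence the rounding in CRSIZE. YSIZE
// starts from a CheckedInt<int> so oversized dimensions surface as overflow
// instead of silently wrapping.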

// An async video frame format converter.
//
// Input is typically a MediaStream(Track)Listener driven by MediaStreamGraph.
//
// We keep track of the size of the TaskQueue so we can drop frames if
// conversion is taking too long.
//
// Output is passed through to all added VideoConverterListeners on a TaskQueue
// thread whenever a frame is converted.
class VideoFrameConverter
{
public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameConverter)

  VideoFrameConverter()
    : mLength(0)
    , last_img_(-1) // -1 is not a guaranteed invalid serial. See bug 1262134.
#ifdef DEBUG
    , mThrottleCount(0)
    , mThrottleRecord(0)
#endif
    , mMutex("VideoFrameConverter")
  {
    MOZ_COUNT_CTOR(VideoFrameConverter);

    RefPtr<SharedThreadPool> pool =
      SharedThreadPool::Get(NS_LITERAL_CSTRING("VideoFrameConverter"));

    mTaskQueue = MakeAndAddRef<TaskQueue>(pool.forget());
  }

  void QueueVideoChunk(VideoChunk& aChunk, bool aForceBlack)
  {
    if (aChunk.IsNull()) {
      return;
    }

    // We get passed duplicate frames every ~10ms even with no frame change.
    int32_t serial = aChunk.mFrame.GetImage()->GetSerial();
    if (serial == last_img_) {
      return;
    }
    last_img_ = serial;

    // A throttling limit of 1 allows us to convert 2 frames concurrently.
    // It's short enough to not build up too significant a delay, while
    // giving us a margin to not cause some machines to drop every other frame.
    const int32_t queueThrottlingLimit = 1;
    if (mLength > queueThrottlingLimit) {
      MOZ_MTLOG(ML_DEBUG, "VideoFrameConverter " << this << " queue is full." <<
                          " Throttling by throwing away a frame.");
#ifdef DEBUG
      ++mThrottleCount;
      mThrottleRecord = std::max(mThrottleCount, mThrottleRecord);
#endif
      return;
    }

#ifdef DEBUG
    if (mThrottleCount > 0) {
      auto level = ML_DEBUG;
      if (mThrottleCount > 5) {
        // Log at a higher level when we have large drops.
        level = ML_INFO;
      }
      MOZ_MTLOG(level, "VideoFrameConverter " << this << " stopped" <<
                       " throttling after throwing away " << mThrottleCount <<
                       " frames. Longest throttle so far was " <<
                       mThrottleRecord << " frames.");
      mThrottleCount = 0;
    }
#endif

    bool forceBlack = aForceBlack || aChunk.mFrame.GetForceBlack();

    if (forceBlack) {
      // Reset the last-img check.
      // -1 is not a guaranteed invalid serial. See bug 1262134.
      last_img_ = -1;

      // After disabling, we still want *some* frames to flow to the other side.
      // It could happen that we drop the packet that carried the first disabled
      // frame, for instance. Note that this still requires the application to
      // send a frame, or it doesn't trigger at all.
      const double disabledMinFps = 1.0;
      TimeStamp t = aChunk.mTimeStamp;
      MOZ_ASSERT(!t.IsNull());
      if (!disabled_frame_sent_.IsNull() &&
          (t - disabled_frame_sent_).ToSeconds() < (1.0 / disabledMinFps)) {
        return;
      }

      disabled_frame_sent_ = t;
    } else {
      // This sets it to the Null time.
      disabled_frame_sent_ = TimeStamp();
    }

    ++mLength; // Atomic

    nsCOMPtr<nsIRunnable> runnable =
      NewRunnableMethod<StoreRefPtrPassByPtr<Image>, bool>(
        "VideoFrameConverter::ProcessVideoFrame",
        this, &VideoFrameConverter::ProcessVideoFrame,
        aChunk.mFrame.GetImage(), forceBlack);
    mTaskQueue->Dispatch(runnable.forget());
  }

  void AddListener(VideoConverterListener* aListener)
  {
    MutexAutoLock lock(mMutex);

    MOZ_ASSERT(!mListeners.Contains(aListener));
    mListeners.AppendElement(aListener);
  }

  bool RemoveListener(VideoConverterListener* aListener)
  {
    MutexAutoLock lock(mMutex);

    return mListeners.RemoveElement(aListener);
  }

  void Shutdown()
  {
    mTaskQueue->BeginShutdown();
    mTaskQueue->AwaitShutdownAndIdle();
  }

protected:
  virtual ~VideoFrameConverter()
  {
    MOZ_COUNT_DTOR(VideoFrameConverter);
  }

  static void DeleteBuffer(uint8 *data)
  {
    delete[] data;
  }

  // This takes ownership of the buffer and attaches it to the VideoFrame we
  // send to the listeners.
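  // The buffer is assumed to be tightly packed I420: the Y stride equals the
  // width and each chroma stride is (width + 1) / 2, which is exactly how the
  // plane pointers are derived below.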
  void VideoFrameConverted(UniquePtr<uint8[]> aBuffer,
                           unsigned int aVideoFrameLength,
                           unsigned short aWidth,
                           unsigned short aHeight,
                           VideoType aVideoType,
                           uint64_t aCaptureTime)
  {
    // check for parameter sanity
    if (!aBuffer || aVideoFrameLength == 0 || aWidth == 0 || aHeight == 0) {
      MOZ_MTLOG(ML_ERROR, __FUNCTION__ << " Invalid Parameters ");
      MOZ_ASSERT(false);
      return;
    }
    MOZ_ASSERT(aVideoType == VideoType::kVideoI420);

    const int stride_y = aWidth;
    const int stride_uv = (aWidth + 1) / 2;

    const uint8_t* buffer_y = aBuffer.get();
    const uint8_t* buffer_u = buffer_y + stride_y * aHeight;
    const uint8_t* buffer_v = buffer_u + stride_uv * ((aHeight + 1) / 2);
    rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
      new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
        aWidth, aHeight,
        buffer_y, stride_y,
        buffer_u, stride_uv,
        buffer_v, stride_uv,
        rtc::Bind(&DeleteBuffer, aBuffer.release())));

    webrtc::VideoFrame video_frame(video_frame_buffer, aCaptureTime,
                                   aCaptureTime, webrtc::kVideoRotation_0); // XXX
    VideoFrameConverted(video_frame);
  }

  void VideoFrameConverted(webrtc::VideoFrame& aVideoFrame)
  {
    MutexAutoLock lock(mMutex);

    for (RefPtr<VideoConverterListener>& listener : mListeners) {
      listener->OnVideoFrameConverted(aVideoFrame);
    }
  }

  void ProcessVideoFrame(Image* aImage, bool aForceBlack)
  {
    --mLength; // Atomic
    MOZ_ASSERT(mLength >= 0);

    if (aForceBlack) {
      IntSize size = aImage->GetSize();
      CheckedInt<int> yPlaneLen = YSIZE(size.width, size.height);
      // doesn't need to be CheckedInt, any overflow will be caught by YSIZE
      int cbcrPlaneLen = 2 * CRSIZE(size.width, size.height);
      CheckedInt<int> length = yPlaneLen + cbcrPlaneLen;

      if (!yPlaneLen.isValid() || !length.isValid()) {
        return;
      }

      // Send a black image.
      auto pixelData = MakeUniqueFallible<uint8_t[]>(length.value());
      if (pixelData) {
        // YCrCb black = 0x10 0x80 0x80
        memset(pixelData.get(), 0x10, yPlaneLen.value());
        // Fill Cb/Cr planes
        memset(pixelData.get() + yPlaneLen.value(), 0x80, cbcrPlaneLen);

        MOZ_MTLOG(ML_DEBUG, "Sending a black video frame");
        VideoFrameConverted(Move(pixelData), length.value(),
                            size.width, size.height,
                            mozilla::kVideoI420, 0);
      }
      return;
    }

    ImageFormat format = aImage->GetFormat();
    if (format == ImageFormat::PLANAR_YCBCR) {
      // Cast away constness b/c some of the accessors are non-const
      PlanarYCbCrImage* yuv = const_cast<PlanarYCbCrImage *>(
        static_cast<const PlanarYCbCrImage *>(aImage));

      const PlanarYCbCrData *data = yuv->GetData();
      if (data) {
        uint8_t *y = data->mYChannel;
        uint8_t *cb = data->mCbChannel;
        uint8_t *cr = data->mCrChannel;
        int32_t yStride = data->mYStride;
        int32_t cbCrStride = data->mCbCrStride;
        uint32_t width = yuv->GetSize().width;
        uint32_t height = yuv->GetSize().height;

        rtc::Callback0<void> callback_unused;
        rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
          new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
            width, height,
            y, yStride,
            cb, cbCrStride,
            cr, cbCrStride,
            callback_unused));

        webrtc::VideoFrame i420_frame(video_frame_buffer,
                                      0, 0, // not setting timestamps
                                      webrtc::kVideoRotation_0);
        MOZ_MTLOG(ML_DEBUG, "Sending an I420 video frame");
        VideoFrameConverted(i420_frame);
        return;
      }
    }

    RefPtr<SourceSurface> surf = aImage->GetAsSourceSurface();
    if (!surf) {
      MOZ_MTLOG(ML_ERROR, "Getting surface from " << Stringify(format) << " image failed");
      return;
    }

    RefPtr<DataSourceSurface> data = surf->GetDataSurface();
    if (!data) {
      MOZ_MTLOG(ML_ERROR, "Getting data surface from " << Stringify(format)
                          << " image with " << Stringify(surf->GetType()) << "("
                          << Stringify(surf->GetFormat()) << ") surface failed");
      return;
    }

    IntSize size = aImage->GetSize();
    // these don't need to be CheckedInt, any overflow will be caught by YSIZE
    int half_width = (size.width + 1) >> 1;
    int half_height = (size.height + 1) >> 1;
    int c_size = half_width * half_height;
    CheckedInt<int> buffer_size = YSIZE(size.width, size.height) + 2 * c_size;

    if (!buffer_size.isValid()) {
      return;
    }

    auto yuv_scoped = MakeUniqueFallible<uint8[]>(buffer_size.value());
    if (!yuv_scoped) {
      return;
    }
    uint8* yuv = yuv_scoped.get();

    DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
    if (!map.IsMapped()) {
      MOZ_MTLOG(ML_ERROR, "Reading DataSourceSurface from " << Stringify(format)
                          << " image with " << Stringify(surf->GetType()) << "("
                          << Stringify(surf->GetFormat()) << ") surface failed");
      return;
    }

    int rv;
    int cb_offset = YSIZE(size.width, size.height).value();
    int cr_offset = cb_offset + c_size;
    switch (surf->GetFormat()) {
      case SurfaceFormat::B8G8R8A8:
      case SurfaceFormat::B8G8R8X8:
        rv = libyuv::ARGBToI420(static_cast<uint8*>(map.GetData()),
                                map.GetStride(),
                                yuv, size.width,
                                yuv + cb_offset, half_width,
                                yuv + cr_offset, half_width,
                                size.width, size.height);
        break;
      case SurfaceFormat::R5G6B5_UINT16:
        rv = libyuv::RGB565ToI420(static_cast<uint8*>(map.GetData()),
                                  map.GetStride(),
                                  yuv, size.width,
                                  yuv + cb_offset, half_width,
                                  yuv + cr_offset, half_width,
                                  size.width, size.height);
        break;
      default:
        MOZ_MTLOG(ML_ERROR, "Unsupported RGB video format " << Stringify(surf->GetFormat()));
        MOZ_ASSERT(PR_FALSE);
        return;
    }
    if (rv != 0) {
      MOZ_MTLOG(ML_ERROR, Stringify(surf->GetFormat()) << " to I420 conversion failed");
      return;
    }
    MOZ_MTLOG(ML_DEBUG, "Sending an I420 video frame converted from " <<
                        Stringify(surf->GetFormat()));
    VideoFrameConverted(Move(yuv_scoped), buffer_size.value(), size.width,
                        size.height, mozilla::kVideoI420, 0);
  }

  Atomic<int32_t, Relaxed> mLength;
  RefPtr<TaskQueue> mTaskQueue;

  // Written and read from the queueing thread (normally MSG).
  int32_t last_img_; // serial number of last Image
  TimeStamp disabled_frame_sent_; // The time we sent the last disabled frame.
#ifdef DEBUG
  uint32_t mThrottleCount;
  uint32_t mThrottleRecord;
#endif

  // mMutex guards the below variables.
  Mutex mMutex;
  nsTArray<RefPtr<VideoConverterListener>> mListeners;
};
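
// A rough usage sketch for VideoFrameConverter (the chunk and listener names
// are hypothetical):
//   RefPtr<VideoFrameConverter> converter =
//     MakeAndAddRef<VideoFrameConverter>();
//   converter->AddListener(aListener);          // a VideoConverterListener
//   converter->QueueVideoChunk(chunk, false);   // e.g. from the MSG thread
//   ...
//   converter->Shutdown(); // drains the TaskQueue before teardown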

// An async inserter for audio data, to avoid running audio codec encoders
// on the MSG/input audio thread. Basically just bounces all the audio
// data to a single audio processing/input queue. We could if we wanted to
// use multiple threads and a TaskQueue.
class AudioProxyThread
{
public:
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioProxyThread)

  explicit AudioProxyThread(AudioSessionConduit *aConduit)
    : mConduit(aConduit)
  {
    MOZ_ASSERT(mConduit);
    MOZ_COUNT_CTOR(AudioProxyThread);

    // Use only 1 thread; this also forces FIFO operation.
    // We could use multiple threads, but that may be dicier with the
    // webrtc.org code. If so, we'd need to use TaskQueues like the video
    // frame converter does.
    RefPtr<SharedThreadPool> pool =
      SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1);

    mThread = pool.get();
  }

  // called on mThread
  void InternalProcessAudioChunk(
    TrackRate rate,
    AudioChunk& chunk,
    bool enabled) {

    // Convert to interleaved, 16-bit integer audio, with a maximum of two
    // channels (since the WebRTC.org code below makes the assumption that the
    // input audio is either mono or stereo).
    uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
    const int16_t* samples = nullptr;
    UniquePtr<int16_t[]> convertedSamples;

    // We take advantage of the fact that in the common case (microphone
    // directly to PeerConnection, that is, a normal call) the samples are
    // already 16-bit mono, so the representation in interleaved and planar
    // form is the same, and we can just use that.
    if (enabled && outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
      samples = chunk.ChannelData<int16_t>().Elements()[0];
    } else {
      convertedSamples = MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);

      if (!enabled || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
        PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
      } else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
        DownmixAndInterleave(chunk.ChannelData<float>(),
                             chunk.mDuration, chunk.mVolume, outputChannels,
                             convertedSamples.get());
      } else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
        DownmixAndInterleave(chunk.ChannelData<int16_t>(),
                             chunk.mDuration, chunk.mVolume, outputChannels,
                             convertedSamples.get());
      }
      samples = convertedSamples.get();
    }

    MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100

    // Check if the rate or the number of channels has changed since the last
    // time we came through. I realize it may be overkill to check if the rate
    // has changed, but I believe it is possible (e.g. if we change sources)
    // and it costs us very little to handle this case.

    uint32_t audio_10ms = rate / 100;

    if (!packetizer_ ||
        packetizer_->PacketSize() != audio_10ms ||
        packetizer_->Channels() != outputChannels) {
      // It's ok to drop the audio still in the packetizer here.
      packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
    }

    packetizer_->Input(samples, chunk.mDuration);

    while (packetizer_->PacketsAvailable()) {
      uint32_t samplesPerPacket = packetizer_->PacketSize() *
                                  packetizer_->Channels();
      packetizer_->Output(packet_);
      mConduit->SendAudioFrame(packet_, samplesPerPacket, rate, 0);
    }
  }

  void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
  {
    RUN_ON_THREAD(mThread,
                  WrapRunnable(RefPtr<AudioProxyThread>(this),
                               &AudioProxyThread::InternalProcessAudioChunk,
                               rate, chunk, enabled),
                  NS_DISPATCH_NORMAL);
  }

protected:
  virtual ~AudioProxyThread()
  {
    // Conduits must be released on MainThread, and we might have the last
    // reference. We don't need to worry about runnables still trying to
    // access the conduit, since the runnables hold a ref to AudioProxyThread.
    NS_ReleaseOnMainThread(
      "AudioProxyThread::mConduit", mConduit.forget());
    MOZ_COUNT_DTOR(AudioProxyThread);
  }

  RefPtr<AudioSessionConduit> mConduit;
  nsCOMPtr<nsIEventTarget> mThread;
  // Only accessed on mThread
  nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
  // A buffer to hold a single packet of audio.
  int16_t packet_[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
};
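
// A rough usage sketch for AudioProxyThread (the conduit and chunk names are
// hypothetical):
//   RefPtr<AudioProxyThread> proxy =
//     MakeAndAddRef<AudioProxyThread>(audioConduit.get());
//   proxy->QueueAudioChunk(rate, chunk, /* enabled = */ true);
// Packetization and SendAudioFrame() then run on the single "AudioProxy"
// pool thread, which preserves FIFO ordering of chunks.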

static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
563 :
564 0 : MediaPipeline::MediaPipeline(const std::string& pc,
565 : Direction direction,
566 : nsCOMPtr<nsIEventTarget> main_thread,
567 : nsCOMPtr<nsIEventTarget> sts_thread,
568 : const std::string& track_id,
569 : int level,
570 : RefPtr<MediaSessionConduit> conduit,
571 : RefPtr<TransportFlow> rtp_transport,
572 : RefPtr<TransportFlow> rtcp_transport,
573 0 : nsAutoPtr<MediaPipelineFilter> filter)
574 : : direction_(direction),
575 : track_id_(track_id),
576 : level_(level),
577 : conduit_(conduit),
578 0 : rtp_(rtp_transport, rtcp_transport ? RTP : MUX),
579 0 : rtcp_(rtcp_transport ? rtcp_transport : rtp_transport,
580 0 : rtcp_transport ? RTCP : MUX),
581 : main_thread_(main_thread),
582 : sts_thread_(sts_thread),
583 : rtp_packets_sent_(0),
584 : rtcp_packets_sent_(0),
585 : rtp_packets_received_(0),
586 : rtcp_packets_received_(0),
587 : rtp_bytes_sent_(0),
588 : rtp_bytes_received_(0),
589 : pc_(pc),
590 : description_(),
591 : filter_(filter),
592 0 : rtp_parser_(webrtc::RtpHeaderParser::Create()){
593 : // To indicate rtcp-mux rtcp_transport should be nullptr.
594 : // Therefore it's an error to send in the same flow for
595 : // both rtp and rtcp.
596 0 : MOZ_ASSERT(rtp_transport != rtcp_transport);
597 :
598 : // PipelineTransport() will access this->sts_thread_; moved here for safety
599 0 : transport_ = new PipelineTransport(this);
600 0 : }

MediaPipeline::~MediaPipeline() {
  ASSERT_ON_THREAD(main_thread_);
  MOZ_MTLOG(ML_INFO, "Destroying MediaPipeline: " << description_);
}

nsresult MediaPipeline::Init() {
  ASSERT_ON_THREAD(main_thread_);

  if (direction_ == RECEIVE) {
    conduit_->SetReceiverTransport(transport_);
  } else {
    conduit_->SetTransmitterTransport(transport_);
  }

  RUN_ON_THREAD(sts_thread_,
                WrapRunnable(
                  RefPtr<MediaPipeline>(this),
                  &MediaPipeline::Init_s),
                NS_DISPATCH_NORMAL);

  return NS_OK;
}

nsresult MediaPipeline::Init_s() {
  ASSERT_ON_THREAD(sts_thread_);

  return AttachTransport_s();
}

// Disconnect us from the transport so that we can cleanly destruct the
// pipeline on the main thread. ShutdownMedia_m() must have already been
// called.
void
MediaPipeline::DetachTransport_s()
{
  ASSERT_ON_THREAD(sts_thread_);

  disconnect_all();
  transport_->Detach();
  rtp_.Detach();
  rtcp_.Detach();
}

nsresult
MediaPipeline::AttachTransport_s()
{
  ASSERT_ON_THREAD(sts_thread_);
  nsresult res;
  MOZ_ASSERT(rtp_.transport_);
  MOZ_ASSERT(rtcp_.transport_);
  res = ConnectTransport_s(rtp_);
  if (NS_FAILED(res)) {
    return res;
  }

  if (rtcp_.transport_ != rtp_.transport_) {
    res = ConnectTransport_s(rtcp_);
    if (NS_FAILED(res)) {
      return res;
    }
  }

  transport_->Attach(this);

  return NS_OK;
}

void
MediaPipeline::UpdateTransport_m(int level,
                                 RefPtr<TransportFlow> rtp_transport,
                                 RefPtr<TransportFlow> rtcp_transport,
                                 nsAutoPtr<MediaPipelineFilter> filter)
{
  RUN_ON_THREAD(sts_thread_,
                WrapRunnable(
                  this,
                  &MediaPipeline::UpdateTransport_s,
                  level,
                  rtp_transport,
                  rtcp_transport,
                  filter),
                NS_DISPATCH_NORMAL);
}

void
MediaPipeline::UpdateTransport_s(int level,
                                 RefPtr<TransportFlow> rtp_transport,
                                 RefPtr<TransportFlow> rtcp_transport,
                                 nsAutoPtr<MediaPipelineFilter> filter)
{
  bool rtcp_mux = false;
  if (!rtcp_transport) {
    rtcp_transport = rtp_transport;
    rtcp_mux = true;
  }

  if ((rtp_transport != rtp_.transport_) ||
      (rtcp_transport != rtcp_.transport_)) {
    DetachTransport_s();
    rtp_ = TransportInfo(rtp_transport, rtcp_mux ? MUX : RTP);
    rtcp_ = TransportInfo(rtcp_transport, rtcp_mux ? MUX : RTCP);
    AttachTransport_s();
  }

  level_ = level;

  if (filter_ && filter) {
    // Use the new filter, but don't forget any remote SSRCs that we've learned
    // by receiving traffic.
    filter_->Update(*filter);
  } else {
    filter_ = filter;
  }
}

void
MediaPipeline::AddRIDExtension_m(size_t extension_id)
{
  RUN_ON_THREAD(sts_thread_,
                WrapRunnable(RefPtr<MediaPipeline>(this),
                             &MediaPipeline::AddRIDExtension_s,
                             extension_id),
                NS_DISPATCH_NORMAL);
}

void
MediaPipeline::AddRIDExtension_s(size_t extension_id)
{
  rtp_parser_->RegisterRtpHeaderExtension(webrtc::kRtpExtensionRtpStreamId,
                                          extension_id);
}

void
MediaPipeline::AddRIDFilter_m(const std::string& rid)
{
  RUN_ON_THREAD(sts_thread_,
                WrapRunnable(RefPtr<MediaPipeline>(this),
                             &MediaPipeline::AddRIDFilter_s,
                             rid),
                NS_DISPATCH_NORMAL);
}

void
MediaPipeline::AddRIDFilter_s(const std::string& rid)
{
  filter_ = new MediaPipelineFilter;
  filter_->AddRemoteRtpStreamId(rid);
}

void
MediaPipeline::GetContributingSourceStats(
    const nsString& aInboundRtpStreamId,
    FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const
{
  // Get the expiry from now
  DOMHighResTimeStamp expiry = RtpCSRCStats::GetExpiryFromTime(GetNow());
  for (auto info : csrc_stats_) {
    if (!info.second.Expired(expiry)) {
      RTCRTPContributingSourceStats stats;
      info.second.GetWebidlInstance(stats, aInboundRtpStreamId);
      aArr.AppendElement(stats, fallible);
    }
  }
}

void MediaPipeline::StateChange(TransportFlow *flow, TransportLayer::State state) {
  TransportInfo* info = GetTransportInfo_s(flow);
  MOZ_ASSERT(info);

  if (state == TransportLayer::TS_OPEN) {
    MOZ_MTLOG(ML_INFO, "Flow is ready");
    TransportReady_s(*info);
  } else if (state == TransportLayer::TS_CLOSED ||
             state == TransportLayer::TS_ERROR) {
    TransportFailed_s(*info);
  }
}

static bool MakeRtpTypeToStringArray(const char** array) {
  static const char* RTP_str = "RTP";
  static const char* RTCP_str = "RTCP";
  static const char* MUX_str = "RTP/RTCP mux";
  array[MediaPipeline::RTP] = RTP_str;
  array[MediaPipeline::RTCP] = RTCP_str;
  array[MediaPipeline::MUX] = MUX_str;
  return true;
}

static const char* ToString(MediaPipeline::RtpType type) {
  static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = {nullptr};
  // Dummy variable to cause init to happen only on first call
  static bool dummy = MakeRtpTypeToStringArray(array);
  (void)dummy;
  return array[type];
}

nsresult MediaPipeline::TransportReady_s(TransportInfo &info) {
  MOZ_ASSERT(!description_.empty());

  // TODO(ekr@rtfm.com): implement some kind of notification on
  // failure. bug 852665.
  if (info.state_ != MP_CONNECTING) {
    MOZ_MTLOG(ML_ERROR, "Transport ready for flow in wrong state:" <<
              description_ << ": " << ToString(info.type_));
    return NS_ERROR_FAILURE;
  }

  MOZ_MTLOG(ML_INFO, "Transport ready for pipeline " <<
            static_cast<void *>(this) << " flow " << description_ << ": " <<
            ToString(info.type_));

  // TODO(bcampen@mozilla.com): Should we disconnect from the flow on failure?
  nsresult res;

  // Now instantiate the SRTP objects
  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
      info.transport_->GetLayer(TransportLayerDtls::ID()));
  MOZ_ASSERT(dtls); // DTLS is mandatory

  uint16_t cipher_suite;
  res = dtls->GetSrtpCipher(&cipher_suite);
  if (NS_FAILED(res)) {
    MOZ_MTLOG(ML_ERROR, "Failed to negotiate DTLS-SRTP. This is an error");
    info.state_ = MP_CLOSED;
    UpdateRtcpMuxState(info);
    return res;
  }

  // SRTP Key Exporter as per RFC 5764 S 4.2
  unsigned char srtp_block[SRTP_TOTAL_KEY_LENGTH * 2];
  res = dtls->ExportKeyingMaterial(kDTLSExporterLabel, false, "",
                                   srtp_block, sizeof(srtp_block));
  if (NS_FAILED(res)) {
    MOZ_MTLOG(ML_ERROR, "Failed to compute DTLS-SRTP keys. This is an error");
    info.state_ = MP_CLOSED;
    UpdateRtcpMuxState(info);
    MOZ_CRASH(); // TODO: Remove once we have enough field experience to
                 // know it doesn't happen. bug 798797. Note that the
                 // code after this never executes.
    return res;
  }

  // Slice and dice as per RFC 5764 S 4.2
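  // The exported keying material is laid out as
  //   client_write_key | server_write_key | client_write_salt | server_write_salt,
  // so each side's full SRTP key assembled below is its master key followed
  // by its master salt.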
  unsigned char client_write_key[SRTP_TOTAL_KEY_LENGTH];
  unsigned char server_write_key[SRTP_TOTAL_KEY_LENGTH];
  int offset = 0;
  memcpy(client_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
  offset += SRTP_MASTER_KEY_LENGTH;
  memcpy(server_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
  offset += SRTP_MASTER_KEY_LENGTH;
  memcpy(client_write_key + SRTP_MASTER_KEY_LENGTH,
         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
  offset += SRTP_MASTER_SALT_LENGTH;
  memcpy(server_write_key + SRTP_MASTER_KEY_LENGTH,
         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
  offset += SRTP_MASTER_SALT_LENGTH;
  MOZ_ASSERT(offset == sizeof(srtp_block));

  unsigned char *write_key;
  unsigned char *read_key;

  if (dtls->role() == TransportLayerDtls::CLIENT) {
    write_key = client_write_key;
    read_key = server_write_key;
  } else {
    write_key = server_write_key;
    read_key = client_write_key;
  }

  MOZ_ASSERT(!info.send_srtp_ && !info.recv_srtp_);
  info.send_srtp_ = SrtpFlow::Create(cipher_suite, false, write_key,
                                     SRTP_TOTAL_KEY_LENGTH);
  info.recv_srtp_ = SrtpFlow::Create(cipher_suite, true, read_key,
                                     SRTP_TOTAL_KEY_LENGTH);
  if (!info.send_srtp_ || !info.recv_srtp_) {
    MOZ_MTLOG(ML_ERROR, "Couldn't create SRTP flow for "
              << ToString(info.type_));
    info.state_ = MP_CLOSED;
    UpdateRtcpMuxState(info);
    return NS_ERROR_FAILURE;
  }

  MOZ_MTLOG(ML_INFO, "Listening for " << ToString(info.type_)
            << " packets received on " <<
            static_cast<void *>(dtls->downward()));

  switch (info.type_) {
    case RTP:
      dtls->downward()->SignalPacketReceived.connect(
          this,
          &MediaPipeline::RtpPacketReceived);
      break;
    case RTCP:
      dtls->downward()->SignalPacketReceived.connect(
          this,
          &MediaPipeline::RtcpPacketReceived);
      break;
    case MUX:
      dtls->downward()->SignalPacketReceived.connect(
          this,
          &MediaPipeline::PacketReceived);
      break;
    default:
      MOZ_CRASH();
  }

  info.state_ = MP_OPEN;
  UpdateRtcpMuxState(info);
  return NS_OK;
}

nsresult MediaPipeline::TransportFailed_s(TransportInfo &info) {
  ASSERT_ON_THREAD(sts_thread_);

  info.state_ = MP_CLOSED;
  UpdateRtcpMuxState(info);

  MOZ_MTLOG(ML_INFO, "Transport closed for flow " << ToString(info.type_));

  NS_WARNING(
      "MediaPipeline Transport failed. This is not properly cleaned up yet");

  // TODO(ekr@rtfm.com): SECURITY: Figure out how to clean up if the
  // connection was good and now it is bad.
  // TODO(ekr@rtfm.com): Report up so that the PC knows we
  // have experienced an error.

  return NS_OK;
}

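// When RTP and RTCP are muxed on a single flow, only one TransportInfo sees
// the state change, so mirror its state and SRTP flows onto rtcp_ here; RTCP
// traffic then reuses the same SRTP contexts as RTP.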
void MediaPipeline::UpdateRtcpMuxState(TransportInfo &info) {
  if (info.type_ == MUX) {
    if (info.transport_ == rtcp_.transport_) {
      rtcp_.state_ = info.state_;
      if (!rtcp_.send_srtp_) {
        rtcp_.send_srtp_ = info.send_srtp_;
        rtcp_.recv_srtp_ = info.recv_srtp_;
      }
    }
  }
}

nsresult MediaPipeline::SendPacket(TransportFlow *flow, const void *data,
                                   int len) {
  ASSERT_ON_THREAD(sts_thread_);

  // Note that we bypass the DTLS layer here
  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
      flow->GetLayer(TransportLayerDtls::ID()));
  MOZ_ASSERT(dtls);

  TransportResult res = dtls->downward()->
      SendPacket(static_cast<const unsigned char *>(data), len);

  if (res != len) {
    // Ignore blocking indications
    if (res == TE_WOULDBLOCK)
      return NS_OK;

    MOZ_MTLOG(ML_ERROR, "Failed write on stream " << description_);
    return NS_BASE_STREAM_CLOSED;
  }

  return NS_OK;
}

void MediaPipeline::increment_rtp_packets_sent(int32_t bytes) {
  ++rtp_packets_sent_;
  rtp_bytes_sent_ += bytes;

  if (!(rtp_packets_sent_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTP sent packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtp_.transport_)
              << ": " << rtp_packets_sent_
              << " (" << rtp_bytes_sent_ << " bytes)");
  }
}

void MediaPipeline::increment_rtcp_packets_sent() {
  ++rtcp_packets_sent_;
  if (!(rtcp_packets_sent_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTCP sent packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtcp_.transport_)
              << ": " << rtcp_packets_sent_);
  }
}

void MediaPipeline::increment_rtp_packets_received(int32_t bytes) {
  ++rtp_packets_received_;
  rtp_bytes_received_ += bytes;
  if (!(rtp_packets_received_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTP received packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtp_.transport_)
              << ": " << rtp_packets_received_
              << " (" << rtp_bytes_received_ << " bytes)");
  }
}

void MediaPipeline::increment_rtcp_packets_received() {
  ++rtcp_packets_received_;
  if (!(rtcp_packets_received_ % 100)) {
    MOZ_MTLOG(ML_INFO, "RTCP received packet count for " << description_
              << " Pipeline " << static_cast<void *>(this)
              << " Flow : " << static_cast<void *>(rtcp_.transport_)
              << ": " << rtcp_packets_received_);
  }
}

void MediaPipeline::RtpPacketReceived(TransportLayer *layer,
                                      const unsigned char *data,
                                      size_t len) {
  if (!transport_->pipeline()) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport disconnected");
    return;
  }

  if (!conduit_) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
    return;
  }

  if (rtp_.state_ != MP_OPEN) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; pipeline not open");
    return;
  }

  if (rtp_.transport_->state() != TransportLayer::TS_OPEN) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
    return;
  }

  // This should never happen.
  MOZ_ASSERT(rtp_.recv_srtp_);

  if (direction_ == TRANSMIT) {
    return;
  }

  if (!len) {
    return;
  }

  // Filter out everything but RTP/RTCP
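  // (the first byte of an RTP or RTCP v2 packet always lies in [128, 191]
  // because the version bits are 2; DTLS and STUN use other ranges)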
  if (data[0] < 128 || data[0] > 191) {
    return;
  }

  webrtc::RTPHeader header;
  if (!rtp_parser_->Parse(data, len, &header)) {
    return;
  }

  if (filter_ && !filter_->Filter(header)) {
    return;
  }

  // Only fetch the current time once per packet, and only when something
  // below actually needs it; hasTime tracks whether now has been set.
  DOMHighResTimeStamp now = 0.0;
  bool hasTime = false;

  // Remove expired RtpCSRCStats
  if (!csrc_stats_.empty()) {
    if (!hasTime) {
      now = GetNow();
      hasTime = true;
    }
    auto expiry = RtpCSRCStats::GetExpiryFromTime(now);
    for (auto p = csrc_stats_.begin(); p != csrc_stats_.end();) {
      if (p->second.Expired(expiry)) {
        p = csrc_stats_.erase(p);
        continue;
      }
      p++;
    }
  }

  // Add new RtpCSRCStats
  if (header.numCSRCs) {
    for (auto i = 0; i < header.numCSRCs; i++) {
      if (!hasTime) {
        now = GetNow();
        hasTime = true;
      }
      auto csrcInfo = csrc_stats_.find(header.arrOfCSRCs[i]);
      if (csrcInfo == csrc_stats_.end()) {
        csrc_stats_.insert(std::make_pair(header.arrOfCSRCs[i],
                                          RtpCSRCStats(header.arrOfCSRCs[i], now)));
      } else {
        csrcInfo->second.SetTimestamp(now);
      }
    }
  }

  // Make a copy rather than cast away constness
  auto inner_data = MakeUnique<unsigned char[]>(len);
  memcpy(inner_data.get(), data, len);
  int out_len = 0;
  nsresult res = rtp_.recv_srtp_->UnprotectRtp(inner_data.get(),
                                               len, len, &out_len);
  if (!NS_SUCCEEDED(res)) {
    char tmp[16];

    SprintfLiteral(tmp, "%.2x %.2x %.2x %.2x",
                   inner_data[0],
                   inner_data[1],
                   inner_data[2],
                   inner_data[3]);

    MOZ_MTLOG(ML_NOTICE, "Error unprotecting RTP in " << description_
              << " len = " << len << " [" << tmp << "...]");
    return;
  }
  MOZ_MTLOG(ML_DEBUG, description_ << " received RTP packet.");
  increment_rtp_packets_received(out_len);

  RtpLogger::LogPacket(inner_data.get(), out_len, true, true, header.headerLength,
                       description_);

  (void)conduit_->ReceivedRTPPacket(inner_data.get(), out_len, header.ssrc); // Ignore error codes
}

void MediaPipeline::RtcpPacketReceived(TransportLayer *layer,
                                       const unsigned char *data,
                                       size_t len) {
  if (!transport_->pipeline()) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
    return;
  }

  if (!conduit_) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; media disconnected");
    return;
  }

  if (rtcp_.state_ != MP_OPEN) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; pipeline not open");
    return;
  }

  if (rtcp_.transport_->state() != TransportLayer::TS_OPEN) {
    MOZ_MTLOG(ML_ERROR, "Discarding incoming packet; transport not open");
    return;
  }

  if (!len) {
    return;
  }

  // Filter out everything but RTP/RTCP
  if (data[0] < 128 || data[0] > 191) {
    return;
  }

  // We do not filter RTCP for send pipelines, since the webrtc.org code for
  // senders already has logic to ignore RRs that do not apply.
  // TODO bug 1279153: remove SR check for reduced size RTCP
  if (filter_ && direction_ == RECEIVE) {
    if (!filter_->FilterSenderReport(data, len)) {
      MOZ_MTLOG(ML_NOTICE, "Dropping incoming RTCP packet; filtered out");
      return;
    }
  }

  // Make a copy rather than cast away constness
  auto inner_data = MakeUnique<unsigned char[]>(len);
  memcpy(inner_data.get(), data, len);
  int out_len;

  nsresult res = rtcp_.recv_srtp_->UnprotectRtcp(inner_data.get(),
                                                 len,
                                                 len,
                                                 &out_len);

  if (!NS_SUCCEEDED(res))
    return;

  MOZ_MTLOG(ML_DEBUG, description_ << " received RTCP packet.");
  increment_rtcp_packets_received();

  RtpLogger::LogPacket(inner_data.get(), out_len, true, false, 0, description_);

  MOZ_ASSERT(rtcp_.recv_srtp_); // This should never happen

  (void)conduit_->ReceivedRTCPPacket(inner_data.get(), out_len); // Ignore error codes
}

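// Heuristic RTP/RTCP demultiplexing on a muxed flow, following the logic in
// webrtc.org's rtp_utility.cc (see also RFC 5761): the second byte holds the
// RTCP packet type for RTCP, or the marker bit plus payload type for RTP.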
bool MediaPipeline::IsRtp(const unsigned char *data, size_t len) {
  if (len < 2)
    return false;

  // Check if this is a RTCP packet. Logic based on the types listed in
  // media/webrtc/trunk/src/modules/rtp_rtcp/source/rtp_utility.cc

  // Anything outside this range is RTP.
  if ((data[1] < 192) || (data[1] > 207))
    return true;

  if (data[1] == 192) // FIR
    return false;

  if (data[1] == 193) // NACK, but could also be RTP. This makes us sad
    return true;      // but it's how webrtc.org behaves.

  if (data[1] == 194)
    return true;

  if (data[1] == 195) // IJ.
    return false;

  if ((data[1] > 195) && (data[1] < 200)) // the > 195 is redundant
    return true;

  if ((data[1] >= 200) && (data[1] <= 207)) // SR, RR, SDES, BYE,
    return false;                           // APP, RTPFB, PSFB, XR

  MOZ_ASSERT(false); // Not reached, belt and suspenders.
  return true;
}

void MediaPipeline::PacketReceived(TransportLayer *layer,
                                   const unsigned char *data,
                                   size_t len) {
  if (!transport_->pipeline()) {
    MOZ_MTLOG(ML_DEBUG, "Discarding incoming packet; transport disconnected");
    return;
  }

  if (IsRtp(data, len)) {
    RtpPacketReceived(layer, data, len);
  } else {
    RtcpPacketReceived(layer, data, len);
  }
}

class MediaPipelineTransmit::PipelineListener
  : public MediaStreamVideoSink
{
  friend class MediaPipelineTransmit;
public:
  explicit PipelineListener(const RefPtr<MediaSessionConduit>& conduit)
    : conduit_(conduit),
      track_id_(TRACK_INVALID),
      mMutex("MediaPipelineTransmit::PipelineListener"),
      track_id_external_(TRACK_INVALID),
      active_(false),
      enabled_(false),
      direct_connect_(false)
  {
  }

  ~PipelineListener()
  {
    if (!NS_IsMainThread()) {
      // release conduit on mainthread. Must use forget()!
      nsresult rv = NS_DispatchToMainThread(new
          ConduitDeleteEvent(conduit_.forget()));
      MOZ_ASSERT(!NS_FAILED(rv), "Could not dispatch conduit shutdown to main");
      if (NS_FAILED(rv)) {
        MOZ_CRASH();
      }
    } else {
      conduit_ = nullptr;
    }
    if (converter_) {
      converter_->Shutdown();
    }
  }

  // Dispatches setting the internal TrackID to TRACK_INVALID to the media
  // graph thread to keep it in sync with other MediaStreamGraph operations
  // like RemoveListener() and AddListener(). The TrackID will be updated on
  // the next NewData() callback.
  void UnsetTrackId(MediaStreamGraphImpl* graph);

  void SetActive(bool active) { active_ = active; }
  void SetEnabled(bool enabled) { enabled_ = enabled; }

  // These are needed since nested classes don't have access to any particular
  // instance of the parent
  void SetAudioProxy(const RefPtr<AudioProxyThread>& proxy)
  {
    audio_processing_ = proxy;
  }

  void SetVideoFrameConverter(const RefPtr<VideoFrameConverter>& converter)
  {
    converter_ = converter;
  }

  void OnVideoFrameConverted(unsigned char* aVideoFrame,
                             unsigned int aVideoFrameLength,
                             unsigned short aWidth,
                             unsigned short aHeight,
                             VideoType aVideoType,
                             uint64_t aCaptureTime)
  {
    MOZ_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(
        aVideoFrame, aVideoFrameLength, aWidth, aHeight, aVideoType, aCaptureTime);
  }

  void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame)
  {
    MOZ_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(aVideoFrame);
  }

  // Implement MediaStreamTrackListener
  void NotifyQueuedChanges(MediaStreamGraph* aGraph,
                           StreamTime aTrackOffset,
                           const MediaSegment& aQueuedMedia) override;

  // Implement DirectMediaStreamTrackListener
  void NotifyRealtimeTrackData(MediaStreamGraph* aGraph,
                               StreamTime aTrackOffset,
                               const MediaSegment& aMedia) override;
  void NotifyDirectListenerInstalled(InstallationResult aResult) override;
  void NotifyDirectListenerUninstalled() override;

  // Implement MediaStreamVideoSink
  void SetCurrentFrames(const VideoSegment& aSegment) override;
  void ClearFrames() override {}

private:
  void UnsetTrackIdImpl() {
    MutexAutoLock lock(mMutex);
    track_id_ = track_id_external_ = TRACK_INVALID;
  }

  void NewData(const MediaSegment& media, TrackRate aRate = 0);

  RefPtr<MediaSessionConduit> conduit_;
  RefPtr<AudioProxyThread> audio_processing_;
  RefPtr<VideoFrameConverter> converter_;

  // May be TRACK_INVALID until we see data from the track
  TrackID track_id_; // this is the current TrackID this listener is attached to
  Mutex mMutex;
  // protected by mMutex
  // May be TRACK_INVALID until we see data from the track
  TrackID track_id_external_; // this is queried from other threads

  // active is true if there is a transport to send on
  mozilla::Atomic<bool> active_;
  // enabled is true if the media access control permits sending
  // actual content; when false you get black/silence
  mozilla::Atomic<bool> enabled_;

  // Written and read on the MediaStreamGraph thread
  bool direct_connect_;
};

// Implements VideoConverterListener for MediaPipeline.
//
// We pass converted frames on to MediaPipelineTransmit::PipelineListener
// where they are further forwarded to VideoConduit.
// MediaPipelineTransmit calls Detach() during shutdown to ensure there are
// no cyclic dependencies between us and PipelineListener.
class MediaPipelineTransmit::VideoFrameFeeder
  : public VideoConverterListener
{
public:
  explicit VideoFrameFeeder(const RefPtr<PipelineListener>& listener)
    : listener_(listener),
      mutex_("VideoFrameFeeder")
  {
    MOZ_COUNT_CTOR(VideoFrameFeeder);
  }

  void Detach()
  {
    MutexAutoLock lock(mutex_);

    listener_ = nullptr;
  }

  void OnVideoFrameConverted(unsigned char* aVideoFrame,
                             unsigned int aVideoFrameLength,
                             unsigned short aWidth,
                             unsigned short aHeight,
                             VideoType aVideoType,
                             uint64_t aCaptureTime) override
  {
    MutexAutoLock lock(mutex_);

    if (!listener_) {
      return;
    }

    listener_->OnVideoFrameConverted(aVideoFrame, aVideoFrameLength,
                                     aWidth, aHeight, aVideoType, aCaptureTime);
  }

  void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame) override
  {
    MutexAutoLock lock(mutex_);

    if (!listener_) {
      return;
    }

    listener_->OnVideoFrameConverted(aVideoFrame);
  }

protected:
  virtual ~VideoFrameFeeder()
  {
    MOZ_COUNT_DTOR(VideoFrameFeeder);
  }

  RefPtr<PipelineListener> listener_;
  Mutex mutex_;
};

MediaPipelineTransmit::MediaPipelineTransmit(
    const std::string& pc,
    nsCOMPtr<nsIEventTarget> main_thread,
    nsCOMPtr<nsIEventTarget> sts_thread,
    dom::MediaStreamTrack* domtrack,
    const std::string& track_id,
    int level,
    RefPtr<MediaSessionConduit> conduit,
    RefPtr<TransportFlow> rtp_transport,
    RefPtr<TransportFlow> rtcp_transport,
    nsAutoPtr<MediaPipelineFilter> filter) :
  MediaPipeline(pc, TRANSMIT, main_thread, sts_thread, track_id, level,
                conduit, rtp_transport, rtcp_transport, filter),
  listener_(new PipelineListener(conduit)),
  domtrack_(domtrack)
{
  if (!IsVideo()) {
    audio_processing_ = MakeAndAddRef<AudioProxyThread>(
        static_cast<AudioSessionConduit*>(conduit.get()));
    listener_->SetAudioProxy(audio_processing_);
  } else { // Video
    // For video we send frames to an async VideoFrameConverter that calls
    // back to a VideoFrameFeeder that feeds I420 frames to VideoConduit.

    feeder_ = MakeAndAddRef<VideoFrameFeeder>(listener_);

    converter_ = MakeAndAddRef<VideoFrameConverter>();
    converter_->AddListener(feeder_);

    listener_->SetVideoFrameConverter(converter_);
  }
}
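
// The transmit-side video path wired up above is, in rough order:
//   MediaStreamTrack -> PipelineListener (a MediaStreamVideoSink)
//     -> VideoFrameConverter (async I420 conversion on a TaskQueue)
//     -> VideoFrameFeeder -> VideoConduit::SendVideoFrame().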

MediaPipelineTransmit::~MediaPipelineTransmit()
{
  if (feeder_) {
    feeder_->Detach();
  }
}

nsresult MediaPipelineTransmit::Init() {
  AttachToTrack(track_id_);

  return MediaPipeline::Init();
}

void MediaPipelineTransmit::AttachToTrack(const std::string& track_id) {
  ASSERT_ON_THREAD(main_thread_);

  description_ = pc_ + "| ";
  description_ += conduit_->type() == MediaSessionConduit::AUDIO ?
      "Transmit audio[" : "Transmit video[";
  description_ += track_id;
  description_ += "]";

  // TODO(ekr@rtfm.com): Check for errors
  MOZ_MTLOG(ML_DEBUG, "Attaching pipeline to track "
            << static_cast<void *>(domtrack_) << " conduit type=" <<
            (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));

#if !defined(MOZILLA_EXTERNAL_LINKAGE)
  // With full duplex we don't risk having audio come in late to the MSG
  // so we won't need a direct listener.
  const bool enableDirectListener =
    !Preferences::GetBool("media.navigator.audio.full_duplex", false);
#else
  const bool enableDirectListener = true;
#endif

  if (domtrack_->AsAudioStreamTrack()) {
    if (enableDirectListener) {
      // Register the Listener directly with the source if we can.
      // We also register it as a non-direct listener so we fall back to that
      // if installing the direct listener fails. As a direct listener we get
      // access to direct unqueued (and not resampled) data.
      domtrack_->AddDirectListener(listener_);
    }
    domtrack_->AddListener(listener_);
  } else if (VideoStreamTrack* video = domtrack_->AsVideoStreamTrack()) {
    video->AddVideoOutput(listener_);
  } else {
    MOZ_ASSERT(false, "Unknown track type");
  }
}

bool
MediaPipelineTransmit::IsVideo() const
{
  return !!domtrack_->AsVideoStreamTrack();
}

void MediaPipelineTransmit::UpdateSinkIdentity_m(MediaStreamTrack* track,
                                                 nsIPrincipal* principal,
                                                 const PeerIdentity* sinkIdentity) {
  ASSERT_ON_THREAD(main_thread_);

  if (track != nullptr && track != domtrack_) {
    // If a track is specified, then it might not be for this pipeline,
    // since we receive notifications for all tracks on the PC.
    // nullptr means that the PeerIdentity has changed and shall be applied
    // to all tracks of the PC.
    return;
  }

  bool enableTrack = principal->Subsumes(domtrack_->GetPrincipal());
  if (!enableTrack) {
    // The first check failed, but there's still a chance we can enable the
    // track: if our track is bound to a PeerIdentity, and the peer connection
    // (our sink) is bound to the same identity, then we can enable it.
    const PeerIdentity* trackIdentity = domtrack_->GetPeerIdentity();
    if (sinkIdentity && trackIdentity) {
      enableTrack = (*sinkIdentity == *trackIdentity);
    }
  }

  listener_->SetEnabled(enableTrack);
}

void
MediaPipelineTransmit::DetachMedia()
{
  ASSERT_ON_THREAD(main_thread_);
  if (domtrack_) {
    if (domtrack_->AsAudioStreamTrack()) {
      domtrack_->RemoveDirectListener(listener_);
      domtrack_->RemoveListener(listener_);
    } else if (VideoStreamTrack* video = domtrack_->AsVideoStreamTrack()) {
      video->RemoveVideoOutput(listener_);
    } else {
      MOZ_ASSERT(false, "Unknown track type");
    }
    domtrack_ = nullptr;
  }
  // Let the listener be destroyed with the pipeline (or later).
}

nsresult MediaPipelineTransmit::TransportReady_s(TransportInfo &info) {
  ASSERT_ON_THREAD(sts_thread_);
  // Call base ready function.
  MediaPipeline::TransportReady_s(info);

  // Should not be set for a transmitter
  if (&info == &rtp_) {
    listener_->SetActive(true);
  }

  return NS_OK;
}

nsresult MediaPipelineTransmit::ReplaceTrack(MediaStreamTrack& domtrack) {
  // MainThread, checked in calls we make
  nsString nsTrackId;
  domtrack.GetId(nsTrackId);
  std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
  MOZ_MTLOG(ML_DEBUG, "Reattaching pipeline " << description_ << " to track "
            << static_cast<void *>(&domtrack)
            << " track " << track_id << " conduit type=" <<
            (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));

  DetachMedia();
  domtrack_ = &domtrack; // Detach clears it
  // Unsets the track id after RemoveListener() takes effect.
  listener_->UnsetTrackId(domtrack_->GraphImpl());
  track_id_ = track_id;
  AttachToTrack(track_id);
  return NS_OK;
}

void MediaPipeline::DisconnectTransport_s(TransportInfo &info) {
  MOZ_ASSERT(info.transport_);
  ASSERT_ON_THREAD(sts_thread_);

  info.transport_->SignalStateChange.disconnect(this);
  // We do this even if we're a transmitter, since we are still possibly
  // registered to receive RTCP.
  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
      info.transport_->GetLayer(TransportLayerDtls::ID()));
  MOZ_ASSERT(dtls); // DTLS is mandatory
  MOZ_ASSERT(dtls->downward());
  dtls->downward()->SignalPacketReceived.disconnect(this);
}

nsresult MediaPipeline::ConnectTransport_s(TransportInfo &info) {
  MOZ_ASSERT(info.transport_);
  ASSERT_ON_THREAD(sts_thread_);

  // Look to see if the transport is ready
  if (info.transport_->state() == TransportLayer::TS_OPEN) {
    nsresult res = TransportReady_s(info);
    if (NS_FAILED(res)) {
      MOZ_MTLOG(ML_ERROR, "Error calling TransportReady(); res="
                << static_cast<uint32_t>(res) << " in " << __FUNCTION__);
      return res;
    }
  } else if (info.transport_->state() == TransportLayer::TS_ERROR) {
    MOZ_MTLOG(ML_ERROR, ToString(info.type_)
              << " transport is already in error state");
    TransportFailed_s(info);
    return NS_ERROR_FAILURE;
  }

  info.transport_->SignalStateChange.connect(this,
                                             &MediaPipeline::StateChange);

  return NS_OK;
}

MediaPipeline::TransportInfo* MediaPipeline::GetTransportInfo_s(
    TransportFlow *flow) {
  ASSERT_ON_THREAD(sts_thread_);
  if (flow == rtp_.transport_) {
    return &rtp_;
  }

  if (flow == rtcp_.transport_) {
    return &rtcp_;
  }

  return nullptr;
}
1641 :
1642 0 : nsresult MediaPipeline::PipelineTransport::SendRtpPacket(
1643 : const uint8_t* data, size_t len) {
1644 :
1645 0 : nsAutoPtr<DataBuffer> buf(new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
1646 :
1647 0 : RUN_ON_THREAD(sts_thread_,
1648 0 : WrapRunnable(
1649 0 : RefPtr<MediaPipeline::PipelineTransport>(this),
1650 : &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
1651 : buf, true),
1652 0 : NS_DISPATCH_NORMAL);
1653 :
1654 0 : return NS_OK;
1655 : }
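 : // The DataBuffer above is allocated with len + SRTP_MAX_EXPANSION of
 : // capacity because libsrtp encrypts in place and appends its trailer;
 : // the copy also lets the packet hop safely to the STS thread, where
 : // SendRtpRtcpPacket_s() does the actual protect-and-send.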
1656 :
1657 0 : nsresult MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s(
1658 : nsAutoPtr<DataBuffer> data,
1659 : bool is_rtp) {
1660 :
1661 0 : ASSERT_ON_THREAD(sts_thread_);
1662 0 : if (!pipeline_) {
1663 0 : return NS_OK; // Detached
1664 : }
1665 0 : TransportInfo& transport = is_rtp ? pipeline_->rtp_ : pipeline_->rtcp_;
1666 :
1667 0 : if (!transport.send_srtp_) {
1668 0 : MOZ_MTLOG(ML_DEBUG, "Couldn't write RTP/RTCP packet; SRTP not set up yet");
1669 0 : return NS_OK;
1670 : }
1671 :
1672 0 : MOZ_ASSERT(transport.transport_);
1673 0 : NS_ENSURE_TRUE(transport.transport_, NS_ERROR_NULL_POINTER);
1674 :
1675 : // libsrtp enciphers in place, so we need a big enough buffer.
1676 0 : MOZ_ASSERT(data->capacity() >= data->len() + SRTP_MAX_EXPANSION);
1677 :
1678 0 : if (RtpLogger::IsPacketLoggingOn()) {
1679 0 : int header_len = 12;
1680 0 : webrtc::RTPHeader header;
1681 0 : if (pipeline_->rtp_parser_ &&
1682 0 : pipeline_->rtp_parser_->Parse(data->data(), data->len(), &header)) {
1683 0 : header_len = header.headerLength;
1684 : }
1685 0 : RtpLogger::LogPacket(data->data(), data->len(), false, is_rtp, header_len,
1686 0 : pipeline_->description_);
1687 : }
1688 :
1689 : int out_len;
1690 : nsresult res;
1691 0 : if (is_rtp) {
1692 0 : res = transport.send_srtp_->ProtectRtp(data->data(),
1693 0 : data->len(),
1694 0 : data->capacity(),
1695 0 : &out_len);
1696 : } else {
1697 0 : res = transport.send_srtp_->ProtectRtcp(data->data(),
1698 0 : data->len(),
1699 0 : data->capacity(),
1700 0 : &out_len);
1701 : }
1702 0 : if (NS_FAILED(res)) {
1703 0 : return res;
1704 : }
1705 :
1706 : // Paranoia: don't let uninitialized bytes be counted in data->len().
1707 0 : data->SetLength(out_len);
1708 :
1709 0 : MOZ_MTLOG(ML_DEBUG, pipeline_->description_ << " sending " <<
1710 : (is_rtp ? "RTP" : "RTCP") << " packet");
1711 0 : if (is_rtp) {
1712 0 : pipeline_->increment_rtp_packets_sent(out_len);
1713 : } else {
1714 0 : pipeline_->increment_rtcp_packets_sent();
1715 : }
1716 0 : return pipeline_->SendPacket(transport.transport_, data->data(), out_len);
1717 : }
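 : // Rough shape of a protected packet (illustrative sizes only): a
 : // 172-byte RTP packet goes in, ProtectRtp() rewrites it in place and
 : // reports a somewhat larger out_len (auth tag added), SetLength(out_len)
 : // trims the buffer, and SendPacket() hands exactly out_len bytes to
 : // the transport flow. The !is_rtp path is identical via ProtectRtcp().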
1718 :
1719 0 : nsresult MediaPipeline::PipelineTransport::SendRtcpPacket(
1720 : const uint8_t* data, size_t len) {
1721 :
1722 0 : nsAutoPtr<DataBuffer> buf(new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
1723 :
1724 0 : RUN_ON_THREAD(sts_thread_,
1725 0 : WrapRunnable(
1726 0 : RefPtr<MediaPipeline::PipelineTransport>(this),
1727 : &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
1728 : buf, false),
1729 0 : NS_DISPATCH_NORMAL);
1730 :
1731 0 : return NS_OK;
1732 : }
1733 :
1734 0 : void MediaPipelineTransmit::PipelineListener::
1735 : UnsetTrackId(MediaStreamGraphImpl* graph) {
1736 0 : class Message : public ControlMessage {
1737 : public:
1738 0 : explicit Message(PipelineListener* listener) :
1739 0 : ControlMessage(nullptr), listener_(listener) {}
1740 0 : virtual void Run() override
1741 : {
1742 0 : listener_->UnsetTrackIdImpl();
1743 0 : }
1744 : RefPtr<PipelineListener> listener_;
1745 : };
1746 0 : graph->AppendMessage(MakeUnique<Message>(this));
1747 0 : }
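 : // The ControlMessage above makes UnsetTrackIdImpl() run on the MSG
 : // thread, so the track id is cleared in order with any media callbacks
 : // already queued against the old track.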
1748 : // Called if we're attached with AddDirectListener()
1749 0 : void MediaPipelineTransmit::PipelineListener::
1750 : NotifyRealtimeTrackData(MediaStreamGraph* graph,
1751 : StreamTime offset,
1752 : const MediaSegment& media) {
1753 0 : MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyRealtimeTrackData() listener=" <<
1754 : this << ", offset=" << offset <<
1755 : ", duration=" << media.GetDuration());
1756 :
1757 0 : if (media.GetType() == MediaSegment::VIDEO) {
1758 : // We have to call the upstream NotifyRealtimeTrackData();
1759 : // MediaStreamVideoSink will route the frames to SetCurrentFrames().
1760 0 : MediaStreamVideoSink::NotifyRealtimeTrackData(graph, offset, media);
1761 0 : return;
1762 : }
1763 :
1764 0 : NewData(media, graph->GraphRate());
1765 : }
1766 :
1767 0 : void MediaPipelineTransmit::PipelineListener::
1768 : NotifyQueuedChanges(MediaStreamGraph* graph,
1769 : StreamTime offset,
1770 : const MediaSegment& queued_media) {
1771 0 : MOZ_MTLOG(ML_DEBUG, "MediaPipeline::NotifyQueuedChanges()");
1772 :
1773 0 : if (queued_media.GetType() == MediaSegment::VIDEO) {
1774 : // We always get video from SetCurrentFrames().
1775 0 : return;
1776 : }
1777 :
1778 0 : if (direct_connect_) {
1779 : // ignore non-direct data if we're also getting direct data
1780 0 : return;
1781 : }
1782 :
1783 : size_t rate;
1784 0 : if (graph) {
1785 0 : rate = graph->GraphRate();
1786 : } else {
1787 : // When running tests, graph may be null. In that case use a default.
1788 0 : rate = 16000;
1789 : }
1790 0 : NewData(queued_media, rate);
1791 : }
1792 :
1793 0 : void MediaPipelineTransmit::PipelineListener::
1794 : NotifyDirectListenerInstalled(InstallationResult aResult) {
1795 0 : MOZ_MTLOG(ML_INFO, "MediaPipeline::NotifyDirectListenerInstalled() listener=" <<
1796 : this << ", result=" << static_cast<int32_t>(aResult));
1797 :
1798 0 : direct_connect_ = InstallationResult::SUCCESS == aResult;
1799 0 : }
1800 :
1801 0 : void MediaPipelineTransmit::PipelineListener::
1802 : NotifyDirectListenerUninstalled() {
1803 0 : MOZ_MTLOG(ML_INFO, "MediaPipeline::NotifyDirectListenerUninstalled() listener=" << this);
1804 :
1805 0 : direct_connect_ = false;
1806 0 : }
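 : // While direct_connect_ is set, NotifyQueuedChanges() drops its
 : // non-direct audio (see above) so the same samples aren't delivered
 : // twice: once directly and once through the graph's queue.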
1807 :
1808 0 : void MediaPipelineTransmit::PipelineListener::
1809 : NewData(const MediaSegment& media, TrackRate aRate /* = 0 */) {
1810 0 : if (!active_) {
1811 0 : MOZ_MTLOG(ML_DEBUG, "Discarding packets because transport not ready");
1812 0 : return;
1813 : }
1814 :
1815 0 : if (conduit_->type() !=
1816 0 : (media.GetType() == MediaSegment::AUDIO ? MediaSessionConduit::AUDIO :
1817 : MediaSessionConduit::VIDEO)) {
1818 0 : MOZ_ASSERT(false, "The media type should always be correct since the "
1819 : "listener is locked to a specific track");
1820 : return;
1821 : }
1822 :
1823 : // TODO(ekr@rtfm.com): For now assume that we have only one
1824 : // track type and it's destined for us
1825 : // See bug 784517
1826 0 : if (media.GetType() == MediaSegment::AUDIO) {
1827 0 : MOZ_RELEASE_ASSERT(aRate > 0);
1828 :
1829 0 : AudioSegment* audio = const_cast<AudioSegment *>(static_cast<const AudioSegment*>(&media));
1830 0 : for (AudioSegment::ChunkIterator iter(*audio); !iter.IsEnded(); iter.Next()) {
1831 0 : audio_processing_->QueueAudioChunk(aRate, *iter, enabled_);
1832 : }
1833 : } else {
1834 0 : VideoSegment* video = const_cast<VideoSegment *>(static_cast<const VideoSegment*>(&media));
1835 0 : for (VideoSegment::ChunkIterator iter(*video); !iter.IsEnded(); iter.Next()) {
1837 0 : converter_->QueueVideoChunk(*iter, !enabled_);
1838 : }
1839 : }
1840 : }
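 : // Sketch of the flow above for one 10 ms callback, assuming a 48 kHz
 : // mono audio track: the AudioSegment's chunks (480 frames total) are
 : // queued to audio_processing_ for packetization, with enabled_ == false
 : // rendering them as silence; for video, !enabled_ similarly appears to
 : // request black frames from the converter.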
1841 :
1842 0 : void MediaPipelineTransmit::PipelineListener::
1843 : SetCurrentFrames(const VideoSegment& aSegment)
1844 : {
1845 0 : NewData(aSegment);
1846 0 : }
1847 :
1848 : class TrackAddedCallback {
1849 : public:
1850 : virtual void TrackAdded(TrackTicks current_ticks) = 0;
1851 :
1852 : NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TrackAddedCallback);
1853 :
1854 : protected:
1855 : virtual ~TrackAddedCallback() {}
1856 : };
1857 :
1858 : class GenericReceiveListener;
1859 :
1860 : class GenericReceiveCallback : public TrackAddedCallback
1861 : {
1862 : public:
1863 : explicit GenericReceiveCallback(GenericReceiveListener* listener)
1864 : : listener_(listener) {}
1865 :
1866 : void TrackAdded(TrackTicks time) override;
1867 :
1868 : private:
1869 : RefPtr<GenericReceiveListener> listener_;
1870 : };
1871 :
1872 : // Add a listener on the MSG thread using the MSG command queue
1873 0 : static void AddListener(MediaStream* source, MediaStreamListener* listener) {
1874 0 : class Message : public ControlMessage {
1875 : public:
1876 0 : Message(MediaStream* stream, MediaStreamListener* listener)
1877 0 : : ControlMessage(stream),
1878 0 : listener_(listener) {}
1879 :
1880 0 : virtual void Run() override {
1881 0 : mStream->AddListenerImpl(listener_.forget());
1882 0 : }
1883 : private:
1884 : RefPtr<MediaStreamListener> listener_;
1885 : };
1886 :
1887 0 : MOZ_ASSERT(listener);
1888 :
1889 0 : if (source->GraphImpl()) {
1890 0 : source->GraphImpl()->AppendMessage(MakeUnique<Message>(source, listener));
1891 : }
1892 0 : }
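 : // If the stream's graph is already gone (GraphImpl() is null), the
 : // listener is silently dropped; that can happen when the stream has
 : // been destroyed during shutdown.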
1893 :
1894 : class GenericReceiveListener : public MediaStreamListener
1895 : {
1896 : public:
1897 0 : GenericReceiveListener(SourceMediaStream *source, TrackID track_id)
1898 0 : : source_(source),
1899 : track_id_(track_id),
1900 : played_ticks_(0),
1901 : last_log_(0),
1902 0 : principal_handle_(PRINCIPAL_HANDLE_NONE) {}
1903 :
1904 0 : virtual ~GenericReceiveListener() {}
1905 :
1906 0 : void AddSelf()
1907 : {
1908 0 : AddListener(source_, this);
1909 0 : }
1910 :
1911 0 : void EndTrack()
1912 : {
1913 0 : source_->EndTrack(track_id_);
1914 0 : }
1915 :
1916 : // Must be called on the main thread
1917 0 : void SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
1918 : {
1919 0 : class Message : public ControlMessage
1920 : {
1921 : public:
1922 0 : Message(GenericReceiveListener* listener,
1923 : MediaStream* stream,
1924 : const PrincipalHandle& principal_handle)
1925 0 : : ControlMessage(stream),
1926 : listener_(listener),
1927 0 : principal_handle_(principal_handle)
1928 0 : {}
1929 :
1930 0 : void Run() override {
1931 0 : listener_->SetPrincipalHandle_msg(principal_handle_);
1932 0 : }
1933 :
1934 : RefPtr<GenericReceiveListener> listener_;
1935 : PrincipalHandle principal_handle_;
1936 : };
1937 :
1938 0 : source_->GraphImpl()->AppendMessage(MakeUnique<Message>(this, source_, principal_handle));
1939 0 : }
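 : // Same ControlMessage pattern as AddListener() above: the principal
 : // handle is only written on the MSG thread, so no lock is needed
 : // around principal_handle_ reads in NotifyPull().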
1940 :
1941 : // Must be called on the MediaStreamGraph thread
1942 0 : void SetPrincipalHandle_msg(const PrincipalHandle& principal_handle)
1943 : {
1944 0 : principal_handle_ = principal_handle;
1945 0 : }
1946 :
1947 : protected:
1948 : SourceMediaStream *source_;
1949 : const TrackID track_id_;
1950 : TrackTicks played_ticks_;
1951 : TrackTicks last_log_; // played_ticks_ when we last logged
1952 : PrincipalHandle principal_handle_;
1953 : };
1954 :
1955 0 : MediaPipelineReceive::MediaPipelineReceive(
1956 : const std::string& pc,
1957 : nsCOMPtr<nsIEventTarget> main_thread,
1958 : nsCOMPtr<nsIEventTarget> sts_thread,
1959 : SourceMediaStream *stream,
1960 : const std::string& track_id,
1961 : int level,
1962 : RefPtr<MediaSessionConduit> conduit,
1963 : RefPtr<TransportFlow> rtp_transport,
1964 : RefPtr<TransportFlow> rtcp_transport,
1965 0 : nsAutoPtr<MediaPipelineFilter> filter) :
1966 : MediaPipeline(pc, RECEIVE, main_thread, sts_thread,
1967 : track_id, level, conduit, rtp_transport,
1968 : rtcp_transport, filter),
1969 : stream_(stream),
1970 0 : segments_added_(0)
1971 : {
1972 0 : MOZ_ASSERT(stream_);
1973 0 : }
1974 :
1975 0 : MediaPipelineReceive::~MediaPipelineReceive()
1976 : {
1977 0 : MOZ_ASSERT(!stream_); // Check that we have shut down already.
1978 0 : }
1979 :
1980 : class MediaPipelineReceiveAudio::PipelineListener
1981 : : public GenericReceiveListener
1982 : {
1983 : public:
1984 0 : PipelineListener(SourceMediaStream * source, TrackID track_id,
1985 : const RefPtr<MediaSessionConduit>& conduit)
1986 0 : : GenericReceiveListener(source, track_id),
1987 0 : conduit_(conduit)
1988 : {
1989 0 : }
1990 :
1991 0 : ~PipelineListener()
1992 0 : {
1993 0 : if (!NS_IsMainThread()) {
1994 : // Release the conduit on the main thread. Must use forget()!
1995 0 : nsresult rv = NS_DispatchToMainThread(new
1996 0 : ConduitDeleteEvent(conduit_.forget()));
1997 0 : MOZ_ASSERT(NS_SUCCEEDED(rv), "Could not dispatch conduit shutdown to main");
1998 0 : if (NS_FAILED(rv)) {
1999 0 : MOZ_CRASH();
2000 : }
2001 : } else {
2002 0 : conduit_ = nullptr;
2003 : }
2004 0 : }
2005 :
2006 : // Implement MediaStreamListener
2007 0 : void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
2008 : {
2009 0 : MOZ_ASSERT(source_);
2010 0 : if (!source_) {
2011 0 : MOZ_MTLOG(ML_ERROR, "NotifyPull() called without a source stream");
2012 0 : return;
2013 : }
2014 :
2015 : // This comparison is done in total time to avoid accumulated roundoff errors.
2016 0 : while (source_->TicksToTimeRoundDown(WEBRTC_DEFAULT_SAMPLE_RATE,
2017 : played_ticks_) < desired_time) {
2018 : int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
2019 :
2020 : int samples_length;
2021 :
2022 : // This fetches 10ms of data, either mono or stereo
2023 : MediaConduitErrorCode err =
2024 0 : static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
2025 : scratch_buffer,
2026 : WEBRTC_DEFAULT_SAMPLE_RATE,
2027 : 0, // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
2028 0 : samples_length);
2029 :
2030 0 : if (err != kMediaConduitNoError) {
2031 : // Insert silence on conduit/GIPS failure (extremely unlikely)
2032 0 : MOZ_MTLOG(ML_ERROR, "Audio conduit failed (" << err
2033 : << ") to return data @ " << played_ticks_
2034 : << " (desired " << desired_time << " -> "
2035 : << source_->StreamTimeToSeconds(desired_time) << ")");
2036 : // if this is not enough we'll loop and provide more
2037 0 : samples_length = WEBRTC_DEFAULT_SAMPLE_RATE/100;
2038 0 : PodArrayZero(scratch_buffer);
2039 : }
2040 :
2041 0 : MOZ_ASSERT(samples_length * sizeof(uint16_t) <= AUDIO_SAMPLE_BUFFER_MAX_BYTES); // stereo at 48 kHz exactly fills the buffer
2042 :
2043 0 : MOZ_MTLOG(ML_DEBUG, "Audio conduit returned buffer of length "
2044 : << samples_length);
2045 :
2046 0 : RefPtr<SharedBuffer> samples = SharedBuffer::Create(samples_length * sizeof(uint16_t));
2047 0 : int16_t *samples_data = static_cast<int16_t *>(samples->Data());
2048 0 : AudioSegment segment;
2049 : // We derive the number of channels of the stream from the number of samples
2050 : // the AudioConduit gives us, considering it gives us packets of 10ms and we
2051 : // know the rate.
2052 0 : uint32_t channelCount = samples_length / (WEBRTC_DEFAULT_SAMPLE_RATE / 100);
2053 0 : AutoTArray<int16_t*,2> channels;
2054 0 : AutoTArray<const int16_t*,2> outputChannels;
2055 0 : size_t frames = samples_length / channelCount;
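 : // Worked example: at WEBRTC_DEFAULT_SAMPLE_RATE (48 kHz), a 10 ms
 : // mono frame is 480 samples, so channelCount = 480/480 = 1 and
 : // frames = 480; a stereo frame is 960 samples, so channelCount = 2
 : // and frames is still 480.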
2056 :
2057 0 : channels.SetLength(channelCount);
2058 :
2059 0 : size_t offset = 0;
2060 0 : for (size_t i = 0; i < channelCount; i++) {
2061 0 : channels[i] = samples_data + offset;
2062 0 : offset += frames;
2063 : }
2064 :
2065 0 : DeinterleaveAndConvertBuffer(scratch_buffer,
2066 : frames,
2067 : channelCount,
2068 0 : channels.Elements());
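 : // DeinterleaveAndConvertBuffer() turns the interleaved scratch buffer
 : // ([L R L R ...] for stereo) into one contiguous plane per channel
 : // ([L L ...][R R ...]), matching the channel pointers set up above.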
2069 :
2070 0 : outputChannels.AppendElements(channels);
2071 :
2072 0 : segment.AppendFrames(samples.forget(), outputChannels, frames,
2073 0 : principal_handle_);
2074 :
2075 : // Handle track not actually added yet or removed/finished
2076 0 : if (source_->AppendToTrack(track_id_, &segment)) {
2077 0 : played_ticks_ += frames;
2078 0 : if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
2079 0 : if (played_ticks_ > last_log_ + WEBRTC_DEFAULT_SAMPLE_RATE) { // ~ 1 second
2080 0 : MOZ_LOG(AudioLogModule(), LogLevel::Debug,
2081 : ("%p: Inserting %" PRIuSIZE " samples into track %d, total = %" PRIu64,
2082 : (void*) this, frames, track_id_, played_ticks_));
2083 0 : last_log_ = played_ticks_;
2084 : }
2085 : }
2086 : } else {
2087 0 : MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
2088 : // We can't un-read the data, but that's OK since we don't want to
2089 : // buffer; bail out rather than looping forever.
2090 0 : return;
2091 : }
2092 : }
2093 : }
2094 :
2095 : private:
2096 : RefPtr<MediaSessionConduit> conduit_;
2097 : };
2098 :
2099 0 : MediaPipelineReceiveAudio::MediaPipelineReceiveAudio(
2100 : const std::string& pc,
2101 : nsCOMPtr<nsIEventTarget> main_thread,
2102 : nsCOMPtr<nsIEventTarget> sts_thread,
2103 : SourceMediaStream* stream,
2104 : const std::string& media_stream_track_id,
2105 : TrackID numeric_track_id,
2106 : int level,
2107 : RefPtr<AudioSessionConduit> conduit,
2108 : RefPtr<TransportFlow> rtp_transport,
2109 : RefPtr<TransportFlow> rtcp_transport,
2110 0 : nsAutoPtr<MediaPipelineFilter> filter) :
2111 : MediaPipelineReceive(pc, main_thread, sts_thread,
2112 : stream, media_stream_track_id, level, conduit,
2113 : rtp_transport, rtcp_transport, filter),
2114 0 : listener_(new PipelineListener(stream, numeric_track_id, conduit))
2115 0 : {}
2116 :
2117 0 : void MediaPipelineReceiveAudio::DetachMedia()
2118 : {
2119 0 : ASSERT_ON_THREAD(main_thread_);
2120 0 : if (stream_ && listener_) {
2121 0 : listener_->EndTrack();
2122 :
2123 0 : if (stream_->GraphImpl()) {
2124 0 : stream_->RemoveListener(listener_);
2125 : }
2126 0 : stream_ = nullptr;
2127 : }
2128 0 : }
2129 :
2130 0 : nsresult MediaPipelineReceiveAudio::Init() {
2131 0 : ASSERT_ON_THREAD(main_thread_);
2132 0 : MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
2133 :
2134 0 : description_ = pc_ + "| Receive audio[";
2135 0 : description_ += track_id_;
2136 0 : description_ += "]";
2137 :
2138 0 : listener_->AddSelf();
2139 :
2140 0 : return MediaPipelineReceive::Init();
2141 : }
2142 :
2143 0 : void MediaPipelineReceiveAudio::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
2144 : {
2145 0 : listener_->SetPrincipalHandle_m(principal_handle);
2146 0 : }
2147 :
2148 0 : class MediaPipelineReceiveVideo::PipelineListener
2149 : : public GenericReceiveListener {
2150 : public:
2151 0 : PipelineListener(SourceMediaStream * source, TrackID track_id)
2152 0 : : GenericReceiveListener(source, track_id),
2153 : width_(0),
2154 : height_(0),
2155 : image_container_(),
2156 : image_(),
2157 0 : monitor_("Video PipelineListener")
2158 : {
2159 : image_container_ =
2160 0 : LayerManager::CreateImageContainer(ImageContainer::ASYNCHRONOUS);
2161 0 : }
2162 :
2163 : // Implement MediaStreamListener
2164 0 : void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
2165 : {
2166 0 : ReentrantMonitorAutoEnter enter(monitor_);
2167 :
2168 0 : RefPtr<Image> image = image_;
2169 0 : StreamTime delta = desired_time - played_ticks_;
2170 :
2171 : // Don't append if we've already provided a frame that supposedly
2172 : // goes past the current desired_time. Doing so means a negative
2173 : // delta and thus messes up handling of the graph.
2174 0 : if (delta > 0) {
2175 0 : VideoSegment segment;
2176 0 : segment.AppendFrame(image.forget(), delta, IntSize(width_, height_),
2177 0 : principal_handle_);
2178 : // Handle track not actually added yet or removed/finished
2179 0 : if (source_->AppendToTrack(track_id_, &segment)) {
2180 0 : played_ticks_ = desired_time;
2181 : } else {
2182 0 : MOZ_MTLOG(ML_ERROR, "AppendToTrack failed");
2183 0 : return;
2184 : }
2185 : }
2186 : }
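 : // Note: played_ticks_ only advances when AppendToTrack() succeeds, so
 : // the same span is retried (with a larger delta) on the next pull.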
2187 :
2188 : // Accessors for external writes from the renderer
2189 0 : void FrameSizeChange(unsigned int width,
2190 : unsigned int height,
2191 : unsigned int number_of_streams) {
2192 0 : ReentrantMonitorAutoEnter enter(monitor_);
2193 :
2194 0 : width_ = width;
2195 0 : height_ = height;
2196 0 : }
2197 :
2198 0 : void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
2199 : uint32_t time_stamp,
2200 : int64_t render_time,
2201 : const RefPtr<layers::Image>& video_image)
2202 : {
2203 0 : RenderVideoFrame(buffer.DataY(),
2204 0 : buffer.StrideY(),
2205 0 : buffer.DataU(),
2206 0 : buffer.StrideU(),
2207 0 : buffer.DataV(),
2208 0 : buffer.StrideV(),
2209 0 : time_stamp, render_time, video_image);
2210 0 : }
2211 :
2212 0 : void RenderVideoFrame(const uint8_t* buffer_y,
2213 : uint32_t y_stride,
2214 : const uint8_t* buffer_u,
2215 : uint32_t u_stride,
2216 : const uint8_t* buffer_v,
2217 : uint32_t v_stride,
2218 : uint32_t time_stamp,
2219 : int64_t render_time,
2220 : const RefPtr<layers::Image>& video_image)
2221 : {
2222 0 : ReentrantMonitorAutoEnter enter(monitor_);
2223 :
2224 0 : if (buffer_y) {
2225 : // Create a video frame using |buffer|.
2226 0 : RefPtr<PlanarYCbCrImage> yuvImage = image_container_->CreatePlanarYCbCrImage();
2227 :
2228 0 : PlanarYCbCrData yuvData;
2229 0 : yuvData.mYChannel = const_cast<uint8_t*>(buffer_y);
2230 0 : yuvData.mYSize = IntSize(y_stride, height_);
2231 0 : yuvData.mYStride = y_stride;
2232 0 : MOZ_ASSERT(u_stride == v_stride);
2233 0 : yuvData.mCbCrStride = u_stride;
2234 0 : yuvData.mCbChannel = const_cast<uint8_t*>(buffer_u);
2235 0 : yuvData.mCrChannel = const_cast<uint8_t*>(buffer_v);
2236 0 : yuvData.mCbCrSize = IntSize(yuvData.mCbCrStride, (height_ + 1) >> 1);
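 : // I420 chroma planes are subsampled 2x2, so (height_ + 1) >> 1 rounds
 : // up and odd-height frames still get a final chroma row.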
2237 0 : yuvData.mPicX = 0;
2238 0 : yuvData.mPicY = 0;
2239 0 : yuvData.mPicSize = IntSize(width_, height_);
2240 0 : yuvData.mStereoMode = StereoMode::MONO;
2241 :
2242 0 : if (!yuvImage->CopyData(yuvData)) {
2243 0 : MOZ_ASSERT(false);
2244 : return;
2245 : }
2246 :
2247 0 : image_ = yuvImage;
2248 : }
2249 0 : }
2250 :
2251 : private:
2252 : int width_;
2253 : int height_;
2254 : RefPtr<layers::ImageContainer> image_container_;
2255 : RefPtr<layers::Image> image_;
2256 : mozilla::ReentrantMonitor monitor_; // Monitor for processing WebRTC frames.
2257 : // Protects image_ against:
2258 : // - Writing from the GIPS thread
2259 : // - Reading from the MSG thread
2260 : };
2261 :
2262 0 : class MediaPipelineReceiveVideo::PipelineRenderer : public mozilla::VideoRenderer
2263 : {
2264 : public:
2265 0 : explicit PipelineRenderer(MediaPipelineReceiveVideo *pipeline) :
2266 0 : pipeline_(pipeline) {}
2267 :
2268 : void Detach() { pipeline_ = nullptr; }
2269 :
2270 : // Implement VideoRenderer
2271 0 : void FrameSizeChange(unsigned int width,
2272 : unsigned int height,
2273 : unsigned int number_of_streams) override
2274 : {
2275 0 : pipeline_->listener_->FrameSizeChange(width, height, number_of_streams);
2276 0 : }
2277 :
2278 0 : void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
2279 : uint32_t time_stamp,
2280 : int64_t render_time,
2281 : const ImageHandle& handle) override
2282 : {
2283 0 : pipeline_->listener_->RenderVideoFrame(buffer,
2284 : time_stamp, render_time,
2285 0 : handle.GetImage());
2286 0 : }
2287 :
2288 0 : void RenderVideoFrame(const uint8_t* buffer_y,
2289 : uint32_t y_stride,
2290 : const uint8_t* buffer_u,
2291 : uint32_t u_stride,
2292 : const uint8_t* buffer_v,
2293 : uint32_t v_stride,
2294 : uint32_t time_stamp,
2295 : int64_t render_time,
2296 : const ImageHandle& handle) override
2297 : {
2298 0 : pipeline_->listener_->RenderVideoFrame(buffer_y, y_stride,
2299 : buffer_u, u_stride,
2300 : buffer_v, v_stride,
2301 : time_stamp, render_time,
2302 0 : handle.GetImage());
2303 0 : }
2304 :
2305 : private:
2306 : MediaPipelineReceiveVideo *pipeline_; // Raw pointer to avoid cycles
2307 : };
2308 :
2309 :
2310 0 : MediaPipelineReceiveVideo::MediaPipelineReceiveVideo(
2311 : const std::string& pc,
2312 : nsCOMPtr<nsIEventTarget> main_thread,
2313 : nsCOMPtr<nsIEventTarget> sts_thread,
2314 : SourceMediaStream *stream,
2315 : const std::string& media_stream_track_id,
2316 : TrackID numeric_track_id,
2317 : int level,
2318 : RefPtr<VideoSessionConduit> conduit,
2319 : RefPtr<TransportFlow> rtp_transport,
2320 : RefPtr<TransportFlow> rtcp_transport,
2321 0 : nsAutoPtr<MediaPipelineFilter> filter) :
2322 : MediaPipelineReceive(pc, main_thread, sts_thread,
2323 : stream, media_stream_track_id, level, conduit,
2324 : rtp_transport, rtcp_transport, filter),
2325 0 : renderer_(new PipelineRenderer(this)),
2326 0 : listener_(new PipelineListener(stream, numeric_track_id))
2327 0 : {}
2328 :
2329 0 : void MediaPipelineReceiveVideo::DetachMedia()
2330 : {
2331 0 : ASSERT_ON_THREAD(main_thread_);
2332 :
2333 : // Stop generating video and thus stop invoking the PipelineRenderer
2334 : // and PipelineListener. The renderer holds a raw pointer to the
2335 : // pipeline to avoid cycles, and the render callbacks arrive on another
2336 : // thread, so unlocked null-checks would be racy (TSAN bugs).
2337 0 : static_cast<VideoSessionConduit*>(conduit_.get())->DetachRenderer();
2338 0 : if (stream_ && listener_) {
2339 0 : listener_->EndTrack();
2340 0 : stream_->RemoveListener(listener_);
2341 0 : stream_ = nullptr;
2342 : }
2343 0 : }
2344 :
2345 0 : nsresult MediaPipelineReceiveVideo::Init() {
2346 0 : ASSERT_ON_THREAD(main_thread_);
2347 0 : MOZ_MTLOG(ML_DEBUG, __FUNCTION__);
2348 :
2349 0 : description_ = pc_ + "| Receive video[";
2350 0 : description_ += track_id_;
2351 0 : description_ += "]";
2352 :
2353 0 : listener_->AddSelf();
2354 :
2355 : // Always happens before we can DetachMedia()
2356 0 : static_cast<VideoSessionConduit *>(conduit_.get())->
2357 0 : AttachRenderer(renderer_);
2358 :
2359 0 : return MediaPipelineReceive::Init();
2360 : }
2361 :
2362 0 : void MediaPipelineReceiveVideo::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
2363 : {
2364 0 : listener_->SetPrincipalHandle_m(principal_handle);
2365 0 : }
2366 :
2367 0 : DOMHighResTimeStamp MediaPipeline::GetNow() {
2368 0 : return webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
2369 : }
2370 :
2371 : DOMHighResTimeStamp
2372 0 : MediaPipeline::RtpCSRCStats::GetExpiryFromTime(
2373 : const DOMHighResTimeStamp aTime) {
2374 : // DOMHighResTimeStamp is a unit measured in ms
2375 0 : return aTime - EXPIRY_TIME_MILLISECONDS;
2376 : }
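 : // Callers can treat any RtpCSRCStats whose mTimestamp is older than
 : // the returned cutoff (i.e. more than EXPIRY_TIME_MILLISECONDS in the
 : // past) as expired when pruning CSRC stats.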
2377 :
2378 0 : MediaPipeline::RtpCSRCStats::RtpCSRCStats(const uint32_t aCsrc,
2379 0 : const DOMHighResTimeStamp aTime)
2380 : : mCsrc(aCsrc)
2381 0 : , mTimestamp(aTime) {}
2382 :
2383 : void
2384 0 : MediaPipeline::RtpCSRCStats::GetWebidlInstance(
2385 : dom::RTCRTPContributingSourceStats& aWebidlObj,
2386 : const nsString &aInboundRtpStreamId) const
2387 : {
2388 0 : nsString statId = NS_LITERAL_STRING("csrc_") + aInboundRtpStreamId;
2389 0 : statId.AppendLiteral("_");
2390 0 : statId.AppendInt(mCsrc);
2391 0 : aWebidlObj.mId.Construct(statId);
2392 0 : aWebidlObj.mType.Construct(RTCStatsType::Csrc);
2393 0 : aWebidlObj.mTimestamp.Construct(mTimestamp);
2394 0 : aWebidlObj.mContributorSsrc.Construct(mCsrc);
2395 0 : aWebidlObj.mInboundRtpStreamId.Construct(aInboundRtpStreamId);
2396 0 : }
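 : // The generated stat id has the form "csrc_<inboundRtpStreamId>_<csrc>",
 : // e.g. "csrc_inbound_rtp_audio_0_2184591121" (values illustrative only).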
2397 :
2398 : } // namespace mozilla