/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "mozilla/AbstractThread.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/gfx/Point.h"
#include "mozilla/SyncRunnable.h"

#include "AudioSegment.h"
#include "DecodedStream.h"
#include "MediaData.h"
#include "MediaQueue.h"
#include "MediaStreamGraph.h"
#include "MediaStreamListener.h"
#include "OutputStreamManager.h"
#include "SharedBuffer.h"
#include "VideoSegment.h"
#include "VideoUtils.h"

namespace mozilla {

using media::TimeUnit;

/*
 * A container class to make it easier to pass the playback info all the
 * way to DecodedStreamGraphListener from DecodedStream.
 */
struct PlaybackInfoInit {
  TimeUnit mStartTime;
  MediaInfo mInfo;
};

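// Listens to the playback output on the MediaStreamGraph thread and relays
// two things to the rest of the pipeline: the current output position (via
// the OnOutput() event) and the finish notification (by resolving, on the
// main thread, the promise passed in at construction).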
class DecodedStreamGraphListener : public MediaStreamListener {
public:
  DecodedStreamGraphListener(MediaStream* aStream,
                             MozPromiseHolder<GenericPromise>&& aPromise,
                             AbstractThread* aMainThread)
    : mMutex("DecodedStreamGraphListener::mMutex")
    , mStream(aStream)
    , mAbstractMainThread(aMainThread)
  {
    mFinishPromise = Move(aPromise);
  }

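  // Called on the graph thread. Converts the graph's current time to this
  // stream's time in microseconds and broadcasts it through mOnOutput. The
  // mutex protects mStream against a concurrent Forget().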
  void NotifyOutput(MediaStreamGraph* aGraph, GraphTime aCurrentTime) override
  {
    MutexAutoLock lock(mMutex);
    if (mStream) {
      int64_t t = mStream->StreamTimeToMicroseconds(
        mStream->GraphTimeToStreamTime(aCurrentTime));
      mOnOutput.Notify(t);
    }
  }

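  // Called on the graph thread. The finished event is forwarded to the main
  // thread (after the stream state update) where DoNotifyFinished() resolves
  // the main-thread-only mFinishPromise.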
  void NotifyEvent(MediaStreamGraph* aGraph, MediaStreamGraphEvent event) override
  {
    if (event == MediaStreamGraphEvent::EVENT_FINISHED) {
      aGraph->DispatchToMainThreadAfterStreamStateUpdate(
        NewRunnableMethod("DecodedStreamGraphListener::DoNotifyFinished",
                          this,
                          &DecodedStreamGraphListener::DoNotifyFinished));
    }
  }

  void DoNotifyFinished()
  {
    MOZ_ASSERT(NS_IsMainThread());
    mFinishPromise.ResolveIfExists(true, __func__);
  }

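  // Detaches this listener from playback. The finish promise is resolved on
  // the main thread since it may only be touched there, while mStream is
  // cleared under the mutex so NotifyOutput() stops reporting positions.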
  void Forget()
  {
    RefPtr<DecodedStreamGraphListener> self = this;
    mAbstractMainThread->Dispatch(
      NS_NewRunnableFunction("DecodedStreamGraphListener::Forget", [self]() {
        MOZ_ASSERT(NS_IsMainThread());
        self->mFinishPromise.ResolveIfExists(true, __func__);
      }));
    MutexAutoLock lock(mMutex);
    mStream = nullptr;
  }

  MediaEventSource<int64_t>& OnOutput()
  {
    return mOnOutput;
  }

private:
  MediaEventProducer<int64_t> mOnOutput;

  Mutex mMutex;
  // Members below are protected by mMutex.
  RefPtr<MediaStream> mStream;
  // Main thread only.
  MozPromiseHolder<GenericPromise> mFinishPromise;

  const RefPtr<AbstractThread> mAbstractMainThread;
};

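// Suspend or resume aStream on the main thread, dispatching a runnable when
// called from elsewhere. aBlocking=true suspends the stream; false resumes it.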
static void
UpdateStreamSuspended(AbstractThread* aMainThread, MediaStream* aStream, bool aBlocking)
{
  if (NS_IsMainThread()) {
    if (aBlocking) {
      aStream->Suspend();
    } else {
      aStream->Resume();
    }
  } else {
    nsCOMPtr<nsIRunnable> r;
    if (aBlocking) {
      r = NewRunnableMethod(
        "MediaStream::Suspend", aStream, &MediaStream::Suspend);
    } else {
      r =
        NewRunnableMethod("MediaStream::Resume", aStream, &MediaStream::Resume);
    }
    aMainThread->Dispatch(r.forget());
  }
}

/*
 * All MediaStream-related data is protected by the decoder's monitor.
 * We have at most one DecodedStreamData per MediaDecoder. Its stream
 * is used as the input for each ProcessedMediaStream created by calls to
 * captureStream(UntilEnded). Seeking creates a new source stream, as does
 * replaying after the input has ended. In the latter case, the new source is
 * not connected to streams created by captureStreamUntilEnded.
 */
class DecodedStreamData {
public:
  DecodedStreamData(OutputStreamManager* aOutputStreamManager,
                    PlaybackInfoInit&& aInit,
                    MozPromiseHolder<GenericPromise>&& aPromise,
                    AbstractThread* aMainThread);
  ~DecodedStreamData();
  void SetPlaying(bool aPlaying);
  MediaEventSource<int64_t>& OnOutput();
  void Forget();
  nsCString GetDebugInfo();

  /* The following group of fields are protected by the decoder's monitor
   * and can be read or written on any thread.
   */
  // Count of audio frames written to the stream.
  int64_t mAudioFramesWritten;
  // mNextVideoTime is the end timestamp for the last packet sent to the stream.
  // Therefore video packets starting at or after this time need to be copied
  // to the output stream.
  TimeUnit mNextVideoTime;
  TimeUnit mNextAudioTime;
  // The last video image sent to the stream. Useful if we need to replicate
  // the image.
  RefPtr<layers::Image> mLastVideoImage;
  gfx::IntSize mLastVideoImageDisplaySize;
  bool mHaveSentFinish;
  bool mHaveSentFinishAudio;
  bool mHaveSentFinishVideo;

  // The decoder is responsible for calling Destroy() on this stream.
  const RefPtr<SourceMediaStream> mStream;
  const RefPtr<DecodedStreamGraphListener> mListener;
  bool mPlaying;
  // True if we need to send a compensation video frame to keep the
  // StreamTime advancing.
  bool mEOSVideoCompensation;

  const RefPtr<OutputStreamManager> mOutputStreamManager;
  const RefPtr<AbstractThread> mAbstractMainThread;
};

DecodedStreamData::DecodedStreamData(OutputStreamManager* aOutputStreamManager,
                                     PlaybackInfoInit&& aInit,
                                     MozPromiseHolder<GenericPromise>&& aPromise,
                                     AbstractThread* aMainThread)
  : mAudioFramesWritten(0)
  , mNextVideoTime(aInit.mStartTime)
  , mNextAudioTime(aInit.mStartTime)
  , mHaveSentFinish(false)
  , mHaveSentFinishAudio(false)
  , mHaveSentFinishVideo(false)
  , mStream(aOutputStreamManager->Graph()->CreateSourceStream())
  // DecodedStreamGraphListener will resolve this promise.
  , mListener(new DecodedStreamGraphListener(mStream, Move(aPromise), aMainThread))
  // mPlaying is initially true because MDSM won't start playback until playing
  // becomes true. This is consistent with the settings of AudioSink.
  , mPlaying(true)
  , mEOSVideoCompensation(false)
  , mOutputStreamManager(aOutputStreamManager)
  , mAbstractMainThread(aMainThread)
{
  mStream->AddListener(mListener);
  mOutputStreamManager->Connect(mStream);

  // Initialize tracks.
  if (aInit.mInfo.HasAudio()) {
    mStream->AddAudioTrack(aInit.mInfo.mAudio.mTrackId,
                           aInit.mInfo.mAudio.mRate,
                           0, new AudioSegment());
  }
  if (aInit.mInfo.HasVideo()) {
    mStream->AddTrack(aInit.mInfo.mVideo.mTrackId, 0, new VideoSegment());
  }
}

DecodedStreamData::~DecodedStreamData()
{
  mOutputStreamManager->Disconnect();
  mStream->Destroy();
}

MediaEventSource<int64_t>&
DecodedStreamData::OnOutput()
{
  return mListener->OnOutput();
}

void
DecodedStreamData::SetPlaying(bool aPlaying)
{
  if (mPlaying != aPlaying) {
    mPlaying = aPlaying;
    UpdateStreamSuspended(mAbstractMainThread, mStream, !mPlaying);
  }
}

void
DecodedStreamData::Forget()
{
  mListener->Forget();
}

nsCString
DecodedStreamData::GetDebugInfo()
{
  return nsPrintfCString(
    "DecodedStreamData=%p mPlaying=%d mAudioFramesWritten=%" PRId64
    " mNextAudioTime=%" PRId64 " mNextVideoTime=%" PRId64 " mHaveSentFinish=%d "
    "mHaveSentFinishAudio=%d mHaveSentFinishVideo=%d",
    this, mPlaying, mAudioFramesWritten, mNextAudioTime.ToMicroseconds(),
    mNextVideoTime.ToMicroseconds(), mHaveSentFinish, mHaveSentFinishAudio,
    mHaveSentFinishVideo);
}

DecodedStream::DecodedStream(AbstractThread* aOwnerThread,
                             AbstractThread* aMainThread,
                             MediaQueue<AudioData>& aAudioQueue,
                             MediaQueue<VideoData>& aVideoQueue,
                             OutputStreamManager* aOutputStreamManager,
                             const bool& aSameOrigin,
                             const PrincipalHandle& aPrincipalHandle)
  : mOwnerThread(aOwnerThread)
  , mAbstractMainThread(aMainThread)
  , mOutputStreamManager(aOutputStreamManager)
  , mPlaying(false)
  , mSameOrigin(aSameOrigin)
  , mPrincipalHandle(aPrincipalHandle)
  , mAudioQueue(aAudioQueue)
  , mVideoQueue(aVideoQueue)
{
}

DecodedStream::~DecodedStream()
{
  MOZ_ASSERT(mStartTime.isNothing(), "playback should've ended.");
}

const media::MediaSink::PlaybackParams&
DecodedStream::GetPlaybackParams() const
{
  AssertOwnerThread();
  return mParams;
}

void
DecodedStream::SetPlaybackParams(const PlaybackParams& aParams)
{
  AssertOwnerThread();
  mParams = aParams;
}

RefPtr<GenericPromise>
DecodedStream::OnEnded(TrackType aType)
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome());

  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio()) {
    // TODO: we should return a promise which is resolved when the audio track
    // is finished. For now this promise is resolved when the whole stream is
    // finished.
    return mFinishPromise;
  } else if (aType == TrackInfo::kVideoTrack && mInfo.HasVideo()) {
    return mFinishPromise;
  }
  return nullptr;
}

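// Starts a playback session: records the start time and media info, then
// synchronously creates the DecodedStreamData (source stream, listener and
// tracks) on the main thread before hooking up the output-position event and
// sending any data already sitting in the queues.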
void
DecodedStream::Start(const TimeUnit& aStartTime, const MediaInfo& aInfo)
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isNothing(), "playback already started.");

  mStartTime.emplace(aStartTime);
  mLastOutputTime = TimeUnit::Zero();
  mInfo = aInfo;
  mPlaying = true;
  ConnectListener();

  class R : public Runnable {
    typedef MozPromiseHolder<GenericPromise> Promise;
  public:
    R(PlaybackInfoInit&& aInit, Promise&& aPromise,
      OutputStreamManager* aManager, AbstractThread* aMainThread)
      : Runnable("CreateDecodedStreamData")
      , mInit(Move(aInit))
      , mOutputStreamManager(aManager)
      , mAbstractMainThread(aMainThread)
    {
      mPromise = Move(aPromise);
    }
    NS_IMETHOD Run() override
    {
      MOZ_ASSERT(NS_IsMainThread());
      // No need to create a source stream when there are no output streams.
      // This happens when RemoveOutput() is called immediately after
      // StartPlayback().
      if (!mOutputStreamManager->Graph()) {
        // Resolve the promise to indicate the end of playback.
        mPromise.Resolve(true, __func__);
        return NS_OK;
      }
      mData = MakeUnique<DecodedStreamData>(
        mOutputStreamManager, Move(mInit), Move(mPromise), mAbstractMainThread);
      return NS_OK;
    }
    UniquePtr<DecodedStreamData> ReleaseData()
    {
      return Move(mData);
    }
  private:
    PlaybackInfoInit mInit;
    Promise mPromise;
    RefPtr<OutputStreamManager> mOutputStreamManager;
    UniquePtr<DecodedStreamData> mData;
    const RefPtr<AbstractThread> mAbstractMainThread;
  };

  MozPromiseHolder<GenericPromise> promise;
  mFinishPromise = promise.Ensure(__func__);
  PlaybackInfoInit init {
    aStartTime, aInfo
  };
  nsCOMPtr<nsIRunnable> r =
    new R(Move(init), Move(promise), mOutputStreamManager, mAbstractMainThread);
  SyncRunnable::DispatchToThread(
    SystemGroup::EventTargetFor(mozilla::TaskCategory::Other), r);
  mData = static_cast<R*>(r.get())->ReleaseData();

  if (mData) {
    mOutputListener = mData->OnOutput().Connect(
      mOwnerThread, this, &DecodedStream::NotifyOutput);
    mData->SetPlaying(mPlaying);
    SendData();
  }
}

void
DecodedStream::Stop()
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome(), "playback not started.");

  mStartTime.reset();
  DisconnectListener();
  mFinishPromise = nullptr;

  // Clear mData immediately when this playback session ends so we won't
  // send data to the wrong stream in SendData() in the next playback session.
  DestroyData(Move(mData));
}

bool
DecodedStream::IsStarted() const
{
  AssertOwnerThread();
  return mStartTime.isSome();
}

bool
DecodedStream::IsPlaying() const
{
  AssertOwnerThread();
  return IsStarted() && mPlaying;
}

void
DecodedStream::DestroyData(UniquePtr<DecodedStreamData> aData)
{
  AssertOwnerThread();

  if (!aData) {
    return;
  }

  mOutputListener.Disconnect();

  DecodedStreamData* data = aData.release();
  data->Forget();
  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction("DecodedStream::DestroyData",
                                                   [=]() { delete data; });
  mAbstractMainThread->Dispatch(r.forget());
}

void
DecodedStream::SetPlaying(bool aPlaying)
{
  AssertOwnerThread();

  // Resume/pause matters only when playback started.
  if (mStartTime.isNothing()) {
    return;
  }

  mPlaying = aPlaying;
  if (mData) {
    mData->SetPlaying(aPlaying);
  }
}

void
DecodedStream::SetVolume(double aVolume)
{
  AssertOwnerThread();
  mParams.mVolume = aVolume;
}

void
DecodedStream::SetPlaybackRate(double aPlaybackRate)
{
  AssertOwnerThread();
  mParams.mPlaybackRate = aPlaybackRate;
}

void
DecodedStream::SetPreservesPitch(bool aPreservesPitch)
{
  AssertOwnerThread();
  mParams.mPreservesPitch = aPreservesPitch;
}

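// Appends one decoded audio packet to aOutput, mirroring AudioSink's
// bookkeeping: packets ending at or before mNextAudioTime are skipped, and
// when a packet starts more than AUDIO_FUZZ_FRAMES past the frames written
// so far, silence is inserted first to keep the track timeline contiguous.
// One frame of fuzz at 48kHz tolerates rounding errors of roughly 21us.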
static void
SendStreamAudio(DecodedStreamData* aStream, const TimeUnit& aStartTime,
                AudioData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The number of audio frames used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;

  MOZ_ASSERT(aData);
  AudioData* audio = aData;
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences.
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten
    + TimeUnitToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = TimeUnitToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // Ignore packets that we've already processed.
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }

  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up.
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  // Always write the whole sample without truncation to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue().
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;

  aStream->mNextAudioTime = audio->GetEndTime();
}

void
DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin,
                         const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasAudio()) {
    return;
  }

  AudioSegment output;
  uint32_t rate = mInfo.mAudio.mRate;
  AutoTArray<RefPtr<AudioData>, 10> audio;
  TrackID audioTrackId = mInfo.mAudio.mTrackId;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the AudioData because AudioData
  // is ref-counted.
  mAudioQueue.GetElementsAfter(mData->mNextAudioTime, &audio);
  for (uint32_t i = 0; i < audio.Length(); ++i) {
    SendStreamAudio(mData.get(), mStartTime.ref(), audio[i], &output, rate,
                    aPrincipalHandle);
  }

  output.ApplyVolume(aVolume);

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  // |mNextAudioTime| is updated as we process each audio sample in
  // SendStreamAudio(). This is consistent with how |mNextVideoTime|
  // is updated for video samples.
  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(audioTrackId, &output);
  }

  if (mAudioQueue.IsFinished() && !mData->mHaveSentFinishAudio) {
    sourceStream->EndTrack(audioTrackId);
    mData->mHaveSentFinishAudio = true;
  }
}

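// Appends a single video frame spanning [aStart, aEnd) to aOutput, with the
// duration converted to StreamTime ticks by the stream's own clock.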
static void
WriteVideoToMediaStream(MediaStream* aStream,
                        layers::Image* aImage,
                        const TimeUnit& aEnd,
                        const TimeUnit& aStart,
                        const mozilla::gfx::IntSize& aIntrinsicSize,
                        const TimeStamp& aTimeStamp,
                        VideoSegment* aOutput,
                        const PrincipalHandle& aPrincipalHandle)
{
  RefPtr<layers::Image> image = aImage;
  auto end = aStream->MicrosecondsToStreamTimeRoundDown(aEnd.ToMicroseconds());
  auto start = aStream->MicrosecondsToStreamTimeRoundDown(aStart.ToMicroseconds());
  StreamTime duration = end - start;
  aOutput->AppendFrame(image.forget(), duration, aIntrinsicSize,
                       aPrincipalHandle, false, aTimeStamp);
}

static bool
ZeroDurationAtLastChunk(VideoSegment& aInput)
{
  // Get the last video frame's start time in VideoSegment aInput.
  // If the start time is equal to the duration of aInput, the last video
  // frame's duration is zero.
  StreamTime lastVideoStartTime;
  aInput.GetLastFrame(&lastVideoStartTime);
  return lastVideoStartTime == aInput.GetDuration();
}

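// Copies queued video frames into the source stream. A gap between
// mNextVideoTime and the next frame is filled by replaying the last image
// sent, and if the queue finishes on a zero-duration chunk, a one-tick
// compensation frame is appended so the track's StreamTime still advances.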
void
DecodedStream::SendVideo(bool aIsSameOrigin, const PrincipalHandle& aPrincipalHandle)
{
  AssertOwnerThread();

  if (!mInfo.HasVideo()) {
    return;
  }

  VideoSegment output;
  TrackID videoTrackId = mInfo.mVideo.mTrackId;
  AutoTArray<RefPtr<VideoData>, 10> video;
  SourceMediaStream* sourceStream = mData->mStream;

  // It's OK to hold references to the VideoData because VideoData
  // is ref-counted.
  mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);

  // tracksStartTimeStamp might be null when the SourceMediaStream has not
  // yet been added to the MediaStreamGraph.
  TimeStamp tracksStartTimeStamp = sourceStream->GetStreamTracksStrartTimeStamp();
  if (tracksStartTimeStamp.IsNull()) {
    tracksStartTimeStamp = TimeStamp::Now();
  }

  for (uint32_t i = 0; i < video.Length(); ++i) {
    VideoData* v = video[i];

    if (mData->mNextVideoTime < v->mTime) {
      // Write last video frame to catch up. mLastVideoImage can be null here
      // which is fine, it just means there's no video.

      // TODO: |mLastVideoImage| should come from the last image rendered
      // by the state machine. This will avoid the black frame when capture
      // happens in the middle of playback (especially in the middle of a
      // video frame). E.g. if we have a video frame that is 30 sec long
      // and capture happens at 15 sec, we'll have to append a black frame
      // that is 15 sec long.
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage, v->mTime,
                              mData->mNextVideoTime, mData->mLastVideoImageDisplaySize,
                              tracksStartTimeStamp + v->mTime.ToTimeDuration(),
                              &output, aPrincipalHandle);
      mData->mNextVideoTime = v->mTime;
    }

    if (mData->mNextVideoTime < v->GetEndTime()) {
      WriteVideoToMediaStream(sourceStream, v->mImage, v->GetEndTime(),
                              mData->mNextVideoTime, v->mDisplay,
                              tracksStartTimeStamp + v->GetEndTime().ToTimeDuration(),
                              &output, aPrincipalHandle);
      mData->mNextVideoTime = v->GetEndTime();
      mData->mLastVideoImage = v->mImage;
      mData->mLastVideoImageDisplaySize = v->mDisplay;
    }
  }

  // Check the output is not empty.
  if (output.GetLastFrame()) {
    mData->mEOSVideoCompensation = ZeroDurationAtLastChunk(output);
  }

  if (!aIsSameOrigin) {
    output.ReplaceWithDisabled();
  }

  if (output.GetDuration() > 0) {
    sourceStream->AppendToTrack(videoTrackId, &output);
  }

  if (mVideoQueue.IsFinished() && !mData->mHaveSentFinishVideo) {
    if (mData->mEOSVideoCompensation) {
      VideoSegment endSegment;
      // Calculate the deviation clock time from DecodedStream.
      auto deviation = FromMicroseconds(sourceStream->StreamTimeToMicroseconds(1));
      WriteVideoToMediaStream(sourceStream, mData->mLastVideoImage,
                              mData->mNextVideoTime + deviation, mData->mNextVideoTime,
                              mData->mLastVideoImageDisplaySize,
                              tracksStartTimeStamp + (mData->mNextVideoTime + deviation).ToTimeDuration(),
                              &endSegment, aPrincipalHandle);
      mData->mNextVideoTime += deviation;
      MOZ_ASSERT(endSegment.GetDuration() > 0);
      if (!aIsSameOrigin) {
        endSegment.ReplaceWithDisabled();
      }
      sourceStream->AppendToTrack(videoTrackId, &endSegment);
    }
    sourceStream->EndTrack(videoTrackId);
    mData->mHaveSentFinishVideo = true;
  }
}

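// Advances the stream's known-tracks time to the furthest position written
// so far: the audio frames written converted at the audio rate, or the next
// video time relative to the start time, whichever is larger.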
void
DecodedStream::AdvanceTracks()
{
  AssertOwnerThread();

  StreamTime endPosition = 0;

  if (mInfo.HasAudio()) {
    StreamTime audioEnd = mData->mStream->TicksToTimeRoundDown(
      mInfo.mAudio.mRate, mData->mAudioFramesWritten);
    endPosition = std::max(endPosition, audioEnd);
  }

  if (mInfo.HasVideo()) {
    StreamTime videoEnd = mData->mStream->MicrosecondsToStreamTimeRoundDown(
      (mData->mNextVideoTime - mStartTime.ref()).ToMicroseconds());
    endPosition = std::max(endPosition, videoEnd);
  }

  if (!mData->mHaveSentFinish) {
    mData->mStream->AdvanceKnownTracksTime(endPosition);
  }
}

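// Pushes newly decoded data into the source stream. Connected to the push
// and finish events of both media queues (see ConnectListener()), so this
// runs on the owner thread whenever more data may be available, and finishes
// the stream once both queues are done.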
void
DecodedStream::SendData()
{
  AssertOwnerThread();
  MOZ_ASSERT(mStartTime.isSome(), "Must be called after StartPlayback()");

  // Not yet created on the main thread. MDSM will try again later.
  if (!mData) {
    return;
  }

  // Nothing to do when the stream is finished.
  if (mData->mHaveSentFinish) {
    return;
  }

  SendAudio(mParams.mVolume, mSameOrigin, mPrincipalHandle);
  SendVideo(mSameOrigin, mPrincipalHandle);
  AdvanceTracks();

  bool finished = (!mInfo.HasAudio() || mAudioQueue.IsFinished()) &&
                  (!mInfo.HasVideo() || mVideoQueue.IsFinished());

  if (finished && !mData->mHaveSentFinish) {
    mData->mHaveSentFinish = true;
    mData->mStream->Finish();
  }
}

TimeUnit
DecodedStream::GetEndTime(TrackType aType) const
{
  AssertOwnerThread();
  if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
    auto t = mStartTime.ref() + FramesToTimeUnit(
      mData->mAudioFramesWritten, mInfo.mAudio.mRate);
    if (t.IsValid()) {
      return t;
    }
  } else if (aType == TrackInfo::kVideoTrack && mData) {
    return mData->mNextVideoTime;
  }
  return TimeUnit::Zero();
}

TimeUnit
DecodedStream::GetPosition(TimeStamp* aTimeStamp) const
{
  AssertOwnerThread();
  // This is only called after MDSM starts playback. So mStartTime is
  // guaranteed to be something.
  MOZ_ASSERT(mStartTime.isSome());
  if (aTimeStamp) {
    *aTimeStamp = TimeStamp::Now();
  }
  return mStartTime.ref() + mLastOutputTime;
}

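// Driven by DecodedStreamGraphListener::OnOutput() with the stream time (in
// microseconds) the graph has played. Updates the position clock and pops
// audio samples whose start time the playback position has passed.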
void
DecodedStream::NotifyOutput(int64_t aTime)
{
  AssertOwnerThread();
  mLastOutputTime = FromMicroseconds(aTime);
  auto currentTime = GetPosition();

  // Remove audio samples that have been played by MSG from the queue.
  RefPtr<AudioData> a = mAudioQueue.PeekFront();
  while (a && a->mTime < currentTime) {
    RefPtr<AudioData> releaseMe = mAudioQueue.PopFront();
    a = mAudioQueue.PeekFront();
  }
}

void
DecodedStream::ConnectListener()
{
  AssertOwnerThread();

  mAudioPushListener = mAudioQueue.PushEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
  mAudioFinishListener = mAudioQueue.FinishEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
  mVideoPushListener = mVideoQueue.PushEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
  mVideoFinishListener = mVideoQueue.FinishEvent().Connect(
    mOwnerThread, this, &DecodedStream::SendData);
}

void
DecodedStream::DisconnectListener()
{
  AssertOwnerThread();

  mAudioPushListener.Disconnect();
  mVideoPushListener.Disconnect();
  mAudioFinishListener.Disconnect();
  mVideoFinishListener.Disconnect();
}

nsCString
DecodedStream::GetDebugInfo()
{
  AssertOwnerThread();
  int64_t startTime = mStartTime.isSome() ? mStartTime->ToMicroseconds() : -1;
  return nsPrintfCString(
    "DecodedStream=%p mStartTime=%" PRId64 " mLastOutputTime=%" PRId64
    " mPlaying=%d mData=%p",
    this, startTime, mLastOutputTime.ToMicroseconds(), mPlaying, mData.get())
    + (mData ? nsCString("\n") + mData->GetDebugInfo() : nsCString());
}

} // namespace mozilla