Line data Source code
1 : /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
2 : /* This Source Code Form is subject to the terms of the Mozilla Public
3 : * License, v. 2.0. If a copy of the MPL was not distributed with this file,
4 : * You can obtain one at http://mozilla.org/MPL/2.0/. */
5 :
6 : #include "AudioNodeStream.h"
7 :
8 : #include "MediaStreamGraphImpl.h"
9 : #include "MediaStreamListener.h"
10 : #include "AudioNodeEngine.h"
11 : #include "ThreeDPoint.h"
12 : #include "AudioChannelFormat.h"
13 : #include "AudioParamTimeline.h"
14 : #include "AudioContext.h"
15 : #include "nsMathUtils.h"
16 :
17 : using namespace mozilla::dom;
18 :
19 : namespace mozilla {
20 :
21 : /**
22 : * An AudioNodeStream produces a single audio track with ID
23 : * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate
24 : * for regular audio contexts, and the rate requested by the web content
25 : * for offline audio contexts.
26 : * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
27 : * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID
28 : */
29 :
30 0 : AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine,
31 : Flags aFlags,
32 0 : TrackRate aSampleRate)
33 : : ProcessedMediaStream()
34 : , mEngine(aEngine)
35 : , mSampleRate(aSampleRate)
36 : , mFlags(aFlags)
37 : , mNumberOfInputChannels(2)
38 0 : , mIsActive(aEngine->IsActive())
39 : , mMarkAsFinishedAfterThisBlock(false)
40 : , mAudioParamStream(false)
41 0 : , mPassThrough(false)
42 : {
43 0 : MOZ_ASSERT(NS_IsMainThread());
44 0 : mSuspendedCount = !(mIsActive || mFlags & EXTERNAL_OUTPUT);
45 0 : mChannelCountMode = ChannelCountMode::Max;
46 0 : mChannelInterpretation = ChannelInterpretation::Speakers;
47 : // AudioNodes are always producing data
48 0 : mHasCurrentData = true;
49 0 : mLastChunks.SetLength(std::max(uint16_t(1), mEngine->OutputCount()));
50 0 : MOZ_COUNT_CTOR(AudioNodeStream);
51 0 : }
52 :
AudioNodeStream::~AudioNodeStream()
{
  // Every upstream active input must have been unregistered by now.
  MOZ_ASSERT(mActiveInputCount == 0);
  MOZ_COUNT_DTOR(AudioNodeStream);
}
58 :
59 : void
60 0 : AudioNodeStream::DestroyImpl()
61 : {
62 : // These are graph thread objects, so clean up on graph thread.
63 0 : mInputChunks.Clear();
64 0 : mLastChunks.Clear();
65 :
66 0 : ProcessedMediaStream::DestroyImpl();
67 0 : }
68 :
69 : /* static */ already_AddRefed<AudioNodeStream>
70 0 : AudioNodeStream::Create(AudioContext* aCtx, AudioNodeEngine* aEngine,
71 : Flags aFlags, MediaStreamGraph* aGraph)
72 : {
73 0 : MOZ_ASSERT(NS_IsMainThread());
74 0 : MOZ_RELEASE_ASSERT(aGraph);
75 :
76 : // MediaRecorders use an AudioNodeStream, but no AudioNode
77 0 : AudioNode* node = aEngine->NodeMainThread();
78 :
79 : RefPtr<AudioNodeStream> stream =
80 0 : new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate());
81 0 : stream->mSuspendedCount += aCtx->ShouldSuspendNewStream();
82 0 : if (node) {
83 0 : stream->SetChannelMixingParametersImpl(node->ChannelCount(),
84 : node->ChannelCountModeValue(),
85 0 : node->ChannelInterpretationValue());
86 : }
87 0 : aGraph->AddStream(stream);
88 0 : return stream.forget();
89 : }
90 :
91 : size_t
92 0 : AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
93 : {
94 0 : size_t amount = 0;
95 :
96 : // Not reported:
97 : // - mEngine
98 :
99 0 : amount += ProcessedMediaStream::SizeOfExcludingThis(aMallocSizeOf);
100 0 : amount += mLastChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);
101 0 : for (size_t i = 0; i < mLastChunks.Length(); i++) {
102 : // NB: This is currently unshared only as there are instances of
103 : // double reporting in DMD otherwise.
104 0 : amount += mLastChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf);
105 : }
106 :
107 0 : return amount;
108 : }
109 :
size_t
AudioNodeStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  // Self allocation plus everything counted by SizeOfExcludingThis().
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}
115 :
116 : void
117 0 : AudioNodeStream::SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
118 : AudioNodeSizes& aUsage) const
119 : {
120 : // Explicitly separate out the stream memory.
121 0 : aUsage.mStream = SizeOfIncludingThis(aMallocSizeOf);
122 :
123 0 : if (mEngine) {
124 : // This will fill out the rest of |aUsage|.
125 0 : mEngine->SizeOfIncludingThis(aMallocSizeOf, aUsage);
126 : }
127 0 : }
128 :
129 : void
130 0 : AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext,
131 : double aStreamTime)
132 : {
133 0 : class Message final : public ControlMessage
134 : {
135 : public:
136 0 : Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
137 : double aStreamTime)
138 0 : : ControlMessage(aStream), mStreamTime(aStreamTime),
139 0 : mRelativeToStream(aRelativeToStream), mIndex(aIndex)
140 0 : {}
141 0 : void Run() override
142 : {
143 0 : static_cast<AudioNodeStream*>(mStream)->
144 0 : SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
145 0 : }
146 : double mStreamTime;
147 : MediaStream* mRelativeToStream;
148 : uint32_t mIndex;
149 : };
150 :
151 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex,
152 0 : aContext->DestinationStream(),
153 0 : aStreamTime));
154 0 : }
155 :
156 : void
157 0 : AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
158 : double aStreamTime)
159 : {
160 0 : StreamTime ticks = aRelativeToStream->SecondsToNearestStreamTime(aStreamTime);
161 0 : mEngine->SetStreamTimeParameter(aIndex, ticks);
162 0 : }
163 :
164 : void
165 0 : AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
166 : {
167 0 : class Message final : public ControlMessage
168 : {
169 : public:
170 0 : Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
171 0 : : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
172 0 : {}
173 0 : void Run() override
174 : {
175 0 : static_cast<AudioNodeStream*>(mStream)->Engine()->
176 0 : SetDoubleParameter(mIndex, mValue);
177 0 : }
178 : double mValue;
179 : uint32_t mIndex;
180 : };
181 :
182 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
183 0 : }
184 :
185 : void
186 0 : AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
187 : {
188 0 : class Message final : public ControlMessage
189 : {
190 : public:
191 0 : Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
192 0 : : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
193 0 : {}
194 0 : void Run() override
195 : {
196 0 : static_cast<AudioNodeStream*>(mStream)->Engine()->
197 0 : SetInt32Parameter(mIndex, mValue);
198 0 : }
199 : int32_t mValue;
200 : uint32_t mIndex;
201 : };
202 :
203 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
204 0 : }
205 :
void
AudioNodeStream::SendTimelineEvent(uint32_t aIndex,
                                   const AudioTimelineEvent& aEvent)
{
  // Posts a copy of an AudioParam timeline event to the graph thread, where
  // the engine consumes it via RecvTimelineEvent().
  class Message final : public ControlMessage
  {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioTimelineEvent& aEvent)
      : ControlMessage(aStream),
        mEvent(aEvent),
        // Captured on the main thread. Its consumer is not visible in this
        // file; presumably used when converting event times to ticks —
        // TODO(review): confirm against AudioTimelineEvent.
        mSampleRate(aStream->SampleRate()),
        mIndex(aIndex)
    {}
    void Run() override
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          RecvTimelineEvent(mIndex, mEvent);
    }
    AudioTimelineEvent mEvent;
    TrackRate mSampleRate;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aEvent));
}
231 :
232 : void
233 0 : AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
234 : {
235 0 : class Message final : public ControlMessage
236 : {
237 : public:
238 0 : Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
239 0 : : ControlMessage(aStream), mValue(aValue), mIndex(aIndex)
240 0 : {}
241 0 : void Run() override
242 : {
243 0 : static_cast<AudioNodeStream*>(mStream)->Engine()->
244 0 : SetThreeDPointParameter(mIndex, mValue);
245 0 : }
246 : ThreeDPoint mValue;
247 : uint32_t mIndex;
248 : };
249 :
250 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue));
251 0 : }
252 :
253 : void
254 0 : AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer)
255 : {
256 0 : class Message final : public ControlMessage
257 : {
258 : public:
259 0 : Message(AudioNodeStream* aStream,
260 : already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer)
261 0 : : ControlMessage(aStream), mBuffer(aBuffer)
262 0 : {}
263 0 : void Run() override
264 : {
265 0 : static_cast<AudioNodeStream*>(mStream)->Engine()->
266 0 : SetBuffer(mBuffer.forget());
267 0 : }
268 : RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
269 : };
270 :
271 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aBuffer));
272 0 : }
273 :
274 : void
275 0 : AudioNodeStream::SetRawArrayData(nsTArray<float>& aData)
276 : {
277 0 : class Message final : public ControlMessage
278 : {
279 : public:
280 0 : Message(AudioNodeStream* aStream,
281 : nsTArray<float>& aData)
282 0 : : ControlMessage(aStream)
283 : {
284 0 : mData.SwapElements(aData);
285 0 : }
286 0 : void Run() override
287 : {
288 0 : static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData);
289 0 : }
290 : nsTArray<float> mData;
291 : };
292 :
293 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aData));
294 0 : }
295 :
296 : void
297 0 : AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
298 : ChannelCountMode aChannelCountMode,
299 : ChannelInterpretation aChannelInterpretation)
300 : {
301 0 : class Message final : public ControlMessage
302 : {
303 : public:
304 0 : Message(AudioNodeStream* aStream,
305 : uint32_t aNumberOfChannels,
306 : ChannelCountMode aChannelCountMode,
307 : ChannelInterpretation aChannelInterpretation)
308 0 : : ControlMessage(aStream),
309 : mNumberOfChannels(aNumberOfChannels),
310 : mChannelCountMode(aChannelCountMode),
311 0 : mChannelInterpretation(aChannelInterpretation)
312 0 : {}
313 0 : void Run() override
314 : {
315 0 : static_cast<AudioNodeStream*>(mStream)->
316 0 : SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
317 0 : mChannelInterpretation);
318 0 : }
319 : uint32_t mNumberOfChannels;
320 : ChannelCountMode mChannelCountMode;
321 : ChannelInterpretation mChannelInterpretation;
322 : };
323 :
324 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aNumberOfChannels,
325 : aChannelCountMode,
326 0 : aChannelInterpretation));
327 0 : }
328 :
329 : void
330 0 : AudioNodeStream::SetPassThrough(bool aPassThrough)
331 : {
332 0 : class Message final : public ControlMessage
333 : {
334 : public:
335 0 : Message(AudioNodeStream* aStream, bool aPassThrough)
336 0 : : ControlMessage(aStream), mPassThrough(aPassThrough)
337 0 : {}
338 0 : void Run() override
339 : {
340 0 : static_cast<AudioNodeStream*>(mStream)->mPassThrough = mPassThrough;
341 0 : }
342 : bool mPassThrough;
343 : };
344 :
345 0 : GraphImpl()->AppendMessage(MakeUnique<Message>(this, aPassThrough));
346 0 : }
347 :
348 : void
349 0 : AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
350 : ChannelCountMode aChannelCountMode,
351 : ChannelInterpretation aChannelInterpretation)
352 : {
353 0 : mNumberOfInputChannels = aNumberOfChannels;
354 0 : mChannelCountMode = aChannelCountMode;
355 0 : mChannelInterpretation = aChannelInterpretation;
356 0 : }
357 :
358 : uint32_t
359 0 : AudioNodeStream::ComputedNumberOfChannels(uint32_t aInputChannelCount)
360 : {
361 0 : switch (mChannelCountMode) {
362 : case ChannelCountMode::Explicit:
363 : // Disregard the channel count we've calculated from inputs, and just use
364 : // mNumberOfInputChannels.
365 0 : return mNumberOfInputChannels;
366 : case ChannelCountMode::Clamped_max:
367 : // Clamp the computed output channel count to mNumberOfInputChannels.
368 0 : return std::min(aInputChannelCount, mNumberOfInputChannels);
369 : default:
370 : case ChannelCountMode::Max:
371 : // Nothing to do here, just shut up the compiler warning.
372 0 : return aInputChannelCount;
373 : }
374 : }
375 :
// Graph-thread half of AdvanceAndResume(): skips the stream forward by
// mAdvance and lifts one suspension.
class AudioNodeStream::AdvanceAndResumeMessage final : public ControlMessage {
public:
  AdvanceAndResumeMessage(AudioNodeStream* aStream, StreamTime aAdvance) :
    ControlMessage(aStream), mAdvance(aAdvance) {}
  void Run() override
  {
    auto ns = static_cast<AudioNodeStream*>(mStream);
    // Shift the track start earlier so the stream's current time advances by
    // mAdvance without any processing having happened.
    ns->mTracksStartTime -= mAdvance;

    // Backfill the skipped interval with silence.
    StreamTracks::Track* track = ns->EnsureTrack(AUDIO_TRACK);
    track->Get<AudioSegment>()->AppendNullData(mAdvance);

    ns->GraphImpl()->DecrementSuspendCount(mStream);
  }
private:
  StreamTime mAdvance;
};
393 :
394 : void
395 0 : AudioNodeStream::AdvanceAndResume(StreamTime aAdvance)
396 : {
397 0 : mMainThreadCurrentTime += aAdvance;
398 0 : GraphImpl()->AppendMessage(MakeUnique<AdvanceAndResumeMessage>(this, aAdvance));
399 0 : }
400 :
void
AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk,
                                  uint32_t aPortIndex)
{
  // Mixes the last output chunks of every input connected to port
  // aPortIndex into aTmpChunk, choosing the output channel count from the
  // superset of the inputs' channels and the node's channelCountMode.
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  AutoTArray<const AudioBlock*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      // AudioParam streams do not contribute audio here.
      continue;
    }

    const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      // Silent chunks contribute nothing to the mix.
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
  }

  // Apply channelCountMode (explicit / clamped-max / max).
  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) {
    // No audible input: produce a null (silent) block.
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->ChannelCount() == outputChannelCount) {
    // Single input already in the right channel configuration: share it
    // instead of mixing.
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  aTmpChunk.AllocateChannels(outputChannelCount);
  DownmixBufferType downmixBuffer;
  ASSERT_ALIGNED16(downmixBuffer.Elements());

  // Accumulate all inputs into the output block (the first chunk copies,
  // the rest add — see AccumulateInputChunk).
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
459 :
460 : void
461 0 : AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex,
462 : const AudioBlock& aChunk,
463 : AudioBlock* aBlock,
464 : DownmixBufferType* aDownmixBuffer)
465 : {
466 0 : AutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels;
467 0 : UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer);
468 :
469 0 : for (uint32_t c = 0; c < channels.Length(); ++c) {
470 0 : const float* inputData = static_cast<const float*>(channels[c]);
471 0 : float* outputData = aBlock->ChannelFloatsForWrite(c);
472 0 : if (inputData) {
473 0 : if (aInputIndex == 0) {
474 0 : AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
475 : } else {
476 0 : AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
477 : }
478 : } else {
479 0 : if (aInputIndex == 0) {
480 0 : PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
481 : }
482 : }
483 : }
484 0 : }
485 :
void
AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const float*>& aOutputChannels,
                                   DownmixBufferType& aDownmixBuffer)
{
  // Fills aOutputChannels with exactly aOutputChannelCount channel pointers
  // for aChunk's data, up- or down-mixing per mChannelInterpretation.
  // Down-mixed samples live in aDownmixBuffer, which must outlive the
  // returned pointers.
  for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
    aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
  }
  if (aOutputChannels.Length() < aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix<float>(&aOutputChannels, aOutputChannelCount, nullptr);
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining aOutputChannels by zeros
      // (null pointers are treated as silent channels by the caller,
      // AccumulateInputChunk).
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(nullptr);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      // Speaker interpretation: mix the surplus channels down into
      // aDownmixBuffer, then repoint aOutputChannels at the mixed data.
      AutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the remaining aOutputChannels
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
                                       aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
529 :
530 : // The MediaStreamGraph guarantees that this is actually one block, for
531 : // AudioNodeStreams.
void
AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
{
  // Produces one WEBAUDIO_BLOCK_SIZE block per output port into mLastChunks,
  // then (for externally-output streams) appends it to the audio track.
  uint16_t outputCount = mLastChunks.Length();
  MOZ_ASSERT(outputCount == std::max(uint16_t(1), mEngine->OutputCount()));

  if (!mIsActive) {
    // mLastChunks are already null.
#ifdef DEBUG
    for (const auto& chunk : mLastChunks) {
      MOZ_ASSERT(chunk.IsNull());
    }
#endif
  } else if (InMutedCycle()) {
    // In a muted cycle: emit silence on every output.
    mInputChunks.Clear();
    for (uint16_t i = 0; i < outputCount; ++i) {
      mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    mInputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(mInputChunks[i], i);
    }
    bool finished = false;
    if (mPassThrough) {
      // Pass-through bypasses the engine entirely.
      MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port");
      mLastChunks[0] = mInputChunks[0];
    } else {
      // Single-port engines get the simpler ProcessBlock() entry point.
      if (maxInputs <= 1 && outputCount <= 1) {
        mEngine->ProcessBlock(this, aFrom,
                              mInputChunks[0], &mLastChunks[0], &finished);
      } else {
        mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished);
      }
    }
    for (uint16_t i = 0; i < outputCount; ++i) {
      NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                   "Invalid WebAudio chunk size");
    }
    if (finished) {
      // The engine will produce no more output after this block.
      mMarkAsFinishedAfterThisBlock = true;
      if (mIsActive) {
        ScheduleCheckForInactive();
      }
    }

    if (GetDisabledTrackMode(static_cast<TrackID>(AUDIO_TRACK)) != DisabledTrackMode::ENABLED) {
      // The track is disabled: replace whatever was produced with silence.
      for (uint32_t i = 0; i < outputCount; ++i) {
        mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  if (!mFinished) {
    // Don't output anything while finished
    if (mFlags & EXTERNAL_OUTPUT) {
      AdvanceOutputSegment();
    }
    if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) {
      // This stream was finished the last time that we looked at it, and all
      // of the depending streams have finished their output as well, so now
      // it's time to mark this stream as finished.
      if (mFlags & EXTERNAL_OUTPUT) {
        FinishOutput();
      }
      FinishOnGraphThread();
    }
  }
}
603 :
void
AudioNodeStream::ProduceOutputBeforeInput(GraphTime aFrom)
{
  // Delay-node-only path: produce this block's output before any input is
  // read, which is what lets DelayNodes break cycles in the graph.
  MOZ_ASSERT(mEngine->AsDelayNodeEngine());
  MOZ_ASSERT(mEngine->OutputCount() == 1,
             "DelayNodeEngine output count should be 1");
  MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles");
  MOZ_ASSERT(mLastChunks.Length() == 1);

  if (!mIsActive) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    mEngine->ProduceBlockBeforeInput(this, aFrom, &mLastChunks[0]);
    NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE,
                 "Invalid WebAudio chunk size");
    // A disabled track emits silence in place of the engine's output.
    if (GetDisabledTrackMode(static_cast<TrackID>(AUDIO_TRACK)) != DisabledTrackMode::ENABLED) {
      mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    }
  }
}
624 :
625 : void
626 0 : AudioNodeStream::AdvanceOutputSegment()
627 : {
628 0 : StreamTracks::Track* track = EnsureTrack(AUDIO_TRACK);
629 : // No more tracks will be coming
630 0 : mTracks.AdvanceKnownTracksTime(STREAM_TIME_MAX);
631 :
632 0 : AudioSegment* segment = track->Get<AudioSegment>();
633 :
634 0 : if (!mLastChunks[0].IsNull()) {
635 0 : segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk());
636 : } else {
637 0 : segment->AppendNullData(mLastChunks[0].GetDuration());
638 : }
639 :
640 0 : for (uint32_t j = 0; j < mListeners.Length(); ++j) {
641 0 : MediaStreamListener* l = mListeners[j];
642 0 : AudioChunk copyChunk = mLastChunks[0].AsAudioChunk();
643 0 : AudioSegment tmpSegment;
644 0 : tmpSegment.AppendAndConsumeChunk(©Chunk);
645 0 : l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
646 0 : segment->GetDuration(), TrackEventCommand::TRACK_EVENT_NONE, tmpSegment);
647 : }
648 0 : }
649 :
650 : void
651 0 : AudioNodeStream::FinishOutput()
652 : {
653 0 : StreamTracks::Track* track = EnsureTrack(AUDIO_TRACK);
654 0 : track->SetEnded();
655 :
656 0 : for (uint32_t j = 0; j < mListeners.Length(); ++j) {
657 0 : MediaStreamListener* l = mListeners[j];
658 0 : AudioSegment emptySegment;
659 0 : l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK,
660 : track->GetSegment()->GetDuration(),
661 0 : TrackEventCommand::TRACK_EVENT_ENDED, emptySegment);
662 : }
663 0 : }
664 :
665 : void
666 0 : AudioNodeStream::AddInput(MediaInputPort* aPort)
667 : {
668 0 : ProcessedMediaStream::AddInput(aPort);
669 0 : AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
670 : // Streams that are not AudioNodeStreams are considered active.
671 0 : if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
672 0 : IncrementActiveInputCount();
673 : }
674 0 : }
675 : void
676 0 : AudioNodeStream::RemoveInput(MediaInputPort* aPort)
677 : {
678 0 : ProcessedMediaStream::RemoveInput(aPort);
679 0 : AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream();
680 : // Streams that are not AudioNodeStreams are considered active.
681 0 : if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) {
682 0 : DecrementActiveInputCount();
683 : }
684 0 : }
685 :
686 : void
687 0 : AudioNodeStream::SetActive()
688 : {
689 0 : if (mIsActive || mMarkAsFinishedAfterThisBlock) {
690 0 : return;
691 : }
692 :
693 0 : mIsActive = true;
694 0 : if (!(mFlags & EXTERNAL_OUTPUT)) {
695 0 : GraphImpl()->DecrementSuspendCount(this);
696 : }
697 0 : if (IsAudioParamStream()) {
698 : // Consumers merely influence stream order.
699 : // They do not read from the stream.
700 0 : return;
701 : }
702 :
703 0 : for (const auto& consumer : mConsumers) {
704 0 : AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
705 0 : if (ns) {
706 0 : ns->IncrementActiveInputCount();
707 : }
708 : }
709 : }
710 :
711 0 : class AudioNodeStream::CheckForInactiveMessage final : public ControlMessage
712 : {
713 : public:
714 0 : explicit CheckForInactiveMessage(AudioNodeStream* aStream) :
715 0 : ControlMessage(aStream) {}
716 0 : void Run() override
717 : {
718 0 : auto ns = static_cast<AudioNodeStream*>(mStream);
719 0 : ns->CheckForInactive();
720 0 : }
721 : };
722 :
723 : void
724 0 : AudioNodeStream::ScheduleCheckForInactive()
725 : {
726 0 : if (mActiveInputCount > 0 && !mMarkAsFinishedAfterThisBlock) {
727 0 : return;
728 : }
729 :
730 0 : auto message = MakeUnique<CheckForInactiveMessage>(this);
731 0 : GraphImpl()->RunMessageAfterProcessing(Move(message));
732 : }
733 :
void
AudioNodeStream::CheckForInactive()
{
  // Deactivates the stream. This happens only when the stream is currently
  // active AND either it is finishing, or it has no active inputs and the
  // engine reports itself inactive; otherwise bail out.
  if (((mActiveInputCount > 0 || mEngine->IsActive()) &&
       !mMarkAsFinishedAfterThisBlock) ||
      !mIsActive) {
    return;
  }

  mIsActive = false;
  mInputChunks.Clear(); // not required for foreseeable future
  // Null out the cached outputs so consumers see silence.
  for (auto& chunk : mLastChunks) {
    chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
  }
  // Inactive streams without external output are suspended (mirrors the
  // DecrementSuspendCount in SetActive()).
  if (!(mFlags & EXTERNAL_OUTPUT)) {
    GraphImpl()->IncrementSuspendCount(this);
  }
  if (IsAudioParamStream()) {
    return;
  }

  // Downstream nodes lose one active input; this may cascade further.
  for (const auto& consumer : mConsumers) {
    AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream();
    if (ns) {
      ns->DecrementActiveInputCount();
    }
  }
}
762 :
void
AudioNodeStream::IncrementActiveInputCount()
{
  ++mActiveInputCount;
  // Gaining an active input may activate this stream (SetActive no-ops if it
  // is already active or finishing).
  SetActive();
}
769 :
void
AudioNodeStream::DecrementActiveInputCount()
{
  MOZ_ASSERT(mActiveInputCount > 0);
  --mActiveInputCount;
  // Losing an active input may allow this stream to go inactive.
  CheckForInactive();
}
777 :
778 : } // namespace mozilla
|