/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "ScriptProcessorNode.h"
#include "mozilla/dom/ScriptProcessorNodeBinding.h"
#include "AudioBuffer.h"
#include "AudioDestinationNode.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioProcessingEvent.h"
#include "WebAudioUtils.h"
#include "mozilla/dom/ScriptSettings.h"
#include "mozilla/Mutex.h"
#include "mozilla/PodOperations.h"
#include "nsAutoPtr.h"
#include <deque>

namespace mozilla {
namespace dom {

// The maximum latency, in seconds, that we can live with before dropping
// buffers.
static const float MAX_LATENCY_S = 0.5;

NS_IMPL_ISUPPORTS_INHERITED0(ScriptProcessorNode, AudioNode)

// This class manages a queue of output buffers shared between
// the main thread and the Media Stream Graph thread.
class SharedBuffers final
{
private:
  class OutputQueue final
  {
  public:
    explicit OutputQueue(const char* aName)
      : mMutex(aName)
    {}

    size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
    {
      mMutex.AssertCurrentThreadOwns();

      size_t amount = 0;
      for (size_t i = 0; i < mBufferList.size(); i++) {
        amount += mBufferList[i].SizeOfExcludingThis(aMallocSizeOf, false);
      }

      return amount;
    }

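    // Exposes the mutex guarding the queue so that callers can hold it (via
    // MutexAutoLock) across ReadyToConsume()/Produce()/Consume(); const_cast
    // is needed because const methods also have to take the lock.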
    Mutex& Lock() const { return const_cast<OutputQueue*>(this)->mMutex; }

    size_t ReadyToConsume() const
    {
      // Accessed on both main thread and media graph thread.
      mMutex.AssertCurrentThreadOwns();
      return mBufferList.size();
    }

    // Produce one buffer
    AudioChunk& Produce()
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(NS_IsMainThread());
      mBufferList.push_back(AudioChunk());
      return mBufferList.back();
    }

    // Consumes one buffer.
    AudioChunk Consume()
    {
      mMutex.AssertCurrentThreadOwns();
      MOZ_ASSERT(!NS_IsMainThread());
      MOZ_ASSERT(ReadyToConsume() > 0);
      AudioChunk front = mBufferList.front();
      mBufferList.pop_front();
      return front;
    }

    // Empties the buffer queue.
    void Clear()
    {
      mMutex.AssertCurrentThreadOwns();
      mBufferList.clear();
    }

  private:
    typedef std::deque<AudioChunk> BufferList;

    // Synchronizes access to mBufferList. Note that it's the responsibility
    // of the callers to perform the required locking, and we assert that every
    // time we access mBufferList.
    Mutex mMutex;
    // The list representing the queue.
    BufferList mBufferList;
  };

public:
  explicit SharedBuffers(float aSampleRate)
    : mOutputQueue("SharedBuffers::outputQueue")
    , mDelaySoFar(STREAM_TIME_MAX)
    , mSampleRate(aSampleRate)
    , mLatency(0.0)
    , mDroppingBuffers(false)
  {
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
  {
    size_t amount = aMallocSizeOf(this);

    {
      MutexAutoLock lock(mOutputQueue.Lock());
      amount += mOutputQueue.SizeOfExcludingThis(aMallocSizeOf);
    }

    return amount;
  }

  // main thread
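  // Appends a buffer produced by the script (or null, meaning silence) to the
  // output queue, split into WEBAUDIO_BLOCK_SIZE chunks. If latency has built
  // up past MAX_LATENCY_S, incoming buffers are dropped until the queue
  // drains.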
  void FinishProducingOutputBuffer(ThreadSharedFloatArrayBufferList* aBuffer,
                                   uint32_t aBufferSize)
  {
    MOZ_ASSERT(NS_IsMainThread());

    TimeStamp now = TimeStamp::Now();

    if (mLastEventTime.IsNull()) {
      mLastEventTime = now;
    } else {
      // When main-thread blocking has built up enough that
      // |mLatency > MAX_LATENCY_S|, frame dropping starts. It continues until
      // the output queue is completely empty, at which point the accumulated
      // latency is also reset to 0.
      // It could happen that the output queue becomes empty before the input
      // node has fully caught up. In this case there will be events where
      // |(now - mLastEventTime)| is very short, making mLatency negative.
      // When this happens and the magnitude of |mLatency| again exceeds
      // MAX_LATENCY_S, frame dropping starts again to keep the output queue
      // as short as possible.
      float latency = (now - mLastEventTime).ToSeconds();
      float bufferDuration = aBufferSize / mSampleRate;
      mLatency += latency - bufferDuration;
      mLastEventTime = now;
      if (fabs(mLatency) > MAX_LATENCY_S) {
        mDroppingBuffers = true;
      }
    }

    MutexAutoLock lock(mOutputQueue.Lock());
    if (mDroppingBuffers) {
      if (mOutputQueue.ReadyToConsume()) {
        return;
      }
      mDroppingBuffers = false;
      mLatency = 0;
    }

    for (uint32_t offset = 0; offset < aBufferSize; offset += WEBAUDIO_BLOCK_SIZE) {
      AudioChunk& chunk = mOutputQueue.Produce();
      if (aBuffer) {
        chunk.mDuration = WEBAUDIO_BLOCK_SIZE;
        chunk.mBuffer = aBuffer;
        chunk.mChannelData.SetLength(aBuffer->GetChannels());
        for (uint32_t i = 0; i < aBuffer->GetChannels(); ++i) {
          chunk.mChannelData[i] = aBuffer->GetData(i) + offset;
        }
        chunk.mVolume = 1.0f;
        chunk.mBufferFormat = AUDIO_FORMAT_FLOAT32;
      } else {
        chunk.SetNull(WEBAUDIO_BLOCK_SIZE);
      }
    }
  }

  // graph thread
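  // Pops one block off the output queue; if the main thread has not produced
  // one yet, returns silence and accounts for the missed time in mDelaySoFar.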
  AudioChunk GetOutputBuffer()
  {
    MOZ_ASSERT(!NS_IsMainThread());
    AudioChunk buffer;

    {
      MutexAutoLock lock(mOutputQueue.Lock());
      if (mOutputQueue.ReadyToConsume() > 0) {
        if (mDelaySoFar == STREAM_TIME_MAX) {
          mDelaySoFar = 0;
        }
        buffer = mOutputQueue.Consume();
      } else {
        // If we're out of buffers to consume, just output silence
        buffer.SetNull(WEBAUDIO_BLOCK_SIZE);
        if (mDelaySoFar != STREAM_TIME_MAX) {
          // Remember the delay that we just hit
          mDelaySoFar += WEBAUDIO_BLOCK_SIZE;
        }
      }
    }

    return buffer;
  }

  StreamTime DelaySoFar() const
  {
    MOZ_ASSERT(!NS_IsMainThread());
    return mDelaySoFar == STREAM_TIME_MAX ? 0 : mDelaySoFar;
  }

  void Reset()
  {
    MOZ_ASSERT(!NS_IsMainThread());
    mDelaySoFar = STREAM_TIME_MAX;
    mLatency = 0.0f;
    {
      MutexAutoLock lock(mOutputQueue.Lock());
      mOutputQueue.Clear();
    }
    mLastEventTime = TimeStamp();
  }

private:
  OutputQueue mOutputQueue;
  // How much delay we've seen so far. This measures the amount of delay
  // caused by the main thread lagging behind in producing output buffers.
  // STREAM_TIME_MAX means that we have not received our first buffer yet.
  StreamTime mDelaySoFar;
  // The sample rate of the context.
  float mSampleRate;
  // This is the latency caused by the buffering. If this grows too high, we
  // will drop buffers until it is acceptable.
  float mLatency;
  // This is the time at which we last produced a buffer, to detect if the main
  // thread has been blocked.
  TimeStamp mLastEventTime;
  // True if we should be dropping buffers.
  bool mDroppingBuffers;
};

class ScriptProcessorNodeEngine final : public AudioNodeEngine
{
public:
  ScriptProcessorNodeEngine(ScriptProcessorNode* aNode,
                            AudioDestinationNode* aDestination,
                            uint32_t aBufferSize,
                            uint32_t aNumberOfInputChannels)
    : AudioNodeEngine(aNode)
    , mDestination(aDestination->Stream())
    , mSharedBuffers(new SharedBuffers(mDestination->SampleRate()))
    , mBufferSize(aBufferSize)
    , mInputChannelCount(aNumberOfInputChannels)
    , mInputWriteIndex(0)
  {
  }

  SharedBuffers* GetSharedBuffers() const
  {
    return mSharedBuffers;
  }

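  // Index of the int32 parameter that the main thread sends via
  // SendInt32ParameterToStream() to tell the engine whether the node is
  // currently connected.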
  enum {
    IS_CONNECTED,
  };

  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
  {
    switch (aIndex) {
      case IS_CONNECTED:
        mIsConnected = aParam;
        break;
      default:
        NS_ERROR("Bad Int32Parameter");
    } // End index switch.
  }

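  // Called on the graph thread for each WEBAUDIO_BLOCK_SIZE block of frames.
  // Input blocks are accumulated into mInputBuffer; once mBufferSize frames
  // have been collected they are shipped to the main thread, while output is
  // drawn from the queue that the main thread fills in response.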
  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override
  {
    // This node is not connected to anything. Per spec, we don't fire the
    // onaudioprocess event. We also want to clear out the input and output
    // buffer queue, and output a null buffer.
    if (!mIsConnected) {
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      mSharedBuffers->Reset();
      mInputWriteIndex = 0;
      return;
    }

    // The input buffer is allocated lazily when non-null input is received.
    if (!aInput.IsNull() && !mInputBuffer) {
      mInputBuffer = ThreadSharedFloatArrayBufferList::
        Create(mInputChannelCount, mBufferSize, fallible);
      if (mInputBuffer && mInputWriteIndex) {
        // Zero the leading frames for null chunks that were skipped.
        for (uint32_t i = 0; i < mInputChannelCount; ++i) {
          float* channelData = mInputBuffer->GetDataForWrite(i);
          PodZero(channelData, mInputWriteIndex);
        }
      }
    }

    // First, record the input in our input buffer, if its allocation
    // succeeded.
    uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0;
    for (uint32_t i = 0; i < inputChannelCount; ++i) {
      float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex;
      if (aInput.IsNull()) {
        PodZero(writeData, aInput.GetDuration());
      } else {
        MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check");
        MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount);
        AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]),
                                       aInput.mVolume, writeData);
      }
    }
    mInputWriteIndex += aInput.GetDuration();

    // Now, see if we have data to output.
    // Note that we need to do this before sending the buffer to the main
    // thread so that our delay time is updated.
    *aOutput = mSharedBuffers->GetOutputBuffer();

    if (mInputWriteIndex >= mBufferSize) {
      SendBuffersToMainThread(aStream, aFrom);
      mInputWriteIndex -= mBufferSize;
    }
  }

  bool IsActive() const override
  {
    // Could return false when !mIsConnected after all output chunks produced
    // by main thread events calling
    // SharedBuffers::FinishProducingOutputBuffer() have been processed.
    return true;
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    // Not owned:
    // - mDestination (probably)
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    amount += mSharedBuffers->SizeOfIncludingThis(aMallocSizeOf);
    if (mInputBuffer) {
      amount += mInputBuffer->SizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
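  // Computes the playback time that the completed input buffer corresponds
  // to, wraps the buffer in a runnable, and dispatches it to the main thread,
  // which fires the audioprocess event and queues the script's output in
  // SharedBuffers.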
  void SendBuffersToMainThread(AudioNodeStream* aStream, GraphTime aFrom)
  {
    MOZ_ASSERT(!NS_IsMainThread());

    // We now have a full input buffer ready to be sent to the main thread.
    StreamTime playbackTick = mDestination->GraphTimeToStreamTime(aFrom);
    // Add the duration of the current block
    playbackTick += WEBAUDIO_BLOCK_SIZE;
    // Add the delay caused by the main thread
    playbackTick += mSharedBuffers->DelaySoFar();
    // Compute the playback time in the coordinate system of the destination
    double playbackTime = mDestination->StreamTimeToSeconds(playbackTick);

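    // Runs on the main thread: dispatches the audioprocess event to the node
    // (if anything is listening) and hands whatever output the script
    // produced back to the engine's SharedBuffers.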
    class Command final : public Runnable
    {
    public:
      Command(AudioNodeStream* aStream,
              already_AddRefed<ThreadSharedFloatArrayBufferList> aInputBuffer,
              double aPlaybackTime)
        : mozilla::Runnable("Command")
        , mStream(aStream)
        , mInputBuffer(aInputBuffer)
        , mPlaybackTime(aPlaybackTime)
      {
      }

      NS_IMETHOD Run() override
      {
        RefPtr<ThreadSharedFloatArrayBufferList> output;

        auto engine =
          static_cast<ScriptProcessorNodeEngine*>(mStream->Engine());
        {
          auto node = static_cast<ScriptProcessorNode*>
            (engine->NodeMainThread());
          if (!node) {
            return NS_OK;
          }

          if (node->HasListenersFor(nsGkAtoms::onaudioprocess)) {
            output = DispatchAudioProcessEvent(node);
          }
          // The node may have been destroyed during event dispatch.
        }

        // Append it to our output buffer queue
        engine->GetSharedBuffers()->
          FinishProducingOutputBuffer(output, engine->mBufferSize);

        return NS_OK;
      }

      // Returns the output buffers if set in event handlers.
      ThreadSharedFloatArrayBufferList*
      DispatchAudioProcessEvent(ScriptProcessorNode* aNode)
      {
        AudioContext* context = aNode->Context();
        if (!context) {
          return nullptr;
        }

        AutoJSAPI jsapi;
        if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) {
          return nullptr;
        }
        JSContext* cx = jsapi.cx();
        uint32_t inputChannelCount = aNode->ChannelCount();

        // Create the input buffer
        RefPtr<AudioBuffer> inputBuffer;
        if (mInputBuffer) {
          ErrorResult rv;
          inputBuffer =
            AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                                aNode->BufferSize(), context->SampleRate(),
                                mInputBuffer.forget(), rv);
          if (rv.Failed()) {
            rv.SuppressException();
            return nullptr;
          }
        }

        // Ask content to produce data in the output buffer
        // Note that we always avoid creating the output buffer here, and we try to
        // avoid creating the input buffer as well. The AudioProcessingEvent class
        // knows how to lazily create them if needed once the script tries to access
        // them. Otherwise, we may be able to get away without creating them!
        RefPtr<AudioProcessingEvent> event =
          new AudioProcessingEvent(aNode, nullptr, nullptr);
        event->InitEvent(inputBuffer, inputChannelCount, mPlaybackTime);
        aNode->DispatchTrustedEvent(event);

        // Steal the output buffers if they have been set.
        // Don't create a buffer if it hasn't been used to return output;
        // FinishProducingOutputBuffer() will optimize output = null.
        // GetThreadSharedChannelsForRate() may also return null after OOM.
        if (event->HasOutputBuffer()) {
          ErrorResult rv;
          AudioBuffer* buffer = event->GetOutputBuffer(rv);
          // HasOutputBuffer() returning true means that GetOutputBuffer()
          // will not fail.
          MOZ_ASSERT(!rv.Failed());
          return buffer->GetThreadSharedChannelsForRate(cx);
        }

        return nullptr;
      }
    private:
      RefPtr<AudioNodeStream> mStream;
      RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
      double mPlaybackTime;
    };

    RefPtr<Command> command = new Command(aStream, mInputBuffer.forget(),
                                          playbackTime);
    mAbstractMainThread->Dispatch(command.forget());
  }

  friend class ScriptProcessorNode;

  AudioNodeStream* mDestination;
  nsAutoPtr<SharedBuffers> mSharedBuffers;
  RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
  const uint32_t mBufferSize;
  const uint32_t mInputChannelCount;
  // The write index into the current input buffer
  uint32_t mInputWriteIndex;
  bool mIsConnected = false;
};

ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext,
                                         uint32_t aBufferSize,
                                         uint32_t aNumberOfInputChannels,
                                         uint32_t aNumberOfOutputChannels)
  : AudioNode(aContext,
              aNumberOfInputChannels,
              mozilla::dom::ChannelCountMode::Explicit,
              mozilla::dom::ChannelInterpretation::Speakers)
  , mBufferSize(aBufferSize ?
                  aBufferSize : // respect what the web developer requested
                  4096)         // choose our own buffer size -- 4096 frames for now
  , mNumberOfOutputChannels(aNumberOfOutputChannels)
{
  MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size");
  ScriptProcessorNodeEngine* engine =
    new ScriptProcessorNodeEngine(this,
                                  aContext->Destination(),
                                  BufferSize(),
                                  aNumberOfInputChannels);
  mStream = AudioNodeStream::Create(aContext, engine,
                                    AudioNodeStream::NO_STREAM_FLAGS,
                                    aContext->Graph());
}

ScriptProcessorNode::~ScriptProcessorNode()
{
}

size_t
ScriptProcessorNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  return amount;
}

size_t
ScriptProcessorNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

void
ScriptProcessorNode::EventListenerAdded(nsIAtom* aType)
{
  AudioNode::EventListenerAdded(aType);
  if (aType == nsGkAtoms::onaudioprocess) {
    UpdateConnectedStatus();
  }
}

void
ScriptProcessorNode::EventListenerRemoved(nsIAtom* aType)
{
  AudioNode::EventListenerRemoved(aType);
  if (aType == nsGkAtoms::onaudioprocess) {
    UpdateConnectedStatus();
  }
}

JSObject*
ScriptProcessorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return ScriptProcessorNodeBinding::Wrap(aCx, this, aGivenProto);
}

void
ScriptProcessorNode::UpdateConnectedStatus()
{
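  // The node is treated as connected if it has a phantom input or any input
  // or output connections at all.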
  bool isConnected = mHasPhantomInput ||
    !(OutputNodes().IsEmpty() && OutputParams().IsEmpty()
      && InputNodes().IsEmpty());

  // Events are queued even when there is no listener because a listener
  // may be added while events are in the queue.
  SendInt32ParameterToStream(ScriptProcessorNodeEngine::IS_CONNECTED,
                             isConnected);

  if (isConnected && HasListenersFor(nsGkAtoms::onaudioprocess)) {
    MarkActive();
  } else {
    MarkInactive();
  }
}

} // namespace dom
} // namespace mozilla