LCOV - code coverage report
Current view: top level - dom/media/webaudio - AudioNodeExternalInputStream.cpp (source / functions)
Test:     output.info
Date:     2017-07-14 16:53:18
Coverage: Lines:     0 / 113 (0.0 %)
          Functions: 0 / 9   (0.0 %)

Source code:
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AlignedTArray.h"
#include "AlignmentUtils.h"
#include "AudioNodeEngine.h"
#include "AudioNodeExternalInputStream.h"
#include "AudioChannelFormat.h"
#include "mozilla/dom/MediaStreamAudioSourceNode.h"

using namespace mozilla::dom;

namespace mozilla {

AudioNodeExternalInputStream::AudioNodeExternalInputStream(
  AudioNodeEngine* aEngine,
  TrackRate aSampleRate)
  : AudioNodeStream(aEngine, NO_STREAM_FLAGS, aSampleRate)
{
  MOZ_COUNT_CTOR(AudioNodeExternalInputStream);
}

AudioNodeExternalInputStream::~AudioNodeExternalInputStream()
{
  MOZ_COUNT_DTOR(AudioNodeExternalInputStream);
}

/* static */ already_AddRefed<AudioNodeExternalInputStream>
AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph,
                                     AudioNodeEngine* aEngine)
{
  AudioContext* ctx = aEngine->NodeMainThread()->Context();
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aGraph->GraphRate() == ctx->SampleRate());

  RefPtr<AudioNodeExternalInputStream> stream =
    new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate());
  stream->mSuspendedCount += ctx->ShouldSuspendNewStream();
  aGraph->AddStream(stream);
  return stream.forget();
}

/**
 * Copies the data in aInput to aOffsetInBlock within aBlock.
 * aBlock must have been allocated with AllocateInputBlock and have a channel
 * count that's a superset of the channels in aInput.
 */
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}

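The final loop hands each channel to ConvertAudioSamplesWithScale, which converts the samples to float and applies the chunk's volume, while channels with no source data are zeroed. The following standalone sketch is not part of this file; the function name and the divide-by-32768 int16-to-float mapping are assumptions used only to model the S16 path.

// Illustrative sketch only, not Gecko code: copy int16 channels into a
// pre-allocated float block, scaling by the chunk volume and writing silence
// for output channels that have no source data.
#include <cstddef>
#include <cstdint>
#include <vector>

static void CopyInt16ChunkToFloatBlock(
    const std::vector<const int16_t*>& aInputChannels, // may be shorter than the block
    std::vector<std::vector<float>>& aBlock,           // pre-allocated output channels
    size_t aOffsetInBlock, size_t aDuration, float aVolume)
{
  for (size_t c = 0; c < aBlock.size(); ++c) {
    float* out = aBlock[c].data() + aOffsetInBlock;
    const int16_t* in = c < aInputChannels.size() ? aInputChannels[c] : nullptr;
    for (size_t i = 0; i < aDuration; ++i) {
      // Assumed int16 -> float mapping (divide by 32768); missing channels
      // become silence, matching the PodZero branch above.
      out[i] = in ? (in[i] * (1.0f / 32768.0f)) * aVolume : 0.0f;
    }
  }
}
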
/**
 * Converts the data in aSegment to a single chunk aBlock. aSegment must have
 * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the
 * channels in every chunk of aSegment. aBlock must be float format or null.
 */
static void ConvertSegmentToAudioBlock(AudioSegment* aSegment,
                                       AudioBlock* aBlock,
                                       int32_t aFallbackChannelCount)
{
  NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration");

  {
    AudioSegment::ChunkIterator ci(*aSegment);
    NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!");
    if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE &&
        (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) {

      bool aligned = true;
      for (size_t i = 0; i < ci->mChannelData.Length(); ++i) {
        if (!IS_ALIGNED16(ci->mChannelData[i])) {
          aligned = false;
          break;
        }
      }

      // Return this chunk directly to avoid copying data.
      if (aligned) {
        *aBlock = *ci;
        return;
      }
    }
  }

  aBlock->AllocateChannels(aFallbackChannelCount);

  uint32_t duration = 0;
  for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) {
    switch (ci->mBufferFormat) {
      case AUDIO_FORMAT_S16: {
        CopyChunkToBlock<int16_t>(*ci, aBlock, duration);
        break;
      }
      case AUDIO_FORMAT_FLOAT32: {
        CopyChunkToBlock<float>(*ci, aBlock, duration);
        break;
      }
      case AUDIO_FORMAT_SILENCE: {
        // The actual type of the sample does not matter here, but we still need
        // to send some audio to the graph.
        CopyChunkToBlock<float>(*ci, aBlock, duration);
        break;
      }
    }
    duration += ci->GetDuration();
  }
}
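
The fallback path above concatenates every chunk of the 128-frame segment at a running offset into a block whose channel count is a superset of the channel counts of all the chunks. A simplified standalone model follows; it is not Gecko code, SimpleChunk and the helper name are hypothetical, the "superset" is treated as a plain maximum, and upmixing is modelled by simply leaving extra channels silent (the real AudioChannelsUpMix may treat standard speaker layouts specially).

// Simplified standalone model, not Gecko code.
#include <algorithm>
#include <cstddef>
#include <vector>

struct SimpleChunk {
  size_t mDuration = 0;                       // frames in this chunk
  std::vector<std::vector<float>> mChannels;  // empty for a null (silent) chunk
};

static std::vector<std::vector<float>>
ConvertChunksToBlock(const std::vector<SimpleChunk>& aChunks, size_t aBlockSize)
{
  // Choose an output channel count that covers every chunk's channel count.
  size_t channels = 0;
  for (const SimpleChunk& chunk : aChunks) {
    channels = std::max(channels, chunk.mChannels.size());
  }

  // Zero-initialized block: channels a chunk lacks simply stay silent.
  std::vector<std::vector<float>> block(channels,
                                        std::vector<float>(aBlockSize, 0.0f));
  size_t offset = 0;
  for (const SimpleChunk& chunk : aChunks) {
    for (size_t c = 0; c < chunk.mChannels.size(); ++c) {
      std::copy_n(chunk.mChannels[c].begin(), chunk.mDuration,
                  block[c].begin() + offset);
    }
    offset += chunk.mDuration;  // each chunk lands at its running offset
  }
  return block;
}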

void
AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                           uint32_t aFlags)
{
  // According to spec, number of outputs is always 1.
  MOZ_ASSERT(mLastChunks.Length() == 1);

  // GC stuff can result in our input stream being destroyed before this stream.
  // Handle that.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    if (!mInputs[0]->PassTrackThrough(tracks->GetID())) {
      continue;
    }

    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }

    const AudioSegment& inputSegment =
        *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd) {
        break;
      }
      next = interval.mEnd;

      // We know this stream does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        segment.AppendNullData(ticks);
      } else {
        // The input stream is not blocked in this interval, so no need to call
        // GraphTimeToStreamTimeWithBlocking.
        StreamTime inputStart =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mStart));
        StreamTime inputEnd =
          std::min(inputSegment.GetDuration(),
                   source->GraphTimeToStreamTime(interval.mEnd));

        segment.AppendSlice(inputSegment, inputStart, inputEnd);
        // Pad if we're looking past the end of the track.
        segment.AppendNullData(ticks - (inputEnd - inputStart));
      }
    }

    for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) {
      inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount());
    }
  }

  uint32_t accumulateIndex = 0;
  if (inputChannels) {
    DownmixBufferType downmixBuffer;
    ASSERT_ALIGNED16(downmixBuffer.Elements());
    for (uint32_t i = 0; i < audioSegments.Length(); ++i) {
      AudioBlock tmpChunk;
      ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels);
      if (!tmpChunk.IsNull()) {
        if (accumulateIndex == 0) {
          mLastChunks[0].AllocateChannels(inputChannels);
        }
        AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer);
        accumulateIndex++;
      }
    }
  }
  if (accumulateIndex == 0) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  }
}
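
The inner loop of ProcessInput walks the input port's intervals between aFrom and aTo: a blocked interval contributes silence, while an unblocked one contributes a slice of the source segment, clamped to the source's duration and padded with silence once the track runs out. The standalone sketch below is not Gecko code and assumes the start and end are already expressed in the source's stream time; it only models what one interval appends to `segment`.

// Illustrative sketch only, not part of this file. aSource stands in for one
// channel of the input track's audio; aOutput collects the appended data.
#include <algorithm>
#include <cstddef>
#include <vector>

static void AppendIntervalToOutput(const std::vector<float>& aSource,
                                   size_t aStart, size_t aEnd, bool aInputIsBlocked,
                                   std::vector<float>& aOutput)
{
  size_t ticks = aEnd - aStart;
  if (aInputIsBlocked) {
    // Blocked input: append silence for the whole interval.
    aOutput.insert(aOutput.end(), ticks, 0.0f);
    return;
  }
  // Clamp the requested range to the data the source actually has.
  size_t inputStart = std::min(aSource.size(), aStart);
  size_t inputEnd = std::min(aSource.size(), aEnd);
  aOutput.insert(aOutput.end(),
                 aSource.begin() + inputStart, aSource.begin() + inputEnd);
  // Pad with silence if the interval runs past the end of the track.
  aOutput.insert(aOutput.end(), ticks - (inputEnd - inputStart), 0.0f);
}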

bool
AudioNodeExternalInputStream::IsEnabled()
{
  return ((MediaStreamAudioSourceNodeEngine*)Engine())->IsEnabled();
}

} // namespace mozilla

Generated by: LCOV version 1.13