Line data Source code
1 : /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 : /* vim:set ts=2 sw=2 sts=2 et cindent: */
3 : /* This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "PannerNode.h"
8 : #include "AlignmentUtils.h"
9 : #include "AudioDestinationNode.h"
10 : #include "AudioNodeEngine.h"
11 : #include "AudioNodeStream.h"
12 : #include "AudioListener.h"
13 : #include "PanningUtils.h"
14 : #include "AudioBufferSourceNode.h"
15 : #include "PlayingRefChangeHandler.h"
16 : #include "blink/HRTFPanner.h"
17 : #include "blink/HRTFDatabaseLoader.h"
18 : #include "nsAutoPtr.h"
19 :
20 : using WebCore::HRTFDatabaseLoader;
21 : using WebCore::HRTFPanner;
22 :
23 : namespace mozilla {
24 : namespace dom {
25 :
26 : using namespace std;
27 :
NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)

// Custom unlink: besides dropping the six AudioParam references, make sure the
// AudioContext no longer holds a pointer to this panner once it is unlinked,
// so the cycle collector can actually reclaim it.
NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(PannerNode, AudioNode)
  if (tmp->Context()) {
    tmp->Context()->UnregisterPannerNode(tmp);
  }
  NS_IMPL_CYCLE_COLLECTION_UNLINK(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ)
NS_IMPL_CYCLE_COLLECTION_UNLINK_END

// Traverse the six AudioParam members so the cycle collector can see the
// edges from this node to them.
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode)
  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(PannerNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode)
44 :
// PannerNodeEngine is the rendering-thread counterpart of PannerNode.  All of
// its state is pushed from the main thread via AudioNodeStream messages
// (RecvTimelineEvent / Set*Parameter below), and ProcessBlock spatializes each
// audio block with either the equal-power or the HRTF panning model.
class PannerNodeEngine final : public AudioNodeEngine
{
public:
  explicit PannerNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination)
    : AudioNodeEngine(aNode)
    , mDestination(aDestination->Stream())
    // Please keep these default values consistent with PannerNode::PannerNode below.
    , mPanningModelFunction(&PannerNodeEngine::EqualPowerPanningFunction)
    , mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction)
    , mPositionX(0.)
    , mPositionY(0.)
    , mPositionZ(0.)
    , mOrientationX(1.)
    , mOrientationY(0.)
    , mOrientationZ(0.)
    , mVelocity()
    , mRefDistance(1.)
    , mMaxDistance(10000.)
    , mRolloffFactor(1.)
    , mConeInnerAngle(360.)
    , mConeOuterAngle(360.)
    , mConeOuterGain(0.)
    // These will be initialized when a PannerNode is created, so just initialize them
    // to some dummy values here.
    , mListenerDopplerFactor(0.)
    , mListenerSpeedOfSound(0.)
    // INT_MIN is a sentinel meaning "no HRTF tail-time playing reference is
    // held"; see ProcessBlock and IsActive below.
    , mLeftOverData(INT_MIN)
  {
  }

  // Routes an automation event to the AudioParamTimeline selected by aIndex,
  // after converting the event's times from seconds to stream ticks.
  void RecvTimelineEvent(uint32_t aIndex, AudioTimelineEvent& aEvent) override
  {
    MOZ_ASSERT(mDestination);
    WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent,
                                                    mDestination);
    switch (aIndex) {
    case PannerNode::POSITIONX:
      mPositionX.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::POSITIONY:
      mPositionY.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::POSITIONZ:
      mPositionZ.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::ORIENTATIONX:
      mOrientationX.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::ORIENTATIONY:
      mOrientationY.InsertEvent<int64_t>(aEvent);
      break;
    case PannerNode::ORIENTATIONZ:
      mOrientationZ.InsertEvent<int64_t>(aEvent);
      break;
    default:
      NS_ERROR("Bad PannerNode TimelineParameter");
    }
  }

  // Lazily instantiates the HRTF panner.  Main-thread only: the database
  // loader must be fetched there, and the engine will not touch mHRTFPanner
  // from the graph thread until a later PANNING_MODEL message arrives.
  void CreateHRTFPanner()
  {
    MOZ_ASSERT(NS_IsMainThread());
    if (mHRTFPanner) {
      return;
    }
    // HRTFDatabaseLoader needs to be fetched on the main thread.
    already_AddRefed<HRTFDatabaseLoader> loader =
      HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(NodeMainThread()->Context()->SampleRate());
    mHRTFPanner = new HRTFPanner(NodeMainThread()->Context()->SampleRate(), Move(loader));
  }

  // Selects the panning or distance model by swapping the corresponding
  // member-function pointer.
  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
  {
    switch (aIndex) {
    case PannerNode::PANNING_MODEL:
      switch (PanningModelType(aParam)) {
      case PanningModelType::Equalpower:
        mPanningModelFunction = &PannerNodeEngine::EqualPowerPanningFunction;
        break;
      case PanningModelType::HRTF:
        mPanningModelFunction = &PannerNodeEngine::HRTFPanningFunction;
        break;
      default:
        NS_NOTREACHED("We should never see the alternate names here");
        break;
      }
      break;
    case PannerNode::DISTANCE_MODEL:
      switch (DistanceModelType(aParam)) {
      case DistanceModelType::Inverse:
        mDistanceModelFunction = &PannerNodeEngine::InverseGainFunction;
        break;
      case DistanceModelType::Linear:
        mDistanceModelFunction = &PannerNodeEngine::LinearGainFunction;
        break;
      case DistanceModelType::Exponential:
        mDistanceModelFunction = &PannerNodeEngine::ExponentialGainFunction;
        break;
      default:
        NS_NOTREACHED("We should never see the alternate names here");
        break;
      }
      break;
    default:
      NS_ERROR("Bad PannerNodeEngine Int32Parameter");
    }
  }

  // Receives vector-valued parameters: the listener's frame of reference,
  // and (for the legacy setters) whole position/orientation/velocity updates.
  void SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aParam) override
  {
    switch (aIndex) {
    case PannerNode::LISTENER_POSITION: mListenerPosition = aParam; break;
    case PannerNode::LISTENER_FRONT_VECTOR: mListenerFrontVector = aParam; break;
    case PannerNode::LISTENER_RIGHT_VECTOR: mListenerRightVector = aParam; break;
    case PannerNode::LISTENER_VELOCITY: mListenerVelocity = aParam; break;
    case PannerNode::POSITION:
      // A whole-point update overwrites the per-axis timelines with a
      // constant value on each of them.
      mPositionX.SetValue(aParam.x);
      mPositionY.SetValue(aParam.y);
      mPositionZ.SetValue(aParam.z);
      break;
    case PannerNode::ORIENTATION:
      mOrientationX.SetValue(aParam.x);
      mOrientationY.SetValue(aParam.y);
      mOrientationZ.SetValue(aParam.z);
      break;
    case PannerNode::VELOCITY: mVelocity = aParam; break;
    default:
      NS_ERROR("Bad PannerNodeEngine ThreeDPointParameter");
    }
  }

  // Receives scalar parameters (distance model knobs, cone angles/gain, and
  // the listener's doppler settings).
  void SetDoubleParameter(uint32_t aIndex, double aParam) override
  {
    switch (aIndex) {
    case PannerNode::LISTENER_DOPPLER_FACTOR: mListenerDopplerFactor = aParam; break;
    case PannerNode::LISTENER_SPEED_OF_SOUND: mListenerSpeedOfSound = aParam; break;
    case PannerNode::REF_DISTANCE: mRefDistance = aParam; break;
    case PannerNode::MAX_DISTANCE: mMaxDistance = aParam; break;
    case PannerNode::ROLLOFF_FACTOR: mRolloffFactor = aParam; break;
    case PannerNode::CONE_INNER_ANGLE: mConeInnerAngle = aParam; break;
    case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break;
    case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break;
    default:
      NS_ERROR("Bad PannerNodeEngine DoubleParameter");
    }
  }

  // Processes one WEBAUDIO_BLOCK_SIZE block.  Also manages the playing
  // reference that keeps the stream alive while the HRTF panner drains its
  // tail after the input goes silent.
  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool *aFinished) override
  {
    if (aInput.IsNull()) {
      // mLeftOverData != INT_MIN means that the panning model was HRTF and a
      // tail-time reference was added. Even if the model is now equalpower,
      // the reference will need to be removed.
      if (mLeftOverData > 0 &&
          mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
        mLeftOverData -= WEBAUDIO_BLOCK_SIZE;
      } else {
        if (mLeftOverData != INT_MIN) {
          // Tail fully drained: drop the sentinel back, reset the panner's
          // internal state, and release the playing reference on the main
          // thread once the stream state update has been flushed.
          mLeftOverData = INT_MIN;
          aStream->ScheduleCheckForInactive();
          mHRTFPanner->reset();

          RefPtr<PlayingRefChangeHandler> refchanged =
            new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE);
          aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
            refchanged.forget());
        }
        aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
        return;
      }
    } else if (mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) {
      if (mLeftOverData == INT_MIN) {
        // First non-null block under HRTF: take a playing reference so the
        // stream stays alive for the tail time.
        RefPtr<PlayingRefChangeHandler> refchanged =
          new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF);
        aStream->Graph()->DispatchToMainThreadAfterStreamStateUpdate(
          refchanged.forget());
      }
      mLeftOverData = mHRTFPanner->maxTailFrames();
    }

    StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom);
    (this->*mPanningModelFunction)(aInput, aOutput, tick);
  }

  // Active while an HRTF tail-time reference is held (or input is flowing).
  bool IsActive() const override
  {
    return mLeftOverData != INT_MIN;
  }

  void ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation);
  float ComputeConeGain(const ThreeDPoint& position, const ThreeDPoint& orientation);
  // Compute how much the distance contributes to the gain reduction.
  double ComputeDistanceGain(const ThreeDPoint& position);

  void EqualPowerPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);
  void HRTFPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);

  float LinearGainFunction(double aDistance);
  float InverseGainFunction(double aDistance);
  float ExponentialGainFunction(double aDistance);

  ThreeDPoint ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime& tick);

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf);
    if (mHRTFPanner) {
      amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf);
    }

    return amount;
  }

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

  AudioNodeStream* mDestination;
  // This member is set on the main thread, but is not accessed on the rendering
  // thread until mPanningModelFunction has changed, and this happens strictly
  // later, via a MediaStreamGraph ControlMessage.
  nsAutoPtr<HRTFPanner> mHRTFPanner;
  typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick);
  PanningModelFunction mPanningModelFunction;
  typedef float (PannerNodeEngine::*DistanceModelFunction)(double aDistance);
  DistanceModelFunction mDistanceModelFunction;
  // Source position and orientation, one a-rate timeline per axis.
  AudioParamTimeline mPositionX;
  AudioParamTimeline mPositionY;
  AudioParamTimeline mPositionZ;
  AudioParamTimeline mOrientationX;
  AudioParamTimeline mOrientationY;
  AudioParamTimeline mOrientationZ;
  ThreeDPoint mVelocity;
  // Distance-model and cone parameters (see the WebAudio spec).
  double mRefDistance;
  double mMaxDistance;
  double mRolloffFactor;
  double mConeInnerAngle;
  double mConeOuterAngle;
  double mConeOuterGain;
  // Mirrored listener state, pushed from AudioListener on the main thread.
  ThreeDPoint mListenerPosition;
  ThreeDPoint mListenerFrontVector;
  ThreeDPoint mListenerRightVector;
  ThreeDPoint mListenerVelocity;
  double mListenerDopplerFactor;
  double mListenerSpeedOfSound;
  // Remaining HRTF tail frames, or INT_MIN when no tail reference is held.
  int mLeftOverData;
};
295 :
// Creates the node with spec-default parameter values, wires up its
// AudioNodeStream/engine pair, and registers it with the context's listener
// so listener updates are forwarded to the engine.
PannerNode::PannerNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Clamped_max,
              ChannelInterpretation::Speakers)
  // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above.
  , mPanningModel(PanningModelType::Equalpower)
  , mDistanceModel(DistanceModelType::Inverse)
  , mPositionX(new AudioParam(this, PannerNode::POSITIONX, this->NodeType(), 0.f))
  , mPositionY(new AudioParam(this, PannerNode::POSITIONY, this->NodeType(), 0.f))
  , mPositionZ(new AudioParam(this, PannerNode::POSITIONZ, this->NodeType(), 0.f))
  , mOrientationX(new AudioParam(this, PannerNode::ORIENTATIONX, this->NodeType(), 1.0f))
  , mOrientationY(new AudioParam(this, PannerNode::ORIENTATIONY, this->NodeType(), 0.f))
  , mOrientationZ(new AudioParam(this, PannerNode::ORIENTATIONZ, this->NodeType(), 0.f))
  , mVelocity()
  , mRefDistance(1.)
  , mMaxDistance(10000.)
  , mRolloffFactor(1.)
  , mConeInnerAngle(360.)
  , mConeOuterAngle(360.)
  , mConeOuterGain(0.)
{
  mStream = AudioNodeStream::Create(aContext,
                                    new PannerNodeEngine(this, aContext->Destination()),
                                    AudioNodeStream::NO_STREAM_FLAGS,
                                    aContext->Graph());
  // We should register once we have set up our stream and engine.
  Context()->Listener()->RegisterPannerNode(this);
}
325 :
326 0 : PannerNode::~PannerNode()
327 : {
328 0 : if (Context()) {
329 0 : Context()->UnregisterPannerNode(this);
330 : }
331 0 : }
332 :
333 : /* static */ already_AddRefed<PannerNode>
334 0 : PannerNode::Create(AudioContext& aAudioContext,
335 : const PannerOptions& aOptions,
336 : ErrorResult& aRv)
337 : {
338 0 : if (aAudioContext.CheckClosed(aRv)) {
339 0 : return nullptr;
340 : }
341 :
342 0 : RefPtr<PannerNode> audioNode = new PannerNode(&aAudioContext);
343 :
344 0 : audioNode->Initialize(aOptions, aRv);
345 0 : if (NS_WARN_IF(aRv.Failed())) {
346 0 : return nullptr;
347 : }
348 :
349 0 : audioNode->SetPanningModel(aOptions.mPanningModel);
350 0 : audioNode->SetDistanceModel(aOptions.mDistanceModel);
351 0 : audioNode->SetPosition(aOptions.mPositionX, aOptions.mPositionY,
352 0 : aOptions.mPositionZ);
353 0 : audioNode->SetOrientation(aOptions.mOrientationX, aOptions.mOrientationY,
354 0 : aOptions.mOrientationZ);
355 0 : audioNode->SetRefDistance(aOptions.mRefDistance);
356 0 : audioNode->SetMaxDistance(aOptions.mMaxDistance);
357 0 : audioNode->SetRolloffFactor(aOptions.mRolloffFactor);
358 0 : audioNode->SetConeInnerAngle(aOptions.mConeInnerAngle);
359 0 : audioNode->SetConeOuterAngle(aOptions.mConeOuterAngle);
360 0 : audioNode->SetConeOuterGain(aOptions.mConeOuterGain);
361 :
362 0 : return audioNode.forget();
363 : }
364 :
// Records the panning model and forwards it to the engine.  For HRTF the
// panner object must be created ahead of time, on the main thread.
void PannerNode::SetPanningModel(PanningModelType aPanningModel)
{
  mPanningModel = aPanningModel;
  if (mPanningModel == PanningModelType::HRTF) {
    // We can set the engine's `mHRTFPanner` member here from the main thread,
    // because the engine will not touch it from the MediaStreamGraph
    // thread until the PANNING_MODEL message sent below is received.
    static_cast<PannerNodeEngine*>(mStream->Engine())->CreateHRTFPanner();
  }
  SendInt32ParameterToStream(PANNING_MODEL, int32_t(mPanningModel));
}
376 :
377 : size_t
378 0 : PannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
379 : {
380 0 : size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
381 0 : amount += mSources.ShallowSizeOfExcludingThis(aMallocSizeOf);
382 0 : return amount;
383 : }
384 :
385 : size_t
386 0 : PannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
387 : {
388 0 : return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
389 : }
390 :
// Creates the JS reflector for this node via the WebIDL binding layer.
JSObject*
PannerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return PannerNodeBinding::Wrap(aCx, this, aGivenProto);
}
396 :
397 0 : void PannerNode::DestroyMediaStream()
398 : {
399 0 : if (Context()) {
400 0 : Context()->UnregisterPannerNode(this);
401 : }
402 0 : AudioNode::DestroyMediaStream();
403 0 : }
404 :
405 : // Those three functions are described in the spec.
406 : float
407 0 : PannerNodeEngine::LinearGainFunction(double aDistance)
408 : {
409 0 : return 1 - mRolloffFactor * (std::max(std::min(aDistance, mMaxDistance), mRefDistance) - mRefDistance) / (mMaxDistance - mRefDistance);
410 : }
411 :
412 : float
413 0 : PannerNodeEngine::InverseGainFunction(double aDistance)
414 : {
415 0 : return mRefDistance / (mRefDistance + mRolloffFactor * (std::max(aDistance, mRefDistance) - mRefDistance));
416 : }
417 :
418 : float
419 0 : PannerNodeEngine::ExponentialGainFunction(double aDistance)
420 : {
421 0 : return pow(std::max(aDistance, mRefDistance) / mRefDistance, -mRolloffFactor);
422 : }
423 :
// Spatializes one block with the HRTF model: cone and distance gains are
// folded into the input's volume, then the HRTF panner applies the
// direction-dependent delay and convolution.
void
PannerNodeEngine::HRTFPanningFunction(const AudioBlock& aInput,
                                      AudioBlock* aOutput,
                                      StreamTime tick)
{
  // The output of this node is always stereo, no matter what the inputs are.
  aOutput->AllocateChannels(2);

  float azimuth, elevation;

  // Sample position and orientation at the start of the block (the HRTF path
  // is not sample-accurate with respect to these params).
  ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick);
  ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick);
  if (!orientation.IsZero()) {
    orientation.Normalize();
  }
  ComputeAzimuthAndElevation(position, azimuth, elevation);

  AudioBlock input = aInput;
  // Gain is applied before the delay and convolution of the HRTF.
  input.mVolume *= ComputeConeGain(position, orientation) * ComputeDistanceGain(position);

  mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
}
447 :
448 : ThreeDPoint
449 0 : PannerNodeEngine::ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime &tick)
450 : {
451 0 : return ThreeDPoint(aX.GetValueAtTime(tick),
452 0 : aY.GetValueAtTime(tick),
453 0 : aZ.GetValueAtTime(tick));
454 : }
455 :
// Spatializes one block with the equal-power model.  Two paths: a fast path
// when all six position/orientation params are constant over the block (one
// gain pair for the whole block), and a sample-accurate path that computes a
// gain pair per sample when any param has automation events.
void
PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput,
                                            AudioBlock* aOutput,
                                            StreamTime tick)
{
  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
  int inputChannels = aInput.ChannelCount();

  // Optimize the case where the position and orientation is constant for this
  // processing block: we can just apply a constant gain on the left and right
  // channel
  if (mPositionX.HasSimpleValue() &&
      mPositionY.HasSimpleValue() &&
      mPositionZ.HasSimpleValue() &&
      mOrientationX.HasSimpleValue() &&
      mOrientationY.HasSimpleValue() &&
      mOrientationZ.HasSimpleValue()) {

    ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick);
    ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick);
    if (!orientation.IsZero()) {
      orientation.Normalize();
    }

    // If the source and the listener are in the same spot, and no cone gain
    // is specified, this node is a no-op.
    if (mListenerPosition == position &&
        mConeInnerAngle == 360 &&
        mConeOuterAngle == 360) {
      *aOutput = aInput;
      return;
    }

    // The output of this node is always stereo, no matter what the inputs are.
    aOutput->AllocateChannels(2);

    ComputeAzimuthAndElevation(position, azimuth, elevation);
    coneGain = ComputeConeGain(position, orientation);

    // The following algorithm is described in the spec.
    // Clamp azimuth to the [-180, 180] range.
    azimuth = min(180.f, max(-180.f, azimuth));

    // Wrap around into [-90, 90] (sources behind the listener mirror to the
    // matching position in front).
    if (azimuth < -90.f) {
      azimuth = -180.f - azimuth;
    } else if (azimuth > 90) {
      azimuth = 180.f - azimuth;
    }

    // Normalize the value in the [0, 1] range.
    if (inputChannels == 1) {
      normalizedAzimuth = (azimuth + 90.f) / 180.f;
    } else {
      if (azimuth <= 0) {
        normalizedAzimuth = (azimuth + 90.f) / 90.f;
      } else {
        normalizedAzimuth = azimuth / 90.f;
      }
    }

    distanceGain = ComputeDistanceGain(position);

    // Actually compute the left and right gain.
    gainL = cos(0.5 * M_PI * normalizedAzimuth);
    gainR = sin(0.5 * M_PI * normalizedAzimuth);

    // Compute the output.
    ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0);

    aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
  } else {
    // Sample-accurate path: fetch a full block of values for each automated
    // param (constant params keep their single value in slot 0).
    float positionX[WEBAUDIO_BLOCK_SIZE];
    float positionY[WEBAUDIO_BLOCK_SIZE];
    float positionZ[WEBAUDIO_BLOCK_SIZE];
    float orientationX[WEBAUDIO_BLOCK_SIZE];
    float orientationY[WEBAUDIO_BLOCK_SIZE];
    float orientationZ[WEBAUDIO_BLOCK_SIZE];

    // The output of this node is always stereo, no matter what the inputs are.
    aOutput->AllocateChannels(2);

    if (!mPositionX.HasSimpleValue()) {
      mPositionX.GetValuesAtTime(tick, positionX, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionX[0] = mPositionX.GetValueAtTime(tick);
    }
    if (!mPositionY.HasSimpleValue()) {
      mPositionY.GetValuesAtTime(tick, positionY, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionY[0] = mPositionY.GetValueAtTime(tick);
    }
    if (!mPositionZ.HasSimpleValue()) {
      mPositionZ.GetValuesAtTime(tick, positionZ, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionZ[0] = mPositionZ.GetValueAtTime(tick);
    }
    if (!mOrientationX.HasSimpleValue()) {
      mOrientationX.GetValuesAtTime(tick, orientationX, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationX[0] = mOrientationX.GetValueAtTime(tick);
    }
    if (!mOrientationY.HasSimpleValue()) {
      mOrientationY.GetValuesAtTime(tick, orientationY, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationY[0] = mOrientationY.GetValueAtTime(tick);
    }
    if (!mOrientationZ.HasSimpleValue()) {
      mOrientationZ.GetValuesAtTime(tick, orientationZ, WEBAUDIO_BLOCK_SIZE);
    } else {
      orientationZ[0] = mOrientationZ.GetValueAtTime(tick);
    }

    // Left gains in the first WEBAUDIO_BLOCK_SIZE slots, right gains in the
    // next; 4 extra floats so the pair can be 16-byte aligned.
    float computedGain[2*WEBAUDIO_BLOCK_SIZE + 4];
    bool onLeft[WEBAUDIO_BLOCK_SIZE];

    float* alignedComputedGain = ALIGNED16(computedGain);
    ASSERT_ALIGNED16(alignedComputedGain);
    for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
      ThreeDPoint position(mPositionX.HasSimpleValue() ? positionX[0] : positionX[counter],
                           mPositionY.HasSimpleValue() ? positionY[0] : positionY[counter],
                           mPositionZ.HasSimpleValue() ? positionZ[0] : positionZ[counter]);
      ThreeDPoint orientation(mOrientationX.HasSimpleValue() ? orientationX[0] : orientationX[counter],
                              mOrientationY.HasSimpleValue() ? orientationY[0] : orientationY[counter],
                              mOrientationZ.HasSimpleValue() ? orientationZ[0] : orientationZ[counter]);
      if (!orientation.IsZero()) {
        orientation.Normalize();
      }

      ComputeAzimuthAndElevation(position, azimuth, elevation);
      coneGain = ComputeConeGain(position, orientation);

      // The following algorithm is described in the spec.
      // Clamp azimuth to the [-180, 180] range.
      azimuth = min(180.f, max(-180.f, azimuth));

      // Wrap around into [-90, 90].
      if (azimuth < -90.f) {
        azimuth = -180.f - azimuth;
      } else if (azimuth > 90) {
        azimuth = 180.f - azimuth;
      }

      // Normalize the value in the [0, 1] range.
      if (inputChannels == 1) {
        normalizedAzimuth = (azimuth + 90.f) / 180.f;
      } else {
        if (azimuth <= 0) {
          normalizedAzimuth = (azimuth + 90.f) / 90.f;
        } else {
          normalizedAzimuth = azimuth / 90.f;
        }
      }

      distanceGain = ComputeDistanceGain(position);

      // Actually compute the left and right gain.
      float gainL = cos(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume * distanceGain * coneGain;
      float gainR = sin(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume * distanceGain * coneGain;

      alignedComputedGain[counter] = gainL;
      alignedComputedGain[WEBAUDIO_BLOCK_SIZE + counter] = gainR;
      onLeft[counter] = azimuth <= 0;
    }

    // Apply the gain to the output buffer
    ApplyStereoPanning(aInput, aOutput, alignedComputedGain, &alignedComputedGain[WEBAUDIO_BLOCK_SIZE], onLeft);

  }
}
626 :
// This algorithm is specified in the webaudio spec: derive the azimuth (in
// the listener's horizontal plane, relative to the front vector) and the
// elevation of the source, both in degrees.
void
PannerNodeEngine::ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation)
{
  ThreeDPoint sourceListener = position - mListenerPosition;
  // Source and listener coincide: by convention, straight ahead.
  if (sourceListener.IsZero()) {
    aAzimuth = 0.0;
    aElevation = 0.0;
    return;
  }

  sourceListener.Normalize();

  // Project the source-listener vector on the x-z plane.
  const ThreeDPoint& listenerFront = mListenerFrontVector;
  const ThreeDPoint& listenerRight = mListenerRightVector;
  ThreeDPoint up = listenerRight.CrossProduct(listenerFront);

  // Elevation is the angle above/below the listener's horizontal plane.
  double upProjection = sourceListener.DotProduct(up);
  aElevation = 90 - 180 * acos(upProjection) / M_PI;

  // Fold the result into the [-90, 90] range.
  if (aElevation > 90) {
    aElevation = 180 - aElevation;
  } else if (aElevation < -90) {
    aElevation = -180 - aElevation;
  }

  ThreeDPoint projectedSource = sourceListener - up * upProjection;
  if (projectedSource.IsZero()) {
    // source - listener direction is up or down.
    aAzimuth = 0.0;
    return;
  }
  projectedSource.Normalize();

  // Actually compute the angle, and convert to degrees
  double projection = projectedSource.DotProduct(listenerRight);
  aAzimuth = 180 * acos(projection) / M_PI;

  // Compute whether the source is in front or behind the listener.
  double frontBack = projectedSource.DotProduct(listenerFront);
  if (frontBack < 0) {
    aAzimuth = 360 - aAzimuth;
  }
  // Rotate the azimuth so it is relative to the listener front vector instead
  // of the right vector.
  if ((aAzimuth >= 0) && (aAzimuth <= 270)) {
    aAzimuth = 90 - aAzimuth;
  } else {
    aAzimuth = 450 - aAzimuth;
  }
}
679 :
680 : // This algorithm is described in the WebAudio spec.
681 : float
682 0 : PannerNodeEngine::ComputeConeGain(const ThreeDPoint& position,
683 : const ThreeDPoint& orientation)
684 : {
685 : // Omnidirectional source
686 0 : if (orientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) {
687 0 : return 1;
688 : }
689 :
690 : // Normalized source-listener vector
691 0 : ThreeDPoint sourceToListener = mListenerPosition - position;
692 0 : sourceToListener.Normalize();
693 :
694 : // Angle between the source orientation vector and the source-listener vector
695 0 : double dotProduct = sourceToListener.DotProduct(orientation);
696 0 : double angle = 180 * acos(dotProduct) / M_PI;
697 0 : double absAngle = fabs(angle);
698 :
699 : // Divide by 2 here since API is entire angle (not half-angle)
700 0 : double absInnerAngle = fabs(mConeInnerAngle) / 2;
701 0 : double absOuterAngle = fabs(mConeOuterAngle) / 2;
702 0 : double gain = 1;
703 :
704 0 : if (absAngle <= absInnerAngle) {
705 : // No attenuation
706 0 : gain = 1;
707 0 : } else if (absAngle >= absOuterAngle) {
708 : // Max attenuation
709 0 : gain = mConeOuterGain;
710 : } else {
711 : // Between inner and outer cones
712 : // inner -> outer, x goes from 0 -> 1
713 0 : double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle);
714 0 : gain = (1 - x) + mConeOuterGain * x;
715 : }
716 :
717 0 : return gain;
718 : }
719 :
720 : double
721 0 : PannerNodeEngine::ComputeDistanceGain(const ThreeDPoint& position)
722 : {
723 0 : ThreeDPoint distanceVec = position - mListenerPosition;
724 0 : float distance = sqrt(distanceVec.DotProduct(distanceVec));
725 0 : return std::max(0.0f, (this->*mDistanceModelFunction)(distance));
726 : }
727 :
728 : float
729 0 : PannerNode::ComputeDopplerShift()
730 : {
731 0 : double dopplerShift = 1.0; // Initialize to default value
732 :
733 0 : AudioListener* listener = Context()->Listener();
734 :
735 0 : if (listener->DopplerFactor() > 0) {
736 : // Don't bother if both source and listener have no velocity.
737 0 : if (!mVelocity.IsZero() || !listener->Velocity().IsZero()) {
738 : // Calculate the source to listener vector.
739 0 : ThreeDPoint sourceToListener = ConvertAudioParamTo3DP(mPositionX, mPositionY, mPositionZ) - listener->Velocity();
740 :
741 0 : double sourceListenerMagnitude = sourceToListener.Magnitude();
742 :
743 0 : double listenerProjection = sourceToListener.DotProduct(listener->Velocity()) / sourceListenerMagnitude;
744 0 : double sourceProjection = sourceToListener.DotProduct(mVelocity) / sourceListenerMagnitude;
745 :
746 0 : listenerProjection = -listenerProjection;
747 0 : sourceProjection = -sourceProjection;
748 :
749 0 : double scaledSpeedOfSound = listener->SpeedOfSound() / listener->DopplerFactor();
750 0 : listenerProjection = min(listenerProjection, scaledSpeedOfSound);
751 0 : sourceProjection = min(sourceProjection, scaledSpeedOfSound);
752 :
753 0 : dopplerShift = ((listener->SpeedOfSound() - listener->DopplerFactor() * listenerProjection) / (listener->SpeedOfSound() - listener->DopplerFactor() * sourceProjection));
754 :
755 0 : WebAudioUtils::FixNaN(dopplerShift); // Avoid illegal values
756 :
757 : // Limit the pitch shifting to 4 octaves up and 3 octaves down.
758 0 : dopplerShift = min(dopplerShift, 16.);
759 0 : dopplerShift = max(dopplerShift, 0.125);
760 : }
761 : }
762 :
763 0 : return dopplerShift;
764 : }
765 :
766 : void
767 0 : PannerNode::FindConnectedSources()
768 : {
769 0 : mSources.Clear();
770 0 : std::set<AudioNode*> cycleSet;
771 0 : FindConnectedSources(this, mSources, cycleSet);
772 0 : }
773 :
// Depth-first walk over aNode's inputs, collecting every still-playing
// AudioBufferSourceNode into aSources.  aNodesSeen prevents revisiting nodes
// (and thus looping forever on cyclic graphs).
void
PannerNode::FindConnectedSources(AudioNode* aNode,
                                 nsTArray<AudioBufferSourceNode*>& aSources,
                                 std::set<AudioNode*>& aNodesSeen)
{
  if (!aNode) {
    return;
  }

  const nsTArray<InputNode>& inputNodes = aNode->InputNodes();

  for(unsigned i = 0; i < inputNodes.Length(); i++) {
    // Return if we find a node that we have seen already.
    // NOTE(review): this abandons the remaining sibling inputs rather than
    // just skipping the duplicate (a `continue`) -- confirm this is
    // intentional before changing it.
    if (aNodesSeen.find(inputNodes[i].mInputNode) != aNodesSeen.end()) {
      return;
    }
    aNodesSeen.insert(inputNodes[i].mInputNode);
    // Recurse
    FindConnectedSources(inputNodes[i].mInputNode, aSources, aNodesSeen);

    // Check if this node is an AudioBufferSourceNode that still have a stream,
    // which means it has not finished playing.
    AudioBufferSourceNode* node = inputNodes[i].mInputNode->AsAudioBufferSourceNode();
    if (node && node->GetStream()) {
      aSources.AppendElement(node);
    }
  }
}
802 :
803 : void
804 0 : PannerNode::SendDopplerToSourcesIfNeeded()
805 : {
806 : // Don't bother sending the doppler shift if both the source and the listener
807 : // are not moving, because the doppler shift is going to be 1.0.
808 0 : if (!(Context()->Listener()->Velocity().IsZero() && mVelocity.IsZero())) {
809 0 : for(uint32_t i = 0; i < mSources.Length(); i++) {
810 0 : mSources[i]->SendDopplerShiftToStream(ComputeDopplerShift());
811 : }
812 : }
813 0 : }
814 :
815 :
816 : } // namespace dom
817 : } // namespace mozilla
|