Example #1
        //
        // Applies the Phonon effect to audio: decodes the mixed environmental
        // (wet) audio and adds it to the frame.
        //
        void OnAudioFilterRead(float[] data, int channels)
        {
            mutex.WaitOne();

            if (!initialized || destroying)
            {
                mutex.ReleaseMutex();
                return;
            }

            if ((data == null) || (environmentRenderer == IntPtr.Zero) || (!processMixedAudio) || (wetData == null) ||
                (wetAmbisonicsDataMarshal == null))
            {
                mutex.ReleaseMutex();
                return;
            }

#if !UNITY_ANDROID
            AudioBuffer wetAmbisonicsBuffer;
            wetAmbisonicsBuffer.audioFormat         = ambisonicsFormat;
            wetAmbisonicsBuffer.numSamples          = data.Length / channels;
            wetAmbisonicsBuffer.deInterleavedBuffer = wetAmbisonicsDataMarshal;
            wetAmbisonicsBuffer.interleavedBuffer   = null;
            PhononCore.iplGetMixedEnvironmentalAudio(environmentRenderer, listenerPosition, listenerAhead, listenerUp, wetAmbisonicsBuffer);

            AudioBuffer wetBufferMarshal;
            wetBufferMarshal.audioFormat = outputFormat;
            wetBufferMarshal.audioFormat.channelOrder = ChannelOrder.Deinterleaved;     // This buffer is stored deinterleaved.
            wetBufferMarshal.numSamples          = data.Length / channels;
            wetBufferMarshal.deInterleavedBuffer = wetDataMarshal;
            wetBufferMarshal.interleavedBuffer   = null;

            if ((outputFormat.channelLayout == ChannelLayout.Stereo) && indirectBinauralEnabled)
            {
                PhononCore.iplApplyAmbisonicsBinauralEffect(propagationBinauralEffect, wetAmbisonicsBuffer, wetBufferMarshal);
            }
            else
            {
                PhononCore.iplApplyAmbisonicsPanningEffect(propagationPanningEffect, wetAmbisonicsBuffer, wetBufferMarshal);
            }

            AudioBuffer wetBuffer;
            wetBuffer.audioFormat         = outputFormat;
            wetBuffer.numSamples          = data.Length / channels;
            wetBuffer.deInterleavedBuffer = null;
            wetBuffer.interleavedBuffer   = wetData;
            PhononCore.iplInterleaveAudioBuffer(wetBufferMarshal, wetBuffer);
#endif

            // Mix the decoded wet audio into the output frame.
            for (int i = 0; i < data.Length; ++i)
            {
                data[i] += wetData[i];
            }

            mutex.ReleaseMutex();
        }
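
All four examples assemble AudioBuffer values from the same four fields. Below is a minimal sketch of the interop types those fields imply; the field names come straight from the snippets, but the enum members, attributes, and anything omitted are assumptions about the Steam Audio C# bindings, not the bindings themselves.

using System;
using System.Runtime.InteropServices;

// Sketch of the interop types implied by the snippets above. The real
// bindings carry more detail (e.g. Ambisonics order and channel count).
public enum ChannelLayout { Mono, Stereo, Custom }
public enum ChannelOrder  { Interleaved, Deinterleaved }

[StructLayout(LayoutKind.Sequential)]
public struct AudioFormat
{
    public ChannelLayout channelLayout;   // speaker layout of the buffer
    public ChannelOrder  channelOrder;    // one interleaved array, or one array per channel
}

[StructLayout(LayoutKind.Sequential)]
public struct AudioBuffer
{
    public AudioFormat audioFormat;
    public int         numSamples;           // samples per channel in this frame
    public IntPtr[]    deInterleavedBuffer;  // set when channelOrder is Deinterleaved, else null
    public float[]     interleavedBuffer;    // set when channelOrder is Interleaved, else null
}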
Example #2
        //
        // Retrieves the mixed environmental (wet) audio, spatializes it, and adds it to the frame.
        //
        public void AudioFrameUpdate(float[] data, int channels, IntPtr environmentalRenderer, Vector3 listenerPosition,
                                     Vector3 listenerAhead, Vector3 listenerUp, bool indirectBinauralEnabled)
        {
#if !UNITY_ANDROID
            AudioBuffer ambisonicsBuffer;
            ambisonicsBuffer.audioFormat         = ambisonicsFormat;
            ambisonicsBuffer.numSamples          = data.Length / channels;
            ambisonicsBuffer.deInterleavedBuffer = wetAmbisonicsDataMarshal;
            ambisonicsBuffer.interleavedBuffer   = null;

            PhononCore.iplGetMixedEnvironmentalAudio(environmentalRenderer, listenerPosition, listenerAhead, listenerUp,
                                                     ambisonicsBuffer);

            AudioBuffer spatializedBuffer;
            spatializedBuffer.audioFormat = outputFormat;
            spatializedBuffer.audioFormat.channelOrder = ChannelOrder.Deinterleaved;     // This buffer is stored deinterleaved.
            spatializedBuffer.numSamples          = data.Length / channels;
            spatializedBuffer.deInterleavedBuffer = wetDataMarshal;
            spatializedBuffer.interleavedBuffer   = null;

            if ((outputFormat.channelLayout == ChannelLayout.Stereo) && indirectBinauralEnabled)
            {
                PhononCore.iplApplyAmbisonicsBinauralEffect(propagationBinauralEffect, ambisonicsBuffer, spatializedBuffer);
            }
            else
            {
                PhononCore.iplApplyAmbisonicsPanningEffect(propagationPanningEffect, ambisonicsBuffer, spatializedBuffer);
            }

            AudioBuffer interleavedBuffer;
            interleavedBuffer.audioFormat         = outputFormat;
            interleavedBuffer.numSamples          = data.Length / channels;
            interleavedBuffer.deInterleavedBuffer = null;
            interleavedBuffer.interleavedBuffer   = wetData;
            PhononCore.iplInterleaveAudioBuffer(spatializedBuffer, interleavedBuffer);

            // Mix the decoded wet audio into the output frame.
            for (int i = 0; i < data.Length; ++i)
            {
                data[i] += wetData[i];
            }
#endif
        }
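
Example #2 factors the wet-audio decode out of OnAudioFilterRead so a host component can drive it once per frame. A hypothetical caller might look like the sketch below; the class and field names and the coordinate conversion are illustrative assumptions, not the plugin's actual code.

using System;

// Hypothetical host behaviour that forwards each Unity audio frame to the
// AudioFrameUpdate method above. Vector3 here is the bindings' struct used
// in the signatures above, assumed to have public x, y, z fields.
public class PhononMixerHost : UnityEngine.MonoBehaviour
{
    PhononListener mixer;             // illustrative: assuming Example #2's method lives on PhononListener
    IntPtr environmentalRenderer;     // illustrative: created during scene initialization
    Vector3 listenerPosition, listenerAhead, listenerUp;
    bool indirectBinauralEnabled = true;

    void LateUpdate()
    {
        // Cache the listener vectors on the main thread; OnAudioFilterRead runs
        // on the audio thread, where Unity's Transform API must not be touched.
        listenerPosition = Convert(transform.position);
        listenerAhead    = Convert(transform.forward);
        listenerUp       = Convert(transform.up);
    }

    void OnAudioFilterRead(float[] data, int channels)
    {
        mixer.AudioFrameUpdate(data, channels, environmentalRenderer,
                               listenerPosition, listenerAhead, listenerUp,
                               indirectBinauralEnabled);
    }

    static Vector3 Convert(UnityEngine.Vector3 v)
    {
        // Unity is left-handed and Phonon right-handed, so z is negated (assumption).
        Vector3 p; p.x = v.x; p.y = v.y; p.z = -v.z;
        return p;
    }
}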
Example #3
        //
        // Sends the dry audio for convolution and returns the decoded wet audio for this
        // frame, or null when reflections are disabled or mixing is deferred to the listener.
        //
        public float[] AudioFrameUpdate(float[] data, int channels, Vector3 sourcePosition, Vector3 listenerPosition,
                                        Vector3 listenerAhead, Vector3 listenerUp, bool enableReflections, float indirectMixFraction,
                                        bool indirectBinauralEnabled, PhononListener phononListener)
        {
#if !UNITY_ANDROID
            // The input audio is copied and sent for indirect propagation effect processing.
            // This must happen before any other effect is applied to the input audio.
            if (enableReflections && (wetData != null) && (wetDataMarshal != null) && (wetAmbisonicsDataMarshal != null) && (propagationAmbisonicsEffect != IntPtr.Zero))
            {
                for (int i = 0; i < data.Length; ++i)
                {
                    wetData[i] = data[i] * indirectMixFraction;
                }

                AudioBuffer propagationInputBuffer;
                propagationInputBuffer.audioFormat         = inputFormat;
                propagationInputBuffer.numSamples          = wetData.Length / channels;
                propagationInputBuffer.deInterleavedBuffer = null;
                propagationInputBuffer.interleavedBuffer   = wetData;

                PhononCore.iplSetDryAudioForConvolutionEffect(propagationAmbisonicsEffect, sourcePosition, propagationInputBuffer);

                if (fourierMixingEnabled)
                {
                    // The listener decodes the mixed wet audio for all sources at once.
                    phononListener.processMixedAudio = true;
                    return null;
                }

                AudioBuffer wetAmbisonicsBuffer;
                wetAmbisonicsBuffer.audioFormat         = ambisonicsFormat;
                wetAmbisonicsBuffer.numSamples          = data.Length / channels;
                wetAmbisonicsBuffer.deInterleavedBuffer = wetAmbisonicsDataMarshal;
                wetAmbisonicsBuffer.interleavedBuffer   = null;
                PhononCore.iplGetWetAudioForConvolutionEffect(propagationAmbisonicsEffect, listenerPosition, listenerAhead, listenerUp, wetAmbisonicsBuffer);

                AudioBuffer wetBufferMarshal;
                wetBufferMarshal.audioFormat = outputFormat;
                wetBufferMarshal.audioFormat.channelOrder = ChannelOrder.Deinterleaved;     // This buffer is stored deinterleaved.
                wetBufferMarshal.numSamples          = data.Length / channels;
                wetBufferMarshal.deInterleavedBuffer = wetDataMarshal;
                wetBufferMarshal.interleavedBuffer   = null;

                if ((outputFormat.channelLayout == ChannelLayout.Stereo) && indirectBinauralEnabled)
                {
                    PhononCore.iplApplyAmbisonicsBinauralEffect(propagationBinauralEffect, wetAmbisonicsBuffer, wetBufferMarshal);
                }
                else
                {
                    PhononCore.iplApplyAmbisonicsPanningEffect(propagationPanningEffect, wetAmbisonicsBuffer, wetBufferMarshal);
                }

                AudioBuffer wetBuffer;
                wetBuffer.audioFormat         = outputFormat;
                wetBuffer.numSamples          = data.Length / channels;
                wetBuffer.deInterleavedBuffer = null;
                wetBuffer.interleavedBuffer   = wetData;
                PhononCore.iplInterleaveAudioBuffer(wetBufferMarshal, wetBuffer);

                return wetData;
            }
#endif

            return null;
        }
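
Every path above assumes that wetData, wetDataMarshal, and wetAmbisonicsDataMarshal were allocated up front for a fixed frame size. A plausible one-time allocation is sketched below; the method name, the sizing, and the use of AllocCoTaskMem are assumptions for illustration.

using System;
using System.Runtime.InteropServices;

// Assumed one-time setup for the wet-path buffers used in these examples.
// frameSize is Unity's DSP buffer size in samples per channel; order-N
// Ambisonics needs (N + 1)^2 channels.
void AllocateWetBuffers(int frameSize, int outputChannels, int ambisonicsChannels)
{
    // Interleaved managed buffer that the decoded wet audio ends up in.
    wetData = new float[frameSize * outputChannels];

    // Deinterleaved buffers cross the native boundary as one pointer per channel.
    wetDataMarshal = new IntPtr[outputChannels];
    for (int i = 0; i < outputChannels; ++i)
        wetDataMarshal[i] = Marshal.AllocCoTaskMem(sizeof(float) * frameSize);

    wetAmbisonicsDataMarshal = new IntPtr[ambisonicsChannels];
    for (int i = 0; i < ambisonicsChannels; ++i)
        wetAmbisonicsDataMarshal[i] = Marshal.AllocCoTaskMem(sizeof(float) * frameSize);

    // Everything allocated with AllocCoTaskMem must later be released with
    // Marshal.FreeCoTaskMem during teardown.
}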
Example #4
        //
        // Applies the Phonon 3D effect to dry audio.
        //
        void OnAudioFilterRead(float[] data, int channels)
        {
            mutex.WaitOne();

            if (!initialized || destroying)
            {
                mutex.ReleaseMutex();
                Array.Clear(data, 0, data.Length);
                return;
            }

            if (data == null)
            {
                mutex.ReleaseMutex();
                return;
            }

            float distanceAttenuation = (physicsBasedAttenuation) ? directSoundPath.distanceAttenuation : 1f;

            // Smooth the occlusion value across frames to avoid audible clicks.
            directAttnInterlop.Set(directSoundPath.occlusionFactor * directMixFraction);
            float   occlusionFactor = directAttnInterlop.Update();
            Vector3 directDirection = directSoundPath.direction;

            AudioBuffer inputBuffer;

            inputBuffer.audioFormat         = inputFormat;
            inputBuffer.numSamples          = data.Length / channels;
            inputBuffer.deInterleavedBuffer = null;
            inputBuffer.interleavedBuffer   = data;

            AudioBuffer outputBuffer;

            outputBuffer.audioFormat         = outputFormat;
            outputBuffer.numSamples          = data.Length / channels;
            outputBuffer.deInterleavedBuffer = null;
            outputBuffer.interleavedBuffer   = data;

            // The input audio is copied and sent for indirect propagation effect processing.
            // This must happen before any other effect is applied to the input audio.
#if !UNITY_ANDROID
            if (enableReflections && (wetData != null) && (wetDataMarshal != null) && (wetAmbisonicsDataMarshal != null) && (propagationAmbisonicsEffect != IntPtr.Zero))
            {
                for (int i = 0; i < data.Length; ++i)
                {
                    wetData[i] = data[i] * indirectMixFraction;
                }

                AudioBuffer propagationInputBuffer;
                propagationInputBuffer.audioFormat         = inputFormat;
                propagationInputBuffer.numSamples          = wetData.Length / channels;
                propagationInputBuffer.deInterleavedBuffer = null;
                propagationInputBuffer.interleavedBuffer   = wetData;

                PhononCore.iplSetDryAudioForConvolutionEffect(propagationAmbisonicsEffect, sourcePosition, propagationInputBuffer);
            }
#endif

            if ((outputFormat.channelLayout == ChannelLayout.Stereo) && directBinauralEnabled)
            {
                // Apply binaural audio to direct sound.
                PhononCore.iplApplyBinauralEffect(directBinauralEffect, inputBuffer, directDirection, hrtfInterpolation, outputBuffer);
            }
            else if (outputFormat.channelLayout == ChannelLayout.Custom)
            {
                // Apply panning for a custom speaker layout.
                PhononCore.iplApplyPanningEffect(directCustomPanningEffect, inputBuffer, directDirection, outputBuffer);
            }

            // Apply occlusion and distance attenuation to the direct sound.
            for (int i = 0; i < data.Length; ++i)
            {
                data[i] *= occlusionFactor * distanceAttenuation;
            }

#if !UNITY_ANDROID
            if (enableReflections && (wetData != null) && (wetDataMarshal != null) && (wetAmbisonicsDataMarshal != null) && (propagationAmbisonicsEffect != IntPtr.Zero))
            {
                if (fourierMixingEnabled)
                {
                    // The mixer decodes the mixed wet audio for all sources at once.
                    phononMixer.processMixedAudio = true;
                    mutex.ReleaseMutex();
                    return;
                }

                AudioBuffer wetAmbisonicsBuffer;
                wetAmbisonicsBuffer.audioFormat         = ambisonicsFormat;
                wetAmbisonicsBuffer.numSamples          = data.Length / channels;
                wetAmbisonicsBuffer.deInterleavedBuffer = wetAmbisonicsDataMarshal;
                wetAmbisonicsBuffer.interleavedBuffer   = null;
                PhononCore.iplGetWetAudioForConvolutionEffect(propagationAmbisonicsEffect, listenerPosition, listenerAhead, listenerUp, wetAmbisonicsBuffer);

                AudioBuffer wetBufferMarshal;
                wetBufferMarshal.audioFormat = outputFormat;
                wetBufferMarshal.audioFormat.channelOrder = ChannelOrder.Deinterleaved;     // This buffer is stored deinterleaved.
                wetBufferMarshal.numSamples          = data.Length / channels;
                wetBufferMarshal.deInterleavedBuffer = wetDataMarshal;
                wetBufferMarshal.interleavedBuffer   = null;

                if ((outputFormat.channelLayout == ChannelLayout.Stereo) && indirectBinauralEnabled)
                {
                    PhononCore.iplApplyAmbisonicsBinauralEffect(propagationBinauralEffect, wetAmbisonicsBuffer, wetBufferMarshal);
                }
                else
                {
                    PhononCore.iplApplyAmbisonicsPanningEffect(propagationPanningEffect, wetAmbisonicsBuffer, wetBufferMarshal);
                }

                AudioBuffer wetBuffer;
                wetBuffer.audioFormat         = outputFormat;
                wetBuffer.numSamples          = data.Length / channels;
                wetBuffer.deInterleavedBuffer = null;
                wetBuffer.interleavedBuffer   = wetData;
                PhononCore.iplInterleaveAudioBuffer(wetBufferMarshal, wetBuffer);

                // Mix the decoded wet audio into the output frame.
                for (int i = 0; i < data.Length; ++i)
                {
                    data[i] += wetData[i];
                }
            }
#endif

            mutex.ReleaseMutex();
        }
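
The directAttnInterlop object smooths the occlusion value between frames so that per-frame jumps in the simulation output don't produce audible clicks. Its class isn't shown in these examples; a minimal interpolator in the same spirit, matching the Set/Update calls above, could look like this (the step size and per-frame granularity are assumptions):

using System;

// Minimal sketch of a per-frame attenuation smoother matching the
// directAttnInterlop usage above; the plugin's real class may differ.
public class AttenuationInterpolator
{
    float current;         // smoothed value returned to the audio path
    float target;          // latest raw value from the simulation
    readonly float step;   // maximum change allowed per audio frame

    public AttenuationInterpolator(float step = 0.05f)
    {
        this.step = step;
    }

    // Record the newest raw occlusion value.
    public void Set(float value)
    {
        target = value;
    }

    // Advance one audio frame: move a bounded step toward the target.
    public float Update()
    {
        float delta = Math.Max(-step, Math.Min(step, target - current));
        current += delta;
        return current;
    }
}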