Example #1
0
 /// <summary>
 ///     Creates a new instance of the <see cref="VoiceSendDescriptor" /> structure from a raw voice pointer.
 /// </summary>
 /// <param name="flags">The <see cref="VoiceSendFlags"/>. Must be either <see cref="VoiceSendFlags.None"/> or <see cref="VoiceSendFlags.UseFilter"/>.</param>
 /// <param name="outputVoicePtr">Pointer to the destination voice. Must not be <see cref="IntPtr.Zero"/>.</param>
 public VoiceSendDescriptor(VoiceSendFlags flags, IntPtr outputVoicePtr)
 {
     // Only None and UseFilter are meaningful send flags; anything else is rejected up front.
     bool flagsAreValid = flags == VoiceSendFlags.None || flags == VoiceSendFlags.UseFilter;
     if (!flagsAreValid)
     {
         throw new InvalidEnumArgumentException("flags", (int)flags, typeof(VoiceSendFlags));
     }

     // A null destination pointer would produce an unusable send descriptor.
     if (outputVoicePtr == IntPtr.Zero)
     {
         throw new ArgumentException("Must not be Zero.", "outputVoicePtr");
     }

     Flags = flags;
     OutputVoicePtr = outputVoicePtr;
 }
Example #2
0
 /// <summary>
 ///     Creates a new instance of the <see cref="VoiceSendDescriptor" /> structure that targets an existing voice.
 /// </summary>
 /// <param name="flags">The <see cref="VoiceSendFlags"/>. Must be either <see cref="VoiceSendFlags.None"/> or <see cref="VoiceSendFlags.UseFilter"/>.</param>
 /// <param name="outputVoice">The destination voice. Must not be null.</param>
 public VoiceSendDescriptor(VoiceSendFlags flags, XAudio2Voice outputVoice)
 {
     // Reject every flag combination other than the two the API defines.
     switch (flags)
     {
         case VoiceSendFlags.None:
         case VoiceSendFlags.UseFilter:
             break;
         default:
             throw new InvalidEnumArgumentException("flags", (int)flags, typeof(VoiceSendFlags));
     }

     if (outputVoice == null)
     {
         throw new ArgumentNullException("outputVoice");
     }

     Flags = flags;
     // Store the native pointer of the managed wrapper as the send target.
     OutputVoicePtr = outputVoice.BasePtr;
 }
Example #3
0
 /// <summary>
 ///     Creates a new instance of the <see cref="VoiceSendDescriptor" /> structure.
 /// </summary>
 /// <param name="flags">The <see cref="VoiceSendFlags"/>. Must be either <see cref="VoiceSendFlags.None"/> or <see cref="VoiceSendFlags.UseFilter"/>.</param>
 /// <param name="outputVoicePtr">Pointer to the destination voice. Must not be <see cref="IntPtr.Zero"/>.</param>
 public VoiceSendDescriptor(VoiceSendFlags flags, IntPtr outputVoicePtr)
 {
     // Validate the flags first so an invalid enum value wins over a null pointer.
     switch (flags)
     {
         case VoiceSendFlags.None:
         case VoiceSendFlags.UseFilter:
             break;
         default:
             throw new InvalidEnumArgumentException("flags", (int)flags, typeof(VoiceSendFlags));
     }

     if (outputVoicePtr == IntPtr.Zero)
     {
         throw new ArgumentException("Must not be Zero.", "outputVoicePtr");
     }

     OutputVoicePtr = outputVoicePtr;
     Flags          = flags;
 }
Example #4
0
 /// <summary>
 ///     Creates a new instance of the <see cref="VoiceSendDescriptor" /> structure.
 /// </summary>
 /// <param name="flags">The <see cref="VoiceSendFlags"/>. Must be either <see cref="VoiceSendFlags.None"/> or <see cref="VoiceSendFlags.UseFilter"/>.</param>
 /// <param name="outputVoice">The destination voice. Must not be null.</param>
 public VoiceSendDescriptor(VoiceSendFlags flags, XAudio2Voice outputVoice)
 {
     // The API only defines two legal values for the send flags.
     bool isSupportedFlag = flags == VoiceSendFlags.None || flags == VoiceSendFlags.UseFilter;
     if (!isSupportedFlag)
     {
         throw new InvalidEnumArgumentException("flags", (int)flags, typeof(VoiceSendFlags));
     }

     if (outputVoice == null)
     {
         throw new ArgumentNullException("outputVoice");
     }

     Flags          = flags;
     OutputVoicePtr = outputVoice.BasePtr;
 }
        /// <summary>
        ///     Applies 3D positional processing to this voice: refreshes the cached listener/emitter
        ///     state from the supplied transforms, asks the audio manager to compute X3DAudio DSP
        ///     settings, then pushes the resulting doppler ratio, output matrix and — when reverb is
        ///     enabled — reverb send levels and low-pass filter parameters down to the source voice.
        /// </summary>
        /// <param name="listenerForward">Listener forward orientation vector.</param>
        /// <param name="listenerUp">Listener up orientation vector.</param>
        /// <param name="listenerPosition">Listener position.</param>
        /// <param name="listenerVelocity">Listener velocity (feeds the doppler calculation).</param>
        /// <param name="emitterForward">Emitter forward orientation vector.</param>
        /// <param name="emitterUp">Emitter up orientation vector.</param>
        /// <param name="emitterPosition">Emitter position.</param>
        /// <param name="emitterVelocity">Emitter velocity (feeds the doppler calculation).</param>
        /// <exception cref="InvalidOperationException">Spatial audio is not enabled on the audio manager.</exception>
        private void Apply3D(Vector3 listenerForward, Vector3 listenerUp, Vector3 listenerPosition, Vector3 listenerVelocity, Vector3 emitterForward, Vector3 emitterUp, Vector3 emitterPosition, Vector3 emitterVelocity)
        {
            if (!Effect.AudioManager.IsSpatialAudioEnabled)
            {
                throw new InvalidOperationException("Spatial audio must be enabled first.");
            }

            // Lazily create the emitter once; every field it uses is rewritten below on each call.
            if (emitter == null)
            {
                emitter = new Emitter();
            }

            emitter.OrientFront         = emitterForward;
            emitter.OrientTop           = emitterUp;
            emitter.Position            = emitterPosition;
            emitter.Velocity            = emitterVelocity;
            // Global scale factors come from SoundEffect statics — presumably shared by all instances.
            emitter.DopplerScaler       = SoundEffect.DopplerScale;
            emitter.CurveDistanceScaler = SoundEffect.DistanceScale;
            emitter.ChannelCount        = Effect.Format.Channels;

            //TODO: work out what ChannelAzimuths is supposed to be.
            if (emitter.ChannelCount > 1)
            {
                // Multi-channel emitters need one azimuth per channel; zero-filled for now.
                emitter.ChannelAzimuths = new float[emitter.ChannelCount];
            }

            // Lazily create the listener; refreshed on every call, like the emitter.
            if (listener == null)
            {
                listener = new Listener();
            }

            listener.OrientFront = listenerForward;
            listener.OrientTop   = listenerUp;
            listener.Position    = listenerPosition;
            listener.Velocity    = listenerVelocity;

            // DSP settings buffer sized source-channels x mastering-voice-input-channels, reused across calls.
            if (dspSettings == null)
            {
                dspSettings = new DspSettings(Effect.Format.Channels, Effect.AudioManager.MasteringVoice.VoiceDetails.InputChannelCount);
            }

            // Always compute the output matrix, doppler factor and the direct-path LPF coefficient.
            CalculateFlags flags = CalculateFlags.Matrix | CalculateFlags.Doppler | CalculateFlags.LpfDirect;

            if ((Effect.AudioManager.Speakers & Speakers.LowFrequency) > 0)
            {
                // On devices with an LFE channel, allow the mono source data to be routed to the LFE destination channel.
                flags |= CalculateFlags.RedirectToLfe;
            }

            if (Effect.AudioManager.IsReverbEffectEnabled)
            {
                flags |= CalculateFlags.Reverb | CalculateFlags.LpfReverb;

                // One-time routing: send this voice to both the mastering voice and the reverb
                // submix; the send list persists on the voice, so only set it once.
                if (!isReverbSubmixEnabled)
                {
                    VoiceSendFlags        sendFlags    = Effect.AudioManager.IsReverbFilterEnabled ? VoiceSendFlags.UseFilter : VoiceSendFlags.None;
                    VoiceSendDescriptor[] outputVoices = new VoiceSendDescriptor[]
                    {
                        new VoiceSendDescriptor {
                            OutputVoice = Effect.AudioManager.MasteringVoice, Flags = sendFlags
                        },
                        new VoiceSendDescriptor {
                            OutputVoice = Effect.AudioManager.ReverbVoice, Flags = sendFlags
                        }
                    };

                    voice.SetOutputVoices(outputVoices);
                    isReverbSubmixEnabled = true;
                }
            }

            // Compute the DSP settings from the listener/emitter pair, then apply them below.
            Effect.AudioManager.Calculate3D(listener, emitter, flags, dspSettings);

            voice.SetFrequencyRatio(dspSettings.DopplerFactor);
            voice.SetOutputMatrix(Effect.AudioManager.MasteringVoice, dspSettings.SourceChannelCount, dspSettings.DestinationChannelCount, dspSettings.MatrixCoefficients);

            if (Effect.AudioManager.IsReverbEffectEnabled)
            {
                // One reverb send level per source channel, all set to the computed reverb level.
                if (reverbLevels == null || reverbLevels.Length != Effect.Format.Channels)
                {
                    reverbLevels = new float[Effect.Format.Channels];
                }

                for (int i = 0; i < reverbLevels.Length; i++)
                {
                    reverbLevels[i] = dspSettings.ReverbLevel;
                }

                voice.SetOutputMatrix(Effect.AudioManager.ReverbVoice, Effect.Format.Channels, 1, reverbLevels);
            }

            if (Effect.AudioManager.IsReverbFilterEnabled)
            {
                // Direct-path low-pass filter toward the mastering voice.
                FilterParameters filterDirect = new FilterParameters
                {
                    Type = FilterType.LowPassFilter,
                    // see XAudio2CutoffFrequencyToRadians() in XAudio2.h for more information on the formula used here
                    Frequency = 2.0f * (float)Math.Sin(X3DAudio.PI / 6.0f * dspSettings.LpfDirectCoefficient),
                    OneOverQ  = 1.0f
                };

                voice.SetOutputFilterParameters(Effect.AudioManager.MasteringVoice, filterDirect);

                if (Effect.AudioManager.IsReverbEffectEnabled)
                {
                    // Separate low-pass filter for the reverb-path send.
                    FilterParameters filterReverb = new FilterParameters
                    {
                        Type = FilterType.LowPassFilter,
                        // see XAudio2CutoffFrequencyToRadians() in XAudio2.h for more information on the formula used here
                        Frequency = 2.0f * (float)Math.Sin(X3DAudio.PI / 6.0f * dspSettings.LpfReverbCoefficient),
                        OneOverQ  = 1.0f
                    };

                    voice.SetOutputFilterParameters(Effect.AudioManager.ReverbVoice, filterReverb);
                }
            }
        }
Example #6
0
 /// <summary>
 /// Initializes a new instance of the <see cref="VoiceSendDescriptor"/> struct
 /// from a send-flags value and a destination voice.
 /// </summary>
 /// <param name="flags">Flags controlling how audio is sent to <paramref name="outputVoice"/>.</param>
 /// <param name="outputVoice">The voice that receives this voice's output.</param>
 public VoiceSendDescriptor(VoiceSendFlags flags, Voice outputVoice)
 {
     Flags = flags;
     OutputVoice = outputVoice;
 }
Example #7
0
 /// <summary>
 /// Initializes a new instance of the <see cref="VoiceSendDescriptor"/> struct.
 /// </summary>
 /// <param name="flags">The send flags.</param>
 /// <param name="outputVoice">The output voice.</param>
 public VoiceSendDescriptor(VoiceSendFlags flags, Voice outputVoice)
 {
     Flags = flags;
     // Zero the raw pointer field before assigning the managed voice reference.
     OutputVoicePointer = IntPtr.Zero;
     OutputVoice = outputVoice;
 }
Example #8
0
 /// <summary>
 /// Initializes a new instance of the <see cref="VoiceSendDescriptor"/> struct
 /// for the given flags and output voice.
 /// </summary>
 /// <param name="flags">The send flags.</param>
 /// <param name="outputVoice">The output voice.</param>
 public VoiceSendDescriptor(VoiceSendFlags flags, Voice outputVoice)
 {
     Flags              = flags;
     // Clear the native pointer first, then set the managed voice reference.
     OutputVoicePointer = IntPtr.Zero;
     OutputVoice        = outputVoice;
 }
Example #9
0
 /// <summary>
 /// Creates and configures a submix voice on the given XAudio2 engine.
 /// </summary>
 /// <param name="device">an instance of <see cref = "SharpDX.XAudio2.XAudio2" /></param>
 /// <param name="inputChannels">[in] Number of channels in the input audio data of the submix voice. Must be less than or equal to XAUDIO2_MAX_AUDIO_CHANNELS.</param>
 /// <param name="inputSampleRate">[in] Sample rate of the input audio data of the submix voice. Must be a multiple of XAUDIO2_QUANTUM_DENOMINATOR and between XAUDIO2_MIN_SAMPLE_RATE and XAUDIO2_MAX_SAMPLE_RATE.</param>
 /// <param name="flags">[in] Flags that specify the behavior of the submix voice. Can be 0 or XAUDIO2_VOICE_USEFILTER (the filter effect should be available on this voice).</param>
 /// <param name="processingStage">[in] An arbitrary number ordering this voice relative to other submix voices: the voice is processed after all voices with a smaller ProcessingStage value and before all voices with a larger one. A submix voice cannot send to another submix voice with a lower or equal ProcessingStage value; this prevents audio being lost due to a submix cycle.</param>
 /// <returns>No documentation.</returns>
 /// <unmanaged>HRESULT IXAudio2::CreateSubmixVoice([Out] IXAudio2SubmixVoice** ppSubmixVoice,[None] UINT32 InputChannels,[None] UINT32 InputSampleRate,[None] UINT32 Flags,[None] UINT32 ProcessingStage,[In, Optional] const XAUDIO2_VOICE_SENDS* pSendList,[In, Optional] const XAUDIO2_EFFECT_CHAIN* pEffectChain)</unmanaged>
 public SubmixVoice(XAudio2 device, int inputChannels, int inputSampleRate, VoiceSendFlags flags, int processingStage)
     : base(IntPtr.Zero)
 {
     // The native call populates this wrapper's pointer; no send list or effect chain is supplied.
     device.CreateSubmixVoice(this, inputChannels, inputSampleRate, flags, processingStage, null, null);
 }
Example #10
0
 /// <summary>
 /// Creates and configures a submix voice.
 /// </summary>
 /// <param name="device">an instance of <see cref = "SharpDX.XAudio2.XAudio2" /></param>
 /// <param name="inputChannels">[in] Number of channels in the input audio data of the submix voice; at most XAUDIO2_MAX_AUDIO_CHANNELS.</param>
 /// <param name="inputSampleRate">[in] Sample rate of the input audio data; a multiple of XAUDIO2_QUANTUM_DENOMINATOR, between XAUDIO2_MIN_SAMPLE_RATE and XAUDIO2_MAX_SAMPLE_RATE.</param>
 /// <param name="flags">[in] Behavior flags for the submix voice: 0, or XAUDIO2_VOICE_USEFILTER to make the filter effect available on this voice.</param>
 /// <param name="processingStage">[in] Arbitrary ordering value: this voice is processed after all submix voices with a smaller ProcessingStage and before all with a larger one. A submix voice cannot send to another submix voice with a lower or equal ProcessingStage value, which prevents audio loss from submix cycles.</param>
 /// <returns>No documentation.</returns>
 /// <unmanaged>HRESULT IXAudio2::CreateSubmixVoice([Out] IXAudio2SubmixVoice** ppSubmixVoice,[None] UINT32 InputChannels,[None] UINT32 InputSampleRate,[None] UINT32 Flags,[None] UINT32 ProcessingStage,[In, Optional] const XAUDIO2_VOICE_SENDS* pSendList,[In, Optional] const XAUDIO2_EFFECT_CHAIN* pEffectChain)</unmanaged>
 public SubmixVoice(XAudio2 device, int inputChannels, int inputSampleRate, VoiceSendFlags flags, int processingStage)
     : base(IntPtr.Zero)
 {
     // Passing null for the send list routes output to the mastering voice by default;
     // null effect chain means no effects are attached at creation time.
     device.CreateSubmixVoice(this, inputChannels, inputSampleRate, flags, processingStage, null, null);
 }