Example #1
        /// <summary>	
        /// Creates and configures a submix voice with an effect chain.	
        /// </summary>	
        /// <param name="device">an instance of <see cref = "SharpDX.XAudio2.XAudio2" /></param>
        /// <param name="inputChannels">[in]  Number of channels in the input audio data of the submix voice. InputChannels must be less than or equal to XAUDIO2_MAX_AUDIO_CHANNELS. </param>
        /// <param name="inputSampleRate">[in]  Sample rate of the input audio data of submix voice. This rate must be a multiple of  XAUDIO2_QUANTUM_DENOMINATOR.  InputSampleRate must be between XAUDIO2_MIN_SAMPLE_RATE and XAUDIO2_MAX_SAMPLE_RATE. </param>
        /// <param name="flags">[in]  Flags that specify the behavior of the submix voice. Can be 0 or the following: ValueDescriptionXAUDIO2_VOICE_USEFILTERThe filter effect should be available on this voice.? </param>
        /// <param name="processingStage">[in]  An arbitrary number that specifies when this voice is processed with respect to other submix  voices, if the XAudio2 engine is running other submix voices. The voice is processed after all other  voices that include a smaller ProcessingStage value, and before all other voices  that include a larger ProcessingStage value. Voices that include the same  ProcessingStage value are processed in any order. A submix voice cannot send to  another submix voice with a lower or equal ProcessingStage value; this prevents  audio being lost due to a submix cycle. </param>
        /// <param name="effectDescriptors">[in, optional] Pointer to a list of XAUDIO2_EFFECT_CHAIN structures that describe an effect chain to use in the submix voice.</param>
        /// <returns>No documentation.</returns>
        /// <unmanaged>HRESULT IXAudio2::CreateSubmixVoice([Out] IXAudio2SubmixVoice** ppSubmixVoice,[None] UINT32 InputChannels,[None] UINT32 InputSampleRate,[None] UINT32 Flags,[None] UINT32 ProcessingStage,[In, Optional] const XAUDIO2_VOICE_SENDS* pSendList,[In, Optional] const XAUDIO2_EFFECT_CHAIN* pEffectChain)</unmanaged>
        public SubmixVoice(XAudio2 device, int inputChannels, int inputSampleRate, SubmixVoiceFlags flags, int processingStage, EffectDescriptor[] effectDescriptors)
            : base(IntPtr.Zero)
        {

            if (effectDescriptors != null)
            {
                unsafe
                {
                    var tempSendDescriptor = new EffectChain();
                    var effectDescriptorNatives = new EffectDescriptor.__Native[effectDescriptors.Length];
                    for (int i = 0; i < effectDescriptorNatives.Length; i++)
                        effectDescriptors[i].__MarshalTo(ref effectDescriptorNatives[i]);
                    tempSendDescriptor.EffectCount = effectDescriptorNatives.Length;
                    fixed (void* pEffectDescriptors = &effectDescriptorNatives[0])
                    {
                        tempSendDescriptor.EffectDescriptorPointer = (IntPtr)pEffectDescriptors;
                        device.CreateSubmixVoice(this, inputChannels, inputSampleRate, unchecked((int)flags), processingStage, null, tempSendDescriptor);
                    }
                }
            }
            else
            {
                device.CreateSubmixVoice(this, inputChannels, inputSampleRate, unchecked((int)flags), processingStage, null, null);
            }
        }
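A minimal usage sketch for the constructor above; the XAudio2/Reverb setup and the channel count, sample rate, and flag values are illustrative assumptions, not taken from the snippet itself.

// Illustrative usage of the SubmixVoice constructor shown above (values are assumptions).
var xaudio2 = new XAudio2();
var masteringVoice = new MasteringVoice(xaudio2);

var reverb = new Reverb(xaudio2);
var reverbDescriptor = new EffectDescriptor(reverb);

var submixVoice = new SubmixVoice(
    xaudio2,
    2,                           // inputChannels: stereo
    44100,                       // inputSampleRate
    SubmixVoiceFlags.None,       // flags
    0,                           // processingStage
    new[] { reverbDescriptor }); // effectDescriptors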
Example #2
        public PlayForm()
        {
            InitializeComponent();

            // Initialize XAudio2
            xaudio2 = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.DefaultProcessor);
            masteringVoice = new MasteringVoice(xaudio2);

            var waveFormat = new WaveFormat(44100, 32, 2);
            sourceVoice = new SourceVoice(xaudio2, waveFormat);

            int bufferSize = waveFormat.ConvertLatencyToByteSize(60000);
            DataStream dataStream = new DataStream(bufferSize, true, true);

            // Prepare the initial sound to modulate
            int numberOfSamples = bufferSize / waveFormat.BlockAlign;
            for (int i = 0; i < numberOfSamples; i++)
            {
                float value = (float)(Math.Cos(2 * Math.PI * 220.0 * i / waveFormat.SampleRate) * 0.5);
                dataStream.Write(value);
                dataStream.Write(value);
            }
            dataStream.Position = 0;

            audioBuffer = new AudioBuffer
                              {
                                  Stream = dataStream,
                                  Flags = BufferFlags.EndOfStream,
                                  AudioBytes = bufferSize,
                                  LoopBegin = 0,
                                  LoopLength = numberOfSamples,
                                  LoopCount = AudioBuffer.LoopInfinite
                              };

            // Set the effect on the source
            ModulatorEffect = new ModulatorEffect();
            modulatorDescriptor = new EffectDescriptor(ModulatorEffect);
            reverb = new Reverb(xaudio2);
            effectDescriptor = new EffectDescriptor(reverb);
            //sourceVoice.SetEffectChain(modulatorDescriptor, effectDescriptor);
            sourceVoice.SetEffectChain(modulatorDescriptor);
            //sourceVoice.EnableEffect(0);

            this.Closed += new EventHandler(PlayForm_Closed);
        }
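The PlayForm_Closed handler registered above is not included in this excerpt; a plausible cleanup sketch, assuming only the fields created in the constructor, would be:

void PlayForm_Closed(object sender, EventArgs e)
{
    // Sketch only: stop playback and release the XAudio2 objects created in the constructor.
    sourceVoice.Stop();
    sourceVoice.Dispose();
    masteringVoice.Dispose();
    xaudio2.Dispose();
}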
Example #3
        /// <summary>
        /// SharpDX XAudio2 sample. Plays a generated sound with some reverb.
        /// </summary>
        static void Main(string[] args)
        {
            var xaudio2 = new XAudio2();
            var masteringVoice = new MasteringVoice(xaudio2);

            var waveFormat = new WaveFormat(44100, 32, 2);
            var sourceVoice = new SourceVoice(xaudio2, waveFormat);

            int bufferSize = waveFormat.ConvertLatencyToByteSize(60000);
            var dataStream = new DataStream(bufferSize, true, true);

            int numberOfSamples = bufferSize/waveFormat.BlockAlign;
            for (int i = 0; i < numberOfSamples; i++)
            {
                double vibrato = Math.Cos(2 * Math.PI * 10.0 * i / waveFormat.SampleRate);
                float value = (float) (Math.Cos(2*Math.PI*(220.0 + 4.0*vibrato)*i/waveFormat.SampleRate)*0.5); 
                dataStream.Write(value);
                dataStream.Write(value);
            }
            dataStream.Position = 0;

            var audioBuffer = new AudioBuffer {Stream = dataStream, Flags = BufferFlags.EndOfStream, AudioBytes = bufferSize};

            var reverb = new Reverb();
            var effectDescriptor = new EffectDescriptor(reverb);
            sourceVoice.SetEffectChain(effectDescriptor);
            sourceVoice.EnableEffect(0);

            sourceVoice.SubmitSourceBuffer(audioBuffer, null);

            sourceVoice.Start();

            Console.WriteLine("Play sound");
            for(int i = 0; i < 60; i++)
            {
                Console.Write(".");
                Console.Out.Flush();
                Thread.Sleep(1000);
            }
        }
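The fixed 60-iteration loop above simply keeps the process alive for roughly the length of the generated buffer; as an alternative sketch, playback completion can be detected by polling SourceVoice.State (the polling interval and the explicit teardown below are illustrative, not part of the sample).

// Alternative to the fixed loop: wait until the submitted buffer has drained, then clean up.
while (sourceVoice.State.BuffersQueued > 0)
{
    Thread.Sleep(10);
}
sourceVoice.Dispose();
masteringVoice.Dispose();
xaudio2.Dispose();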
Example #4
        private void StartEngine()
        {
            if (m_audioEngine != null)
            {
                DisposeVoices();
                m_audioEngine.CriticalError -= m_audioEngine_CriticalError;
                m_audioEngine.Dispose();
            }

            // Init/reinit engine
            m_audioEngine = new XAudio2(XAudio2Version.Version27);

            // A way to disable SharpDX callbacks
            //var meth = m_audioEngine.GetType().GetMethod("UnregisterForCallbacks_", System.Reflection.BindingFlags.NonPublic | System.Reflection.BindingFlags.Instance);
            //var callbacks = m_audioEngine.GetType().GetField("engineShadowPtr", System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic);
            //meth.Invoke((object)m_audioEngine, new object[] { callbacks.GetValue(m_audioEngine) });

            m_audioEngine.CriticalError += m_audioEngine_CriticalError;
            m_lastDeviceCount = m_audioEngine.DeviceCount;


            m_deviceNumber = 0;
            while (true) // find first non-communications device
            {
                try
                {
                    m_deviceDetails = m_audioEngine.GetDeviceDetails(m_deviceNumber);
                    if (m_deviceDetails.Role == DeviceRole.DefaultCommunicationsDevice)
                    {
                        m_deviceNumber++;
                        if (m_deviceNumber == m_audioEngine.DeviceCount)
                        {
                            m_deviceNumber--;
                            break;
                        }
                    }
                    else
                        break;
                }
                catch(Exception e)
                {
                    MyLog.Default.WriteLine(string.Format("Failed to get device details.\n\tdevice no.: {0}\n\tdevice count: {1}",m_deviceNumber, m_audioEngine.DeviceCount),LoggingOptions.AUDIO);
                    MyLog.Default.WriteLine(e.ToString());
                    m_deviceNumber = 0;
                    m_deviceDetails = m_audioEngine.GetDeviceDetails(m_deviceNumber);
                    break;
                }
            }

            m_masterVoice = new MasteringVoice(m_audioEngine, XAudio2.DefaultChannels, XAudio2.DefaultSampleRate, m_deviceNumber);
            
            if (m_useVolumeLimiter)
            {
                var limiter = new SharpDX.XAPO.Fx.MasteringLimiter(m_audioEngine);
                var param = limiter.Parameter;
                param.Loudness = 0;
                limiter.Parameter = param;
                //TODO: this throws exception in 3.0.1 version
                var effectDescriptor = new EffectDescriptor(limiter);
                m_masterVoice.SetEffectChain(effectDescriptor);
                m_soundLimiterReady = true;
                m_masterVoice.DisableEffect(0);
                //m_masterVoice.EnableEffect(0);
                //limiter.Dispose();
            }

            m_calculateFlags = CalculateFlags.Matrix | CalculateFlags.Doppler;
            if ((m_deviceDetails.OutputFormat.ChannelMask & Speakers.LowFrequency) != 0)
            {
                m_calculateFlags |= CalculateFlags.RedirectToLfe;
            }

            var masterDetails = m_masterVoice.VoiceDetails;
            m_gameAudioVoice = new SubmixVoice(m_audioEngine, masterDetails.InputChannelCount, masterDetails.InputSampleRate);
            m_musicAudioVoice = new SubmixVoice(m_audioEngine, masterDetails.InputChannelCount, masterDetails.InputSampleRate);
            m_hudAudioVoice = new SubmixVoice(m_audioEngine, masterDetails.InputChannelCount, masterDetails.InputSampleRate);
            m_gameAudioVoiceDesc = new VoiceSendDescriptor[] { new VoiceSendDescriptor(m_gameAudioVoice) };
            m_musicAudioVoiceDesc = new VoiceSendDescriptor[] { new VoiceSendDescriptor(m_musicAudioVoice) };
            m_hudAudioVoiceDesc = new VoiceSendDescriptor[] { new VoiceSendDescriptor(m_hudAudioVoice) };

            if (m_mute)
            { // keep sounds muted 
                m_gameAudioVoice.SetVolume(0);
                m_musicAudioVoice.SetVolume(0);
            }

            m_reverb = new Reverb(m_audioEngine);
            m_gameAudioVoice.SetEffectChain(new EffectDescriptor(m_reverb, masterDetails.InputChannelCount));
            m_gameAudioVoice.DisableEffect(0);
        }
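DisposeVoices, called at the top of StartEngine, is not part of this excerpt; a sketch of what it could look like, using only the voice fields created above:

void DisposeVoices()
{
    // Sketch only: release the voices created by StartEngine before the engine is recreated.
    if (m_hudAudioVoice != null) m_hudAudioVoice.Dispose();
    if (m_musicAudioVoice != null) m_musicAudioVoice.Dispose();
    if (m_gameAudioVoice != null) m_gameAudioVoice.Dispose();
    if (m_masterVoice != null) m_masterVoice.Dispose();
}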
Example #5
 private void CreateSourceVoice(XAudio2 device, SharpDX.Multimedia.WaveFormat sourceFormat, SharpDX.XAudio2.VoiceFlags flags, float maxFrequencyRatio, IntPtr callback, EffectDescriptor[] effectDescriptors)
 {
     var waveformatPtr = WaveFormat.MarshalToPtr(sourceFormat);
     try
     {
         if (effectDescriptors != null)
         {
             unsafe
             {
                 var tempSendDescriptor = new EffectChain();
                 var effectDescriptorNatives = new EffectDescriptor.__Native[effectDescriptors.Length];
                 for (int i = 0; i < effectDescriptorNatives.Length; i++)
                     effectDescriptors[i].__MarshalTo(ref effectDescriptorNatives[i]);
                 tempSendDescriptor.EffectCount = effectDescriptorNatives.Length;
                 fixed (void* pEffectDescriptors = &effectDescriptorNatives[0])
                 {
                     tempSendDescriptor.EffectDescriptorPointer = (IntPtr)pEffectDescriptors;
                     device.CreateSourceVoice_(this, waveformatPtr, flags, maxFrequencyRatio, callback, null, tempSendDescriptor);
                 }
             }
         }
         else
         {
             device.CreateSourceVoice_(this, waveformatPtr, flags, maxFrequencyRatio, callback, null, null);
         }
     }
     finally
     {
         Marshal.FreeHGlobal(waveformatPtr);
     }
 }
Example #6
 /// <summary>
 /// Creates and configures a source voice with callback through delegates.
 /// </summary>
 /// <param name="device">an instance of <see cref="SharpDX.XAudio2.XAudio2" /></param>
 /// <param name="sourceFormat">[in]  Pointer to a <see cref="SharpDX.Multimedia.WaveFormat" /> structure. This structure contains the expected format for all audio buffers submitted to the source voice. XAudio2 supports voice types of PCM, xWMA, ADPCM (Windows only), and XMA (Xbox 360 only). XAudio2 supports the following PCM formats.   8-bit (unsigned) integer PCM   16-bit integer PCM (Optimal format for XAudio2)   20-bit integer PCM (either in 24 or 32 bit containers)   24-bit integer PCM (either in 24 or 32 bit containers)   32-bit integer PCM   32-bit float PCM (Preferred format after 16-bit integer)   The number of channels in a source voice must be less than or equal to XAUDIO2_MAX_AUDIO_CHANNELS. The sample rate of a source voice must be between XAUDIO2_MIN_SAMPLE_RATE and XAUDIO2_MAX_SAMPLE_RATE. Note Data formats such as XMA, {{ADPCM}}, and {{xWMA}} that require more information than provided by <see cref="SharpDX.Multimedia.WaveFormat" /> have a <see cref="SharpDX.Multimedia.WaveFormat" /> structure as the first member in their format structure. When creating a source voice with one of those formats cast the format's structure as a <see cref="SharpDX.Multimedia.WaveFormat" /> structure and use it as the value for pSourceFormat.</param>
 /// <param name="flags">[in]  Flags that specify the behavior of the source voice. A flag can be 0 or a combination of one or more of the following: ValueDescriptionXAUDIO2_VOICE_NOPITCHNo pitch control is available on the voice.?XAUDIO2_VOICE_NOSRCNo sample rate conversion is available on the voice, the voice's  outputs must have the same sample rate.Note The XAUDIO2_VOICE_NOSRC flag causes the voice to behave as though the XAUDIO2_VOICE_NOPITCH flag also is specified. ?XAUDIO2_VOICE_USEFILTERThe filter effect should be available on this voice.?XAUDIO2_VOICE_MUSICThe voice is used to play background music. The system automatically  can replace the voice with music selected by the user.?</param>
 /// <param name="maxFrequencyRatio">[in]  Highest allowable frequency ratio that can be set on this voice. The value for this argument must be between XAUDIO2_MIN_FREQ_RATIO and XAUDIO2_MAX_FREQ_RATIO. Subsequent calls to <see cref="SharpDX.XAudio2.SourceVoice.SetFrequencyRatio" /> are clamped between XAUDIO2_MIN_FREQ_RATIO and MaxFrequencyRatio. The maximum value for this argument is defined as XAUDIO2_MAX_FREQ_RATIO, which allows pitch to be raised by up to 10 octaves. If MaxFrequencyRatio is less than 1.0, the voice will use that ratio immediately after being created (rather than the default of 1.0). Xbox 360  For XMA voices there is an additional restriction on the MaxFrequencyRatio argument and the voice's sample rate. The product of these two numbers cannot exceed XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MONO for one-channel voices or XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MULTICHANNEL for voices with any other number of channels. If the value specified for MaxFrequencyRatio is too high for the specified format, the call to CreateSourceVoice fails and produces a debug message.  Note XAudio2's memory usage can be reduced by using the lowest possible MaxFrequencyRatio value.</param>
 /// <param name="enableCallbackEvents">if set to <c>true</c> [enable callback events].</param>
 /// <param name="effectDescriptors">[in, optional] Pointer to a list of XAUDIO2_EFFECT_CHAIN structures that describe an effect chain to use in the source voice.</param>
 /// <returns>No documentation.</returns>
 ///   <unmanaged>HRESULT IXAudio2::CreateSourceVoice([Out] IXAudio2SourceVoice** ppSourceVoice,[In] const WAVEFORMATEX* pSourceFormat,[None] UINT32 Flags,[None] float MaxFrequencyRatio,[In, Optional] IXAudio2VoiceCallback* pCallback,[In, Optional] const XAUDIO2_VOICE_SENDS* pSendList,[In, Optional] const XAUDIO2_EFFECT_CHAIN* pEffectChain)</unmanaged>
 public SourceVoice(XAudio2 device, SharpDX.Multimedia.WaveFormat sourceFormat, SharpDX.XAudio2.VoiceFlags flags, float maxFrequencyRatio, bool enableCallbackEvents, EffectDescriptor[] effectDescriptors)
     : base(device)
 {
     CreateSourceVoice(device, sourceFormat, flags, maxFrequencyRatio, enableCallbackEvents ? VoiceShadow.ToIntPtr(voiceCallbackImpl = new VoiceCallbackImpl(this)) : IntPtr.Zero, effectDescriptors);
 }
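A usage sketch for this overload; the wave format, frequency ratio, and event subscription below are illustrative assumptions, relying on the managed events SharpDX raises on the SourceVoice when enableCallbackEvents is true.

// Sketch only: enable callback events so the voice raises BufferEnd/StreamEnd as managed events.
var voice = new SourceVoice(
    xaudio2,                       // an existing XAudio2 instance (assumed)
    new WaveFormat(44100, 16, 2),
    VoiceFlags.None,
    2.0f,                          // maxFrequencyRatio (illustrative value)
    true,                          // enableCallbackEvents
    null);                         // no effect chain
voice.BufferEnd += context => Console.WriteLine("Buffer finished, context: " + context);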
Example #7
 /// <summary>	
 /// Creates and configures a source voice.	
 /// </summary>	
 /// <param name="device">an instance of <see cref = "SharpDX.XAudio2.XAudio2" /></param>
 /// <param name="sourceFormat">[in]  Pointer to a <see cref="SharpDX.Multimedia.WaveFormat"/> structure. This structure contains the expected format for all audio buffers submitted to the source voice. XAudio2 supports voice types of PCM, xWMA, ADPCM (Windows only), and XMA (Xbox 360 only). XAudio2 supports the following PCM formats.   8-bit (unsigned) integer PCM   16-bit integer PCM (Optimal format for XAudio2)   20-bit integer PCM (either in 24 or 32 bit containers)   24-bit integer PCM (either in 24 or 32 bit containers)   32-bit integer PCM   32-bit float PCM (Preferred format after 16-bit integer)   The number of channels in a source voice must be less than or equal to XAUDIO2_MAX_AUDIO_CHANNELS. The sample rate of a source voice must be between XAUDIO2_MIN_SAMPLE_RATE and XAUDIO2_MAX_SAMPLE_RATE. Note Data formats such as XMA, {{ADPCM}}, and {{xWMA}} that require more information than provided by <see cref="SharpDX.Multimedia.WaveFormat"/> have a <see cref="SharpDX.Multimedia.WaveFormat"/> structure as the first member in their format structure. When creating a source voice with one of those formats cast the format's structure as a <see cref="SharpDX.Multimedia.WaveFormat"/> structure and use it as the value for pSourceFormat. </param>
 /// <param name="flags">[in]  Flags that specify the behavior of the source voice. A flag can be 0 or a combination of one or more of the following: ValueDescriptionXAUDIO2_VOICE_NOPITCHNo pitch control is available on the voice.?XAUDIO2_VOICE_NOSRCNo sample rate conversion is available on the voice, the voice's  outputs must have the same sample rate.Note The XAUDIO2_VOICE_NOSRC flag causes the voice to behave as though the XAUDIO2_VOICE_NOPITCH flag also is specified. ?XAUDIO2_VOICE_USEFILTERThe filter effect should be available on this voice.?XAUDIO2_VOICE_MUSICThe voice is used to play background music. The system automatically  can replace the voice with music selected by the user.? </param>
 /// <param name="maxFrequencyRatio">[in]  Highest allowable frequency ratio that can be set on this voice. The value for this argument must be between XAUDIO2_MIN_FREQ_RATIO and XAUDIO2_MAX_FREQ_RATIO. Subsequent calls to <see cref="SharpDX.XAudio2.SourceVoice.SetFrequencyRatio"/> are clamped between XAUDIO2_MIN_FREQ_RATIO and MaxFrequencyRatio. The maximum value for this argument is defined as XAUDIO2_MAX_FREQ_RATIO, which allows pitch to be raised by up to 10 octaves. If MaxFrequencyRatio is less than 1.0, the voice will use that ratio immediately after being created (rather than the default of 1.0). Xbox 360  For XMA voices there is an additional restriction on the MaxFrequencyRatio argument and the voice's sample rate. The product of these two numbers cannot exceed XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MONO for one-channel voices or XAUDIO2_MAX_RATIO_TIMES_RATE_XMA_MULTICHANNEL for voices with any other number of channels. If the value specified for MaxFrequencyRatio is too high for the specified format, the call to CreateSourceVoice fails and produces a debug message.  Note XAudio2's memory usage can be reduced by using the lowest possible MaxFrequencyRatio value. </param>
 /// <param name="callback">[in, optional]  Pointer to a client-provided callback interface, <see cref="SharpDX.XAudio2.VoiceCallback"/>. </param>
 /// <param name="effectDescriptors">[in, optional] Pointer to a list of XAUDIO2_EFFECT_CHAIN structures that describe an effect chain to use in the source voice.</param>
 /// <returns>No documentation.</returns>
 /// <unmanaged>HRESULT IXAudio2::CreateSourceVoice([Out] IXAudio2SourceVoice** ppSourceVoice,[In] const WAVEFORMATEX* pSourceFormat,[None] UINT32 Flags,[None] float MaxFrequencyRatio,[In, Optional] IXAudio2VoiceCallback* pCallback,[In, Optional] const XAUDIO2_VOICE_SENDS* pSendList,[In, Optional] const XAUDIO2_EFFECT_CHAIN* pEffectChain)</unmanaged>
 public SourceVoice(XAudio2 device, SharpDX.Multimedia.WaveFormat sourceFormat, SharpDX.XAudio2.VoiceFlags flags, float maxFrequencyRatio, VoiceCallback callback, EffectDescriptor[] effectDescriptors)
     : base(device)
 {
     CreateSourceVoice(device, sourceFormat, flags, maxFrequencyRatio, callback == null ? IntPtr.Zero : VoiceShadow.ToIntPtr(callback), effectDescriptors);
 }
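A usage sketch for this overload; the null-callback case is handled explicitly by CreateSourceVoice above, and the effect chain, format, and frequency ratio values are illustrative assumptions.

// Sketch only: no per-voice callback, but an effect chain attached at creation time.
var reverb = new Reverb(xaudio2);              // xaudio2: an existing XAudio2 instance (assumed)
var voice = new SourceVoice(
    xaudio2,
    new WaveFormat(44100, 16, 2),
    VoiceFlags.None,
    2.0f,                                      // maxFrequencyRatio (illustrative value)
    (VoiceCallback)null,                       // no callback interface
    new[] { new EffectDescriptor(reverb) });
voice.EnableEffect(0);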