Esempio n. 1
0
 /// <summary>
 /// Creates a source voice for the given wave, starts it, queues the wave's
 /// buffer and tracks the voice in the active-source list.
 /// </summary>
 /// <param name="wave">The wave to play.</param>
 public void AddSound(Wave wave)
 {
     var voice = new SourceVoice(audio, wave.Data.Format);
     voice.Start();
     voice.SubmitSourceBuffer(wave.Buffer);
     sources.Add(voice);
 }
 /// <summary>
 /// Wraps a new XAudio2 source voice for the given sound system and format,
 /// hooks the stream-end notification and caches the initial output matrix.
 /// </summary>
 /// <param name="system">Owning sound system supplying the audio device.</param>
 /// <param name="format">Wave format the voice will play.</param>
 public NormalSourceVoice(SoundSystem system, WaveFormat format)
 {
     this.system = system;
     sourceVoice = new SourceVoice(system.AudioDevice, format);
     sourceVoice.StreamEnd += sourceVoice_StreamEnd;

     // Remember the voice's default mix so it can be restored later.
     defaultOutputMatrix = sourceVoice.GetOutputMatrix(
         sourceVoice.VoiceDetails.InputChannels,
         system.DeviceDetails.OutputFormat.Channels);
 }
Esempio n. 3
0
		/// <summary>
		/// Releases the source voice, the audio buffer's data and the XAudio2
		/// engine when disposing managed state.
		/// </summary>
		/// <param name="disposing">True when called from Dispose(), false from a finalizer.</param>
		public override void Dispose(bool disposing)
		{
			if (disposing)
			{
				if (sourceVoice != null)
				{
					// Stop BEFORE flushing: while a voice is started,
					// FlushSourceBuffers does not remove the buffer currently
					// being processed, so the original Flush-then-Stop order
					// could leave audio playing momentarily.
					sourceVoice.Stop();
					sourceVoice.FlushSourceBuffers();
					sourceVoice.Dispose();
					sourceVoice = null;
				}
				if (audioBuffer != null)
				{
					audioBuffer.AudioData.Dispose();
					audioBuffer.AudioData = null; // Just to be clean…
					audioBuffer.Dispose();
					audioBuffer = null;
				}
				if (xAudio != null)
				{
					xAudio.StopEngine();
					xAudio.Dispose();
					xAudio = null;
				}
			}
		}
Esempio n. 4
0
        /// <summary>
        /// Plays a PCM wave file on the given device, blocking until playback
        /// completes or the escape key is pressed, then waits for the key to
        /// be released before cleaning up.
        /// </summary>
        static void PlayPCM(XAudio2 device, string fileName)
        {
            var fileStream = System.IO.File.OpenRead(fileName);
            var stream = new WaveStream(fileStream);
            fileStream.Close();

            var buffer = new AudioBuffer();
            buffer.AudioData  = stream;
            buffer.AudioBytes = (int)stream.Length;
            buffer.Flags      = BufferFlags.EndOfStream;

            var voice = new SourceVoice(device, stream.Format);
            voice.SubmitSourceBuffer(buffer);
            voice.Start();

            // Poll until the queued audio drains, aborting early on escape.
            while (voice.State.BuffersQueued > 0 && GetAsyncKeyState(VK_ESCAPE) == 0)
            {
                Thread.Sleep(10);
            }

            // Swallow the escape press so it is not seen by the caller.
            while (GetAsyncKeyState(VK_ESCAPE) != 0)
            {
                Thread.Sleep(10);
            }

            buffer.Dispose();
            voice.Dispose();
            stream.Dispose();
        }
 /// <summary>
 /// Creates a new instance of the <see cref="SoundEffectInstance"/> class.
 /// </summary>
 /// <param name="soundEffect">The source effect whose instance needs to be created.</param>
 /// <param name="sourceVoice">The source voice to play the created instance.</param>
 /// <param name="isFireAndForget">A value indicating whether this instance is not monitored after it is being send to playback.</param>
 internal SoundEffectInstance(SoundEffect soundEffect, SourceVoice sourceVoice, bool isFireAndForget)
 {
     Effect          = soundEffect;
     voice           = sourceVoice;
     IsFireAndForget = isFireAndForget;

     // Default playback state: stopped-but-ready, no loop, full volume,
     // centred pan, no pitch shift, no custom output matrix.
     paused       = false;
     IsLooped     = false;
     volume       = 1.0f;
     pan          = 0.0f;
     pitch        = 0.0f;
     outputMatrix = null;
 }
Esempio n. 6
0
		/// <summary>
		/// Tears down the current voice and its audio data before a new
		/// buffer is installed.
		/// </summary>
		protected override void BeginBufferChange()
		{
			if (audioBuffer != null && audioBuffer.AudioData != null)
			{
				// Stop BEFORE flushing: while a voice is started,
				// FlushSourceBuffers does not remove the buffer currently
				// being processed, so the original Flush-then-Stop order
				// could leave audio playing momentarily.
				sourceVoice.Stop();
				sourceVoice.FlushSourceBuffers();
				sourceVoice.Dispose();
				sourceVoice = null;
				audioBuffer.AudioData.Dispose();
				audioBuffer.AudioData = null;
			}
		}
Esempio n. 7
0
        /// <summary>
        /// Creates the XAudio2 device, a mastering voice and one stereo
        /// 16-bit PCM source voice, then pre-allocates the playback buffers.
        /// Any failure during construction disposes partially-created state
        /// and rethrows.
        /// </summary>
        /// <param name="config">Driver configuration (not read in this constructor).</param>
        public XAudio2Driver(Configuration config)
        {
            IsDisposed = false;

            try
            {
                _isBusy = new WaitableBool(false);

                // 32040 Hz stereo 16-bit PCM — NOTE(review): presumably the
                // SNES native output rate given the Snes reference below; confirm.
                _device      = new XAudio2();
                _masterVoice = new MasteringVoice(_device);
                _sourceVoice = new SourceVoice(_device,
                                               new WaveFormat()
                                               {
                                                   FormatTag             = WaveFormatTag.Pcm,
                                                   Channels              = 2,
                                                   BitsPerSample         = 16,
                                                   SamplesPerSecond      = 32040,
                                                   AverageBytesPerSecond = 2 * (16 / 8) * 32040,
                                                   BlockAlignment        = 2 * (16 / 8)
                                               },
                                               VoiceFlags.None, 2.0F);
                // Once the queue dips below capacity the producer may submit again.
                _sourceVoice.BufferStart += (s, e) =>
                {
                    if (_sourceVoice.State.BuffersQueued < BufferCount)
                    {
                        _isBusy.Value = false;
                    }
                };

                // One managed byte array per queue slot, each wrapped in a
                // reusable DataStream over the same memory.
                _buffers       = new byte[BufferCount][];
                _bufferStreams = new DataStream[BufferCount];
                for (int i = 0; i < BufferCount; i++)
                {
                    _buffers[i]       = new byte[Snes.MaxAudioBufferLength * 4];
                    _bufferStreams[i] = new DataStream(_buffers[i], true, false);
                }

                _bufferCursor = 0;
                _audioBuffer = new AudioBuffer();
                _isPaused = true;
            }
            catch
            {
                // Clean up whatever was constructed, then surface the error.
                Dispose();
                throw;
            }
        }
Esempio n. 8
0
        /// <summary>
        /// Stops playback, returns the voice to the shared pool and frees the
        /// backing data stream.
        /// </summary>
        private void DisposeInternally()
        {
            Stop();

            if (SourceVoice != null)
            {
                SourceVoice.Clear();
                // Voices are recycled by format rather than destroyed outright.
                VoicePool.PutVoice(SourceVoice, WaveFormat);
                SourceVoice = null;
            }

            var stream = _dataStream;
            if (stream != null)
            {
                _dataStream = null;
                stream.Dispose();
            }
        }
Esempio n. 9
0
        /// <summary>
        /// Loads a wave file into a SourceVoice.
        /// </summary>
        /// <param name="FileName">The path of the file to load.</param>
        /// <param name="device">The XAudio2 device to load the sound on.</param>
        /// <param name="notificationsSupport">True to enable receiving notifications on this buffer, false otherwise. A notification might include an event when this buffer starts processing data, or when the buffer has finished playing. Set this parameter to true if you wish to receive a notification when the buffer is done playing by means of the function passed to setOnEnd.</param>
        /// <returns>A populated ExtendedAudioBuffer.</returns>
        public static ExtendedAudioBuffer LoadSound(string FileName, XAudio2 device, bool notificationsSupport)
        {
            if (!File.Exists(FileName))
            {
                throw new ArgumentException("The sound " + FileName + " could not be found.");
            }

            SoundStream stream = new SoundStream(File.OpenRead(FileName));
            // Grab the format before closing the stream, so we keep a live reference.
            WaveFormat format = stream.Format;
            AudioBuffer buffer = new AudioBuffer
            {
                Stream     = stream.ToDataStream(),
                AudioBytes = (int)stream.Length,
                Flags      = SharpDX.XAudio2.BufferFlags.EndOfStream
            };
            stream.Close();

            SourceVoice voice = new SourceVoice(device, format, VoiceFlags.None, 5.0f, notificationsSupport);
            return new ExtendedAudioBuffer(buffer, voice);
        }
Esempio n. 10
0
        /// <summary>
        /// Gets a free voice from the voice pool.  If none are available a new one is created.
        /// </summary>
        /// <param name="pSound">Sound to play with the voice</param>
        private static void GetVoice(AudioResource pSound, int pId, bool pLoop, out SourceVoice pVoice)
        {
            lock (_freeVoices)
            {
                int last = _freeVoices.Count - 1;
                if (last < 0)
                {
                    // Pool is empty — build a fresh voice for this sound's stream.
                    pVoice = new SourceVoice(Device, pSound.Stream, true);
                }
                else
                {
                    // Take the most recently freed voice off the end.
                    pVoice = _freeVoices[last];
                    _freeVoices.RemoveAt(last);
                }

                // Register the completion callback (looping if requested).
                new SoundCompleteCallback(pVoice, pId, pSound).AddCallback(pLoop);
            }
        }
Esempio n. 11
0
 /// <summary>
 /// Configures the playback voice and the 3-D audio emitter for the given
 /// source wave format.
 /// </summary>
 /// <param name="format">Native-style format describing sample rate, bit depth and channels.</param>
 public override void Setup(WaveFormatEx format)
 {
     _format            = new WaveFormatExtensible(format.nSamplesPerSec, format.wBitsPerSample, format.nChannels);
     _voice             = new SourceVoice(_xaudio2, _format);
     _voice.StreamEnd  += _voice_StreamEnd;
     _voice.VoiceError += _voice_VoiceError;
     _emitter           = new Emitter
     {
         ChannelAzimuths     = GetAzimuths(_format.Channels),
         ChannelCount        = _format.Channels,
         ChannelRadius       = 10,
         // X3DAUDIO_EMITTER.CurveDistanceScaler must lie in [FLT_MIN, FLT_MAX]
         // (i.e. be a positive float). The previous value, float.MinValue, is
         // the LARGEST NEGATIVE float — a C-to-C# porting slip, since C's
         // FLT_MIN is the smallest positive value. Use float.Epsilon as the
         // smallest positive C# float so the scaler is valid.
         CurveDistanceScaler = float.Epsilon,
         OrientFront         = new Vector3(0, 0, 1),
         OrientTop           = new Vector3(0, 1, 0),
         Position            = new Vector3(0, 0, 0),
         Velocity            = new Vector3(0, 0, 0)
     };
 }
Esempio n. 12
0
        /// <summary>
        /// Reads a wave file, caching its format, audio buffer and decoded
        /// packet info in fields, and returns a new (not yet started) source
        /// voice configured for that format.
        /// </summary>
        protected SourceVoice CreateVoice(SharpDX.XAudio2.XAudio2 device, string fileName)
        {
            using (var soundStream = new SoundStream(File.OpenRead(fileName)))
            {
                _format = soundStream.Format;
                _buffer = new AudioBuffer
                {
                    Stream     = soundStream.ToDataStream(),
                    AudioBytes = (int)soundStream.Length,
                    Flags      = BufferFlags.EndOfStream
                };
                _packetsInfo = soundStream.DecodedPacketsInfo;
            }

            return new SourceVoice(device, _format, true);
        }
Esempio n. 13
0
        /// <summary>
        /// Creates a playable instance of this sound effect. The concrete
        /// construction path depends on the platform compilation symbols.
        /// </summary>
        public SoundEffectInstance CreateInstance()
        {
#if DIRECTX
            SourceVoice voice = null;
            // Device can be null (e.g. no audio hardware available); the
            // instance is then created without a backing voice.
            if (Device != null)
            {
                voice = new SourceVoice(Device, _format, VoiceFlags.None, XAudio2.MaximumFrequencyRatio);
            }

            var instance = new SoundEffectInstance(this, voice);
#elif (WINDOWS && OPENGL) || LINUX
            var instance = new SoundEffectInstance(this);
#else
            var instance = new SoundEffectInstance();
            instance.Sound = _sound;
#endif
            return(instance);
        }
Esempio n. 14
0
        /// <summary>
        /// Creates a pooled source voice for the given format, subscribes to
        /// buffer-end and engine-change notifications and flushes it ready
        /// for use.
        /// </summary>
        public MySourceVoice(MySourceVoicePool owner, XAudio2 device, WaveFormat sourceFormat)
        {
            // Upper bound for SetFrequencyRatio. XAudio2 sizes its native
            // buffers proportionally to this ratio (pitching up compresses
            // the data in time, so more input is needed):
            //   2    ~ one octave up (11-12 semitones), cheap
            //   ~32  - still fairly safe
            //   ~128 - large native allocations
            //   >128 - a memory killer
            const float MaxFrequencyRatio = 2;

            m_voice = new SourceVoice(device, sourceFormat, VoiceFlags.UseFilter, MaxFrequencyRatio, true);
            m_voice.BufferEnd += OnStopPlaying;
            m_valid = true;

            m_owner = owner;
            m_owner.OnAudioEngineChanged += m_owner_OnAudioEngineChanged;
            Flush();
        }
Esempio n. 15
0
        /// <summary>
        /// Stops playback and releases the source voice and the audio
        /// buffer's backing stream when disposing managed state.
        /// </summary>
        /// <param name="disposing">True when invoked from Dispose().</param>
        private void Dispose(bool disposing)
        {
            if (!disposing)
            {
                return;
            }

            Stop();

            if (SourceVoice != null)
            {
                SourceVoice.DestroyVoice();
                SourceVoice.Dispose();
            }

            if (AudioBuffer != null)
            {
                AudioBuffer.Stream.Dispose();
            }
        }
Esempio n. 16
0
        /// <summary>
        /// Loads the instrument's wave file, queues it on a freshly created
        /// source voice and passes everything on to Setup.
        /// </summary>
        public Sound(Instrument instrument, XAudio2 xAudio2)
        {
            using (var soundStream = new SoundStream(File.OpenRead(instrument.Path.LocalPath)))
            {
                WaveFormat format = soundStream.Format;
                var audioBuffer = new AudioBuffer
                {
                    Stream     = soundStream.ToDataStream(),
                    AudioBytes = (int)soundStream.Length,
                    Flags      = BufferFlags.EndOfStream
                };

                var voice = new SourceVoice(xAudio2, format);
                voice.SubmitSourceBuffer(audioBuffer, soundStream.DecodedPacketsInfo);

                Setup(audioBuffer, voice, soundStream.DecodedPacketsInfo, instrument.Volume);
            }
        }
Esempio n. 17
0
        /// <summary>
        /// Plays the named sound (loading it on first use) and reaps any
        /// voices whose buffers have finished playing.
        /// </summary>
        /// <param name="name">Sound name without the .wav extension.</param>
        public void PlaySound(string name)
        {
            if (!initialized)
            {
                return;
            }

            // Load on cache miss, or when the cached entry is an empty slot.
            if (!Sounds.TryGetValue(name, out var wave) || wave == null)
            {
                wave = LoadSound(name);
            }

            if (wave == null)
            {
                DebugWindow.LogError($"Sound file: {name}.wav not found.");
                return;
            }

            var sourceVoice = new SourceVoice(xAudio2, wave.WaveFormat, true);

            sourceVoice.SubmitSourceBuffer(wave.Buffer, wave.DecodedPacketsInfo);
            sourceVoice.Start();
            _list.Add(sourceVoice);

            // Reap finished voices. Iterate BACKWARDS: the original forward
            // loop called RemoveAt(i) without decrementing i, which skipped
            // the element that slid into the removed slot.
            for (var i = _list.Count - 1; i >= 0; i--)
            {
                var sv = _list[i];

                if (sv.State.BuffersQueued <= 0)
                {
                    sv.Stop();
                    sv.DestroyVoice();
                    sv.Dispose();
                    _list.RemoveAt(i);
                }
            }
        }
Esempio n. 18
0
        /// <summary>
        /// SharpDX XAudio2 sample. Plays a generated sound with some reverb.
        /// </summary>
        static void Main(string[] args)
        {
            var xaudio2        = new XAudio2();
            var masteringVoice = new MasteringVoice(xaudio2);

            // 44.1 kHz, 32-bit float, stereo.
            var waveFormat  = new WaveFormat(44100, 32, 2);
            var sourceVoice = new SourceVoice(xaudio2, waveFormat);

            // Sixty seconds of audio in a single buffer.
            int bufferSize = waveFormat.ConvertLatencyToByteSize(60000);
            var dataStream = new DataStream(bufferSize, true, true);
            int sampleCount = bufferSize / waveFormat.BlockAlign;

            // 220 Hz tone with a 10 Hz, +/-4 Hz vibrato, same value on both channels.
            for (int n = 0; n < sampleCount; n++)
            {
                double vibrato = Math.Cos(2 * Math.PI * 10.0 * n / waveFormat.SampleRate);
                var sample = (float)(Math.Cos(2 * Math.PI * (220.0 + 4.0 * vibrato) * n / waveFormat.SampleRate) * 0.5);
                dataStream.Write(sample);
                dataStream.Write(sample);
            }
            dataStream.Position = 0;

            var audioBuffer = new AudioBuffer
            {
                Stream     = dataStream,
                Flags      = BufferFlags.EndOfStream,
                AudioBytes = bufferSize
            };

            // Attach a reverb effect to the voice and switch it on.
            var reverb           = new Reverb();
            var effectDescriptor = new EffectDescriptor(reverb);
            sourceVoice.SetEffectChain(effectDescriptor);
            sourceVoice.EnableEffect(0);

            sourceVoice.SubmitSourceBuffer(audioBuffer, null);
            sourceVoice.Start();

            // Print a dot per second for one minute while the sound plays.
            Console.WriteLine("Play sound");
            for (int n = 0; n < 60; n++)
            {
                Console.Write(".");
                Console.Out.Flush();
                Thread.Sleep(1000);
            }
        }
Esempio n. 19
0
        /// <summary>
        /// Creates the XAudio2 device and a float-format source voice, then
        /// pre-fills and submits NUMOFBUF buffers of square-wave data, each
        /// one octave apart.
        /// </summary>
        public fAudio()
        {
            device         = new XAudio2();
            masteringVoice = new MasteringVoice(device);

            // Field order matters: BlockAlignment is derived from
            // BitsPerSample/Channels, and AverageBytesPerSecond from
            // SamplesPerSecond/BlockAlignment, so those must be set first.
            waveFormat = new WaveFormat();
            {
                waveFormat.Channels              = CHANNELS;
                waveFormat.SamplesPerSecond      = SAMPLERATE;
                waveFormat.BitsPerSample         = BITDEPTH;
                waveFormat.BlockAlignment        = (short)(waveFormat.BitsPerSample / 8 * waveFormat.Channels);
                waveFormat.AverageBytesPerSecond = waveFormat.SamplesPerSecond * waveFormat.BlockAlignment;
                waveFormat.FormatTag             = WaveFormatTag.IeeeFloat;
            }

            sourceVoice            = new SourceVoice(device, waveFormat);
            sourceVoice.BufferEnd += sourceVoice_BufferEnd;

            audioBuffer = new AudioBuffer[NUMOFBUF];
            data        = new byte[NUMOFBUF][];
            for (int i = 0; i < NUMOFBUF; i++)
            {
                data[i] = new byte[NUMOFSAMPLE * waveFormat.BlockAlignment];
                byte[] buff;
                // Drop one octave (12 semitones) per buffer, starting at note 95.
                sq.Note = (95 - 12 * i);
                // Fill the buffer one frame (BlockAlignment bytes) at a time.
                // NOTE(review): the 4-byte copy assumes BlockAlignment == 4
                // (e.g. one 32-bit float sample per frame) — confirm against
                // CHANNELS/BITDEPTH.
                for (int j = 0; j < data[i].Length; j += waveFormat.BlockAlignment)
                {
                    buff           = sq.getByte();
                    data[i][j + 0] = buff[0];
                    data[i][j + 1] = buff[1];
                    data[i][j + 2] = buff[2];
                    data[i][j + 3] = buff[3];
                }
                audioBuffer[i]            = new AudioBuffer();
                audioBuffer[i].AudioData  = new MemoryStream(data[i], true);
                audioBuffer[i].Flags      = BufferFlags.EndOfStream;
                audioBuffer[i].AudioBytes = data[i].Length;
                audioBuffer[i].LoopCount  = 0;

                // Rewind before submitting so playback starts at the beginning.
                audioBuffer[i].AudioData.Position = 0;
                sourceVoice.SubmitSourceBuffer(audioBuffer[i]);
            }
            bufferCount = 0;
        }
Esempio n. 20
0
        /// <summary>
        /// Stops any current immediate playback and plays the given 16-bit
        /// mono PCM samples at the requested sample rate and volume.
        /// </summary>
        /// <param name="data">Raw 16-bit PCM samples (single channel).</param>
        /// <param name="sampleRate">Playback rate in Hz.</param>
        /// <param name="volume">Voice volume passed to SetVolume.</param>
        public void PlayImmediate(short[] data, int sampleRate, float volume)
        {
            StopImmediate();

            immediateDonePlaying = false;

            // Copy the samples into unmanaged memory owned by the buffer so
            // XAudio2 can read them directly.
            // NOTE(review): this allocation is presumably freed in
            // ImmediateVoice_BufferEnd / StopImmediate — confirm, otherwise
            // it leaks on every call.
            immediateAudioBuffer = new AudioBuffer();
            immediateAudioBuffer.AudioDataPointer = Utilities.AllocateMemory(data.Length * sizeof(short));
            immediateAudioBuffer.AudioBytes       = data.Length * sizeof(short);
            Marshal.Copy(data, 0, immediateAudioBuffer.AudioDataPointer, data.Length);

            // Mono 16-bit at the caller-supplied rate.
            var waveFormat = new WaveFormat(sampleRate, 16, 1);

            immediateVoice            = new SourceVoice(xaudio2, waveFormat);
            immediateVoice.BufferEnd += ImmediateVoice_BufferEnd;
            immediateVoice.SetVolume(volume);
            immediateVoice.SubmitSourceBuffer(immediateAudioBuffer, null);
            immediateVoice.Start();
        }
Esempio n. 21
0
        /// <summary>
        /// Rebuilds the XAudio2 buffer over the new sample data and starts an
        /// infinitely looping source voice on it.
        /// </summary>
        protected override void EndBufferChange()
        {
            if (AudioBuffer != null)
            {
                if (xAudioBuffer == null)
                {
                    xAudioBuffer = new XAudioBuffer();
                }

                // Pin the managed sample array so native XAudio2 can read it
                // in place. NOTE(review): the handle is presumably freed in
                // BeginBufferChange or Dispose — confirm, otherwise the pin
                // (and array) leak.
                audioBufferHandle       = GCHandle.Alloc(AudioBuffer.RawBuffer, GCHandleType.Pinned);
                xAudioBuffer.Stream     = new DataStream(audioBufferHandle.AddrOfPinnedObject(), AudioBuffer.SizeInBytes, true, true);
                xAudioBuffer.AudioBytes = (int)xAudioBuffer.Stream.Length;
                // Loop over the whole buffer; length is bytes / 2, presumably
                // because samples are 16-bit — confirm against waveFormat.
                xAudioBuffer.LoopLength = AudioBuffer.RawBuffer.Length / 2;
                xAudioBuffer.LoopCount  = XAudio2.MaximumLoopCount;
                sourceVoice             = new SourceVoice(xAudio, waveFormat);
                sourceVoice.SubmitSourceBuffer(xAudioBuffer, null);
                sourceVoice.Start();
            }
        }
Esempio n. 22
0
        /// <summary>
        /// Plays a cached sound as a 3-D positioned voice: creates the voice,
        /// routes it through the given send descriptor, registers it in the
        /// linked sound list and applies the X3DAudio output matrix and
        /// doppler ratio computed for the emitter. Does nothing when the
        /// sound ID is not in the buffer cache.
        /// </summary>
        /// <param name="soundID">Key into the preloaded sound buffer cache.</param>
        /// <param name="emitter">3-D emitter describing the sound's position.</param>
        /// <param name="volume">Voice volume passed to SetVolume.</param>
        /// <param name="list">List tracking the live sounds of this category.</param>
        /// <param name="voiceSendDescriptor">Submix/mastering routing for the voice.</param>
        /// <param name="onFxEnd">Optional extra BufferEnd callback.</param>
        private void PlaySound(int soundID,
                               Emitter emitter,
                               float volume,
                               LinkedSoundList list,
                               ref VoiceSendDescriptor voiceSendDescriptor,
                               Action <IntPtr>?onFxEnd = null)
        {
            if (!_soundBuffer.TryGetValue(soundID, out SoundBuffer buffer))
            {
                return;
            }

            SourceVoice sourceVoice = new SourceVoice(_xAudio2, buffer.Format, VoiceFlags.None, true);

            sourceVoice.SetVolume(volume);
            sourceVoice.SubmitSourceBuffer(buffer.AudioBuffer, buffer.DecodedPacketsInfo);
            sourceVoice.SetOutputVoices(voiceSendDescriptor);

            LinkedSoundList.Sound sound = new LinkedSoundList.Sound(emitter, sourceVoice);
            list.Add(sound);

            // The voice destroys itself once its buffer finishes playing.
            sourceVoice.BufferEnd += _ =>
            {
                list.Remove(sound);
                sourceVoice.DestroyVoice();
            };

            if (onFxEnd != null)
            {
                sourceVoice.BufferEnd += onFxEnd;
            }
            sourceVoice.Start();

            // Spatialize: compute the channel mix and doppler shift for this
            // emitter relative to the listener.
            DspSettings settings = _x3DAudio.Calculate(
                _listener,
                sound.Emitter,
                CalculateFlags.Matrix | CalculateFlags.Doppler,
                buffer.Format.Channels,
                _inputChannelCount);

            sound.SourceVoice.SetOutputMatrix(buffer.Format.Channels, _inputChannelCount, settings.MatrixCoefficients);
            sound.SourceVoice.SetFrequencyRatio(settings.DopplerFactor);
        }
Esempio n. 23
0
        /// <summary>
        /// Diagnostic: prints the voice's output-matrix info before starting,
        /// after starting, after a volume change, and again after 300 ms.
        /// </summary>
        private void TestOutputMatrixBehaviour(Sound sound)
        {
            int inputChannels  = sound.Format.Channels;
            int outputChannels = audioDevice.GetDeviceDetails(0).OutputFormat.Channels;
            var voice = new SourceVoice(audioDevice, sound.Format);

            voice.SubmitSourceBuffer(sound.Buffer);
            Console.WriteLine("Pre: ");
            PrintVoiceInfo(inputChannels, outputChannels, voice);

            voice.Start();
            Console.WriteLine("Started: ");
            PrintVoiceInfo(inputChannels, outputChannels, voice);

            voice.Volume = 0.7f;
            Console.WriteLine("Volume set: ");
            PrintVoiceInfo(inputChannels, outputChannels, voice);

            System.Threading.Thread.Sleep(300);
            PrintVoiceInfo(inputChannels, outputChannels, voice);
        }
Esempio n. 24
0
        /// <summary>
        /// Releases the reverb effect and the voice when disposing, then
        /// clears all platform references unconditionally.
        /// </summary>
        private void PlatformDispose(bool disposing)
        {
            if (disposing)
            {
                if (_reverb != null)
                {
                    _reverb.Dispose();
                }
                if (_voice != null)
                {
                    _voice.DestroyVoice();
                    _voice.Dispose();
                }
            }

            // References are dropped even on the finalizer path.
            _voice  = null;
            _effect = null;
            _reverb = null;
        }
Esempio n. 25
0
        /// <summary>
        /// Stops the feeder task, then tears down the source voice.
        /// </summary>
        public void Stop()
        {
            // Shut the producer task down first so no new buffers are submitted
            // while the voice is being destroyed.
            if (playingTask != null)
            {
                quitEvent.Set();
                playingTask.Wait();
                playingTask = null;
            }

            if (sourceVoice != null)
            {
                // Detach the callback BEFORE tearing the voice down. The
                // original unsubscribed only after DestroyVoice, so BufferEnd
                // could still fire against a voice already being destroyed.
                sourceVoice.BufferEnd -= SourceVoice_BufferEnd;
                sourceVoice.Stop();
                sourceVoice.FlushSourceBuffers();
                sourceVoice.DestroyVoice();
                sourceVoice.Dispose();
                sourceVoice = null;
            }
        }
Esempio n. 26
0
        /// <summary>
        /// Plays a PCM wave file on the device, blocking until the queued
        /// audio drains or escape is pressed, then waits for the key to be
        /// released before disposing everything.
        /// </summary>
        public static void PlayPcm(XAudio2 device, string fileName)
        {
            // Wrap the file in a WaveStream; the raw file handle can be
            // closed as soon as the wrapper is constructed.
            var fileStream = System.IO.File.OpenRead(fileName);
            WaveStream stream = new WaveStream(fileStream);
            fileStream.Close();

            // Describe the whole stream as a single end-of-stream buffer.
            AudioBuffer buffer = new AudioBuffer();
            buffer.AudioData  = stream;
            buffer.AudioBytes = (int)stream.Length;
            buffer.Flags      = BufferFlags.EndOfStream;

            SourceVoice sourceVoice = new SourceVoice(device, stream.Format);
            sourceVoice.SubmitSourceBuffer(buffer);
            sourceVoice.Start();

            // Poll until playback finishes, aborting early on escape
            // (GetAsyncKeyState is the Win32 import declared elsewhere).
            while (sourceVoice.State.BuffersQueued > 0)
            {
                if (GetAsyncKeyState(VK_ESCAPE) != 0)
                {
                    break;
                }
                Thread.Sleep(10);
            }

            // Swallow the escape press: wait here until it is released.
            while (GetAsyncKeyState(VK_ESCAPE) != 0)
            {
                Thread.Sleep(10);
            }

            buffer.Dispose();
            sourceVoice.Dispose();
            stream.Dispose();
        }
Esempio n. 27
0
        /// <summary>
        /// Sets up the XAudio2 device, a low-latency streaming source voice
        /// and a ring of three pre-allocated native buffers, then launches
        /// the long-running task that generates and submits audio.
        /// </summary>
        public SineGenerator()
        {
            _xaudio2        = new XAudio2();
            _masteringVoice = new MasteringVoice(_xaudio2);

            // 44.1 kHz, 32-bit float, stereo.
            _waveFormat = new WaveFormat(44100, 32, 2);

            _sourceVoice = new SourceVoice(_xaudio2, _waveFormat);

            // 7 ms worth of audio per buffer — small for low latency.
            _bufferSize = _waveFormat.ConvertLatencyToByteSize(7);
            _dataStream = new DataStream(_bufferSize, true, true);

            _sourceVoice.BufferEnd += sourceVoice_BufferEnd;
            _sourceVoice.Start();

            _isConvolutionOn = false;
            _horizontalAngle = 0;
            _elevation       = 0;

            // Signalled by the BufferEnd handler to wake the generator task.
            _bufferEndEvent = new AutoResetEvent(false);

            _valueRate = 20;
            _valueAmp  = 0.5f;

            _nextBuffer = 0;

            _playEvent = new ManualResetEventSlim();
            _log       = new List <string>();


            // Pre-allocate the buffer ring: three AudioBuffers, each backed
            // by its own block of native memory of _bufferSize bytes.
            _audioBuffersRing = new AudioBuffer[3];
            _memBuffers       = new DataPointer[_audioBuffersRing.Length];
            for (int i = 0; i < _audioBuffersRing.Length; i++)
            {
                _audioBuffersRing[i]   = new AudioBuffer();
                _memBuffers[i].Size    = _bufferSize;
                _memBuffers[i].Pointer = Utilities.AllocateMemory(_memBuffers[i].Size);
            }

            // Dedicated thread: the generator loops for the object's lifetime.
            _playSoundTask = Task.Factory.StartNew(PlaySoundAsync, TaskCreationOptions.LongRunning);
        }
Esempio n. 28
0
        /// <summary>
        /// Creates the XAudio2 engine and mastering voice, then fills the
        /// voice pool with ten source voices that return themselves to the
        /// pool whenever one of their buffers finishes.
        /// </summary>
        public void Init()
        {
            m_XAudio2        = new XAudio2();
            m_MasteringVoice = new MasteringVoice(m_XAudio2);

            // Default pooled-voice format: 96 kHz, 24-bit, stereo.
            var defWaveFormat = new WaveFormat(96000, 24, 2);

            for (var i = 0; i < 10; i++)
            {
                var voice = new SourceVoice(m_XAudio2, defWaveFormat);

                // Each lambda captures its own `voice` (declared inside the
                // loop), so every voice re-enqueues itself on buffer end.
                voice.BufferEnd += (ptr) =>
                {
                    lock (m_VoicePool)
                    {
                        m_VoicePool.Enqueue(voice);
                    }
                };

                m_VoicePool.Enqueue(voice);
            }
        }
Esempio n. 29
0
        /// <summary>
        /// Loads a sound effect file into an audio buffer and creates the
        /// source voice that will play it.
        /// </summary>
        /// <param name="device">XAudio2 device to create the voice on.</param>
        /// <param name="soundFileName">Path of the sound file to load.</param>
        /// <param name="volume">Base volume for this effect.</param>
        public SoundEffectVoice(XAudio2 device, string soundFileName, float volume)
        {
            _audioDevice = device;
            _baseVolume  = volume;

            // TODO: This could be optimised: cache & copy the audio file bytes?
            _stream = new SoundStream(File.OpenRead(soundFileName));

            // Keep the format before closing the stream below.
            WaveFormat format = _stream.Format;
            _audioBuffer = new AudioBuffer
            {
                Stream     = _stream.ToDataStream(),
                AudioBytes = (int)_stream.Length,
                Flags      = BufferFlags.EndOfStream,
            };
            _stream.Close();

            _voice            = new SourceVoice(_audioDevice, format, true);
            _voice.BufferEnd += VoiceBufferEnd;
        }
Esempio n. 30
0
            /// <summary>
            /// Creates a source voice for this sound, submits its buffer,
            /// starts playback and logs how long the whole call took.
            /// </summary>
            public void Play()
            {
                DateTime start = DateTime.Now;
                Console.WriteLine("Play() start");

                sourceVoice = new SourceVoice(Program.audioDevice, Format);
                Console.WriteLine("Create source voice");

                sourceVoice.BufferEnd += new EventHandler <ContextEventArgs>(sourceVoice_BufferEnd);
                sourceVoice.StreamEnd += new EventHandler(sourceVoice_StreamEnd);
                sourceVoice.SubmitSourceBuffer(Buffer);
                Console.WriteLine("Submitted source buffers");

                sourceVoice.Start();
                Console.WriteLine("Started source voice");

                var channel = new Channel {
                    SourceVoice = sourceVoice
                };

                DateTime end = DateTime.Now;
                Console.WriteLine("Play() end (" + (end - start).TotalMilliseconds + " ms)");
            }
Esempio n. 31
0
 /// <summary>
 /// Attempts to load a wave file into the playback buffer and source voice.
 /// </summary>
 /// <param name="fileName">Path of the wave file to load.</param>
 /// <returns>true on success; false if the file could not be read or parsed.</returns>
 private bool LoadSound(string fileName)
 {
     try
     {
         WaveStream waveStream;
         // The raw file handle can be closed once the WaveStream wraps it.
         using (FileStream fileStream = File.OpenRead(fileName))
         {
             waveStream = new WaveStream((Stream)fileStream);
         }

         this.buffer = new AudioBuffer
         {
             AudioData  = (Stream)waveStream,
             AudioBytes = (int)waveStream.Length,
             Flags      = BufferFlags.EndOfStream
         };
         this.sourceVoice = new SourceVoice(this.device, waveStream.Format);
         this.Volume      = 40;
         return true;
     }
     catch
     {
         // Deliberate best-effort: any load failure is reported as false.
         return false;
     }
 }
Esempio n. 32
0
        /// <summary>
        /// Finishes and removes every tracked source voice. Works on a
        /// snapshot of the keys so the map can be mutated safely.
        /// </summary>
        public static void clear()
        {
            // Snapshot the voices first: the dictionary cannot be modified
            // while it is being enumerated.
            List <SourceVoice> pending = new List <SourceVoice>();
            foreach (var entry in sources)
            {
                pending.Add(entry.Key);
            }

            foreach (SourceVoice voice in pending)
            {
                finishSource(voice);
                sources.Remove(voice);
            }
            pending.Clear();
        }
Esempio n. 33
0
        /// <summary>
        /// Plays the wave data contained in <paramref name="baseStream"/> on
        /// the given device, completing once every queued buffer has drained.
        /// </summary>
        private static async Task PlaySoundAsync(XAudio2 device, Stream baseStream)
        {
            baseStream.Position = 0;
            var soundStream = new SoundStream(baseStream);

            await using var dataStream = soundStream.ToDataStream();
            var audioBuffer = new AudioBuffer(dataStream);

            using var voice = new SourceVoice(device, soundStream.Format, true);
            voice.SubmitSourceBuffer(audioBuffer, soundStream.DecodedPacketsInfo);
            voice.Start();

            // Poll roughly every millisecond until playback finishes.
            while (voice.State.BuffersQueued > 0)
            {
                await Task.Delay(TimeSpan.FromMilliseconds(1));
            }

            voice.DestroyVoice();
        }
            /// <summary>
            /// Loads the emitter's wave data into an audio buffer and starts
            /// a source voice playing it (looping forever when the emitter
            /// is marked as looping).
            /// </summary>
            private void loadSound(ISoundFactory factory)
            {
                using (var stream = factory.OpenWaveStream(emitter.Sound))
                {
                    buffer = new AudioBuffer
                    {
                        AudioData  = stream,
                        AudioBytes = (int)stream.Length,
                        Flags      = BufferFlags.EndOfStream
                    };

                    if (emitter.Loop)
                    {
                        buffer.LoopCount = XAudio2.LoopInfinite;
                    }

                    sourceVoice = new SourceVoice(TW.Audio.XAudio2Device, stream.Format);
                    sourceVoice.SubmitSourceBuffer(buffer);
                    sourceVoice.Start();
                }
            }
Esempio n. 35
0
        /// <summary>
        /// Executa o stream de som.
        /// (Plays the sound stream, reusing a pooled voice when one is free;
        /// newly created voices register a completion callback that returns
        /// them to the pool.)
        /// </summary>
        /// <param name="sound">Sound whose buffer and packet info will be queued.</param>
        public void Play(EngineSound sound)
        {
            SourceVoice voice;

            lock (FreeVoices) {
                if (FreeVoices.Count == 0)
                {
                    // No pooled voice available: create one and hook the
                    // finished callback so it gets recycled.
                    voice            = new SourceVoice(XAudio2, sound.SoundStream.Format, true);
                    voice.BufferEnd += (new SoundCompleteCallback(this, voice)).OnSoundFinished;
                }
                else
                {
                    // Reuse the most recently freed voice.
                    voice = FreeVoices[FreeVoices.Count - 1];
                    FreeVoices.RemoveAt(FreeVoices.Count - 1);
                }
            }

            voice.SubmitSourceBuffer(sound.AudioBuffer, sound.SoundStream.DecodedPacketsInfo);
            voice.Start();
        }
Esempio n. 36
0
        /// <summary>
        /// Tears down the current source voice and its buffer-end wait event,
        /// leaving this instance in a stopped, voiceless state.
        /// </summary>
        protected void DestroySourceVoice()
        {
            var voice = _sourceVoice;
            _sourceVoice = null;
            if (voice != null)
            {
                // Detach our handler before destroying the native voice.
                voice.BufferEnd -= OnBufferEnd;
                if (!voice.IsDisposed)
                {
                    voice.DestroyVoice();
                    voice.Dispose();
                }
            }

            _bufferEndEvent?.Dispose();
            _bufferEndEvent = null;

            _isPlaying = false;
        }
Esempio n. 37
0
        /// <summary>
        /// Loads a PCM wave file and plays it to completion on a temporary
        /// source voice; pressing Escape aborts playback early.
        /// </summary>
        /// <param name="device">XAudio2 engine used to create the voice.</param>
        /// <param name="fileName">Path of the wave file to play.</param>
        static void PlayPCM(XAudio2 device, string fileName)
        {
            WaveStream stream;
            using (var file = System.IO.File.OpenRead(fileName))
            {
                // The original closed the file right after construction, so
                // WaveStream presumably buffers the data it needs.
                stream = new WaveStream(file);
            }

            var buffer = new AudioBuffer
            {
                AudioData  = stream,
                AudioBytes = (int)stream.Length,
                Flags      = BufferFlags.EndOfStream
            };

            var sourceVoice = new SourceVoice(device, stream.Format);
            sourceVoice.SubmitSourceBuffer(buffer);
            sourceVoice.Start();

            // Poll until the buffer drains, bailing out on Escape.
            while (sourceVoice.State.BuffersQueued > 0)
            {
                if (GetAsyncKeyState(VK_ESCAPE) != 0)
                {
                    break;
                }

                Thread.Sleep(10);
            }

            // Swallow the key until it is released so the caller does not see
            // a stale Escape press.
            while (GetAsyncKeyState(VK_ESCAPE) != 0)
            {
                Thread.Sleep(10);
            }

            // Release the voice and its data.
            buffer.Dispose();
            sourceVoice.Dispose();
            stream.Dispose();
        }
Esempio n. 38
0
		/// <summary>
		/// Allocates the playback buffer pool and starts a fresh source voice
		/// configured with the sound system's PCM format.
		/// </summary>
		public void StartSound()
		{
			BufferSizeSamples = Sound.MillisecondsToSamples(Global.Config.SoundBufferSizeMs);
			MaxSamplesDeficit = BufferSizeSamples;

			// Describe the raw PCM layout the emulator core produces.
			var format = new WaveFormat();
			format.FormatTag = WaveFormatTag.Pcm;
			format.SamplesPerSecond = Sound.SampleRate;
			format.BitsPerSample = Sound.BytesPerSample * 8;
			format.Channels = Sound.ChannelCount;
			format.BlockAlignment = Sound.BlockAlign;
			format.AverageBytesPerSecond = Sound.SampleRate * Sound.BlockAlign;

			_sourceVoice = new SourceVoice(_device, format);

			_bufferPool = new BufferPool();
			_runningSamplesQueued = 0;

			_sourceVoice.Start();
		}
Esempio n. 39
0
        /// <summary>
        /// Plays this named system sound, panned via per-display channel
        /// balances toward the screen that overlaps <paramref name="on"/> the most.
        /// </summary>
        /// <param name="on">Form whose hosting screen selects the channel balance.</param>
        public void Play( Form on )
        {
            var screens = Screen.AllScreens;
            // (Removed: screens_left/screens_right/screens_width were computed
            // here but never used.)

            // Pick the screen with the largest intersection area with the form.
            var bestScreen = screens.OrderByDescending( screen => {
                var area = screen.Bounds;
                area.Intersect( on.Bounds );
                return area.Width*area.Height;
            }).First();

            // Default balance; overridden per display on triple-head setups.
            var balances = new[]{1.5f,1.5f};
            if ( screens.Length==3 && DisplayBalances.ContainsKey(bestScreen.DeviceName) ) balances = DisplayBalances[bestScreen.DeviceName];

            // Resolve the current .wav path registered for this sound event.
            var path   = Registry.CurrentUser.OpenSubKey(@"AppEvents\Schemes\Apps\.Default\"+Name+@"\.Current").GetValue(null) as string;
            var stream = new WaveStream(path);
            var buffer = new AudioBuffer() { AudioBytes=(int)stream.Length, AudioData=stream, Flags=BufferFlags.EndOfStream };

            var voice = new SourceVoice( XAudio2, stream.Format );
            voice.SubmitSourceBuffer( buffer );
            voice.SetChannelVolumes( balances.Length, balances );
            voice.BufferEnd += (sender,ctx) => {
                try {
                    // Marshal cleanup onto the UI thread that owns the form.
                    on.BeginInvoke(new Action(()=>{
                        voice.Dispose();
                        buffer.Dispose();
                        stream.Dispose();
                    }));
                } catch ( InvalidOperationException ) {
                    // herp derp on must be disposed/gone
                }
            };
            voice.Start();
        }
        /// <summary>
        /// Builds one PPM (pulse-position modulation) RC-transmitter frame as
        /// 16-bit PCM data and starts playing it through a new XAudio2 voice,
        /// effectively using the sound card as a PPM signal generator.
        /// </summary>
        /// <param name="win">Window handle; not referenced by this implementation.</param>
        public void PlayPPM(IntPtr win)
        {
            Rate = 192000; //44100 on cheapo, 96000 on AC97, 192000 on HD Audio
                           // its the number of samples that exist for each second of audio
            channels = 2;  // 1 = mono, 2 = stereo

            PPMSamples = (int)(0.0225 * Rate * channels);   // 22 or 22.5ms in samples, rounded up
                                                            // no. of bytes per second = channels * rate * bytes in one sample
            microsec = Rate / 10000.0;                      // 192 = 1ms, 19.2 = 0.1ms or 1mis @ 192khz
            PPMchannels = new Dictionary<int, double>();
            frame = new List<short>();
            Amplitude = 32760;

            /*WaveFile wFile;
            wFile = new WaveFile(channels, 16, Rate);
            */

            //Set channels to neutral except throttle, throttle = zero.
            PPMchannels.Add(1, 10.0); //Throttle
            PPMchannels.Add(2, 50.0); //Ailerons
            PPMchannels.Add(3, 50.0); //Stab
            PPMchannels.Add(4, 50.0); //Rudder
            PPMchannels.Add(5, 50.0);
            PPMchannels.Add(6, 50.0);
            PPMchannels.Add(7, 50.0);
            PPMchannels.Add(8, 50.0);

            // Render the channel values into one PCM frame of PPM pulses.
            byte[] data = GenPPM();

            /*wFile.SetData(data, data.Length);
            wFile.WriteFile(@"C:\Users\kang\Desktop\test.wav");
            */
            // Copy the generated frame into a rewound memory stream for XAudio2.
            ms = new MemoryStream();
            ms.SetLength(0);
            ms.Write(data, 0, data.Length);
            ms.Position = 0;

            // Describe the raw data: 16-bit PCM at the rate chosen above.
            wf = new WaveFormat();
            wf.FormatTag = WaveFormatTag.Pcm;
            wf.BitsPerSample = (short)16;
            wf.Channels = channels;
            wf.SamplesPerSecond = Rate;
            wf.BlockAlignment = (short)(wf.Channels * wf.BitsPerSample / 8);
            wf.AverageBytesPerSecond = wf.SamplesPerSecond * wf.BlockAlignment;

            // Spin up the engine and voices, then start the frame playing.
            // NOTE(review): BufferStart presumably re-queues data to keep the
            // PPM signal continuous — confirm in srcVoice_BufferStart.
            device = new XAudio2();
            device.StartEngine();
            masteringVoice = new MasteringVoice(device);
            srcVoice = new SourceVoice(device, wf);
            buffer = new AudioBuffer();
            buffer.AudioData = ms;
            buffer.AudioBytes = (int)data.Length;
            buffer.Flags = SlimDX.XAudio2.BufferFlags.None;

            srcVoice.BufferStart += new EventHandler<ContextEventArgs>(srcVoice_BufferStart);
            srcVoice.FrequencyRatio = 1;
            srcVoice.SubmitSourceBuffer(buffer);
            srcVoice.Start();
        }
Esempio n. 41
0
 /// <summary>
 /// Discards any in-flight voice, writer and buffer, then builds a fresh
 /// source voice with an empty memory-backed buffer ready for writing.
 /// </summary>
 public void Reset()
 {
     // Tear down whatever playback state exists from a previous run.
     if (sVoice != null)
     {
         sVoice.Stop();
         sVoice.Dispose();
     }
     if (audioWriter != null)
     {
         audioWriter.Close();
     }
     if (audioBuffer != null)
     {
         audioBuffer.Dispose();
     }

     // Rebuild: new voice, a new empty buffer, and a writer over its stream.
     sVoice = new SourceVoice(device, audioFormat, VoiceFlags.None);
     audioBuffer = new AudioBuffer();
     audioBuffer.AudioData = new MemoryStream();
     audioWriter = new BinaryWriter(audioBuffer.AudioData);
     mVoice.Volume = volume;
     sVoice.Start();
 }
Esempio n. 42
0
            /// <summary>
            /// Streaming playback loop: slices the wave file into fixed-size
            /// chunks, submits each as an XAudio2 buffer, and throttles when
            /// the voice's queue is full. Blocks until playback completes.
            /// </summary>
            public void Run()
            {
                Stopwatch sw = new Stopwatch();
                var s = System.IO.File.OpenRead(fileName);
                //Console.WriteLine(String.Format("OpenRead: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                WaveStream stream = new WaveStream(s);
                //Console.WriteLine(String.Format("new WaveStream: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();

                // Derived sizes for translating byte offsets into sample offsets.
                int lengthInBytes = (int)stream.Length;
                int bytesPerSample = stream.Format.Channels * stream.Format.BitsPerSample / 8;
                int nSamples = lengthInBytes / bytesPerSample;
                int samplesPerBuffer = STREAMING_BUFFER_SIZE / bytesPerSample;

                int currentBytePosition = 0;
                int currentSamplePosition = 0;

                // NOTE(review): Start() is never called on the voice in this
                // method — presumably another thread starts it; confirm.
                sourceVoice = new SourceVoice(audioDevice, stream.Format);
                sourceVoice.BufferEnd += new EventHandler<ContextEventArgs>(sourceVoice_BufferEnd);
                // NOTE(review): plays at double speed — looks intentional for
                // this test harness; confirm before reuse.
                sourceVoice.FrequencyRatio = 2f;

                DateTime startTime = DateTime.Now;

                while (currentBytePosition < lengthInBytes)
                {
                    // Clamp the final chunk to the bytes remaining.
                    int readBytes = System.Math.Min(STREAMING_BUFFER_SIZE, lengthInBytes - currentBytePosition);
                    int readSamples = readBytes / bytesPerSample;

                    //if (readBytes < STREAMING_BUFFER_SIZE)
                        //Console.WriteLine(String.Format("Read bytes: {0}, Read samples: {1}, Read samples (float): {2}", readBytes, readSamples, (float)readBytes / bytesPerSample));

                    Console.WriteLine("---------------------------------- " + (DateTime.Now - startTime).TotalSeconds);
                    Console.WriteLine(String.Format("Read bytes: {0}\tBytes left: {1}\tPosition: {2}", readBytes, lengthInBytes - currentBytePosition, currentBytePosition));
                    Console.WriteLine(String.Format("Read samples: {0}\tSamples left: {1}\tPosition: {2}", readSamples, nSamples - currentSamplePosition, currentSamplePosition));

                    //Console.WriteLine(String.Format("To AudioBuffer creation: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                    // Each buffer shares the same stream; PlayBegin/PlayLength
                    // select this chunk's window (in samples).
                    var ab = new AudioBuffer
                    {
                        AudioData = stream,
                        AudioBytes = lengthInBytes,
                        PlayBegin = currentSamplePosition,
                        PlayLength = readSamples
                    };
                    //Console.WriteLine(String.Format("After AudioBuffer creation: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();

                    //Console.WriteLine("Buffers queued: " + sourceVoice.State.BuffersQueued);
                    // Back-pressure: wait for a BufferEnd before over-filling the queue.
                    if (sourceVoice.State.BuffersQueued >= MAX_BUFFER_COUNT - 1)
                        bufferPlaybackEndEvent.WaitOne();

                    VoiceDetails voiceDetails = sourceVoice.VoiceDetails;
                    long samplesPlayed = sourceVoice.State.SamplesPlayed;
                    Console.WriteLine("Time: " + samplesPlayed / (float)voiceDetails.InputSampleRate);

                    //Console.WriteLine(String.Format("Pre-submit: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                    sourceVoice.SubmitSourceBuffer(ab);
                    //Console.WriteLine(String.Format("Post-submit: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                    bufferReady.Set();

                    currentBytePosition += readBytes;
                    currentSamplePosition += readSamples;
                }

                // Drain: wait until every queued buffer has finished playing.
                while (sourceVoice.State.BuffersQueued > 0)
                    bufferPlaybackEndEvent.WaitOne();

                if (StreamEnd != null)
                    StreamEnd(this, null);
            }
Esempio n. 43
0
            /// <summary>
            /// Consumer loop skeleton: waits for the producer to signal a
            /// ready buffer, wraps it in a WaveStream, and advances to the
            /// next slot of the circular buffer array.
            /// </summary>
            /// <param name="sourceVoice">Voice the data is (eventually) meant to feed.</param>
            public void Playback(SourceVoice sourceVoice)
            {
                playing = true;
                int currentByteArrayIndex = 0;

                //WaveStream waveStream = new WaveStream(null, 1234)
                while (playing)
                {
                    bufferReady.WaitOne();
                    DataStream ds = dataBuffers[currentByteArrayIndex];
                    WaveStream stream = new WaveStream(ds);

                    // NOTE(review): 'bytesPerSample' is unused and the buffer
                    // submission below is commented out — this method looks
                    // unfinished; each iteration also leaks 'stream'.
                    int bytesPerSample = stream.Format.Channels * stream.Format.BitsPerSample / 8;

                    //AudioBuffer buffer = new AudioBuffer
                    //{
                    //    AudioData = stream,
                    //    AudioBytes = (int)stream.Length,
                    //    PlayBegin = 0,
                    //    PlayLength = readSamples
                    //};

                    currentByteArrayIndex = (currentByteArrayIndex + 1) % MAX_BUFFER_COUNT;
                }
            }
Esempio n. 44
0
        /// <summary>
        /// Starts playback of this sound on a new source voice, loading and
        /// caching the decoded wave data in the sound manager on first use.
        /// </summary>
        public void Play()
        {
            WaveStream stream;

            if (soundManager.SoundDictionary.ContainsKey(filename))
            {
                // Reuse the previously decoded stream from the library.
                stream = soundManager.SoundDictionary[filename];
            }
            else
            {
                // First play of this file: decode it and cache the stream.
                using (var file = System.IO.File.OpenRead(Path.Combine("Assets", filename)))
                {
                    stream = new WaveStream(file);
                }
                soundManager.SoundDictionary[filename] = stream;
            }

            WaveFormat format = stream.Format;

            buffer = new AudioBuffer
            {
                AudioData = stream,
                AudioBytes = (int)stream.Length,
                Flags = BufferFlags.EndOfStream
            };
            // Rewind in case this cached stream has been played before.
            buffer.AudioData.Position = 0;

            if (Looping)
            {
                buffer.LoopCount = XAudio2.LoopInfinite;
                buffer.LoopLength = 0;
            }

            currentlyPlaying = new SourceVoice(soundManager.device, format);
            currentlyPlaying.Volume = this.Volume;
            currentlyPlaying.BufferEnd += (s, e) => playing = false;
            currentlyPlaying.Start();
            currentlyPlaying.SubmitSourceBuffer(buffer);

            playing = true;
        }
Esempio n. 45
0
		/// <summary>
		/// Halts playback and releases the source voice and buffer pool.
		/// </summary>
		public void StopSound()
		{
			var voice = _sourceVoice;
			_sourceVoice = null;
			voice.Stop();
			voice.Dispose();

			_bufferPool.Dispose();
			_bufferPool = null;

			BufferSizeSamples = 0;
		}
Esempio n. 46
0
		/// <summary>
		/// Rebuilds the looping XAudio2 buffer and source voice from the
		/// current <c>AudioBuffer</c> contents and restarts playback.
		/// </summary>
		protected override void EndBufferChange()
		{
			if (AudioBuffer != null)
			{
				if (audioBuffer == null) audioBuffer = new XAudioBuffer();

				// Wrap the raw sample bytes and loop the whole buffer forever.
				audioBuffer.AudioData = new DataStream(AudioBuffer.RawBuffer, true, true);
				audioBuffer.AudioBytes = (int)audioBuffer.AudioData.Length;
				audioBuffer.LoopLength = AudioBuffer.RawBuffer.Length / 2; // bytes / 2 — presumably 16-bit samples; TODO confirm units
				audioBuffer.LoopCount = XAudio2.LoopInfinite;

				// Describe the PCM format. This replaces the original
				// single-line chained assignment, computing identical values
				// in a readable order.
				waveFormat.BitsPerSample = (short)BitsPerSample;
				waveFormat.Channels = 2;
				waveFormat.BlockAlignment = (short)(waveFormat.BitsPerSample / 8 * waveFormat.Channels);
				waveFormat.SamplesPerSecond = Frequency;
				waveFormat.AverageBytesPerSecond = waveFormat.BlockAlignment * waveFormat.SamplesPerSecond;

				// NOTE(review): any previous sourceVoice is replaced without
				// being disposed here — confirm the caller tears it down.
				sourceVoice = new SourceVoice(xAudio, waveFormat);
				sourceVoice.SubmitSourceBuffer(audioBuffer);
				sourceVoice.Start();
			}
		}
Esempio n. 47
0
 /// <summary>
 /// Creates a source voice for this sound's format, submits its buffer and
 /// starts playback, logging how long the whole operation took.
 /// </summary>
 public void Play()
 {
     // Stopwatch is the right tool for elapsed-time measurement;
     // DateTime.Now is wall-clock and can jump (DST, NTP adjustments).
     var timer = Stopwatch.StartNew();
     Console.WriteLine("Play() start");
     sourceVoice = new SourceVoice(Program.audioDevice, Format);
     Console.WriteLine("Create source voice");
     sourceVoice.BufferEnd += new EventHandler<ContextEventArgs>(sourceVoice_BufferEnd);
     sourceVoice.StreamEnd += new EventHandler(sourceVoice_StreamEnd);
     sourceVoice.SubmitSourceBuffer(Buffer);
     Console.WriteLine("Submitted source buffers");
     sourceVoice.Start();
     Console.WriteLine("Started source voice");
     // NOTE(review): 'channel' is created but never stored or used —
     // confirm Channel's constructor has no side effects before removing.
     var channel = new Channel { SourceVoice = sourceVoice };
     Console.WriteLine("Play() end (" + timer.Elapsed.TotalMilliseconds + " ms)");
 }
Esempio n. 48
0
        /// <summary>
        /// Dumps a source voice's volume, per-channel volumes and output
        /// matrix to the console, plus per-output-channel matrix sums.
        /// </summary>
        private static void PrintVoiceInfo(int inputChannels, int outputChannels, SourceVoice sourceVoice)
        {
            float[] volumes = sourceVoice.GetChannelVolumes(inputChannels);
            float[] matrix = sourceVoice.GetOutputMatrix(inputChannels, outputChannels);

            // volume, channelvolumes, outputmatrix
            Console.WriteLine("Volume:\t\t" + sourceVoice.Volume);
            Console.WriteLine("VolumeLevels:");
            PrintArray(volumes);
            Console.WriteLine("OutputMatrix:");
            PrintArray(matrix);

            // Sum each half of the matrix (first/second output channel).
            int half = matrix.Length / 2;
            float c1 = 0, c2 = 0;
            for (int i = 0; i < half; i++)
            {
                c1 += matrix[i];
                c2 += matrix[i + half];
            }
            Console.WriteLine("Sum OM: ({0}, {1})", c1, c2);
            Console.WriteLine("--------------------------------------------------------");
        }
        /// <summary>
        /// Stops and flushes the current voice, detaches any reverb submix
        /// routing, then either returns the voice to the effect's pool or —
        /// if the pool is already disposed — destroys it outright.
        /// </summary>
        private void ReleaseSourceVoice()
        {
            if (voice != null && !voice.IsDisposed)
            {
                voice.Stop(0);
                voice.FlushSourceBuffers();
                if (isReverbSubmixEnabled)
                {
                    // Clear the output chain so the recycled voice carries no
                    // stale sends to the reverb submix.
                    voice.SetOutputVoices((VoiceSendDescriptor[])null);
                    isReverbSubmixEnabled = false;
                }

                if (Effect.VoicePool.IsDisposed)
                {
                    // Pool is gone: the voice can never be reused, destroy it.
                    voice.DestroyVoice();
                    voice.Dispose();
                }
                else
                {
                    Effect.VoicePool.Return(voice);
                }
            }
            voice = null;
        }
        /// <summary>
        /// Resets the current instance to be reused in an instance pool.
        /// </summary>
        /// <param name="soundEffect">The new parent sound effect.</param>
        /// <param name="sourceVoice">The new source voice.</param>
        /// <param name="isFireAndForget">The new <see cref="IsFireAndForget"/> value.</param>
        internal void Reset(SoundEffect soundEffect, SourceVoice sourceVoice, bool isFireAndForget)
        {
            Effect = soundEffect;
            voice = sourceVoice;
            IsFireAndForget = isFireAndForget;

            // Only run the full reset once both dependencies are present.
            bool ready = soundEffect != null && sourceVoice != null;
            if (ready)
            {
                Reset();
            }
        }
Esempio n. 51
0
        /// <summary>
        /// Demo entry point: creates a D3D device/swap chain for a window,
        /// streams an xWMA tune through XAudio2, and drives a DotRocket sync
        /// session that animates the clear color from the music position.
        /// </summary>
        static void Main()
        {
            var form = new RenderForm("DotRocket/SlimDX example");

            var description = new SwapChainDescription()
            {
                BufferCount = 1,
                Usage = Usage.RenderTargetOutput,
                OutputHandle = form.Handle,
                IsWindowed = true,
                ModeDescription = new ModeDescription(0, 0, new Rational(60, 1), Format.R8G8B8A8_UNorm),
                SampleDescription = new SampleDescription(1, 0),
                Flags = SwapChainFlags.AllowModeSwitch,
                SwapEffect = SwapEffect.Discard
            };

            // Setup rendering
            Device device;
            SwapChain swapChain;
            Device.CreateWithSwapChain(DriverType.Hardware, DeviceCreationFlags.None, description, out device, out swapChain);
            RenderTargetView renderTarget;
            using (var resource = Resource.FromSwapChain<Texture2D>(swapChain, 0))
                renderTarget = new RenderTargetView(device, resource);
            var context = device.ImmediateContext;
            var viewport = new Viewport(0.0f, 0.0f, form.ClientSize.Width, form.ClientSize.Height);
            context.OutputMerger.SetTargets(renderTarget);
            context.Rasterizer.SetViewports(viewport);

            // Prevent alt+enter (broken on WinForms)
            using (var factory = swapChain.GetParent<Factory>())
                factory.SetWindowAssociation(form.Handle, WindowAssociationFlags.IgnoreAltEnter);

            // Setup audio-streaming: submit the whole xWMA file as one
            // end-of-stream buffer and start playback immediately.
            XAudio2 xaudio2 = new XAudio2();
            stream = new XWMAStream("tune.xwma");
            MasteringVoice masteringVoice = new MasteringVoice(xaudio2);
            sourceVoice = new SourceVoice(xaudio2, stream.Format);
            audioBuffer = new AudioBuffer();
            audioBuffer.AudioData = stream;
            audioBuffer.AudioBytes = (int)stream.Length;
            audioBuffer.Flags = BufferFlags.EndOfStream;
            sourceVoice.SubmitSourceBuffer(audioBuffer, stream.DecodedPacketsInfo);
            sourceVoice.Start();

            // Setup DotRocket: live editor connection in debug builds,
            // pre-baked sync data in release builds.
#if DEBUG
            DotRocket.Device rocket = new DotRocket.ClientDevice("sync");
            rocket.OnPause += Pause;
            rocket.OnSetRow += SetRow;
            rocket.OnIsPlaying += IsPlaying;
            rocket.Connect("localhost", 1338);
#else
            DotRocket.Device rocket = new DotRocket.PlayerDevice("sync");
#endif

            // Get our belowed tracks!
            DotRocket.Track clear_r = rocket.GetTrack("clear.r");
            DotRocket.Track clear_g = rocket.GetTrack("clear.g");
            DotRocket.Track clear_b = rocket.GetTrack("clear.b");

            MessagePump.Run(form, () =>
            {
                // Hammertime.
                // Derive the current sync row from how many samples have
                // actually been played, so visuals stay locked to the audio.
                double row = ((double)(sourceVoice.State.SamplesPlayed - samplesBias) / stream.Format.SamplesPerSecond) * rowRate;

                // Paint some stuff.
                rocket.Update((int)System.Math.Floor(row));
                context.ClearRenderTargetView(renderTarget, new Color4(
                    clear_r.GetValue(row),
                    clear_g.GetValue(row),
                    clear_b.GetValue(row)));
                swapChain.Present(0, PresentFlags.None);
            });

            // clean up all resources
            // anything we missed will show up in the debug output
            // NOTE(review): xaudio2, masteringVoice, sourceVoice, audioBuffer
            // and stream are not disposed here — they will surface in the
            // debug output as noted above.
            renderTarget.Dispose();
            swapChain.Dispose();
            device.Dispose();
        }
 /// <summary>
 /// Creates the streaming source voice, wires its buffer/stream-end
 /// handlers, and seeks playback state to the given sample offset.
 /// </summary>
 /// <param name="sampleOffset">Sample position at which playback should resume.</param>
 public void PreBuffer(int sampleOffset)
 {
     sourceVoice = new SourceVoice(audioDevice, waveStream.Format);
     sourceVoice.BufferEnd += sourceVoice_BufferEnd;
     sourceVoice.StreamEnd += sourceVoice_StreamEnd;
     Reset(sampleOffset);
 }
Esempio n. 53
0
        /// <summary>
        /// Plays a sound while printing the voice's volume/output-matrix
        /// state at several points, to observe how XAudio2 mutates it.
        /// </summary>
        /// <param name="sound">Sound providing the format and buffer to play.</param>
        private void TestOutputMatrixBehaviour(Sound sound)
        {
            int inChannels = sound.Format.Channels;
            int outChannels = audioDevice.GetDeviceDetails(0).OutputFormat.Channels;

            var voice = new SourceVoice(audioDevice, sound.Format);
            voice.SubmitSourceBuffer(sound.Buffer);

            Console.WriteLine("Pre: ");
            PrintVoiceInfo(inChannels, outChannels, voice);

            voice.Start();
            Console.WriteLine("Started: ");
            PrintVoiceInfo(inChannels, outChannels, voice);

            voice.Volume = 0.7f;
            Console.WriteLine("Volume set: ");
            PrintVoiceInfo(inChannels, outChannels, voice);

            // Give playback a moment, then sample the state once more.
            System.Threading.Thread.Sleep(300);
            PrintVoiceInfo(inChannels, outChannels, voice);
        }