Example #1
File: Program.cs Project: zhandb/slimdx
        static void PlayPCM(XAudio2 device, string fileName)
        {
            //WaveStream stream = new WaveStream(fileName);
            var s = System.IO.File.OpenRead(fileName);
            WaveStream stream = new WaveStream(s);
            s.Close();

            AudioBuffer buffer = new AudioBuffer();
            buffer.AudioData = stream;
            buffer.AudioBytes = (int)stream.Length;
            buffer.Flags = BufferFlags.EndOfStream;

            SourceVoice sourceVoice = new SourceVoice(device, stream.Format);
            sourceVoice.SubmitSourceBuffer(buffer);
            sourceVoice.Start();

            // loop until the sound is done playing
            while (sourceVoice.State.BuffersQueued > 0)
            {
                if (GetAsyncKeyState(VK_ESCAPE) != 0)
                    break;

                Thread.Sleep(10);
            }

            // wait until the escape key is released
            while (GetAsyncKeyState(VK_ESCAPE) != 0)
                Thread.Sleep(10);

            // cleanup the voice
            buffer.Dispose();
            sourceVoice.Dispose();
            stream.Dispose();
        }
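These PlayPCM examples poll GetAsyncKeyState(VK_ESCAPE) but omit the declarations behind it. A minimal sketch of what is presumably declared at the top of the file (standard user32 P/Invoke; not part of the original listing):

        // Assumed declarations for the escape-key polling used above
        // (requires: using System.Runtime.InteropServices;)
        [DllImport("user32.dll")]
        static extern short GetAsyncKeyState(int vKey);

        const int VK_ESCAPE = 0x1B;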
Example #2
 /// <summary>
 /// Adds the wave to the source voices and starts playing it.
 /// </summary>
 /// <param name="wave">The wave.</param>
 public void AddSound(Wave wave)
 {
     SourceVoice source = new SourceVoice(audio, wave.Data.Format);
     source.Start();
     source.SubmitSourceBuffer(wave.Buffer);
     sources.Add(source);
 }
Example #3
        private void InitializeAudio(AwcStream audio, float playBegin = 0)
        {
            currentAudio = audio;
            trackLength  = audio.Length;

            if (xAudio2 == null)
            {
                xAudio2        = new XAudio2();
                masteringVoice = new MasteringVoice(xAudio2);
            }

            Stream      wavStream   = audio.GetWavStream();
            SoundStream soundStream = new SoundStream(wavStream);

            audioBuffer = new AudioBuffer
            {
                Stream     = soundStream.ToDataStream(),
                AudioBytes = (int)soundStream.Length,
                Flags      = BufferFlags.EndOfStream
            };
            if (playBegin > 0)
            {
                audioBuffer.PlayBegin = (int)(soundStream.Format.SampleRate * playBegin) / 128 * 128;
                if (playtime.IsRunning)
                {
                    playtime.Restart();
                }
                else
                {
                    playtime.Reset();
                }
                playBeginMs = (int)(playBegin * 1000);
            }
            else
            {
                playBeginMs = 0;
            }
            soundStream.Close();
            wavStream.Close();

            trackFinished = false;
            sourceVoice   = new SourceVoice(xAudio2, soundStream.Format, true);
            sourceVoice.SubmitSourceBuffer(audioBuffer, soundStream.DecodedPacketsInfo);
            sourceVoice.BufferEnd += (context) => trackFinished = true;
            sourceVoice.SetVolume((float)VolumeTrackBar.Value / 100);
        }
Example #4
File: Program.cs Project: nernst/synth
        /// <summary>
        /// SharpDX XAudio2 sample. Plays a generated sound with some reverb.
        /// </summary>
        static void Main(string[] args)
        {
            var xaudio2        = new XAudio2();
            var masteringVoice = new MasteringVoice(xaudio2);

            var waveFormat  = new WaveFormat(44100, 32, 2);
            var sourceVoice = new SourceVoice(xaudio2, waveFormat);

            int bufferSize = waveFormat.ConvertLatencyToByteSize(60000);
            var dataStream = new DataStream(bufferSize, true, true);

            int numberOfSamples = bufferSize / waveFormat.BlockAlign;

            for (int i = 0; i < numberOfSamples; i++)
            {
                // cos(2 * PI * (220 + 4 * cos(2 * PI * 10 * t)) * t) * 0.5
                double vibrato = Math.Cos(2 * Math.PI * 10.0 * i / waveFormat.SampleRate);
                float  value   = (float)(Math.Cos(2 * Math.PI * (220.0 + 4.0 * vibrato) * i / waveFormat.SampleRate) * 0.5);
                dataStream.Write(value);
                dataStream.Write(value);
            }
            dataStream.Position = 0;

            var audioBuffer = new AudioBuffer {
                Stream = dataStream, Flags = BufferFlags.EndOfStream, AudioBytes = bufferSize
            };

            var reverb           = new Reverb(xaudio2);
            var effectDescriptor = new EffectDescriptor(reverb);

            sourceVoice.SetEffectChain(effectDescriptor);
            sourceVoice.EnableEffect(0);

            sourceVoice.SubmitSourceBuffer(audioBuffer, null);

            sourceVoice.Start();

            Console.WriteLine("Play sound");
            for (int i = 0; i < 60; i++)
            {
                Console.Write(".");
                Console.Out.Flush();
                Thread.Sleep(1000);
            }
        }
Example #5
        public SoundManager(System.IntPtr handle, short BitsPerSample, short Channels, int SamplesPerSecond)
        {
            System.AppDomain.CurrentDomain.AssemblyResolve += new System.ResolveEventHandler(CurrentDomain_AssemblyResolve);
            SlimDX.Multimedia.WaveFormat format = new SlimDX.Multimedia.WaveFormat();
            format.BitsPerSample         = BitsPerSample;
            format.Channels              = Channels;
            format.SamplesPerSecond      = SamplesPerSecond;
            format.BlockAlignment        = (short)(format.Channels * format.BitsPerSample / 8);
            format.AverageBytesPerSecond = format.SamplesPerSecond * format.BlockAlignment;
            //format.FormatTag = WaveFormatTag.Pcm;
            format.FormatTag = SlimDX.Multimedia.WaveFormatTag.Pcm;

            device = new XAudio2(XAudio2Flags.None, ProcessorSpecifier.AnyProcessor);

            device.StartEngine();

            masteringVoice = new MasteringVoice(device, Channels, SamplesPerSecond);

            sourceVoice = new SourceVoice(device, format, VoiceFlags.None);

            //FilterParameters fp = new FilterParameters();
            //fp.Frequency = 0.5f;//sourceVoice.FilterParameters.Frequency;
            //fp.OneOverQ = 0.5f;//sourceVoice.FilterParameters.OneOverQ;
            //fp.Type = FilterType.LowPassFilter;
            //sourceVoice.FilterParameters = fp;

            //sourceVoice.BufferEnd += new System.EventHandler<ContextEventArgs>(sourceVoice_BufferEnd);
            // sourceVoice.StreamEnd += new System.EventHandler(sourceVoice_StreamEnd);
            // sourceVoice.BufferStart += new System.EventHandler<ContextEventArgs>(sourceVoice_BufferStart);
            // sourceVoice.VoiceError += new EventHandler<ErrorEventArgs>(sourceVoice_VoiceError);

            sourceVoice.Volume = 0.5f;
            buffer             = new AudioBuffer();
            buffer.AudioData   = new System.IO.MemoryStream();

            waveFormat     = format;
            bytesPerSample = (waveFormat.BitsPerSample / 8) * Channels;
            for (int i = 0; i < BUFFER_COUNT; i++)
            {
                //sampleData[i] = new float[SAMPLE_SIZE * Channels];
                sampleData[i] = new short[SAMPLE_SIZE * Channels];
                bData[i]      = new byte[SAMPLE_SIZE * bytesPerSample];
            }
            sourceVoice.SubmitSourceBuffer(buffer);
        }
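The constructor above relies on BUFFER_COUNT, SAMPLE_SIZE, sampleData and bData, which are not shown in the listing. A sketch of plausible field declarations, assuming a double-buffered setup (sizes are hypothetical, not from the original project):

        // Hypothetical fields assumed by the SoundManager constructor
        const int BUFFER_COUNT = 2;                         // number of buffers cycled during playback
        const int SAMPLE_SIZE  = 4096;                      // samples per buffer, per channel
        short[][] sampleData   = new short[BUFFER_COUNT][];
        byte[][]  bData        = new byte[BUFFER_COUNT][];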
Example #6
        void PlayDevice()
        {
            AudioBuffer buffer;
            byte *      mixbuf  = _hidden.mixbuf;
            byte *      nextbuf = _hidden.nextbuf;
            int         mixlen  = _hidden.mixlen;

            if (!_enabled)// shutting down?
            {
                return;
            }

            // Submit the next filled buffer
            buffer                  = _hidden.audioBuffersRing[_hidden.nextBuffer];
            buffer.AudioBytes       = mixlen;
            buffer.AudioDataPointer = (IntPtr)nextbuf;
            //buffer.Context = _hidden.device;

            if (nextbuf == mixbuf)
            {
                nextbuf += mixlen;
            }
            else
            {
                nextbuf = mixbuf;
            }
            _hidden.nextbuf    = nextbuf;
            _hidden.nextBuffer = ++_hidden.nextBuffer % _hidden.audioBuffersRing.Length;

            try
            {
                _sourceVoice.SubmitSourceBuffer(buffer, null);
            }
            catch (SharpDXException e)
            {
                if (e.HResult == ResultCode.DeviceInvalidated.Code)
                {
                    // !!! FIXME: possibly disconnected or temporary lost. Recover?
                }

                // uhoh, panic!
                _sourceVoice.FlushSourceBuffers();
                OpenedAudioDeviceDisconnected();
            }
        }
Example #7
        public void PlayFX(System.IO.Stream resource)
        {
            var stream     = new SoundStream(resource);
            var waveFormat = stream.Format;
            var buffer     = new AudioBuffer
            {
                Stream     = stream.ToDataStream(),
                AudioBytes = (int)stream.Length,
                Flags      = BufferFlags.EndOfStream
            };

            stream.Close();

            var sourceVoice = new SourceVoice(xaudio2, waveFormat, true);

            sourceVoice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);
            sourceVoice.Start();
        }
Example #8
        public Sound(Instrument instrument, XAudio2 xAudio2)
        {
            using (var stream = new SoundStream(File.OpenRead(instrument.Path.LocalPath)))
            {
                WaveFormat waveFormat = stream.Format;
                var        buffer     = new AudioBuffer
                {
                    Stream     = stream.ToDataStream(),
                    AudioBytes = (int)stream.Length,
                    Flags      = BufferFlags.EndOfStream
                };

                var sourceVoice = new SourceVoice(xAudio2, waveFormat);
                sourceVoice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);

                Setup(buffer, sourceVoice, stream.DecodedPacketsInfo, instrument.Volume);
            }
        }
Example #9
 private void buttonPlayStop_Click(object sender, EventArgs e)
 {
     if (!isStarted)
     {
         // Play a buffer
         sourceVoice.SubmitSourceBuffer(audioBuffer, null);
         sourceVoice.Start();
         isStarted           = true;
         buttonPlayStop.Text = "Stop";
     }
     else
     {
         // Stop a buffer
         sourceVoice.Stop(PlayFlags.None, 0);
         isStarted           = false;
         buttonPlayStop.Text = "Play";
     }
 }
Example #10
File: Program.cs Project: uPD71054/BPLAY
        public fAudio()
        {
            device         = new XAudio2();
            masteringVoice = new MasteringVoice(device);

            waveFormat = new WaveFormat();
            {
                waveFormat.Channels              = CHANNELS;
                waveFormat.SamplesPerSecond      = SAMPLERATE;
                waveFormat.BitsPerSample         = BITDEPTH;
                waveFormat.BlockAlignment        = (short)(waveFormat.BitsPerSample / 8 * waveFormat.Channels);
                waveFormat.AverageBytesPerSecond = waveFormat.SamplesPerSecond * waveFormat.BlockAlignment;
                waveFormat.FormatTag             = WaveFormatTag.IeeeFloat;
            }

            sourceVoice            = new SourceVoice(device, waveFormat);
            sourceVoice.BufferEnd += sourceVoice_BufferEnd;

            audioBuffer = new AudioBuffer[NUMOFBUF];
            data        = new byte[NUMOFBUF][];
            for (int i = 0; i < NUMOFBUF; i++)
            {
                data[i] = new byte[NUMOFSAMPLE * waveFormat.BlockAlignment];
                byte[] buff;
                sq.Note = (95 - 12 * i);
                for (int j = 0; j < data[i].Length; j += waveFormat.BlockAlignment)
                {
                    buff           = sq.getByte();
                    data[i][j + 0] = buff[0];
                    data[i][j + 1] = buff[1];
                    data[i][j + 2] = buff[2];
                    data[i][j + 3] = buff[3];
                }
                audioBuffer[i]            = new AudioBuffer();
                audioBuffer[i].AudioData  = new MemoryStream(data[i], true);
                audioBuffer[i].Flags      = BufferFlags.EndOfStream;
                audioBuffer[i].AudioBytes = data[i].Length;
                audioBuffer[i].LoopCount  = 0;

                audioBuffer[i].AudioData.Position = 0;
                sourceVoice.SubmitSourceBuffer(audioBuffer[i]);
            }
            bufferCount = 0;
        }
Example #11
        public void PlaySound(string name)
        {
            if (!initialized)
            {
                return;
            }

            if (Sounds.TryGetValue(name, out var wave))
            {
                if (wave == null)
                {
                    wave = LoadSound(name);
                }
            }
            else
            {
                wave = LoadSound(name);
            }

            if (wave == null)
            {
                DebugWindow.LogError($"Sound file: {name}.wav not found.");
                return;
            }

            var sourceVoice = new SourceVoice(xAudio2, wave.WaveFormat, true);

            sourceVoice.SubmitSourceBuffer(wave.Buffer, wave.DecodedPacketsInfo);
            sourceVoice.Start();
            _list.Add(sourceVoice);

            // iterate backwards so RemoveAt does not skip the entry after a removed voice
            for (var i = _list.Count - 1; i >= 0; i--)
            {
                var sv = _list[i];

                if (sv.State.BuffersQueued <= 0)
                {
                    sv.Stop();
                    sv.DestroyVoice();
                    sv.Dispose();
                    _list.RemoveAt(i);
                }
            }
        }
Example #12
        private void PlaySound(int soundID,
                               Emitter emitter,
                               float volume,
                               LinkedSoundList list,
                               ref VoiceSendDescriptor voiceSendDescriptor,
                               Action <IntPtr>?onFxEnd = null)
        {
            if (!_soundBuffer.TryGetValue(soundID, out SoundBuffer buffer))
            {
                return;
            }

            SourceVoice sourceVoice = new SourceVoice(_xAudio2, buffer.Format, VoiceFlags.None, true);

            sourceVoice.SetVolume(volume);
            sourceVoice.SubmitSourceBuffer(buffer.AudioBuffer, buffer.DecodedPacketsInfo);
            sourceVoice.SetOutputVoices(voiceSendDescriptor);

            LinkedSoundList.Sound sound = new LinkedSoundList.Sound(emitter, sourceVoice);
            list.Add(sound);

            sourceVoice.BufferEnd += _ =>
            {
                list.Remove(sound);
                sourceVoice.DestroyVoice();
            };

            if (onFxEnd != null)
            {
                sourceVoice.BufferEnd += onFxEnd;
            }
            sourceVoice.Start();

            DspSettings settings = _x3DAudio.Calculate(
                _listener,
                sound.Emitter,
                CalculateFlags.Matrix | CalculateFlags.Doppler,
                buffer.Format.Channels,
                _inputChannelCount);

            sound.SourceVoice.SetOutputMatrix(buffer.Format.Channels, _inputChannelCount, settings.MatrixCoefficients);
            sound.SourceVoice.SetFrequencyRatio(settings.DopplerFactor);
        }
Example #13
        public void srcVoice_BufferStart(object sender, ContextEventArgs e)
        {
            byte[] data = GenPPM();

            ms.SetLength(0);
            ms.Write(data, 0, data.Length);
            ms.Position = 0;

            try
            {
                srcVoice.SubmitSourceBuffer(buffer);
                srcVoice.Start();
            }
            catch
            {
                buffer.Flags = SlimDX.XAudio2.BufferFlags.EndOfStream;
                srcVoice.Stop();
            }
        }
Example #14
        public void PlayImmediate(short[] data, int sampleRate, float volume)
        {
            StopImmediate();

            immediateDonePlaying = false;

            immediateAudioBuffer = new AudioBuffer();
            immediateAudioBuffer.AudioDataPointer = Utilities.AllocateMemory(data.Length * sizeof(short));
            immediateAudioBuffer.AudioBytes       = data.Length * sizeof(short);
            Marshal.Copy(data, 0, immediateAudioBuffer.AudioDataPointer, data.Length);

            var waveFormat = new WaveFormat(sampleRate, 16, 1);

            immediateVoice            = new SourceVoice(xaudio2, waveFormat);
            immediateVoice.BufferEnd += ImmediateVoice_BufferEnd;
            immediateVoice.SetVolume(volume);
            immediateVoice.SubmitSourceBuffer(immediateAudioBuffer, null);
            immediateVoice.Start();
        }
Example #15
        private static void QueueBuffer()
        {
            // fill remaining buffer with the current sample
            var buffer = SampleBuffers[CurrentBuffer];

            while (CurrentSample < SamplesPerSlice)
            {
                FilteredSample          = (FilteredSample * FilterValue + LastSample * (256 - FilterValue)) / 256;
                buffer[CurrentSample++] = (byte)FilteredSample;
            }

            // queue the buffer and wait for it to start playing
            BufferEvents[CurrentBuffer].Reset();
            SourceVoice.SubmitSourceBuffer(AudioBuffers[CurrentBuffer], null);
            BufferEvents[CurrentBuffer].WaitOne();
            CurrentBuffer = 1 - CurrentBuffer;
            CurrentSample = 0;
            LastCycle     = AtmelContext.Clock;
        }
Example #16
        protected override void EndBufferChange()
        {
            if (AudioBuffer != null)
            {
                if (xAudioBuffer == null)
                {
                    xAudioBuffer = new XAudioBuffer();
                }

                audioBufferHandle       = GCHandle.Alloc(AudioBuffer.RawBuffer, GCHandleType.Pinned);
                xAudioBuffer.Stream     = new DataStream(audioBufferHandle.AddrOfPinnedObject(), AudioBuffer.SizeInBytes, true, true);
                xAudioBuffer.AudioBytes = (int)xAudioBuffer.Stream.Length;
                xAudioBuffer.LoopLength = AudioBuffer.RawBuffer.Length / 2;
                xAudioBuffer.LoopCount  = XAudio2.MaximumLoopCount;
                sourceVoice             = new SourceVoice(xAudio, waveFormat);
                sourceVoice.SubmitSourceBuffer(xAudioBuffer, null);
                sourceVoice.Start();
            }
        }
Example #17
        public void play()
        {
            var         dataStream = DataStream.Create(BufferSamples, true, true);
            AudioBuffer buffer     = new AudioBuffer
            {
                /*LoopCount = AudioBuffer.LoopInfinite,*/
                Stream     = dataStream,
                AudioBytes = (int)dataStream.Length,
                Flags      = BufferFlags.EndOfStream
            };

            sourcevoice.SubmitSourceBuffer(buffer, null);
            sourcevoice.SetVolume(vol);
            sourcevoice.Start();
        }
Example #18
        private void TestOutputMatrixBehaviour(Sound sound)
        {
            int inputChannels  = sound.Format.Channels;
            int outputChannels = audioDevice.GetDeviceDetails(0).OutputFormat.Channels;

            SourceVoice sourceVoice = new SourceVoice(audioDevice, sound.Format);

            sourceVoice.SubmitSourceBuffer(sound.Buffer);
            Console.WriteLine("Pre: ");
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
            sourceVoice.Start();
            Console.WriteLine("Started: ");
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
            sourceVoice.Volume = 0.7f;
            Console.WriteLine("Volume set: ");
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
            System.Threading.Thread.Sleep(300);
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
        }
Example #19
File: AudioTest.cs Project: AmmRage/mmflex
        public static void PlayPcm(XAudio2 device, string fileName)
        {
            var        s      = System.IO.File.OpenRead(fileName); //open the wav file
            WaveStream stream = new WaveStream(s);                 //pass the stream to the library

            s.Close();                                             //close the file

            AudioBuffer buffer = new AudioBuffer();                //init the buffer

            buffer.AudioData  = stream;                            //set the input stream for the audio
            buffer.AudioBytes = (int)stream.Length;                //set the size of the buffer to the size of the stream
            buffer.Flags      = BufferFlags.EndOfStream;           //presumably set it to play until the end of the stream/file


            SourceVoice sourceVoice = new SourceVoice(device, stream.Format); //this looks like it might initialise the actual output

            sourceVoice.SubmitSourceBuffer(buffer);                           //pass the buffer to the output thingo
            sourceVoice.Start();                                              //start the playback?

            //above 2 sections are guessed; there is no documentation on the classes/properties.

            // loop until the sound is done playing
            while (sourceVoice.State.BuffersQueued > 0) // This keeps looping while there is sound in the buffer
            {                                           // (presumably). For this specific example it will stop
                if (GetAsyncKeyState(VK_ESCAPE) != 0)   // playing the sound if escape is pressed. That is what the
                {
                    break;                              // DLLImport and stuff at the top is for
                }
                Thread.Sleep(10);                       //
            }

            // wait until the escape key is released
            while (GetAsyncKeyState(VK_ESCAPE) != 0) //it just waits here until the person presses escape
            {
                Thread.Sleep(10);
            }

            // cleanup the voice
            buffer.Dispose();
            sourceVoice.Dispose();
            stream.Dispose();
        }
Example #20
            public void Play()
            {
                DateTime start = DateTime.Now;

                Console.WriteLine("Play() start");
                sourceVoice = new SourceVoice(Program.audioDevice, Format);
                Console.WriteLine("Create source voice");
                sourceVoice.BufferEnd += new EventHandler <ContextEventArgs>(sourceVoice_BufferEnd);
                sourceVoice.StreamEnd += new EventHandler(sourceVoice_StreamEnd);
                sourceVoice.SubmitSourceBuffer(Buffer);
                Console.WriteLine("Submitted source buffers");
                sourceVoice.Start();
                Console.WriteLine("Started source voice");
                var channel = new Channel {
                    SourceVoice = sourceVoice
                };
                DateTime end = DateTime.Now;

                Console.WriteLine("Play() end (" + (end - start).TotalMilliseconds + " ms)");
            }
Example #21
            private void loadSound(ISoundFactory factory)
            {
                using (var stream = factory.OpenWaveStream(emitter.Sound))
                {
                    buffer            = new AudioBuffer();
                    buffer.AudioData  = stream;
                    buffer.AudioBytes = (int)stream.Length;
                    buffer.Flags      = BufferFlags.EndOfStream;

                    if (emitter.Loop)
                    {
                        buffer.LoopCount = XAudio2.LoopInfinite;
                    }


                    sourceVoice = new SourceVoice(TW.Audio.XAudio2Device, stream.Format);
                    sourceVoice.SubmitSourceBuffer(buffer);
                    sourceVoice.Start();
                }
            }
Example #22
        private static async Task PlaySoundAsync(XAudio2 device, Stream baseStream)
        {
            baseStream.Position = 0;

            var stream = new SoundStream(baseStream);

            await using var dataStream = stream.ToDataStream();
            var buffer = new AudioBuffer(dataStream);

            using var voice = new SourceVoice(device, stream.Format, true);
            voice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);
            voice.Start();

            while (voice.State.BuffersQueued > 0)
            {
                await Task.Delay(TimeSpan.FromMilliseconds(1));
            }

            voice.DestroyVoice();
        }
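A hypothetical call site for the helper above, assuming SharpDX and a small WAV file read fully into memory (file name and Main signature are placeholders):

        // Hypothetical usage of PlaySoundAsync
        // (requires: using System.IO; using System.Threading.Tasks; using SharpDX.XAudio2;)
        static async Task Main()
        {
            using var device = new XAudio2();
            using var masteringVoice = new MasteringVoice(device);   // a mastering voice must exist for output
            using var wav = new MemoryStream(File.ReadAllBytes("sound.wav"));
            await PlaySoundAsync(device, wav);
        }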
Example #23
        static void PlayPCM(XAudio2 device, string fileName)
        {
            //WaveStream stream = new WaveStream(fileName);
            var        s      = System.IO.File.OpenRead(fileName);
            WaveStream stream = new WaveStream(s);

            s.Close();

            AudioBuffer buffer = new AudioBuffer();

            buffer.AudioData  = stream;
            buffer.AudioBytes = (int)stream.Length;
            buffer.Flags      = BufferFlags.EndOfStream;

            SourceVoice sourceVoice = new SourceVoice(device, stream.Format);

            sourceVoice.SubmitSourceBuffer(buffer);
            sourceVoice.Start();

            // loop until the sound is done playing
            while (sourceVoice.State.BuffersQueued > 0)
            {
                if (GetAsyncKeyState(VK_ESCAPE) != 0)
                {
                    break;
                }

                Thread.Sleep(10);
            }

            // wait until the escape key is released
            while (GetAsyncKeyState(VK_ESCAPE) != 0)
            {
                Thread.Sleep(10);
            }

            // cleanup the voice
            buffer.Dispose();
            sourceVoice.Dispose();
            stream.Dispose();
        }
Example #24
        public static void PlaySound(string soundfile)
        {
            SourceVoice sourceVoice;

            if (!LoadedSounds.ContainsKey(soundfile))
            {
                var buffer = GetBuffer(soundfile);
                sourceVoice = new SourceVoice(XAudio, buffer.WaveFormat, true);
                sourceVoice.SetVolume(Volume, SharpDX.XAudio2.XAudio2.CommitNow);
                sourceVoice.SubmitSourceBuffer(buffer, buffer.DecodedPacketsInfo);
                sourceVoice.Start();
            }
            else
            {
                sourceVoice = LoadedSounds[soundfile];
                if (sourceVoice != null)
                {
                    sourceVoice.Stop();
                }
            }
        }
Example #25
 public void MainLoop(int samples, bool reverse)
 {
     audioWriter.BaseStream.SetLength(0);
     if (reverse)
     {
         for (int i = samples - 1; i >= 0; i--)
         {
             audioWriter.Write(buffer[i]);
         }
     }
     else
     {
         for (int i = 0; i < samples; i++)
         {
             audioWriter.Write(buffer[i]);
         }
     }
     audioWriter.BaseStream.Position = 0;
     audioBuffer.AudioBytes          = samples * (audioFormat.BitsPerSample / 8);
     sVoice.SubmitSourceBuffer(audioBuffer);
 }
Example #26
        public static void playSoundFile(string filename)
        {
            var ss         = new SoundStream(File.OpenRead(filename));
            var waveFormat = ss.Format;
            var ab         = new AudioBuffer
            {
                Stream     = ss.ToDataStream(),
                AudioBytes = (int)ss.Length,
                Flags      = BufferFlags.EndOfStream
            };

            ss.Close();

            var sv = new SourceVoice(xa2, waveFormat, true);

            //sv.BufferEnd += (context) => Console.WriteLine(" => event received: end of buffer");
            //sv.StreamEnd += () => finishPlaying(sv, ab);
            sv.SubmitSourceBuffer(ab, ss.DecodedPacketsInfo);
            sv.Start();
            sources.Add(sv, ab);
        }
Example #27
        /// <summary>
        /// Play a sound file. Supported formats are Wav (PCM + ADPCM) and XWMA
        /// </summary>
        /// <param name="device">The device.</param>
        /// <param name="text">Text to display</param>
        /// <param name="fileName">Name of the file.</param>
        static void PLaySoundFile(XAudio2 device, string text, string fileName)
        {
            Console.WriteLine("{0} => {1} (Press esc to skip)", text, fileName);
            var stream     = new SoundStream(File.OpenRead(fileName));
            var waveFormat = stream.Format;
            var buffer     = new AudioBuffer
            {
                Stream     = stream.ToDataStream(),
                AudioBytes = (int)stream.Length,
                Flags      = BufferFlags.EndOfStream
            };

            stream.Close();

            var sourceVoice = new SourceVoice(device, waveFormat, true);

            // Adds a sample callback to check that they are working on source voices
            sourceVoice.BufferEnd += (context) => Console.WriteLine(" => event received: end of buffer");
            sourceVoice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);
            sourceVoice.Start();

            int count = 0;

            while (sourceVoice.State.BuffersQueued > 0 && !IsKeyPressed(ConsoleKey.Escape))
            {
                if (count == 50)
                {
                    Console.Write(".");
                    Console.Out.Flush();
                    count = 0;
                }
                Thread.Sleep(10);
                count++;
            }
            Console.WriteLine();

            sourceVoice.DestroyVoice();
            sourceVoice.Dispose();
            buffer.Stream.Dispose();
        }
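Given the signature above, a hypothetical call could look like this (the device is created elsewhere; the text and file path are placeholders):

            // Hypothetical usage of PLaySoundFile (file name is a placeholder)
            PLaySoundFile(device, "Playing a wav file", "sound.wav");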
Example #28
        public void QueueDataBlock(short[] buffer, int length, int sampleRate)
        {
            // Initialize source if it's null
            if (source == null)
            {
                var fmt = new WaveFormat
                {
                    SamplesPerSecond      = sampleRate,
                    BitsPerSample         = BITS_PER_SAMPLE,
                    AverageBytesPerSecond = sampleRate * BYTES_PER_SAMPLE,
                    Channels       = NUM_CHANNELS,
                    BlockAlignment = BLOCK_ALIGN,
                    FormatTag      = WaveFormatTag.Pcm
                };
                source = new SourceVoice(device, fmt);
            }

            // Copy the samples to a stream
            using (var ms = new MemoryStream(length * BYTES_PER_SAMPLE))
            {
                using (var writer = new BinaryWriter(ms, Encoding.Default, true))
                {
                    for (int i = 0; i < length; i++)
                    {
                        writer.Write(buffer[i]);
                    }
                    writer.Flush();
                }
                ms.Position = 0;

                // Queue the buffer
                source.SubmitSourceBuffer(new AudioBuffer {
                    AudioData = ms, AudioBytes = length * BYTES_PER_SAMPLE
                });
            }

            // Make sure it's playing
            source.Start();
        }
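The constants referenced above are not included in the listing; for 16-bit mono PCM they would plausibly be defined like this (hypothetical values, not taken from the original source):

        // Hypothetical constants assumed by QueueDataBlock (16-bit mono PCM)
        const short BITS_PER_SAMPLE  = 16;
        const int   BYTES_PER_SAMPLE = 2;                               // BITS_PER_SAMPLE / 8
        const short NUM_CHANNELS     = 1;
        const short BLOCK_ALIGN      = NUM_CHANNELS * BYTES_PER_SAMPLE; // bytes per sample frame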
Example #29
        public static void PlayXAudioSound(object soundFile)
        {
            try
            {
                xaudio2        = new XAudio2();
                masteringVoice = new MasteringVoice(xaudio2);

                var stream     = new SoundStream(File.OpenRead(soundFile as string));
                var waveFormat = stream.Format;
                buffer = new AudioBuffer
                {
                    Stream     = stream.ToDataStream(),
                    AudioBytes = (int)stream.Length,
                    Flags      = BufferFlags.EndOfStream
                };
                stream.Close();

                sourceVoice = new SourceVoice(xaudio2, waveFormat, true);
                sourceVoice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);
                sourceVoice.Start();

                while (sourceVoice.State.BuffersQueued > 0)
                {
                    Thread.Sleep(1);
                }

                sourceVoice.DestroyVoice();
                sourceVoice.Dispose();
                sourceVoice = null;
                buffer.Stream.Dispose();

                xaudio2.Dispose();
                masteringVoice.Dispose();
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
Example #30
 /// <summary>
 /// Plays the audio.
 /// </summary>
 /// <param name="stop">If true, will stop the sound and return its position to 0 before playing it. Passing false will have the effect of resuming the sound from the last position it was stopped at.</param>
 /// <param name="loop">Whether or not to loop the sound.</param>
 public void play(bool stop, bool loop)
 {
     this.looping           = loop;
     isInitializingPlayback = true;
     if (loop)
     {
         buffer.LoopCount = AudioBuffer.LoopInfinite;
     }
     // We'll start the buffer from the beginning if we've never played this buffer before so that the sound can be loaded.
     // Otherwise, the sound might start from a random position in the buffer.
     if (stop || hasNeverPlayed)
     {
         hasNeverPlayed = false;
         voice.Stop();
         voice.FlushSourceBuffers();
         buffer.Stream.Position = 0;
         voice.SubmitSourceBuffer(buffer, null);
     }
     voice.Start();
     isStopped = false;
     isInitializingPlayback = false;
 }
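Based on the documented parameters, a hypothetical call that restarts the sound from the beginning and loops it (the instance name is a placeholder):

     // Hypothetical usage; 'mySound' is a placeholder instance of this class
     mySound.play(stop: true, loop: true);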
Example #31
        //Enqueue the currently linked datasource buffer for playing
        public void PushDataSourceForPlaying()
        {
            //Do nothing, start will be deferred !
            if (MaxDefferedStart > 0)
            {
                //Not playing the sound with Buffered start => Assign new rnd start
                if (_voice.State.BuffersQueued == 0 && _deferredPaused == false)
                {
                    AssignNewRndStart();
                    _deferredPaused = true;
                    logger.Trace("AssignNewRndStart {0} {1}", this.Id, this.PlayingDataSource.Alias);
                }

                if (_defferedStart > _deferredStartTimer.ElapsedMilliseconds)
                {
                    return;
                }
                _deferredPaused = false;
            }

            if (_playingDataSource is SoundStreamedDataSource)
            {
                ((SoundStreamedDataSource)_playingDataSource).StartVoiceDataFetching(this);
            }
            else
            {
                _voice.SubmitSourceBuffer(_playingDataSource.AudioBuffer, null);
            }

            if (is3DSound)
            {
                RefreshVoices();
            }
            else
            {
                //Reset Default Channel Mapping
                _voice.SetOutputMatrix(_playingDataSource.WaveFormat.Channels, _soundEngine.DeviceDetail.OutputFormat.Channels, _defaultChannelMapping);
            }
        }
Example #32
        void SoundPlayerThread()
        {
            Globals.bPlaySignal = true;

            WaveMemStream = new MemoryStream(WaveDaten);

            WaveBuffer           = new AudioBuffer();
            WaveBuffer.Flags     = BufferFlags.EndOfStream;
            WaveBuffer.AudioData = WaveMemStream;
            // WaveBuffer.AudioBytes = clGlobals.BytesProSekunde;
            WaveBuffer.AudioBytes = (int)WaveMemStream.Length;
            WaveBuffer.LoopCount  = XAudio2.LoopInfinite;

            WaveSourceVoice = new SourceVoice(AudioDevice, SignalFormat);
            WaveSourceVoice.SubmitSourceBuffer(WaveBuffer);

            WaveSourceVoice.Start();

            while (Globals.bPlaySignal)
            {
                Thread.Sleep(10);
            }

            WaveSourceVoice.Stop();
            Thread.Sleep(10);
            WaveMemStream.Close();
            WaveMemStream.Dispose();
            WaveMemStream = null;
            WaveBuffer.Dispose();
            WaveBuffer = null;
            WaveSourceVoice.Dispose();
            WaveSourceVoice = null;
            // Thread.Sleep(100);
            soundThreadStart = null;
            // this.ClearWaveContainer();
            // this.InitWaveContainer();
            this.m_soundThread.Abort();
        }
Example #33
        public void Play( Form on )
        {
            var screens = Screen.AllScreens;
            var screens_left  = screens.Min( screen => screen.Bounds.Left  );
            var screens_right = screens.Max( screen => screen.Bounds.Right );
            var screens_width = screens_right-screens_left;

            var bestScreen = screens.OrderByDescending( screen => {
                var area = screen.Bounds;
                area.Intersect( on.Bounds );
                return area.Width*area.Height;
            }).First();

            var balances = new[]{1.5f,1.5f};
            if ( screens.Length==3 && DisplayBalances.ContainsKey(bestScreen.DeviceName) ) balances = DisplayBalances[bestScreen.DeviceName];

            var path   = Registry.CurrentUser.OpenSubKey(@"AppEvents\Schemes\Apps\.Default\"+Name+@"\.Current").GetValue(null) as string;
            var stream = new WaveStream(path);
            var buffer = new AudioBuffer() { AudioBytes=(int)stream.Length, AudioData=stream, Flags=BufferFlags.EndOfStream };

            var voice = new SourceVoice( XAudio2, stream.Format );
            voice.SubmitSourceBuffer( buffer );
            voice.SetChannelVolumes( balances.Length, balances );
            voice.BufferEnd += (sender,ctx) => {
                try {
                    on.BeginInvoke(new Action(()=>{
                        voice.Dispose();
                        buffer.Dispose();
                        stream.Dispose();
                    }));
                } catch ( InvalidOperationException ) {
                    // 'on' must already be disposed/gone
                }
            };
            voice.Start();
        }
Example #34
        public void PlayPPM(IntPtr win)
        {
            Rate = 192000; //44100 on cheapo, 96000 on AC97, 192000 on HD Audio
                           // it's the number of samples that exist for each second of audio
            channels = 2;  // 1 = mono, 2 = stereo

            PPMSamples = (int)(0.0225 * Rate * channels);   // 22 or 22.5ms in samples, rounded up
                                                            // no. of bytes per second = channels * rate * bytes in one sample
            microsec = Rate / 10000.0;                      // 192 samples = 1 ms, 19.2 samples = 0.1 ms (100 µs) @ 192 kHz
            PPMchannels = new Dictionary<int, double>();
            frame = new List<short>();
            Amplitude = 32760;

            /*WaveFile wFile;
            wFile = new WaveFile(channels, 16, Rate);
            */

            //Set channels to neutral except throttle, throttle = zero.
            PPMchannels.Add(1, 10.0); //Throttle
            PPMchannels.Add(2, 50.0); //Ailerons
            PPMchannels.Add(3, 50.0); //Stab
            PPMchannels.Add(4, 50.0); //Rudder
            PPMchannels.Add(5, 50.0);
            PPMchannels.Add(6, 50.0);
            PPMchannels.Add(7, 50.0);
            PPMchannels.Add(8, 50.0);

            byte[] data = GenPPM();

            /*wFile.SetData(data, data.Length);
            wFile.WriteFile(@"C:\Users\kang\Desktop\test.wav");
            */
            ms = new MemoryStream();
            ms.SetLength(0);
            ms.Write(data, 0, data.Length);
            ms.Position = 0;

            wf = new WaveFormat();
            wf.FormatTag = WaveFormatTag.Pcm;
            wf.BitsPerSample = (short)16;
            wf.Channels = channels;
            wf.SamplesPerSecond = Rate;
            wf.BlockAlignment = (short)(wf.Channels * wf.BitsPerSample / 8);
            wf.AverageBytesPerSecond = wf.SamplesPerSecond * wf.BlockAlignment;

            device = new XAudio2();
            device.StartEngine();
            masteringVoice = new MasteringVoice(device);
            srcVoice = new SourceVoice(device, wf);
            buffer = new AudioBuffer();
            buffer.AudioData = ms;
            buffer.AudioBytes = (int)data.Length;
            buffer.Flags = SlimDX.XAudio2.BufferFlags.None;

            srcVoice.BufferStart += new EventHandler<ContextEventArgs>(srcVoice_BufferStart);
            srcVoice.FrequencyRatio = 1;
            srcVoice.SubmitSourceBuffer(buffer);
            srcVoice.Start();
        }
Example #35
        public void Play()
        {
            WaveStream stream;

            if (!soundManager.SoundDictionary.ContainsKey(filename))
            {
                // Add our sound to the sound library
                var s = System.IO.File.OpenRead(Path.Combine("Assets", filename));
                stream = new WaveStream(s);
                s.Close();
                soundManager.SoundDictionary[filename] = stream;
            }
            else
            {
                stream = soundManager.SoundDictionary[filename];
            }

            WaveFormat format = stream.Format;

            buffer = new AudioBuffer();
            buffer.AudioData = stream;
            buffer.AudioBytes = (int)stream.Length;
            buffer.Flags = BufferFlags.EndOfStream;
            buffer.AudioData.Position = 0;

            if (Looping == true)
            {
                buffer.LoopCount = XAudio2.LoopInfinite;
                buffer.LoopLength = 0;
            }

            currentlyPlaying = new SourceVoice(soundManager.device, format);
            currentlyPlaying.Volume = this.Volume;
            currentlyPlaying.BufferEnd += (s, e) => playing = false;
            currentlyPlaying.Start();
            currentlyPlaying.SubmitSourceBuffer(buffer);

            playing = true;
        }
Example #36
        private void TestOutputMatrixBehaviour(Sound sound)
        {
            int inputChannels = sound.Format.Channels;
            int outputChannels = audioDevice.GetDeviceDetails(0).OutputFormat.Channels;

            SourceVoice sourceVoice = new SourceVoice(audioDevice, sound.Format);
            sourceVoice.SubmitSourceBuffer(sound.Buffer);
            Console.WriteLine("Pre: ");
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
            sourceVoice.Start();
            Console.WriteLine("Started: ");
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
            sourceVoice.Volume = 0.7f;
            Console.WriteLine("Volume set: ");
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
            System.Threading.Thread.Sleep(300);
            PrintVoiceInfo(inputChannels, outputChannels, sourceVoice);
        }
Example #37
File: Program.cs Project: fcondolo/rocket
        static void Main()
        {
            var form = new RenderForm("DotRocket/SlimDX example");

            var description = new SwapChainDescription()
            {
                BufferCount = 1,
                Usage = Usage.RenderTargetOutput,
                OutputHandle = form.Handle,
                IsWindowed = true,
                ModeDescription = new ModeDescription(0, 0, new Rational(60, 1), Format.R8G8B8A8_UNorm),
                SampleDescription = new SampleDescription(1, 0),
                Flags = SwapChainFlags.AllowModeSwitch,
                SwapEffect = SwapEffect.Discard
            };

            // Setup rendering
            Device device;
            SwapChain swapChain;
            Device.CreateWithSwapChain(DriverType.Hardware, DeviceCreationFlags.None, description, out device, out swapChain);
            RenderTargetView renderTarget;
            using (var resource = Resource.FromSwapChain<Texture2D>(swapChain, 0))
                renderTarget = new RenderTargetView(device, resource);
            var context = device.ImmediateContext;
            var viewport = new Viewport(0.0f, 0.0f, form.ClientSize.Width, form.ClientSize.Height);
            context.OutputMerger.SetTargets(renderTarget);
            context.Rasterizer.SetViewports(viewport);

            // Prevent alt+enter (broken on WinForms)
            using (var factory = swapChain.GetParent<Factory>())
                factory.SetWindowAssociation(form.Handle, WindowAssociationFlags.IgnoreAltEnter);

            // Setup audio-streaming
            XAudio2 xaudio2 = new XAudio2();
            stream = new XWMAStream("tune.xwma");
            MasteringVoice masteringVoice = new MasteringVoice(xaudio2);
            sourceVoice = new SourceVoice(xaudio2, stream.Format);
            audioBuffer = new AudioBuffer();
            audioBuffer.AudioData = stream;
            audioBuffer.AudioBytes = (int)stream.Length;
            audioBuffer.Flags = BufferFlags.EndOfStream;
            sourceVoice.SubmitSourceBuffer(audioBuffer, stream.DecodedPacketsInfo);
            sourceVoice.Start();

            // Setup DotRocket
#if DEBUG
            DotRocket.Device rocket = new DotRocket.ClientDevice("sync");
            rocket.OnPause += Pause;
            rocket.OnSetRow += SetRow;
            rocket.OnIsPlaying += IsPlaying;
            rocket.Connect("localhost", 1338);
#else
            DotRocket.Device rocket = new DotRocket.PlayerDevice("sync");
#endif

            // Get our beloved tracks!
            DotRocket.Track clear_r = rocket.GetTrack("clear.r");
            DotRocket.Track clear_g = rocket.GetTrack("clear.g");
            DotRocket.Track clear_b = rocket.GetTrack("clear.b");

            MessagePump.Run(form, () =>
            {
                // Hammertime.
                double row = ((double)(sourceVoice.State.SamplesPlayed - samplesBias) / stream.Format.SamplesPerSecond) * rowRate;

                // Paint some stuff.
                rocket.Update((int)System.Math.Floor(row));
                context.ClearRenderTargetView(renderTarget, new Color4(
                    clear_r.GetValue(row),
                    clear_g.GetValue(row),
                    clear_b.GetValue(row)));
                swapChain.Present(0, PresentFlags.None);
            });

            // clean up all resources
            // anything we missed will show up in the debug output
            renderTarget.Dispose();
            swapChain.Dispose();
            device.Dispose();
        }
Example #38
 public void Play()
 {
     DateTime start = DateTime.Now;
     Console.WriteLine("Play() start");
     sourceVoice = new SourceVoice(Program.audioDevice, Format);
     Console.WriteLine("Create source voice");
     sourceVoice.BufferEnd += new EventHandler<ContextEventArgs>(sourceVoice_BufferEnd);
     sourceVoice.StreamEnd += new EventHandler(sourceVoice_StreamEnd);
     sourceVoice.SubmitSourceBuffer(Buffer);
     Console.WriteLine("Submitted source buffers");
     sourceVoice.Start();
     Console.WriteLine("Started source voice");
     var channel = new Channel { SourceVoice = sourceVoice };
     DateTime end = DateTime.Now;
     Console.WriteLine("Play() end (" + (end - start).TotalMilliseconds + " ms)");
 }
Example #39
            public void Run()
            {
                Stopwatch sw = new Stopwatch();
                var s = System.IO.File.OpenRead(fileName);
                //Console.WriteLine(String.Format("OpenRead: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                WaveStream stream = new WaveStream(s);
                //Console.WriteLine(String.Format("new WaveStream: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();

                int lengthInBytes = (int)stream.Length;
                int bytesPerSample = stream.Format.Channels * stream.Format.BitsPerSample / 8;
                int nSamples = lengthInBytes / bytesPerSample;
                int samplesPerBuffer = STREAMING_BUFFER_SIZE / bytesPerSample;

                int currentBytePosition = 0;
                int currentSamplePosition = 0;

                sourceVoice = new SourceVoice(audioDevice, stream.Format);
                sourceVoice.BufferEnd += new EventHandler<ContextEventArgs>(sourceVoice_BufferEnd);
                sourceVoice.FrequencyRatio = 2f;

                DateTime startTime = DateTime.Now;

                while (currentBytePosition < lengthInBytes)
                {
                    int readBytes = System.Math.Min(STREAMING_BUFFER_SIZE, lengthInBytes - currentBytePosition);
                    int readSamples = readBytes / bytesPerSample;

                    //if (readBytes < STREAMING_BUFFER_SIZE)
                        //Console.WriteLine(String.Format("Read bytes: {0}, Read samples: {1}, Read samples (float): {2}", readBytes, readSamples, (float)readBytes / bytesPerSample));

                    Console.WriteLine("---------------------------------- " + (DateTime.Now - startTime).TotalSeconds);
                    Console.WriteLine(String.Format("Read bytes: {0}\tBytes left: {1}\tPosition: {2}", readBytes, lengthInBytes - currentBytePosition, currentBytePosition));
                    Console.WriteLine(String.Format("Read samples: {0}\tSamples left: {1}\tPosition: {2}", readSamples, nSamples - currentSamplePosition, currentSamplePosition));

                    //Console.WriteLine(String.Format("To AudioBuffer creation: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                    var ab = new AudioBuffer
                    {
                        AudioData = stream,
                        AudioBytes = lengthInBytes,
                        PlayBegin = currentSamplePosition,
                        PlayLength = readSamples
                    };
                    //Console.WriteLine(String.Format("After AudioBuffer creation: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();

                    //Console.WriteLine("Buffers queued: " + sourceVoice.State.BuffersQueued);
                    if (sourceVoice.State.BuffersQueued >= MAX_BUFFER_COUNT - 1)
                        bufferPlaybackEndEvent.WaitOne();

                    VoiceDetails voiceDetails = sourceVoice.VoiceDetails;
                    long samplesPlayed = sourceVoice.State.SamplesPlayed;
                    Console.WriteLine("Time: " + samplesPlayed / (float)voiceDetails.InputSampleRate);

                    //Console.WriteLine(String.Format("Pre-submit: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                    sourceVoice.SubmitSourceBuffer(ab);
                    //Console.WriteLine(String.Format("Post-submit: {0} ms", sw.ElapsedMilliseconds)); sw.Reset(); sw.Start();
                    bufferReady.Set();

                    currentBytePosition += readBytes;
                    currentSamplePosition += readSamples;
                }

                while (sourceVoice.State.BuffersQueued > 0)
                    bufferPlaybackEndEvent.WaitOne();

                if (StreamEnd != null)
                    StreamEnd(this, null);
            }
Example #40
		protected override void EndBufferChange()
		{
			if (AudioBuffer != null)
			{
				if (audioBuffer == null) audioBuffer = new XAudioBuffer();

				audioBuffer.AudioData = new DataStream(AudioBuffer.RawBuffer, true, true);
				audioBuffer.AudioBytes = (int)audioBuffer.AudioData.Length;
				audioBuffer.LoopLength = AudioBuffer.RawBuffer.Length / 2;
				audioBuffer.LoopCount = XAudio2.LoopInfinite;
				waveFormat.BitsPerSample = (short)BitsPerSample;
				waveFormat.Channels = 2;
				waveFormat.SamplesPerSecond = Frequency;
				waveFormat.BlockAlignment = (short)(waveFormat.BitsPerSample / 8 * waveFormat.Channels);
				waveFormat.AverageBytesPerSecond = waveFormat.BlockAlignment * waveFormat.SamplesPerSecond;
				sourceVoice = new SourceVoice(xAudio, waveFormat);
				sourceVoice.SubmitSourceBuffer(audioBuffer);
				sourceVoice.Start();
			}
		}