Example #1
 public void render(String peerId, AudioBuffer echo)
 {
     if (IsSupported)
     {
         AudioMixer.AddSourceFrame(peerId, new AudioBuffer(echo.Data, echo.Index, echo.Length));
     }
 }
 public AudioBuffer ReadToBuffer(AudioBuffer buffer)
 {
     byte[] data = new byte[Length - Position];
     int read = Read(data, 0, data.Length);
     buffer.Data(Format, data, 0, read, Frequency);
     return buffer;
 }
        /// <summary>
        /// Encodes a frame.
        /// </summary>
        /// <param name="frame">The frame.</param>
        /// <returns>The encoded frame data.</returns>
        public override byte[] Encode(AudioBuffer frame)
        {
            if (_Encoder == null)
            {
                _Encoder = new CocoaOpusEncoder(ClockRate, Channels, PacketTime);
                _Encoder.Quality = 1.0;
                _Encoder.Bitrate = 125;
            }

            using (var pool = new NSAutoreleasePool())
            {
                GCHandle dataHandle = GCHandle.Alloc(frame.Data, GCHandleType.Pinned);
                try
                {
                    IntPtr dataPointer = dataHandle.AddrOfPinnedObject();

                    using (var buffer = new CocoaOpusBuffer {
                        Data = NSData.FromBytesNoCopy(dataPointer, (uint)frame.Data.Length, false),
                        Index = frame.Index,
                        Length = frame.Length
                    })
                    {
                        using (var encodedFrameData = _Encoder.EncodeBuffer(buffer))
                        {
                            return encodedFrameData.ToArray();
                        }
                    }
                }
                finally
                {
                    dataHandle.Free();
                }
            }
        }
        public AudioStreamer(AudioSource source, AudioStream stream, int bufferSampleCount)
        {
            if (source == null)
                throw new ArgumentNullException("source");
            if (stream == null)
                throw new ArgumentNullException("stream");
            this.source = source;
            this.stream = stream;
            this.bufferSampleCount = bufferSampleCount;
            this.bufferSamples = new byte[stream.Format.SampleByteSize() * bufferSampleCount];

            buffers = new AudioBuffer[4];
            for (int index = 0; index < buffers.Length; index++)
            {
                buffers[index] = new AudioBuffer(Context);
                FillBuffer(buffers[index]);
            }

            source.Queue(buffers);

            lock (Context.streamers)
            {
                id = nextId++;
                Context.streamers[id] = this;
            }
        }
Example #5
        /// <summary>
        /// Initializes a new instance of the <see cref="SoundEffect"/> class.
        /// </summary>
        /// <param name="audioManager">The associated audio manager instance.</param>
        /// <param name="name">The name of the current instance.</param>
        /// <param name="waveFormat">The format of the current instance.</param>
        /// <param name="buffer">The buffer containing audio data.</param>
        /// <param name="decodedPacketsInfo">The information regaring decoded packets.</param>
        internal SoundEffect(AudioManager audioManager, string name, WaveFormat waveFormat, DataStream buffer, uint[] decodedPacketsInfo)
        {
            AudioManager = audioManager;
            Name = name;
            Format = waveFormat;
            AudioBuffer = new AudioBuffer
            {
                Stream = buffer,
                AudioBytes = (int)buffer.Length,
                Flags = BufferFlags.EndOfStream,
            };
            LoopedAudioBuffer = new AudioBuffer
            {
                Stream = buffer,
                AudioBytes = (int)buffer.Length,
                Flags = BufferFlags.EndOfStream,
                LoopCount = AudioBuffer.LoopInfinite,
            };

            DecodedPacketsInfo = decodedPacketsInfo;

            Duration = Format.SampleRate > 0 ? TimeSpan.FromMilliseconds(GetSamplesDuration() * 1000 / Format.SampleRate) : TimeSpan.Zero;

            children = new List<WeakReference>();
            VoicePool = AudioManager.InstancePool.GetVoicePool(Format);
        }
Example #6
        static void PlayPCM(XAudio2 device, string fileName)
        {
            //WaveStream stream = new WaveStream(fileName);
            var s = System.IO.File.OpenRead(fileName);
            WaveStream stream = new WaveStream(s);
            s.Close();

            AudioBuffer buffer = new AudioBuffer();
            buffer.AudioData = stream;
            buffer.AudioBytes = (int)stream.Length;
            buffer.Flags = BufferFlags.EndOfStream;

            SourceVoice sourceVoice = new SourceVoice(device, stream.Format);
            sourceVoice.SubmitSourceBuffer(buffer);
            sourceVoice.Start();

            // loop until the sound is done playing
            while (sourceVoice.State.BuffersQueued > 0)
            {
                if (GetAsyncKeyState(VK_ESCAPE) != 0)
                    break;

                Thread.Sleep(10);
            }

            // wait until the escape key is released
            while (GetAsyncKeyState(VK_ESCAPE) != 0)
                Thread.Sleep(10);

            // cleanup the voice
            buffer.Dispose();
            sourceVoice.Dispose();
            stream.Dispose();
        }
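A minimal caller for the snippet above might look like this; a sketch that assumes the same SlimDX XAudio2 setup, with a placeholder file name:

        static void Main()
        {
            using (var device = new XAudio2())
            using (var masteringVoice = new MasteringVoice(device)) // required so the voice has an output
            {
                PlayPCM(device, "test.wav"); // placeholder path
            }
        }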
        /// <summary>
        /// Encodes a frame.
        /// </summary>
        /// <param name="frame">The frame.</param>
        /// <returns>The encoded frame data.</returns>
        public override byte[] Encode(AudioBuffer frame)
        {
            if (_Encoder == null)
            {
                _Encoder = new Encoder(ClockRate, Channels, PacketTime);
                _Encoder.Quality = 1.0;
                _Encoder.Bitrate = 125;
            }

            byte[] data; int index; int length;
            var echoCanceller = EchoCanceller;
            if (echoCanceller == null)
            {
                data = frame.Data;
                index = frame.Index;
                length = frame.Length;
            }
            else
            {
                data = echoCanceller.capture(frame);
                index = 0;
                length = data.Length;
            }

            return _Encoder.Encode(data, index, length);
        }
Example #8
 internal ProcessBuffer(uint nframes, AudioBuffer[] audioInBuffers, AudioBuffer[] audioOutBuffers, MidiEventCollection<MidiInEvent>[] midiInEvents, MidiEventCollection<MidiOutEvent>[] midiOutEvents)
 {
     Frames = (int)nframes;
     AudioIn = audioInBuffers;
     AudioOut = audioOutBuffers;
     MidiIn = midiInEvents;
     MidiOut = midiOutEvents;
 }
        public byte[] capture(AudioBuffer input)
        {
            if (IsSupported)
            {
                return AcousticEchoCanceller.Capture(input.Data, input.Index, input.Length);
            }
            else
            {
                return BitAssistant.SubArray(input.Data, input.Index, input.Length);
            }
        }
Example #10
        /// <summary>
        /// Initializes a new instance of the <see cref="Wave"/> class, loading a wave file and creating an audio buffer from it.
        /// </summary>
        /// <param name="path">The path to the wave file.</param>
        public Wave(string path)
        {
            Contract.Requires<ArgumentException>(System.IO.File.Exists(path), "Parameter path must match an existing file");
            Data = new WaveStream(path);

            Buffer = new AudioBuffer()
            {
                AudioData = Data,
                AudioBytes = (int)Data.Length,
                Flags = BufferFlags.EndOfStream
            };
        }
        public int Read(AudioBuffer buff, int maxLength)
        {
            buff.Prepare(this, maxLength);

            int[,] samples = buff.Samples;
            for (int i = 0; i < buff.Length; i++)
                for (int j = 0; j < PCM.ChannelCount; j++)
                    samples[i, j] = _sampleVal;

            _sampleOffset += buff.Length;
            return buff.Length;
        }
 public int Read(AudioBuffer buff, int maxLength)
 {
     if (lwcdfBuffer == null || lwcdfBuffer.Size < buff.Size)
         lwcdfBuffer = new AudioBuffer(_lwcdfSource, buff.Size);
     int sampleCount = _audioSource.Read(buff, maxLength);
     if (sampleCount != _lwcdfSource.Read(lwcdfBuffer, maxLength))
         throw new Exception("size mismatch"); // Very likely to happen (depending on lwcdfSource implementation)
     for (uint i = 0; i < sampleCount; i++)
         for (int c = 0; c < buff.PCM.ChannelCount; c++)
             buff.Samples[i, c] = (int)Math.Round(buff.Samples[i, c] / scaling_factor + lwcdfBuffer.Samples[i, c]);
     return sampleCount;
 }
		void PrepareInputBufferList ()
		{
			uint byteSize = MaxFrames * sizeof(float);

			MutableAudioBufferList = new AudioBuffers (OriginalAudioBufferList.Count);

			for (int i = 0; i < OriginalAudioBufferList.Count; ++i) {
				MutableAudioBufferList[i] = new AudioBuffer {
					Data = OriginalAudioBufferList [i].Data,
					DataByteSize = (int)byteSize,
					NumberChannels = OriginalAudioBufferList [i].NumberChannels
				};
			}
		}
        public void render(String peerId, AudioBuffer echo)
        {
            if (IsSupported)
            {
                if (AudioMixer != null)
                {
                    AudioMixer.AddSourceFrame(peerId, new AudioBuffer(echo.Data, echo.Index, echo.Length));
                }
                else
                {
                    AcousticEchoCanceller.Render(echo.Data, echo.Index, echo.Length);
                }
            }
        }
Example #15
        public void TestPassThrough()
        {
            short[] data = TestsHelper.LoadAudioFile("test_mono_44100.raw");

            Assert.IsNotNull(data, "Failed to load test data (check DATA_PATH in TestsHelper.cs)");

            AudioBuffer buffer = new AudioBuffer();
            AudioProcessor processor = new AudioProcessor(44100, buffer);
            processor.Reset(44100, 1);
            processor.Consume(data, data.Length);
            processor.Flush();

            CollectionAssert.AreEqual(data, buffer.data);
        }
Example #16
        public void TestAccessors()
        {
            AudioBuffer buffer = new AudioBuffer();
            AudioBuffer buffer2 = new AudioBuffer();
            AudioProcessor processor = new AudioProcessor(44100, buffer);

            Assert.AreEqual(44100, processor.TargetSampleRate);
            Assert.AreEqual(buffer, processor.Consumer);

            processor.TargetSampleRate = 11025;
            Assert.AreEqual(11025, processor.TargetSampleRate);

            processor.Consumer = buffer2;
            Assert.AreEqual(buffer2, processor.Consumer);
        }
Example #17
        public void TestResampleMono()
        {
            short[] data1 = TestsHelper.LoadAudioFile("test_mono_44100.raw");
            short[] data2 = TestsHelper.LoadAudioFile("test_mono_11025.raw");

            Assert.IsNotNull(data1, "Failed to load test data (check DATA_PATH in TestsHelper.cs)");
            Assert.IsNotNull(data2, "Failed to load test data (check DATA_PATH in TestsHelper.cs)");

            AudioBuffer buffer = new AudioBuffer();
            AudioProcessor processor = new AudioProcessor(11025, buffer);
            processor.Reset(44100, 1);
            processor.Consume(data1, data1.Length);
            processor.Flush();

            CollectionAssert.AreEqual(data2, buffer.data);
        }
Example #18
        public XAudio2Driver(Configuration config)
        {
            IsDisposed = false;

            try
            {
                _isBusy = new WaitableBool(false);

                _device      = new XAudio2();
                _masterVoice = new MasteringVoice(_device);
                _sourceVoice = new SourceVoice(_device,
                                               new WaveFormat()
                                               {
                                                   FormatTag             = WaveFormatTag.Pcm,
                                                   Channels              = 2,
                                                   BitsPerSample         = 16,
                                                   SamplesPerSecond      = 32040,
                                                   AverageBytesPerSecond = 2 * (16 / 8) * 32040,
                                                   BlockAlignment        = 2 * (16 / 8)
                                               },
                                               VoiceFlags.None, 2.0F);
                _sourceVoice.BufferStart += (s, e) =>
                {
                    if (_sourceVoice.State.BuffersQueued < BufferCount)
                    {
                        _isBusy.Value = false;
                    }
                };

                _buffers       = new byte[BufferCount][];
                _bufferStreams = new DataStream[BufferCount];
                for (int i = 0; i < BufferCount; i++)
                {
                    _buffers[i]       = new byte[Snes.MaxAudioBufferLength * 4];
                    _bufferStreams[i] = new DataStream(_buffers[i], true, false);
                }

                _bufferCursor = 0;
                _audioBuffer = new AudioBuffer();
                _isPaused = true;
            }
            catch
            {
                Dispose();
                throw;
            }
        }
        public void TestPassThrough()
        {
            short[] samples = { 1000, 2000, 3000, 4000, 5000, 6000 };
            short[] data = (short[])(samples.Clone());

            AudioBuffer buffer = new AudioBuffer();
            SilenceRemover processor = new SilenceRemover(buffer);
            processor.Reset(44100, 1);
            processor.Consume(data, data.Length);
            processor.Flush();

            Assert.AreEqual(data.Length, buffer.data.Length);
            for (int i = 0; i < data.Length; i++)
            {
                Assert.AreEqual(data[i], buffer.data[i], "Signals differ at index " + i);
            }
        }
Example #21
        /// <summary>
        /// Loads a wave file into a SourceVoice.
        /// </summary>
        /// <param name="FileName">The path of the file to load.</param>
        /// <param name="device">The XAudio2 device to load the sound on.</param>
        /// <param name="notificationsSupport">True to enable receiving notifications on this buffer, false otherwise. A notification might include an event when this buffer starts processing data, or when the buffer has finished playing. Set this parameter to true if you wish to receive a notification when the buffer is done playing by means of the function passed to setOnEnd.</param>
        /// <returns>A populated ExtendedAudioBuffer.</returns>
        public static ExtendedAudioBuffer LoadSound(string FileName, XAudio2 device, bool notificationsSupport)
        {
            if (!File.Exists(FileName))
            {
                throw (new ArgumentException("The sound " + FileName + " could not be found."));
            }
            SoundStream stream = new SoundStream(File.OpenRead(FileName));
            WaveFormat  format = stream.Format;            // So we don't lose reference to it when we close the stream.
            AudioBuffer buffer = new AudioBuffer {
                Stream = stream.ToDataStream(), AudioBytes = (int)stream.Length, Flags = SharpDX.XAudio2.BufferFlags.EndOfStream
            };

            // We can now safely close the stream.
            stream.Close();
            SourceVoice sv = new SourceVoice(device, format, VoiceFlags.None, 5.0f, notificationsSupport);

            return(new ExtendedAudioBuffer(buffer, sv));
        }
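A hedged usage sketch for LoadSound; the playback members of ExtendedAudioBuffer are not shown above, so they are only indicated in a comment:

        var device = new XAudio2();
        var masteringVoice = new MasteringVoice(device);
        ExtendedAudioBuffer sound = LoadSound("click.wav", device, false); // placeholder path
        // start playback through the SourceVoice wrapped by ExtendedAudioBuffer,
        // then dispose the voice, masteringVoice and device when done.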
Example #22
        public Sound(Instrument instrument, XAudio2 xAudio2)
        {
            using (var stream = new SoundStream(File.OpenRead(instrument.Path.LocalPath)))
            {
                WaveFormat waveFormat = stream.Format;
                var        buffer     = new AudioBuffer
                {
                    Stream     = stream.ToDataStream(),
                    AudioBytes = (int)stream.Length,
                    Flags      = BufferFlags.EndOfStream
                };

                var sourceVoice = new SourceVoice(xAudio2, waveFormat);
                sourceVoice.SubmitSourceBuffer(buffer, stream.DecodedPacketsInfo);

                Setup(buffer, sourceVoice, stream.DecodedPacketsInfo, instrument.Volume);
            }
        }
Example #23
        void Microphone_BufferReady(object sender, EventArgs e)
        {
            var outputLength = Microphone.GetData(OutputBuffer);

            var frame = new AudioBuffer(OutputBuffer, 0, outputLength);

            if (!Resampler.Resample(frame, true))
            {
                Log.Error("Could not resample XNA audio.");
            }

            if (!frame.ConvertMonoToStereo())
            {
                Log.Error("Could not convert XNA audio to stereo.");
            }

            RaiseFrame(frame);
        }
Example #24
        private AudioBuffer Decode(byte[] encodedFrame, bool fec)
        {
            var data = _Decoder.Decode(encodedFrame, fec);

            if (data == null)
            {
                return(null);
            }

            var frame         = new AudioBuffer(data, 0, data.Length);
            var echoCanceller = EchoCanceller;

            if (echoCanceller != null)
            {
                echoCanceller.render(PeerId, frame);
            }
            return(frame);
        }
Example #25
        public void Init(frmCUEPlayer parent)
        {
            MdiParent = parent;
            _device   = WasapiOut.GetDefaultAudioEndpoint();
            _device.AudioEndpointVolume.OnVolumeNotification += new AudioEndpointVolumeNotificationDelegate(AudioEndpointVolume_OnVolumeNotification);
            mediaSliderVolume.Value = (int)(_device.AudioEndpointVolume.MasterVolumeLevelScalar * 100);
            //mediaSliderVolume.Maximum = (int)(_device.AudioEndpointVolume.VolumeRange);
            Show();

            int delay = 100;

            try
            {
                _player = new WasapiOut(_device, NAudio.CoreAudioApi.AudioClientShareMode.Shared, true, delay, new AudioPCMConfig(32, 2, 44100));
            }
            catch
            {
                _player = null;
            }
            if (_player == null)
            {
                try
                {
                    _player = new WasapiOut(_device, NAudio.CoreAudioApi.AudioClientShareMode.Shared, true, delay, new AudioPCMConfig(32, 2, 48000));
                    SOXResamplerConfig cfg;
                    cfg.Quality       = SOXResamplerQuality.Very;
                    cfg.Phase         = 50;
                    cfg.AllowAliasing = false;
                    cfg.Bandwidth     = 0;
                    _resampler        = new SOXResampler(parent.Mixer.PCM, _player.Settings.PCM, cfg);
                    resampled         = new AudioBuffer(_player.Settings.PCM, parent.Mixer.BufferSize * 2 * parent.Mixer.PCM.SampleRate / _player.Settings.PCM.SampleRate);
                }
                catch (Exception ex)
                {
                    _player = null;
                    Trace.WriteLine(ex.Message);
                }
            }
            parent.Mixer.AudioRead += new EventHandler <AudioReadEventArgs>(Mixer_AudioRead);
            if (_player != null)
            {
                _player.Play();
            }
        }
Example #26
        public fAudio()
        {
            device         = new XAudio2();
            masteringVoice = new MasteringVoice(device);

            waveFormat = new WaveFormat();
            {
                waveFormat.Channels              = CHANNELS;
                waveFormat.SamplesPerSecond      = SAMPLERATE;
                waveFormat.BitsPerSample         = BITDEPTH;
                waveFormat.BlockAlignment        = (short)(waveFormat.BitsPerSample / 8 * waveFormat.Channels);
                waveFormat.AverageBytesPerSecond = waveFormat.SamplesPerSecond * waveFormat.BlockAlignment;
                waveFormat.FormatTag             = WaveFormatTag.IeeeFloat;
            }

            sourceVoice            = new SourceVoice(device, waveFormat);
            sourceVoice.BufferEnd += sourceVoice_BufferEnd;

            audioBuffer = new AudioBuffer[NUMOFBUF];
            data        = new byte[NUMOFBUF][];
            for (int i = 0; i < NUMOFBUF; i++)
            {
                data[i] = new byte[NUMOFSAMPLE * waveFormat.BlockAlignment];
                byte[] buff;
                sq.Note = (95 - 12 * i);
                for (int j = 0; j < data[i].Length; j += waveFormat.BlockAlignment)
                {
                    buff           = sq.getByte();
                    data[i][j + 0] = buff[0];
                    data[i][j + 1] = buff[1];
                    data[i][j + 2] = buff[2];
                    data[i][j + 3] = buff[3];
                }
                audioBuffer[i]            = new AudioBuffer();
                audioBuffer[i].AudioData  = new MemoryStream(data[i], true);
                audioBuffer[i].Flags      = BufferFlags.EndOfStream;
                audioBuffer[i].AudioBytes = data[i].Length;
                audioBuffer[i].LoopCount  = 0;

                audioBuffer[i].AudioData.Position = 0;
                sourceVoice.SubmitSourceBuffer(audioBuffer[i]);
            }
            bufferCount = 0;
        }
Example #27
        /// <summary>
        /// SharpDX XAudio2 sample. Plays a generated sound with some reverb.
        /// </summary>
        static void Main(string[] args)
        {
            var xaudio2        = new XAudio2();
            var masteringVoice = new MasteringVoice(xaudio2);

            var waveFormat  = new WaveFormat(44100, 32, 2);
            var sourceVoice = new SourceVoice(xaudio2, waveFormat);

            int bufferSize = waveFormat.ConvertLatencyToByteSize(60000);
            var dataStream = new DataStream(bufferSize, true, true);

            int numberOfSamples = bufferSize / waveFormat.BlockAlign;

            for (int i = 0; i < numberOfSamples; i++)
            {
                double vibrato = Math.Cos(2 * Math.PI * 10.0 * i / waveFormat.SampleRate);
                float  value   = (float)(Math.Cos(2 * Math.PI * (220.0 + 4.0 * vibrato) * i / waveFormat.SampleRate) * 0.5);
                dataStream.Write(value);
                dataStream.Write(value);
            }
            dataStream.Position = 0;

            var audioBuffer = new AudioBuffer {
                Stream = dataStream, Flags = BufferFlags.EndOfStream, AudioBytes = bufferSize
            };

            var reverb           = new Reverb();
            var effectDescriptor = new EffectDescriptor(reverb);

            sourceVoice.SetEffectChain(effectDescriptor);
            sourceVoice.EnableEffect(0);

            sourceVoice.SubmitSourceBuffer(audioBuffer, null);

            sourceVoice.Start();

            Console.WriteLine("Play sound");
            for (int i = 0; i < 60; i++)
            {
                Console.Write(".");
                Console.Out.Flush();
                Thread.Sleep(1000);
            }
        }
        private FlacDecoderWriteStatus WriteCallback(IntPtr decoder, ref FLAC__Frame frame, int **buffer, IntPtr clientData)
        {
            int blocksize = (int)frame.Header.Blocksize;

            if (this.pcm == null)
            {
                this.pcm           = new AudioPCMConfig((int)frame.Header.BitsPerSample, (int)frame.Header.Channels, (int)frame.Header.SampleRate);
                this.decoderLength = FLAC__stream_decoder_get_total_samples(this.handle);
            }

            if (this.audioBuffer == null || this.audioBuffer.Size < blocksize)
            {
                this.audioBuffer = new AudioBuffer(this.pcm, blocksize);
            }

            int channelCount = pcm.ChannelCount;

            for (int i = 0; i < channelCount; ++i)
            {
                fixed(int *sampleBufferPtr = this.audioBuffer.Samples)
                {
                    int *source    = buffer[i];
                    int *sourceEnd = source + blocksize;

                    int *sampleBufferPtrCopy = sampleBufferPtr + i;

                    while (source != sourceEnd)
                    {
                        *sampleBufferPtrCopy = *source;

                        ++source;
                        sampleBufferPtrCopy += channelCount;
                    }
                }
            }

            this.audioBuffer.Length = blocksize;
            this.audioBufferOffset  = 0;
            this.decoderPosition   += blocksize;

            this.audioBuffer.Prepare(this.audioBuffer.Samples, this.audioBuffer.Length);

            return(FlacDecoderWriteStatus.Continue);
        }
Example #29
        private bool LoadAudio(string path)
        {
            DisposeSource();

            //Get audio stream
            _sourceStream = WAV.FromFile(path);

            _audioSource = path;

            //Create buffer for stream
            _buffer      = _provider.CreateBuffer(_sourceStream);
            _buffer.Loop = chkLoop.Checked;

            //Set controls
            _sampleTime = new DateTime((long)_sourceStream.Samples * 10000000 / _sourceStream.Frequency);

            txtPath.Text      = path;
            lblFrequency.Text = String.Format("{0} Hz", _sourceStream.Frequency);
            lblSamples.Text   = String.Format("{0}", _sourceStream.Samples);

            customTrackBar1.Value         = 0;
            customTrackBar1.TickStyle     = TickStyle.None;
            customTrackBar1.Maximum       = _sourceStream.Samples;
            customTrackBar1.TickFrequency = _sourceStream.Samples / 8;
            customTrackBar1.TickStyle     = TickStyle.BottomRight;

            numLoopStart.Value   = 0;
            numLoopStart.Maximum = numLoopEnd.Maximum = _sourceStream.Samples;
            numLoopEnd.Value     = _sourceStream.Samples;

            pnlLoopStart.Width = 0;
            pnlLoopEnd.Width   = 0;

            btnOkay.Enabled = true;

            if (_type == 0)
            {
                chkLoopEnable.Checked = true;
            }

            UpdateTimeDisplay();

            return(true);
        }
Example #30
        public XAudio2Stream(int rate, int bits, int channels, int bufferSize, int numBuffers, BufferFillEventHandler bufferFillCallback)
        {
            xaudio2          = new XAudio2();
            masteringVoice   = new MasteringVoice(xaudio2);
            waveFormat       = new WaveFormat(rate, bits, channels);
            audioBuffersRing = new AudioBuffer[numBuffers];
            memBuffers       = new DataPointer[audioBuffersRing.Length];

            for (int i = 0; i < audioBuffersRing.Length; i++)
            {
                audioBuffersRing[i]   = new AudioBuffer();
                memBuffers[i].Size    = bufferSize;
                memBuffers[i].Pointer = Utilities.AllocateMemory(memBuffers[i].Size);
            }

            bufferFill      = bufferFillCallback;
            bufferSemaphore = new Semaphore(numBuffers, numBuffers);
            quitEvent       = new ManualResetEvent(false);
        }
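The constructor above allocates unmanaged memory for each ring buffer, so a matching teardown is implied; a sketch under the assumption that quitEvent stops the fill loop and the fields are as shown:

        public void Dispose()
        {
            quitEvent.Set(); // assumed: signals the buffer-fill loop to exit
            foreach (var memBuffer in memBuffers)
                Utilities.FreeMemory(memBuffer.Pointer); // pairs with AllocateMemory above
            masteringVoice.Dispose();
            xaudio2.Dispose();
        }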
Example #31
 public void Write(AudioBuffer buff)
 {
     try
     {
         wrt.Write(buff);
     }
     catch (IOException ex)
     {
         if (_encoderProcess.HasExited)
         {
             throw new IOException(string.Format("{0} has exited prematurely with code {1}", m_settings.Path, _encoderProcess.ExitCode), ex);
         }
         else
         {
             throw; // rethrow, preserving the original stack trace
         }
     }
     //_sampleLen += sampleCount;
 }
Example #32
        public void play()
        {
            var         dataStream = DataStream.Create(BufferSamples, true, true);
            AudioBuffer buffer     = new AudioBuffer
            {
                /*LoopCount = AudioBuffer.LoopInfinite,*/
                Stream     = dataStream,
                AudioBytes = (int)dataStream.Length,
                Flags      = BufferFlags.EndOfStream
            };


            sourcevoice.SubmitSourceBuffer(buffer, null);
            sourcevoice.SetVolume(vol);



            sourcevoice.Start();
        }
        unsafe private void ProcessFrameOutput(AudioFrame frame)
        {
            using (AudioBuffer buffer = frame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference reference = buffer.CreateReference())
                {
                    byte *dataInBytes;
                    uint  capacityInBytes;


                    // Get the buffer from the AudioFrame
                    ((IMemoryBufferByteAccess)reference).GetBuffer(out dataInBytes, out capacityInBytes);

                    // capacityInBytes counts bytes; the frame holds capacityInBytes / sizeof(float) float samples
                    int floatCount = (int)(capacityInBytes / sizeof(float));
                    for (int i = 0; i < floatCount; i++)
                    {
                        dataInFloat[i] = *((float *)dataInBytes + i);
                    }
                    SendHello(capacityInBytes);
                }
        }
Example #34
        public void PlayImmediate(short[] data, int sampleRate, float volume)
        {
            StopImmediate();

            immediateDonePlaying = false;

            immediateAudioBuffer = new AudioBuffer();
            immediateAudioBuffer.AudioDataPointer = Utilities.AllocateMemory(data.Length * sizeof(short));
            immediateAudioBuffer.AudioBytes       = data.Length * sizeof(short);
            Marshal.Copy(data, 0, immediateAudioBuffer.AudioDataPointer, data.Length);

            var waveFormat = new WaveFormat(sampleRate, 16, 1);

            immediateVoice            = new SourceVoice(xaudio2, waveFormat);
            immediateVoice.BufferEnd += ImmediateVoice_BufferEnd;
            immediateVoice.SetVolume(volume);
            immediateVoice.SubmitSourceBuffer(immediateAudioBuffer, null);
            immediateVoice.Start();
        }
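PlayImmediate references a BufferEnd handler and a StopImmediate method that are not shown; a plausible sketch, assuming the fields used above:

        private void ImmediateVoice_BufferEnd(IntPtr context)
        {
            immediateDonePlaying = true; // the submitted buffer finished playing
        }

        public void StopImmediate()
        {
            if (immediateVoice != null)
            {
                immediateVoice.Stop();
                immediateVoice.DestroyVoice();
                immediateVoice.Dispose();
                immediateVoice = null;
                Utilities.FreeMemory(immediateAudioBuffer.AudioDataPointer); // pairs with AllocateMemory above
                immediateAudioBuffer = null;
            }
        }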
Example #35
        public void Encode()
        {
            AudioBuffer buffer = new AudioBuffer(audioSource.PCM, BufferSize);

            this.AudioDest.FinalSampleCount = this.audioSource.Length;

            while (audioSource.Read(buffer, BufferSize) > 0)
            {
                if (this.trackGain != null)
                {
                    DspHelper.AnalyzeSamples(this.trackGain, buffer);
                }
                if (this.drMeter != null)
                {
                    this.drMeter.Feed(buffer.Samples, buffer.Length);
                }

                this.AudioDest.Write(buffer);

                ProgressChangedEventArgs eventArgs = new ProgressChangedEventArgs((double)this.audioSource.Position / this.audioSource.Length);
                this.OnProgressChanged(eventArgs);
                if (eventArgs.Cancel)
                {
                    this.AudioDest.Close();
                    this.AudioDest = null;
                    Utility.TryDeleteFile(this.targetFilename);
                    return;
                }
            }

            if (this.drMeter != null)
            {
                this.drMeter.Finish();
            }

            this.AudioDest.Close();
            this.AudioDest = null;

            if (this.tags != null)
            {
                this.tags.WriteToFile(this.targetFilename);
            }
        }
        public static AudioBuffer LoadWav(string filename)
        {
            if (String.IsNullOrEmpty(filename))
            {
                throw new ArgumentException("filename must not be null or empty", "filename");
            }

            AudioBuffer audio;

            audioDictionary.TryGetValue(filename, out audio);

            if (audio == null)
            {
                audio = new AudioBuffer();
                audio.LoadObject(filename);
                audioDictionary.Add(filename, audio);
            }
            return(audio);
        }
        public void ConstructorTest()
        {
            AudioBuffer buff = Codecs.WAV.AudioDecoder.ReadAllSamples(new Codecs.WAV.DecoderSettings(), "test.wav");

            CUETools.Codecs.libFLAC.Encoder target;

            target = new CUETools.Codecs.libFLAC.Encoder(new CUETools.Codecs.libFLAC.EncoderSettings()
            {
                PCM = buff.PCM, EncoderMode = "7"
            }, "flacwriter2.flac");
            target.Settings.Padding   = 1;
            target.Settings.BlockSize = 32;
            //target.Vendor = "CUETools";
            //target.CreationTime = DateTime.Parse("15 Aug 1976");
            target.FinalSampleCount = buff.Length;
            target.Write(buff);
            target.Close();
            CollectionAssert.AreEqual(File.ReadAllBytes("flacwriter1.flac"), File.ReadAllBytes("flacwriter2.flac"), "flacwriter2.flac doesn't match.");
        }
        /// <summary> Конвертирование wav-файла во flac </summary>
        /// <returns>Частота дискретизации</returns>
        public static int Wav2Flac(Stream wavStream, Stream flacStream)
        {
            int sampleRate = 0;

            IAudioSource audioSource = new WAVReader(null, wavStream);
            AudioBuffer  buff        = new AudioBuffer(audioSource, 0x10000);

            FlakeWriter flakewriter = new FlakeWriter(null, flacStream, audioSource.PCM);

            sampleRate = audioSource.PCM.SampleRate;

            FlakeWriter audioDest = flakewriter;

            while (audioSource.Read(buff, -1) != 0)
            {
                audioDest.Write(buff);
            }
            return(sampleRate);
        }
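Calling Wav2Flac needs only the two streams; a minimal sketch with placeholder paths:

        using (var wavStream = File.OpenRead("input.wav"))
        using (var flacStream = File.Create("output.flac"))
        {
            int rate = Wav2Flac(wavStream, flacStream);
            Console.WriteLine("Converted at {0} Hz", rate);
        }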
Example #39
        /// <summary>
        /// Append a new audio buffer to the audio output.
        /// </summary>
        /// <param name="bufferTag">The unique tag of this buffer.</param>
        /// <param name="userBuffer">The buffer informations.</param>
        /// <returns>A <see cref="ResultCode"/> reporting an error or a success.</returns>
        public ResultCode AppendBuffer(ulong bufferTag, ref AudioUserBuffer userBuffer)
        {
            lock (_parentLock)
            {
                AudioBuffer buffer = new AudioBuffer
                {
                    BufferTag   = bufferTag,
                    DataPointer = userBuffer.Data,
                    DataSize    = userBuffer.DataSize
                };

                if (_session.AppendBuffer(buffer))
                {
                    return(ResultCode.Success);
                }

                return(ResultCode.BufferRingFull);
            }
        }
Example #40
        /// <summary>
        /// GenerateNext is called by the Generator base class when the next sample should be read.
        /// </summary>
        /// <param name="currentTime">The originating time that triggered the current call.</param>
        /// <returns>The originating time at which to capture the next sample.</returns>
        protected override DateTime GenerateNext(DateTime currentTime)
        {
            DateTime          originatingTime = default(DateTime);
            int               streamIndex     = 0;
            SourceReaderFlags flags           = SourceReaderFlags.None;
            long              timestamp       = 0;
            Sample            sample          = this.sourceReader.ReadSample(SourceReaderIndex.AnyStream, 0, out streamIndex, out flags, out timestamp);

            if (sample != null)
            {
                originatingTime = this.start + TimeSpan.FromTicks(timestamp);
                MediaBuffer buffer           = sample.ConvertToContiguousBuffer();
                int         currentByteCount = 0;
                int         maxByteCount     = 0;
                IntPtr      data             = buffer.Lock(out maxByteCount, out currentByteCount);

                if (streamIndex == this.imageStreamIndex)
                {
                    using (var sharedImage = ImagePool.GetOrCreate(this.videoWidth, this.videoHeight, Imaging.PixelFormat.BGR_24bpp))
                    {
                        sharedImage.Resource.CopyFrom(data);
                        this.Image.Post(sharedImage, originatingTime);
                    }
                }
                else if (streamIndex == this.audioStreamIndex)
                {
                    AudioBuffer audioBuffer = new AudioBuffer(currentByteCount, this.waveFormat);
                    Marshal.Copy(data, audioBuffer.Data, 0, currentByteCount);
                    this.Audio.Post(audioBuffer, originatingTime);
                }

                buffer.Unlock();
                buffer.Dispose();
                sample.Dispose();
            }

            if (flags == SourceReaderFlags.Endofstream)
            {
                return(DateTime.MaxValue); // Used to indicate there is no more data
            }

            return(originatingTime);
        }
        // </SnippetMixProperty>

        // <SnippetProcessFrame>
        unsafe public void ProcessFrame(ProcessAudioFrameContext context)
        {
            AudioFrame inputFrame  = context.InputFrame;
            AudioFrame outputFrame = context.OutputFrame;

            using (AudioBuffer inputBuffer = inputFrame.LockBuffer(AudioBufferAccessMode.Read),
                   outputBuffer = outputFrame.LockBuffer(AudioBufferAccessMode.Write))
                using (IMemoryBufferReference inputReference = inputBuffer.CreateReference(),
                       outputReference = outputBuffer.CreateReference())
                {
                    byte *inputDataInBytes;
                    byte *outputDataInBytes;
                    uint  inputCapacity;
                    uint  outputCapacity;

                    ((IMemoryBufferByteAccess)inputReference).GetBuffer(out inputDataInBytes, out inputCapacity);
                    ((IMemoryBufferByteAccess)outputReference).GetBuffer(out outputDataInBytes, out outputCapacity);

                    float *inputDataInFloat  = (float *)inputDataInBytes;
                    float *outputDataInFloat = (float *)outputDataInBytes;

                    float inputData;
                    float echoData;

                    // Process audio data
                    int dataInFloatLength = (int)inputBuffer.Length / sizeof(float);

                    for (int i = 0; i < dataInFloatLength; i++)
                    {
                        inputData            = inputDataInFloat[i] * (1.0f - this.Mix);
                        echoData             = echoBuffer[currentActiveSampleIndex] * this.Mix;
                        outputDataInFloat[i] = inputData + echoData;
                        echoBuffer[currentActiveSampleIndex] = inputDataInFloat[i];
                        currentActiveSampleIndex++;

                        if (currentActiveSampleIndex == echoBuffer.Length)
                        {
                            // Wrap around (after one second of samples)
                            currentActiveSampleIndex = 0;
                        }
                    }
                }
        }
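The wrap-around in ProcessFrame implies echoBuffer holds exactly one second of samples; a sketch of how those fields might be initialized (assumed, since their declarations are not shown):

        private float[] echoBuffer;
        private int currentActiveSampleIndex;

        private void InitEchoBuffer(uint sampleRate)
        {
            echoBuffer = new float[sampleRate]; // one second of float samples at the stream rate
            currentActiveSampleIndex = 0;
        }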
        public void Save()
        {
            using (Settings settings = new MPSettings())
            {
                settings.SetValue(SECTION_NAME, SERVER_NAME, ServerName);
                settings.SetValue(SECTION_NAME, SERVER_PASS, Password);
                settings.SetValue(SECTION_NAME, CUSTOM_ADDRESS, CustomAddress.HexStringFromBytes());
                settings.SetValue(SECTION_NAME, RTSP_PORT, RtspPort);
                settings.SetValue(SECTION_NAME, UDP_PORT, UdpPort);
                settings.SetValue(SECTION_NAME, BUFFER_SIZE, AudioBuffer.ToString(CultureInfo.InvariantCulture));
                settings.SetValueAsBool(SECTION_NAME, ALLOW_VOLUME, AllowVolume);
                settings.SetValueAsBool(SECTION_NAME, SEND_COMMANDS, SendCommands);

                settings.SetValue(SECTION_NAME, AIRPLAY_PORT, AirplayPort);
                settings.SetValueAsBool(SECTION_NAME, ALLOW_HD_STREAMS, AllowHDStreams);
                settings.SetValue(SECTION_NAME, VIDEO_BUFFER, VideoBuffer);
                settings.SetValueAsBool(SECTION_NAME, IOS8_WORKAROUND, iOS8Workaround);
            }
        }
Example #43
        public void AudioBufferConstructor1()
        {
            AudioBuffer b = new AudioBuffer(64);

            Assert.AreEqual(64, b.ByteSize);
            Assert.AreEqual(16, b.FloatSize);

            b.ByteData[0] = 100;
            b.ByteData[1] = 50;

            AudioBuffer b2 = new AudioBuffer(64);

            b2.FloatData[0] = b.FloatData[0];

            for (int x = 0; x < b.ByteSize; x++)
            {
                Assert.AreEqual(b.ByteData[x], b2.ByteData[x]);
            }
        }
Example #44
        public IOSAudioProcessor()
        {
            var inputComponent = AudioComponent.FindNextComponent(
                null,
                new AudioComponentDescription
            {
                ComponentFlags        = 0,
                ComponentFlagsMask    = 0,
                ComponentManufacturer = AudioComponentManufacturerType.Apple,
                ComponentSubType      = (int)AudioTypeOutput.Remote,
                ComponentType         = AudioComponentType.Output
            });

            recorder = inputComponent.CreateAudioUnit();
            recorder.SetEnableIO(true, AudioUnitScopeType.Input, inputBus);
            recorder.SetEnableIO(false, AudioUnitScopeType.Output, outputBus);

            var audioFormat = new AudioStreamBasicDescription
            {
                SampleRate       = StudentDemo.Globals.SAMPLERATE,
                Format           = AudioFormatType.LinearPCM,
                FormatFlags      = AudioFormatFlags.IsSignedInteger | AudioFormatFlags.IsPacked,
                FramesPerPacket  = 1,
                ChannelsPerFrame = 1,
                BitsPerChannel   = 16,
                BytesPerPacket   = 2,
                BytesPerFrame    = 2
            };

            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Output, inputBus);
            recorder.SetAudioFormat(audioFormat, AudioUnitScopeType.Input, outputBus);

            recorder.SetInputCallback(AudioInputCallBack, AudioUnitScopeType.Global, inputBus);

            // TODO: Disable buffers (requires interop)
            aBuffer = new AudioBuffer
            {
                NumberChannels = 1,
                DataByteSize   = 512 * 2,
                Data           = System.Runtime.InteropServices.Marshal.AllocHGlobal(512 * 2)
            };
        }
Example #45
        public int Read(AudioBuffer buff, int maxLength)
        {
            buff.Prepare(this, maxLength);

            int offset      = 0;
            int sampleCount = buff.Length;

            while (_samplesInBuffer < sampleCount)
            {
                if (_samplesInBuffer > 0)
                {
                    interlace(buff, offset, _samplesInBuffer);
                    sampleCount         -= _samplesInBuffer;
                    offset              += _samplesInBuffer;
                    _samplesInBuffer     = 0;
                    _samplesBufferOffset = 0;
                }

                fill_frames_buffer();

                if (_framesBufferLength == 0)
                {
                    return(buff.Length = offset);
                }

                int bytesDecoded = DecodeFrame(_framesBuffer, _framesBufferOffset, _framesBufferLength);
                _framesBufferLength -= bytesDecoded;
                _framesBufferOffset += bytesDecoded;

                _samplesInBuffer -= _samplesBufferOffset;                 // can be set by Seek, otherwise zero
                _sampleOffset    += _samplesInBuffer;
            }

            interlace(buff, offset, sampleCount);
            _samplesInBuffer     -= sampleCount;
            _samplesBufferOffset += sampleCount;
            if (_samplesInBuffer == 0)
            {
                _samplesBufferOffset = 0;
            }
            return(buff.Length = offset + sampleCount);
        }
Example #46
        public static void PlayPcm(XAudio2 device, string fileName)
        {
            var        s      = System.IO.File.OpenRead(fileName); //open the wav file
            WaveStream stream = new WaveStream(s);                 //pass the stream to the library

            s.Close();                                             //close the file

            AudioBuffer buffer = new AudioBuffer();                //init the buffer

            buffer.AudioData  = stream;                            //set the input stream for the audio
            buffer.AudioBytes = (int)stream.Length;                //set the size of the buffer to the size of the stream
            buffer.Flags      = BufferFlags.EndOfStream;           //presumably set it to play until the end of the stream/file


            SourceVoice sourceVoice = new SourceVoice(device, stream.Format); //this looks like it might initialise the actual output

            sourceVoice.SubmitSourceBuffer(buffer);                           //pass the buffer to the output voice
            sourceVoice.Start();                                              //start the playback

            //the two sections above are guessed; there is no documentation on the classes/properties.

            // loop until the sound is done playing
            while (sourceVoice.State.BuffersQueued > 0) // This keeps looping while there is sound in the buffer
            {                                           // (presumably). For this specific example it will stop
                if (GetAsyncKeyState(VK_ESCAPE) != 0)   // playing the sound if escape is pressed. That is what the
                {
                    break;                              // DLLImport and stuff at the top is for
                }
                Thread.Sleep(10);                       //
            }

            // wait until the escape key is released
            while (GetAsyncKeyState(VK_ESCAPE) != 0) //it just waits here until the person presses escape
            {
                Thread.Sleep(10);
            }

            // cleanup the voice
            buffer.Dispose();
            sourceVoice.Dispose();
            stream.Dispose();
        }
Example #47
        public SineGenerator()
        {
            _xaudio2        = new XAudio2();
            _masteringVoice = new MasteringVoice(_xaudio2);

            _waveFormat = new WaveFormat(44100, 32, 2);

            _sourceVoice = new SourceVoice(_xaudio2, _waveFormat);

            _bufferSize = _waveFormat.ConvertLatencyToByteSize(7);
            _dataStream = new DataStream(_bufferSize, true, true);

            _sourceVoice.BufferEnd += sourceVoice_BufferEnd;
            _sourceVoice.Start();

            _isConvolutionOn = false;
            _horizontalAngle = 0;
            _elevation       = 0;

            _bufferEndEvent = new AutoResetEvent(false);

            _valueRate = 20;
            _valueAmp  = 0.5f;

            _nextBuffer = 0;

            _playEvent = new ManualResetEventSlim();
            _log       = new List <string>();


            // Pre-allocate buffers
            _audioBuffersRing = new AudioBuffer[3];
            _memBuffers       = new DataPointer[_audioBuffersRing.Length];
            for (int i = 0; i < _audioBuffersRing.Length; i++)
            {
                _audioBuffersRing[i]   = new AudioBuffer();
                _memBuffers[i].Size    = _bufferSize;
                _memBuffers[i].Pointer = Utilities.AllocateMemory(_memBuffers[i].Size);
            }

            _playSoundTask = Task.Factory.StartNew(PlaySoundAsync, TaskCreationOptions.LongRunning);
        }
        public ReplicatedPlayer(MasterRenderer renderer, World world, SimpleCamera camera, Vector3 position, Team team)
            : base(renderer, world, camera, position, team)
        {
            this.camera = camera;

            interpPos = new Vector3Anim();
            interpPos.SnapTo(position);

            yawAnim   = new FloatAnim();
            pitchAnim = new FloatAnim();

            // This is fully server controlled
            ItemManager.DontUpdateItems   = true;
            ItemManager.IsReplicated      = true;
            CharacterController.IsEnabled = false;

            CreateStarterBackpack();

            AudioBuffer jumpAudioBuffer = AssetManager.LoadSound("Player/jump.wav");

            if (jumpAudioBuffer != null)
            {
                jumpAudioSource             = new AudioSource(jumpAudioBuffer);
                jumpAudioSource.Gain        = 0.2f;
                jumpAudioSource.MaxDistance = 100f;
            }

            AudioBuffer landAudioBuffer = AssetManager.LoadSound("Player/land.wav");

            if (landAudioBuffer != null)
            {
                landAudioSource             = new AudioSource(landAudioBuffer);
                landAudioSource.Gain        = 0.2f;
                landAudioSource.MaxDistance = 120f;
            }

            walkingAudioSource = new CyclicAudioSource("Player/footstep.wav", 1, 0f,
                                                       relative: false, maxDistance: 100f);

            runningAudioSource = new CyclicAudioSource("Player/run.wav", 1, 0f,
                                                       relative: false, maxDistance: 200f);
        }
Example #49
        AudioSource LoadAudioFromConfig(GunAudioConfig config, bool replicated = false, bool far = false)
        {
            string filePath;

            if (replicated)
            {
                if (far)
                {
                    filePath = config.FarFilepath;
                }
                else
                {
                    filePath = config.ReplicatedFilepath;
                }
            }
            else
            {
                filePath = config.LocalFilepath;
            }

            AudioBuffer buffer = AssetManager.LoadSound(filePath);

            if (buffer == null)
            {
                return(null);
            }

            AudioSource audioSource = new AudioSource(buffer);

            audioSource.Gain = replicated ? config.ReplicatedGain : config.LocalGain;

            if (replicated)
            {
                audioSource.MaxDistance = far ? config.FarMaxDistance : config.NearMaxDistance;
            }
            else
            {
                audioSource.IsSourceRelative = true;
            }

            return(audioSource);
        }
        public void ComputeHashes()
        {
            try
            {
                SHA1CryptoServiceProvider sha1 = new SHA1CryptoServiceProvider();

                this.CRC32 = 0;

                long totalSamples     = this.audioSource.Length;
                long processedSamples = 0;

                AudioBuffer buffer = new AudioBuffer(this.audioSource.PCM, 44100);
                while (this.audioSource.Read(buffer, 44100) > 0)
                {
                    byte[] bufferBytes = buffer.Bytes;
                    if (this.audioSource.Position == this.audioSource.Length)
                    {
                        sha1.TransformFinalBlock(bufferBytes, 0, buffer.ByteLength);
                    }
                    else
                    {
                        sha1.TransformBlock(bufferBytes, 0, buffer.ByteLength, null, 0);
                    }
                    this.CRC32 = Crc32.ComputeChecksum(this.CRC32, buffer.Bytes, 0, buffer.ByteLength);

                    processedSamples += buffer.Length;

                    ProgressChangedEventArgs eventArgs = new ProgressChangedEventArgs((double)processedSamples / totalSamples);
                    this.OnProgressChanged(eventArgs);
                    if (eventArgs.Cancel)
                    {
                        return;
                    }
                }

                this.SHA1 = sha1.Hash;
            }
            finally
            {
                this.audioSource.Close();
            }
        }
        public void TestRemoveLeadingSilence()
        {
            short[] samples1 = { 0, 60, 0, 1000, 2000, 0, 4000, 5000, 0 };
            short[] data1 = (short[])(samples1.Clone());

            short[] samples2 = { 1000, 2000, 0, 4000, 5000, 0 };
            short[] data2 = (short[])(samples2.Clone());

            AudioBuffer buffer = new AudioBuffer();
            SilenceRemover processor = new SilenceRemover(buffer, 100);
            processor.Reset(44100, 1);
            processor.Consume(data1, data1.Length);
            processor.Flush();

            Assert.AreEqual(data2.Length, buffer.data.Length);
            for (int i = 0; i < data2.Length; i++)
            {
                Assert.AreEqual(data2[i], buffer.data[i], "Signals differ at index " + i);
            }
        }
Example #52
        public void Play( Form on )
        {
            var screens = Screen.AllScreens;
            var screens_left  = screens.Min( screen => screen.Bounds.Left  );
            var screens_right = screens.Max( screen => screen.Bounds.Right );
            var screens_width = screens_right-screens_left;

            var bestScreen = screens.OrderByDescending( screen => {
                var area = screen.Bounds;
                area.Intersect( on.Bounds );
                return area.Width*area.Height;
            }).First();

            var balances = new[]{1.5f,1.5f};
            if ( screens.Length==3 && DisplayBalances.ContainsKey(bestScreen.DeviceName) ) balances = DisplayBalances[bestScreen.DeviceName];

            var path   = Registry.CurrentUser.OpenSubKey(@"AppEvents\Schemes\Apps\.Default\"+Name+@"\.Current").GetValue(null) as string;
            var stream = new WaveStream(path);
            var buffer = new AudioBuffer() { AudioBytes=(int)stream.Length, AudioData=stream, Flags=BufferFlags.EndOfStream };

            var voice = new SourceVoice( XAudio2, stream.Format );
            voice.SubmitSourceBuffer( buffer );
            voice.SetChannelVolumes( balances.Length, balances );
            voice.BufferEnd += (sender,ctx) => {
                try {
                    on.BeginInvoke(new Action(()=>{
                        voice.Dispose();
                        buffer.Dispose();
                        stream.Dispose();
                    }));
                } catch ( InvalidOperationException ) {
                    // the target form must already be disposed/gone
                }
            };
            voice.Start();
        }
        public void PlayPPM(IntPtr win)
        {
            Rate = 192000; //44100 on cheap hardware, 96000 on AC97, 192000 on HD Audio
                           // it's the number of samples per second of audio
            channels = 2;  // 1 = mono, 2 = stereo

            PPMSamples = (int)(0.0225 * Rate * channels);   // 22 or 22.5ms in samples, rounded up
                                                            // no. of bytes per second = channels * rate * bytes in one sample
            microsec = Rate / 10000.0;                      // 192 = 1ms, 19.2 = 0.1ms or 1mis @ 192khz
            PPMchannels = new Dictionary<int, double>();
            frame = new List<short>();
            Amplitude = 32760;

            /*WaveFile wFile;
            wFile = new WaveFile(channels, 16, Rate);
            */

            //Set channels to neutral except throttle, throttle = zero.
            PPMchannels.Add(1, 10.0); //Throttle
            PPMchannels.Add(2, 50.0); //Ailerons
            PPMchannels.Add(3, 50.0); //Stab
            PPMchannels.Add(4, 50.0); //Rudder
            PPMchannels.Add(5, 50.0);
            PPMchannels.Add(6, 50.0);
            PPMchannels.Add(7, 50.0);
            PPMchannels.Add(8, 50.0);

            byte[] data = GenPPM();

            /*wFile.SetData(data, data.Length);
            wFile.WriteFile(@"C:\Users\kang\Desktop\test.wav");
            */
            ms = new MemoryStream();
            ms.SetLength(0);
            ms.Write(data, 0, data.Length);
            ms.Position = 0;

            wf = new WaveFormat();
            wf.FormatTag = WaveFormatTag.Pcm;
            wf.BitsPerSample = (short)16;
            wf.Channels = channels;
            wf.SamplesPerSecond = Rate;
            wf.BlockAlignment = (short)(wf.Channels * wf.BitsPerSample / 8);
            wf.AverageBytesPerSecond = wf.SamplesPerSecond * wf.BlockAlignment;

            device = new XAudio2();
            device.StartEngine();
            masteringVoice = new MasteringVoice(device);
            srcVoice = new SourceVoice(device, wf);
            buffer = new AudioBuffer();
            buffer.AudioData = ms;
            buffer.AudioBytes = (int)data.Length;
            buffer.Flags = SlimDX.XAudio2.BufferFlags.None;

            srcVoice.BufferStart += new EventHandler<ContextEventArgs>(srcVoice_BufferStart);
            srcVoice.FrequencyRatio = 1;
            srcVoice.SubmitSourceBuffer(buffer);
            srcVoice.Start();
        }
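
GenPPM is not shown in this snippet. A rough sketch of what a PPM frame generator could look like, assuming standard RC pulse widths (0.7–1.7 ms of high level per channel plus a fixed 0.3 ms separator, padded to the 22.5 ms frame with a sync gap); none of these constants come from the original code:

// Hypothetical PPM frame generator: maps each channel's 0..100 % value to a
// pulse width, then serializes the frame as 16-bit little-endian stereo PCM.
byte[] GenPPMSketch(IDictionary<int, double> ppmChannels, int rate, short amplitude)
{
    var samples = new List<short>();
    double samplesPerMs = rate / 1000.0;
    foreach (double percent in ppmChannels.Values)
    {
        int high = (int)((0.7 + percent / 100.0) * samplesPerMs); // 0.7..1.7 ms mark
        int low = (int)(0.3 * samplesPerMs);                      // 0.3 ms separator
        for (int i = 0; i < high; i++) samples.Add(amplitude);
        for (int i = 0; i < low; i++) samples.Add(0);
    }
    int frameSamples = (int)(22.5 * samplesPerMs);                // pad with the sync gap
    while (samples.Count < frameSamples) samples.Add(0);

    var bytes = new byte[samples.Count * 2 * sizeof(short)];      // duplicate for stereo
    for (int i = 0; i < samples.Count; i++)
    {
        byte lo = (byte)(samples[i] & 0xFF), hi = (byte)(samples[i] >> 8);
        bytes[i * 4 + 0] = lo; bytes[i * 4 + 1] = hi;             // left channel
        bytes[i * 4 + 2] = lo; bytes[i * 4 + 3] = hi;             // right channel
    }
    return bytes;
}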
        /// <summary>
        /// Decodes an encoded frame.
        /// </summary>
        /// <param name="encodedFrame">The encoded frame.</param>
        /// <returns></returns>
        public override AudioBuffer Decode(byte[] encodedFrame)
		{
			if (_Decoder == null)
			{
                _Decoder = new CocoaOpusDecoder(ClockRate, Channels, PacketTime);
                Link.GetRemoteStream().DisablePLC = true;
			}

            if (LastRTPSequenceNumber == -1)
            {
                LastRTPSequenceNumber = CurrentRTPSequenceNumber;
                return DecodeNormal(encodedFrame);
            }
            else
            {
                var sequenceNumberDelta = RTPPacket.GetSequenceNumberDelta(CurrentRTPSequenceNumber, LastRTPSequenceNumber);
                LastRTPSequenceNumber = CurrentRTPSequenceNumber;

                var missingPacketCount = sequenceNumberDelta - 1;
                var previousFrames = new AudioBuffer[missingPacketCount];

                var plcFrameCount = (missingPacketCount > 1) ? missingPacketCount - 1 : 0;
                if (plcFrameCount > 0)
                {
                    Log.InfoFormat("Adding {0} frames of loss concealment to the incoming audio stream; gap detected in the RTP packet sequence.", plcFrameCount.ToString());
                    for (var i = 0; i < plcFrameCount; i++)
                    {
                        previousFrames[i] = DecodePLC();
                    }
                }

                var fecFrameCount = (missingPacketCount > 0) ? 1 : 0;
                if (fecFrameCount > 0)
                {
                    var fecFrame = DecodeFEC(encodedFrame);
                    var fecFrameIndex = missingPacketCount - 1;
                    if (fecFrame == null)
                    {
                        previousFrames[fecFrameIndex] = DecodePLC();
                    }
                    else
                    {
                        previousFrames[fecFrameIndex] = fecFrame;
                    }
                }

                var frame = DecodeNormal(encodedFrame);
                frame.PreviousBuffers = previousFrames;
                return frame;
            }

        }
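
The branch above relies on RTPPacket.GetSequenceNumberDelta coping with 16-bit sequence-number wrap-around. An illustrative stand-in (not the library's actual implementation):

// RTP sequence numbers wrap at 65536, so the delta is taken modulo 2^16 and
// re-centered so a small backwards step reads as negative, not ~65535.
static int SequenceNumberDelta(int current, int last)
{
    int delta = (current - last) & 0xFFFF;
    return delta > 0x7FFF ? delta - 0x10000 : delta;
}

With that delta, a single lost packet yields missingPacketCount == 1, which triggers the one FEC decode; larger gaps are front-filled with PLC frames before the FEC frame, exactly as the code above does.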
		/// <summary>
		/// Invoked when an AudioProperty is found in the packets fed to the parser.
		/// </summary>
		void AudioPropertyFound (object sender, PropertyFoundEventArgs args)
		{
			switch (args.Property) {
			case AudioFileStreamProperty.ReadyToProducePackets:
				Started = false;
				
				
				if (OutputQueue != null)
					OutputQueue.Dispose ();
				
				OutputQueue = new OutputAudioQueue (fileStream.StreamBasicDescription);
				currentByteCount = 0;
				OutputQueue.OutputCompleted += HandleOutputQueueOutputCompleted;
				outputBuffers = new List<AudioBuffer>();
				
				for (int i = 0; i < MaxBufferCount; i++)
				{
					IntPtr outBuffer;
					OutputQueue.AllocateBuffer (BufferSize, out outBuffer);
					outputBuffers.Add (new AudioBuffer () { Buffer = outBuffer, PacketDescriptions = new List<AudioStreamPacketDescription>() });
				}
				
				currentBuffer = outputBuffers.First ();
				
				OutputQueue.MagicCookie = fileStream.MagicCookie;				
				break;
			}
		}
		/// <summary>
		/// Wait until a buffer is freed up
		/// </summary>
		void WaitForBuffer ()
		{
			int curIndex = outputBuffers.IndexOf (currentBuffer);
			currentBuffer = outputBuffers[curIndex < outputBuffers.Count - 1 ? curIndex + 1 : 0];
			
			lock (currentBuffer) {
				while (currentBuffer.IsInUse) 
					Monitor.Wait (currentBuffer);
			}
		}
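
WaitForBuffer parks on Monitor.Wait until the queue's completion callback frees the buffer. The other half of that handshake (wired up through OutputQueue.OutputCompleted earlier) would look roughly like this sketch; the IsInUse flag matches the one used above, the rest is assumed:

// When the OutputAudioQueue finishes with a buffer, mark it free and wake
// any thread blocked in WaitForBuffer on that same buffer object.
void OnBufferCompleted(IntPtr completedBuffer)
{
    foreach (var buffer in outputBuffers)
    {
        if (buffer.Buffer != completedBuffer)
            continue;
        lock (buffer)
        {
            buffer.IsInUse = false;
            Monitor.Pulse(buffer); // releases the Monitor.Wait in WaitForBuffer
        }
        break;
    }
}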
        public LossyWAVWriter(IAudioDest audioDest, IAudioDest lwcdfDest, double quality, AudioPCMConfig pcm)
        {
            _audioDest = audioDest;
            _lwcdfDest = lwcdfDest;
            _pcm = pcm;

            if (_audioDest != null && _audioDest.PCM.BitsPerSample > _pcm.BitsPerSample)
                throw new Exception("audio parameters mismatch: destination bit depth exceeds the source bit depth");
            if (_lwcdfDest != null && _lwcdfDest.PCM.BitsPerSample != _pcm.BitsPerSample)
                throw new Exception("audio parameters mismatch: correction-file bit depth must equal the source bit depth");

            int quality_integer = (int)Math.Floor(quality);

            fft_analysis_string = new string[4] { "0100010", "0110010", "0111010", "0111110" };
            bool[] quality_auto_fft32_on = { false, false, false, true, true, true, true, true, true, true, true };
            double[] quality_noise_threshold_shifts = { 20, 16, 9, 6, 3, 0, -2.4, -4.8, -7.2, -9.6, -12 };
            double[] quality_signal_to_noise_ratio = { -18, -22, -23.5, -23.5, -23.5, -25, -28, -31, -34, -37, -40 };
            double[] quality_dynamic_minimum_bits_to_keep = { 2.5, 2.75, 3.00, 3.25, 3.50, 3.75, 4.0, 4.25, 4.5, 4.75, 5.00 };
            double[] quality_maximum_clips_per_channel = { 3, 3, 3, 3, 2, 1, 0, 0, 0, 0, 0 };

            this_analysis_number = 2;
            impulse = quality_auto_fft32_on[quality_integer];
            linkchannels = false;
            noise_threshold_shift = Math.Round(interpolate_param(quality_noise_threshold_shifts, quality) * 1000) / 1000;
            snr_value = Math.Round(interpolate_param(quality_signal_to_noise_ratio, quality) * 1000) / 1000;
            dynamic_minimum_bits_to_keep = Math.Round(interpolate_param(quality_dynamic_minimum_bits_to_keep, quality) * 1000) / 1000;
            maximum_clips_per_channel = (int)Math.Round(interpolate_param(quality_maximum_clips_per_channel, quality));
            scaling_factor = 1.0;
            shaping_factor = Math.Min(1, quality / 10);
            shaping_is_on = shaping_factor > 0;

            _audioBuffer = new AudioBuffer(_pcm, 256);
        }
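
interpolate_param is not shown; given how the quality tables above are indexed by a 0..10 quality value, it is presumably a linear interpolation between adjacent entries. A sketch under that assumption:

// Assumed behavior: blend the two table entries bracketing the fractional
// quality, e.g. quality 5.5 returns the midpoint of entries 5 and 6.
static double interpolate_param(double[] table, double quality)
{
    int lower = (int)Math.Floor(quality);
    if (lower >= table.Length - 1)
        return table[table.Length - 1];
    double frac = quality - lower;
    return table[lower] + frac * (table[lower + 1] - table[lower]);
}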
        public void Write(AudioBuffer buff)
        {
            if (!initialized)
                Initialize();

            buff.Prepare(this);

            int pos = 0;
            int sampleCount = buff.Length;
            while (sampleCount + samplesInBuffer > codec_block_size)
            {
                shift_codec_blocks(); // next_codec_block_size is now zero
                if (samplesInBuffer > 0)
                    Array.Copy(sampleBuffer, 0, rotating_blocks_ptr[3], 0, samplesInBuffer * _pcm.ChannelCount);
                Array.Copy(buff.Samples, pos * _pcm.ChannelCount, rotating_blocks_ptr[3], samplesInBuffer * _pcm.ChannelCount, (codec_block_size - samplesInBuffer) * _pcm.ChannelCount);
                next_codec_block_size = codec_block_size;
                pos += codec_block_size - samplesInBuffer;
                sampleCount -= codec_block_size - samplesInBuffer;
                samplesInBuffer = 0;
                if (samplesWritten > 0)
                    process_this_codec_block();
                samplesWritten += next_codec_block_size;
            }
            if (sampleCount > 0)
            {
                Array.Copy(buff.Samples, pos * _pcm.ChannelCount, sampleBuffer, samplesInBuffer * _pcm.ChannelCount, sampleCount * _pcm.ChannelCount);
                samplesInBuffer += sampleCount;
            }
        }
 unsafe void interlace(AudioBuffer buff, int offset, int count)
 {
     if (PCM.ChannelCount == 2)
     {
         fixed (int* src = &samplesBuffer[_samplesBufferOffset])
             buff.Interlace(offset, src, src + Flake.MAX_BLOCKSIZE, count);
     }
     else
     {
         for (int ch = 0; ch < PCM.ChannelCount; ch++)
             fixed (int* res = &buff.Samples[offset, ch], src = &samplesBuffer[_samplesBufferOffset + ch * Flake.MAX_BLOCKSIZE])
             {
                 int* psrc = src;
                 // stride by the channel count: buff.Samples is an interleaved
                 // [sample, channel] array, so consecutive samples of one
                 // channel sit ChannelCount ints apart
                 for (int i = 0; i < count; i++)
                     res[i * PCM.ChannelCount] = *(psrc++);
             }
     }
 }
 /// <summary>
 /// Plays back an audio frame.
 /// </summary>
 /// <param name="buffer">The frame.</param>
 public override void Render(AudioBuffer buffer)
 {
     WaveProvider.AddSamples(buffer.Data, buffer.Index, buffer.Length);
 }
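
WaveProvider here matches the shape of NAudio's BufferedWaveProvider (AddSamples(byte[], int, int)). A minimal, hypothetical playback setup around this Render override, assuming 16-bit 48 kHz stereo frames:

// Render() pushes raw PCM into the provider; WaveOutEvent drains it to the
// default output device.
var waveProvider = new NAudio.Wave.BufferedWaveProvider(new NAudio.Wave.WaveFormat(48000, 16, 2));
var waveOut = new NAudio.Wave.WaveOutEvent();
waveOut.Init(waveProvider);
waveOut.Play();
// each decoded frame then arrives via Render(buffer), which forwards it with
// waveProvider.AddSamples(buffer.Data, buffer.Index, buffer.Length);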