private void ProcessAudio()
{
    SetupChart();

    byte[] audioData = new byte[16 * 1024];
    Int16[] convertedData = new Int16[audioData.Length / sizeof(Int16)];
    int read = 0;
    Int16 lastSample = 0;
    int maxUpper = 500;

    while (Wave.Position < Wave.Length)
    {
        read = Wave.Read(audioData, 0, 16 * 1024);

        // Might need this to check we haven't run out of stuff to read
        if (read == 0) { break; }

        // Convert the raw bytes into 16-bit samples.
        for (int i = 0; i < read / 2; i++)
        {
            convertedData[i] = BitConverter.ToInt16(audioData, i * 2);
            //chart1.Series["Wave"].Points.Add(BitConverter.ToInt16(audioData, i * 2));
        }

        //foreach (var sample in convertedData)
        //    chart1.Series["Wave"].Points.Add(sample);

        // Plot a thresholded version of the waveform.
        int freqThreshold = 400;
        foreach (var sample in convertedData)
        {
            if (sample > 0 && sample <= freqThreshold)
            {
                chart1.Series["Wave"].Points.Add(freqThreshold);
            }
            else if (sample > freqThreshold)
            {
                chart1.Series["Wave"].Points.Add(0);
            }
            else if (sample < 0 && sample >= -freqThreshold)
            {
                chart1.Series["Wave"].Points.Add(freqThreshold);
            }
            else if (sample < -freqThreshold)
            {
                chart1.Series["Wave"].Points.Add(0);
            }
        }

        // Most frequent positive sample value in this block.
        int mode = (from sample in convertedData
                    where sample > 0
                    group sample by sample into g
                    orderby g.Count() descending
                    select g.Key).FirstOrDefault();
        Console.WriteLine("Mode: {0}", mode);
    }
}
private static void TestMp3()
{
    string mp3File = @".\TestAudio\speech_20200512013308591.mp3";
    string outputFile = @".\TestAudio\output-from-mp3.wav";

    using (Mp3FileReader reader = new Mp3FileReader(mp3File))
    using (WaveStream pcmStream = WaveFormatConversionStream.CreatePcmStream(reader))
    {
        WaveFileWriter.CreateWaveFile(outputFile, pcmStream);
    }

    return;

    // NOTE: everything below is unreachable because of the early return above.
    // It converts the MP3 to 8kHz mu-law samples in 160-byte (20ms) chunks.
    Dictionary<uint, byte[]> audioBytes = new Dictionary<uint, byte[]>();
    uint timestamp = 0;

    string file = @".\TestAudio\speech_20200512013308591.mp3";
    //var pcmFormat = new WaveFormat(8000, 16, 1);
    var ulawFormat = WaveFormat.CreateMuLawFormat(8000, 1);

    using (var pcmStm = WaveFormatConversionStream.CreatePcmStream(new Mp3FileReader(file)))
    using (WaveFormatConversionStream ulawStm = new WaveFormatConversionStream(ulawFormat, pcmStm))
    {
        byte[] buffer = new byte[160];
        int bytesRead = ulawStm.Read(buffer, 0, 160);
        while (bytesRead > 0)
        {
            byte[] sample = new byte[bytesRead];
            Array.Copy(buffer, sample, bytesRead);
            //m_rtpChannel.AddSample(sample);
            audioBytes.Add(timestamp, sample);
            timestamp += 160;
            bytesRead = ulawStm.Read(buffer, 0, 160);
        }
    }

    //WaveFileWriter.CreateWaveFile(tempFile, WaveP);
    string fileName = @".\TestAudio\output.wav";
    using (WaveFileWriter writer = new WaveFileWriter(fileName, ulawFormat))
    {
        var testSequence = audioBytes.SelectMany(p => p.Value).ToArray();
        writer.Write(testSequence, 0, testSequence.Length);
    }
}
public byte[] PlayFileWave()
{
    int prevBuffer = bufferInUse;
    long pos = audioStream.Position;

    if (currentBufferWriter != null) { currentBufferWriter.Wait(); }

    int remain = Convert.ToInt32(audioStream.Length - audioStream.Position);
    Console.WriteLine(remain + "<" + bufferSize);

    if (remain < bufferSize)
    {
        state = PlaybackState.Stopped;
        if (remain != 0)
        {
            // Append the remaining bytes after the data already in the current buffer.
            int newBufferSize = bufferSize + remain;
            byte[] tempB = new byte[newBufferSize];
            Array.Copy(audioBuffers[bufferInUse], tempB, bufferSize);
            audioStream.Read(tempB, bufferSize, remain);
            bufferSize = newBufferSize;
            return tempB;
        }
        else
        {
            return audioBuffers[bufferInUse];
        }
    }

    if (state == PlaybackState.Paused || state == PlaybackState.Stopped)
    {
        state = PlaybackState.Playing;
        audioStream.Read(audioBuffers[bufferInUse], 0, bufferSize);
        switchBuffer();
        currentBufferWriter = audioStream.ReadAsync(audioBuffers[bufferInUse], 0, bufferSize);
    }
    else
    {
        switchBuffer();
        currentBufferWriter = audioStream.ReadAsync(audioBuffers[bufferInUse], 0, bufferSize);
    }

    return audioBuffers[prevBuffer];

    //switchBuffer();
    //int remain = Convert.ToInt32(audioStream.Length - audioStream.Position);
    //audioStream.Read(audioBuffers[bufferInUse], 0, remain);
    //return audioBuffers[bufferInUse];
}
public static byte[] CompressToMsAdpcm(byte[] data, uint overwriteSampleRate, out int uncompressedSize)
{
    WaveFormat pcmFormat = new WaveFormat((int)overwriteSampleRate, 16, 1);
    AdpcmWaveFormat adpcmFormat = new AdpcmWaveFormat((int)overwriteSampleRate, 1);

    using (var inStream = new MemoryStream(data))
    using (var anyWaveStream = new WaveFileReader(inStream))
    using (var pcmStream = new RawSourceWaveStream(anyWaveStream, pcmFormat))
    {
        int sampleSize = (pcmStream.WaveFormat.BitsPerSample * pcmStream.WaveFormat.Channels) / 8;
        int uncompressedSampleCount = (int)(pcmStream.Length / sampleSize);
        uncompressedSampleCount = AlignTo(uncompressedSampleCount, adpcmFormat.SamplesPerBlock);
        uncompressedSize = uncompressedSampleCount * 2; // Times 2 because 16-bit mono samples (2 bytes per sample)

        // We have to align the wave data to the ADPCM block size,
        // otherwise NAudio will just cut off some samples!
        using (var alignedPcmStream = new AlignStream { _baseStream = pcmStream, _extendedLengthInBytes = uncompressedSize })
        using (var adpcmStream = new WaveFormatConversionStream(adpcmFormat, alignedPcmStream))
        using (var outStream = new MemoryStream())
        {
            using (WaveFileWriter outWaveFileformat = new WaveFileWriter(outStream, adpcmFormat))
            {
                byte[] buffer = new byte[8192];
                int bytesRead;
                while ((bytesRead = adpcmStream.Read(buffer, 0, buffer.Length)) != 0)
                {
                    outWaveFileformat.Write(buffer, 0, bytesRead);
                }
            }
            return outStream.ToArray();
        }
    }
}
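// A hedged usage sketch for CompressToMsAdpcm above; the file paths and the 22050Hz
// sample rate are illustrative assumptions, not taken from the original project.
// The input must be a complete RIFF/WAV byte array because the method opens it with WaveFileReader.
public static void CompressToMsAdpcmUsageSketch()
{
    byte[] wavBytes = File.ReadAllBytes(@"input.wav");
    int uncompressedSize;
    byte[] adpcmWav = CompressToMsAdpcm(wavBytes, 22050, out uncompressedSize);
    File.WriteAllBytes(@"output-adpcm.wav", adpcmWav);
}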
public Mp3Provider(string filename)
{
    byte[] convertedData;
    int size;

    WaveFormat f = new WaveFormat(44100, 16, 2);
    using (WaveStream r = OpenWaveFile(filename))
    using (var conv = new WaveFormatConversionStream(f, r))
    {
        int len = (int)conv.Length;
        size = len / 4;
        convertedData = new byte[len];
        conv.Read(convertedData, 0, len);
    }

    _RawData = convertedData;
    _RawFormat = f;
    _TotalTimeMs = convertedData.Length * 1000L / f.AverageBytesPerSecond;

    _Timer = new PlayerTimer(_TotalTimeMs);
    _Timer.StateChanged += Timer_StateChanged;

    _WavePlayer = new WavePlayer(_RawData, _RawFormat, _TotalTimeMs);
}
public async Task PlayAudioAsync(string filePath, Channel channel)
{
    var _vClient = await JoinVoiceChannelAsync(channel);
    var channelCount = Client.GetService<AudioService>().Config.Channels;

    var outFormat = new WaveFormat(48000, 16, 2);
    var length = Convert.ToInt32(outFormat.AverageBytesPerSecond / 60.0 * 1000.0);
    byte[] buffer = new byte[length];

    using (var reader = new WaveFileReader(filePath))
    using (var resampler = new WaveFormatConversionStream(outFormat, reader))
    {
        int count = 0;
        while ((count = resampler.Read(buffer, 0, length)) > 0)
        {
            _vClient.Send(buffer, 0, count);
        }
    }

    //todo: maybe someday we can use mp3s
    //using (var reader = new Mp3FileReader(filePath))
    //{
    //    using (var resampler = new DmoMp3FrameDecompressor(outFormat))
    //    {
    //        int count = 0;
    //        while((count = resampler.))
    //    }
    //}

    _vClient.Wait();
    await _vClient.Disconnect();
}
private static void SynthToCam(string fileName, CameraWindow cw)
{
    using (var waveStream = new MemoryStream())
    {
        //write some silence to the stream to allow camera to initialise properly
        var silence = new byte[1 * 22050];
        waveStream.Write(silence, 0, silence.Length);

        //read in and convert the wave stream into our format
        using (var reader = new WaveFileReader(fileName))
        {
            var newFormat = new WaveFormat(11025, 16, 1);
            byte[] buff = new byte[22050];
            using (var conversionStream = new WaveFormatConversionStream(newFormat, reader))
            {
                do
                {
                    int i = conversionStream.Read(buff, 0, 22050);
                    waveStream.Write(buff, 0, i);
                    if (i < 22050) { break; }
                } while (true);
            }
        }

        //write some silence to the stream to allow camera to end properly
        waveStream.Write(silence, 0, silence.Length);
        waveStream.Seek(0, SeekOrigin.Begin);

        var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
        var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
        ds.Start();
        talkTarget.Start();

        while (ds.IsRunning)
        {
            Thread.Sleep(100);
        }

        ds.Stop();
        talkTarget.Stop();
        talkTarget = null;
        ds = null;
        waveStream.Close();
    }
}
private byte[] RealMix(ReceivedRtp item1, ReceivedRtp item2)
{
    if (item1 == null || item2 == null) { return null; }
    if (item1.size == 0 || item2.size == 0) { return null; }

    // Strip the RTP headers from both payloads.
    byte[] wavSrc1 = new byte[item1.size - headersize];
    byte[] wavSrc2 = new byte[item2.size - headersize];
    Array.Copy(item1.buff, headersize, wavSrc1, 0, item1.size - headersize);
    Array.Copy(item2.buff, headersize, wavSrc2, 0, item2.size - headersize);

    WaveMixerStream32 mixer = new WaveMixerStream32();
    // mixer.AutoStop = true;

    // First input: raw bytes -> 16-bit PCM -> 32-bit float channel.
    MemoryStream memstrem = new MemoryStream(wavSrc1);
    RawSourceWaveStream rawsrcstream = new RawSourceWaveStream(memstrem, this.codec);
    WaveFormatConversionStream conversionstream = new WaveFormatConversionStream(pcmFormat16, rawsrcstream);
    WaveChannel32 channelstream = new WaveChannel32(conversionstream);
    mixer.AddInputStream(channelstream);

    // Second input: same pipeline.
    memstrem = new MemoryStream(wavSrc2);
    rawsrcstream = new RawSourceWaveStream(memstrem, this.codec);
    conversionstream = new WaveFormatConversionStream(pcmFormat16, rawsrcstream);
    channelstream = new WaveChannel32(conversionstream);
    mixer.AddInputStream(channelstream);

    mixer.Position = 0;

    // Mix, convert back to 16-bit, then down to the 8kHz output format.
    Wave32To16Stream to16 = new Wave32To16Stream(mixer);
    var convStm = new WaveFormatConversionStream(pcmFormat8, to16);

    byte[] mixedbytes = new byte[(int)convStm.Length];
    int chk = convStm.Read(mixedbytes, 0, (int)convStm.Length);
    //Buffer.BlockCopy(tobyte, 0, writingBuffer, 0, tobyte.Length);

    memstrem.Close();
    rawsrcstream.Close();
    conversionstream.Close();
    channelstream.Close();

    convStm.Close();
    convStm.Dispose();
    convStm = null;

    to16.Close();
    to16.Dispose();
    to16 = null;

    mixer.Close();
    mixer.Dispose();
    mixer = null;

    return mixedbytes;
}
private void AudioSourceDataAvailable(object sender, DataAvailableEventArgs e)
{
    try
    {
        lock (_obj)
        {
            if (_bTalking && _avstream != null)
            {
                byte[] bSrc = e.RawData;
                int totBytes = bSrc.Length;

                if (!_audioSource.RecordingFormat.Equals(_waveFormat))
                {
                    using (var ws = new TalkHelperStream(bSrc, totBytes, _audioSource.RecordingFormat))
                    {
                        int j = -1;
                        var bDst = new byte[44100];
                        totBytes = 0;
                        using (var helpStm = new WaveFormatConversionStream(_waveFormat, ws))
                        {
                            while (j != 0)
                            {
                                j = helpStm.Read(bDst, totBytes, 10000);
                                totBytes += j;
                            }
                            helpStm.Close();
                        }
                        ws.Close();
                        bSrc = bDst;
                    }
                }

                var enc = new byte[totBytes / 2];
                ALawEncoder.ALawEncode(bSrc, totBytes, enc);

                try
                {
                    _avstream.Write(enc, 0, enc.Length);
                    _avstream.Flush();
                }
                catch (SocketException)
                {
                    StopTalk();
                }
            }
        }
    }
    catch (Exception ex)
    {
        MainForm.LogExceptionToFile(ex);
        StopTalk();
    }
}
private static void TestWav()
{
    Dictionary<uint, byte[]> audioBytes = new Dictionary<uint, byte[]>();
    uint timestamp = 0;

    string file = @".\TestAudio\output-from-mp3.wav";
    var pcmFormat = new WaveFormat(8000, 16, 1);
    var ulawFormat = WaveFormat.CreateMuLawFormat(8000, 1);

    using (WaveFormatConversionStream pcmStm = new WaveFormatConversionStream(pcmFormat, new WaveFileReader(file)))
    using (WaveFormatConversionStream ulawStm = new WaveFormatConversionStream(ulawFormat, pcmStm))
    {
        byte[] buffer = new byte[160];
        int bytesRead = ulawStm.Read(buffer, 0, 160);
        while (bytesRead > 0)
        {
            byte[] sample = new byte[bytesRead];
            Array.Copy(buffer, sample, bytesRead);
            //m_rtpChannel.AddSample(sample);
            audioBytes.Add(timestamp, sample);
            timestamp += 160;
            bytesRead = ulawStm.Read(buffer, 0, 160);
        }
    }

    string fileName = @".\TestAudio\output-from-wav.wav";
    using (WaveFileWriter writer = new WaveFileWriter(fileName, ulawFormat))
    {
        var testSequence = audioBytes.SelectMany(p => p.Value).ToArray();
        writer.Write(testSequence, 0, testSequence.Length);
    }
}
private void GetAudioSamples()
{
    ////var pcmStream = WaveFormatConversionStream.CreatePcmStream(new Mp3FileReader("whitelight.mp3"));
    //var pcmStream = new WaveFileReader("whitelight-ulaw.wav");
    //byte[] sampleBuffer = new byte[160];
    //int bytesRead = pcmStream.Read(sampleBuffer, 0, 160);
    ////int bytesRead = m_rawRTPPayloadReader.BaseStream.Read(sampleBuffer, 0, 160);
    //while (bytesRead > 0)
    //{
    //    m_rtpChannel.AddSample(sampleBuffer);
    //    bytesRead = pcmStream.Read(sampleBuffer, 0, 160);
    //    //bytesRead = m_rawRTPPayloadReader.BaseStream.Read(sampleBuffer, 0, 160);
    //}

    var pcmFormat = new WaveFormat(8000, 16, 1);
    var ulawFormat = WaveFormat.CreateMuLawFormat(8000, 1);

    using (WaveFormatConversionStream pcmStm = new WaveFormatConversionStream(pcmFormat, new Mp3FileReader("whitelight.mp3")))
    using (WaveFormatConversionStream ulawStm = new WaveFormatConversionStream(ulawFormat, pcmStm))
    {
        byte[] buffer = new byte[160];
        int bytesRead = ulawStm.Read(buffer, 0, 160);
        while (bytesRead > 0)
        {
            byte[] sample = new byte[bytesRead];
            Array.Copy(buffer, sample, bytesRead);
            m_rtpChannel.Send(sample, 20);
            bytesRead = ulawStm.Read(buffer, 0, 160);
        }
    }

    logger.Debug("Finished adding audio samples.");
}
/// <summary>
/// Decodes an MP3 file and writes the converted PCM bytes to a stream
/// </summary>
/// <param name="pathToMp3">Path to a .mp3 file to convert</param>
/// <param name="outputStream">Stream to store the converted audio.</param>
/// <returns>The WaveFormat object of the converted audio</returns>
private static WaveFormat mp3ToWav(string pathToMp3, Stream outputStream, int sampleRate, int bitDepth, int numChannels)
{
    using (var reader = new Mp3FileReader(pathToMp3))
    {
        var targetFormat = new NAudio.Wave.WaveFormat(sampleRate, bitDepth, numChannels);
        var pcmStream = new WaveFormatConversionStream(targetFormat, reader);

        var buffer = new byte[pcmStream.Length];
        pcmStream.Read(buffer, 0, (int)pcmStream.Length);

        outputStream.Write(buffer, 0, buffer.Length);
        outputStream.Position = 0;

        pcmStream.Close();
        return targetFormat;
    }
}
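// A hedged usage sketch for mp3ToWav above; the file name and target format are
// illustrative assumptions. Note that the method writes raw converted sample bytes to
// outputStream (no RIFF header), so a WaveFileWriter would still be needed to produce
// an actual .wav file.
private static void Mp3ToWavUsageSketch()
{
    using (var pcmStream = new MemoryStream())
    {
        WaveFormat fmt = mp3ToWav(@"song.mp3", pcmStream, 44100, 16, 2);
        // pcmStream now holds the converted samples and is positioned at 0.
    }
}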
private byte[] ResampleWindows(byte[] pcm)
{
    using (MemoryStream mem = new MemoryStream(pcm))
    using (RawSourceWaveStream stream = new RawSourceWaveStream(mem, oldFormat))
    using (WaveFormatConversionStream resampler = new WaveFormatConversionStream(newFormat, stream))
    {
        int resampled_length = (int)((float)pcm.Length * ((float)newFormat.SampleRate / (float)oldFormat.SampleRate));
        byte[] ret = new byte[resampled_length];
        resampler.Read(ret, 0, resampled_length);
        return ret;
    }
}
static Task DoVoice(DiscordVoiceClient vc, string file)
{
    return Task.Run(() =>
    {
        try
        {
            int ms = 20;
            int channels = 2;
            int sampleRate = 48000;
            int blockSize = 48 * 2 * channels * ms; //sample rate * 2 * channels * milliseconds
            byte[] buffer = new byte[blockSize];

            var outFormat = new WaveFormat(sampleRate, 16, channels);

            vc.SetSpeaking(true);
            using (var mp3Reader = new Mp3FileReader(file))
            using (var resampler = new WaveFormatConversionStream(outFormat, mp3Reader))
            {
                int byteCount;
                while ((byteCount = resampler.Read(buffer, 0, blockSize)) > 0)
                {
                    if (vc.Connected)
                    {
                        //sequence = await vc.SendSmallOpusAudioPacket(buffer, sampleRate, byteCount, sequence).ConfigureAwait(false);
                        vc.SendVoice(buffer);
                        //sequence = vc.SendSmallOpusAudioPacket(buffer, 48000, buffer.Length, sequence);
                        //Task.Delay(19).Wait();
                    }
                    else
                    {
                        break;
                    }
                }

                Console.ForegroundColor = ConsoleColor.Yellow;
                Console.WriteLine("Voice finished enqueuing");
                Console.ForegroundColor = ConsoleColor.White;
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
        }
    });
}
public void ConvertData(byte[] data, int length)
{
    if (mSrcWaveFormat == null)
    {
        SubDebug("Source wave format is null");
        return;
    }
    if (mPcmWaveFormat == null)
    {
        SubDebug("Pcm wave format is null");
        return;
    }
    if (mBufferedStream == null)
    {
        SubDebug("Buffered wave stream is null");
        return;
    }
    if (mConvStream == null)
    {
        SubDebug("Conversion wave stream is null");
        return;
    }

    try
    {
        mBufferedStream.Write(data, 0, length);

        byte[] buffer = new byte[mPcmWaveFormat.AverageBytesPerSecond / 5];
        while (true)
        {
            int bytesRead = mConvStream.Read(buffer, 0, buffer.Length);

            // A return value of 0 means we have read all available data.
            if (bytesRead == 0) { break; }

            SubDataConverted(buffer, bytesRead);
        }
    }
    catch (Exception ex)
    {
        SubDebug(string.Format("Convert data failed.\t{0}", ex.Message));
    }
}
private void CanCreateConversionStream(WaveFormat inputFormat, WaveFormat outputFormat)
{
    var inputStream = new NullWaveStream(inputFormat, 10000);
    using (var stream = new WaveFormatConversionStream(outputFormat, inputStream))
    {
        byte[] buffer = new byte[stream.WaveFormat.AverageBytesPerSecond];
        int totalRead = 0;
        int bytesRead;
        do
        {
            bytesRead = stream.Read(buffer, 0, buffer.Length);
            totalRead += bytesRead;
        } while (bytesRead > 0);

        Debug.WriteLine(String.Format("Converted {0}", totalRead));
        Assert.AreEqual(inputStream.Length, inputStream.Position);
    }
}
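// A hedged example of how the test helper above might be invoked; the format pairs are
// illustrative, and the [Test] attribute assumes an NUnit-style test fixture.
[Test]
public void CanConvertPcmToMuLaw()
{
    CanCreateConversionStream(
        new WaveFormat(8000, 16, 1),             // 16-bit PCM, 8kHz, mono
        WaveFormat.CreateMuLawFormat(8000, 1));  // mu-law, 8kHz, mono
}

[Test]
public void CanConvertPcmToALaw()
{
    CanCreateConversionStream(
        new WaveFormat(8000, 16, 1),
        WaveFormat.CreateALawFormat(8000, 1));   // A-law, 8kHz, mono
}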
/// <summary>
/// Resample a wave file to a new wave format and return the converted audio
/// </summary>
/// <param name="wavIn">input stream containing the wave file</param>
/// <param name="waveFormat">target wave format</param>
/// <returns>the converted audio as a byte array</returns>
public static byte[] ResampleWav(Stream wavIn, WaveFormat waveFormat)
{
    using (var reader = new WaveFileReader(wavIn))
    using (var conversionStream = new WaveFormatConversionStream(waveFormat, reader))
    using (MemoryStream ms = new MemoryStream())
    {
        int bytes = 0;
        byte[] buffer = new byte[16 * 1024];
        while ((bytes = conversionStream.Read(buffer, 0, buffer.Length)) != 0)
        {
            ms.Write(buffer, 0, bytes);
        }
        return ms.ToArray();
    }
}
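// A hedged usage sketch for ResampleWav above; the file paths and the 8kHz 16-bit mono
// target format are illustrative assumptions.
public static void ResampleWavUsageSketch()
{
    var target = new WaveFormat(8000, 16, 1);
    using (var input = File.OpenRead(@"input.wav"))
    using (var writer = new WaveFileWriter(@"output-8khz.wav", target))
    {
        byte[] resampled = ResampleWav(input, target);
        writer.Write(resampled, 0, resampled.Length);
    }
}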
public static void ComputeAllFeatures(string wavpath, string resultpath, System.Windows.Forms.TextBox progress)
{
    int aa = 0;
    StreamWriter writer = new StreamWriter("d:\\features.txt");

    for (int i = 1; i <= 674; i++)
    {
        // Read the wave file and convert it to 8-bit mono at its original sample rate.
        WaveStream readerStream = new WaveFileReader(@"D:\Z projekti\Emocije\code\sustav(pun)\wavs\" + i.ToString() + ".wav");
        WaveFormat format = new WaveFormat(readerStream.WaveFormat.SampleRate, 8, 1);
        readerStream = new WaveFormatConversionStream(format, readerStream);

        int length = (int)readerStream.Length;
        byte[] buffer = new byte[length];
        readerStream.Read(buffer, 0, length);

        Classifier = new Classifiers.GoodClassifier();
        Classifier.SamplingFrequency = 11025;
        Classifier.SubWindowLength = 256;  // 44100 [samples per second] * 0.025 [25 millisecond interval]
        Classifier.SubWindowShift = 165;   // 44100 [samples per second] * 0.015 [15 millisecond interval]
        Classifier.SuperWindowLength = (int)Math.Floor((double)length / Classifier.SubWindowShift) - 10; // 44100 [samples per second] / 1102 [SubFeatures per second] * 2 [seconds]
        Classifier.SuperWindowShift = 5;   // 44100 [samples per second] / 1102 [SubFeatures per second] * 1 [seconds]
        Classifier.ClassificationComplete += new AbstractClassifier.ClassifComplete(Classifier_ClassificationComplete);
        Classifier.SubFeaturesComputed += new AbstractClassifier.SubFeaturesComp(Classifier_SubFeaturesComputed);
        Classifier.SuperFeaturesComputed += new AbstractClassifier.SuperFeaturesComp(Classifier_SuperFeaturesComputed);
        Classifier.EnqueueData(buffer);

        System.Windows.Forms.Application.DoEvents();

        string line = "";
        foreach (double d in Classifier.AllFeatures.First())
        {
            line += d.ToString(CultureInfo.InvariantCulture) + ",";
        }
        line = line.TrimEnd(',');
        line += "\r\n";
        writer.WriteLine(line);
        writer.Flush();
        progress.Text = i.ToString();
    }

    writer.Flush();
}
private void AudioSourceDataAvailable(object sender, DataAvailableEventArgs e)
{
    try
    {
        lock (_obj)
        {
            if (_bTalking && _avstream != null)
            {
                byte[] bSrc = e.RawData;
                int totBytes = bSrc.Length;

                if (!_audioSource.RecordingFormat.Equals(_waveFormat))
                {
                    var ws = new TalkHelperStream(bSrc, totBytes, _audioSource.RecordingFormat);
                    var helpStm = new WaveFormatConversionStream(_waveFormat, ws);
                    totBytes = helpStm.Read(bSrc, 0, 25000);
                    ws.Close();
                    ws.Dispose();
                    helpStm.Close();
                    helpStm.Dispose();
                }

                var enc = new byte[totBytes / 2];
                ALawEncoder.ALawEncode(bSrc, totBytes, enc);

                try
                {
                    _avstream.Write(enc, 0, enc.Length);
                    _avstream.Flush();
                }
                catch (SocketException)
                {
                    StopTalk();
                }
            }
        }
    }
    catch (Exception ex)
    {
        Log.Error("", ex); //MainForm.LogExceptionToFile(ex);
        StopTalk();
    }
}
/// <summary>
/// Converts a WMA file to a WAV stream
/// </summary>
/// <param name="pathToWma">Path to a .wma file to convert</param>
/// <param name="outputStream">Stream to store the converted wav.</param>
/// <param name="sampleRate">Target sample rate</param>
/// <param name="bitDepth">Target bits per sample</param>
/// <param name="numChannels">Target channel count</param>
/// <returns>The WaveFormat object of the converted wav</returns>
private static WaveFormat wmaToWav(string pathToWma, Stream outputStream, int sampleRate, int bitDepth, int numChannels)
{
    if (!Path.GetExtension(pathToWma).ToLowerInvariant().Contains("wma"))
    {
        throw new ArgumentException("Must be a .wma file!");
    }

    using (var reader = new WMAFileReader(pathToWma))
    {
        var targetFormat = new NAudio.Wave.WaveFormat(sampleRate, bitDepth, numChannels);
        var pcmStream = new WaveFormatConversionStream(targetFormat, reader);

        var buffer = new byte[pcmStream.Length];
        pcmStream.Read(buffer, 0, (int)pcmStream.Length);

        outputStream.Write(buffer, 0, buffer.Length);
        outputStream.Position = 0;

        pcmStream.Close();
        return targetFormat;
    }
}
public CachedSound(String audioFilename)
{
    /* force all the inputs to convert to mono, 16-bit, 44100Hz */
    var sampleRate = 44100;
    int bits = 16;
    int channel = 1;
    this.WaveFormat = new WaveFormat(sampleRate, bits, channel);

    using (var audioFileReader = new WaveFormatConversionStream(WaveFormat, new WaveFileReader(audioFilename)))
    {
        // TODO: could add resampling in here if required
        this.Length = audioFileReader.Length;
        var wholeFile = new List<byte>((int)audioFileReader.Length);
        var readBuffer = new byte[audioFileReader.WaveFormat.SampleRate * audioFileReader.WaveFormat.Channels / 4]; // roughly an eighth of a second of 16-bit audio per read
        int bytesRead;
        while ((bytesRead = audioFileReader.Read(readBuffer, 0, readBuffer.Length)) > 0)
        {
            wholeFile.AddRange(readBuffer.Take(bytesRead));
        }
        AudioData = wholeFile.ToArray();
    }
}
public static byte[] Wav_to_wav2(string fil_n, byte ch, out byte[] mono)
{
    byte[] mono2 = null;

    // Convert to 48kHz 16-bit mono on a background task.
    Task a = Task.Run(() =>
    {
        WaveFormat format = new WaveFormat(48000, 16, 1);
        try
        {
            using (WaveFileReader reader = new WaveFileReader(fil_n))
            using (WaveFormatConversionStream stream = new WaveFormatConversionStream(format, reader))
            {
                mono2 = new byte[stream.Length];
                stream.Read(mono2, 0, mono2.Length);
            }
        }
        catch (Exception)
        {
        }
    });

    // Convert to 48kHz 16-bit with the requested channel count on this thread.
    byte[] buffer = null;
    try
    {
        WaveFormat format = new WaveFormat(48000, 16, ch);
        using (WaveFileReader reader = new WaveFileReader(fil_n))
        using (WaveFormatConversionStream stream = new WaveFormatConversionStream(format, reader))
        {
            buffer = new byte[stream.Length];
            stream.Read(buffer, 0, buffer.Length);
        }
    }
    catch (Exception)
    {
    }

    a.Wait();
    mono = mono2;
    return buffer;
}
private void saveToolStripMenuItem_Click(object sender, EventArgs e)
{
    if (Wave == null)
    {
        Stream audioStream = Recorder.GetRecordingStream();
        audioStream.Position = 0;
        Wave = new WaveFormatConversionStream(WaveFormat, new Wave32To16Stream(new WaveFileReader(audioStream)));
    }

    using (WaveFileWriter waveWriter = new WaveFileWriter(@"c:\users\chris\appdata\local\temp\recording.wav", Wave.WaveFormat))
    {
        byte[] buffer = new byte[16 * 1024];
        Wave.Position = 0;
        while (Wave.Position < Wave.Length)
        {
            int read = Wave.Read(buffer, 0, 16 * 1024);
            if (read > 0)
            {
                waveWriter.Write(buffer, 0, read);
            }
        }
    }
}
public byte[] Encode()
{
    int ms = 20;
    int channels = 2;
    int sampleRate = 48000;
    int blockSize = 48 * 2 * channels * ms; //the size per each frame to encode
    byte[] buffer = new byte[blockSize];    //a nicely sized pcm buffer to work with.

    var outFormat = new WaveFormat(sampleRate, 16, channels);

    if (__filename.EndsWith(".mp3"))
    {
        using (var mp3Reader = new Mp3FileReader(__filename))
        using (var resampler = new WaveFormatConversionStream(outFormat, mp3Reader))
        {
            int byteCount;
            using (BinaryWriter bw = new BinaryWriter(new MemoryStream()))
            {
                while ((byteCount = resampler.Read(buffer, 0, blockSize)) > 0)
                {
                    //now to encode
                    byte[] opusOutput = new byte[buffer.Length]; //extra bytes but that's okay
                    int opusEncoded = encoder.EncodeFrame(buffer, 0, opusOutput);
                    bw.Write((ushort)opusEncoded);
                    bw.Write(opusOutput, 0, opusEncoded);
                }

                MemoryStream baseStream = bw.BaseStream as MemoryStream;
                return baseStream.ToArray();
            }
        }
    }

    return null;
}
private void AudioSourceDataAvailable(object sender, DataAvailableEventArgs e)
{
    try
    {
        lock (_obj)
        {
            if (_bTalking && _avstream != null)
            {
                byte[] bSrc = e.RawData;
                int totBytes = bSrc.Length;
                int j = -1;

                if (!_audioSource.RecordingFormat.Equals(_waveFormat))
                {
                    var ws = new TalkHelperStream(bSrc, totBytes, _audioSource.RecordingFormat);
                    var bDst = new byte[44100];
                    totBytes = 0;
                    using (var helpStm = new WaveFormatConversionStream(_waveFormat, ws))
                    {
                        while (j != 0)
                        {
                            j = helpStm.Read(bDst, totBytes, 10000);
                            totBytes += j;
                        }
                    }
                    bSrc = bDst;
                }

                var enc = _muLawCodec.Encode(bSrc, 0, totBytes);
                ALawEncoder.ALawEncode(bSrc, totBytes, enc);

                Buffer.BlockCopy(enc, 0, _talkBuffer, _talkDatalen, enc.Length);
                _talkDatalen += enc.Length;

                j = 0;
                try
                {
                    while (j + 240 < _talkDatalen)
                    {
                        //need to write out in 240 byte packets
                        var pkt = new byte[240];
                        Buffer.BlockCopy(_talkBuffer, j, pkt, 0, 240);
                        // _avstream.Write(_hdr, 0, _hdr.Length);
                        _avstream.Write(pkt, 0, 240);
                        j += 240;
                    }

                    if (j < _talkDatalen)
                    {
                        Buffer.BlockCopy(_talkBuffer, j, _talkBuffer, 0, _talkDatalen - j);
                        _talkDatalen = _talkDatalen - j;
                    }
                }
                catch (SocketException)
                {
                    StopTalk();
                }
            }
        }
    }
    catch (Exception ex)
    {
        Logger.LogExceptionToFile(ex, "TalkAxis");
        StopTalk();
    }
}
private void AudioSourceDataAvailable(object sender, DataAvailableEventArgs e)
{
    try
    {
        lock (_obj)
        {
            if (_bTalking && _avstream != null)
            {
                byte[] bSrc = e.RawData;
                int totBytes = bSrc.Length;
                int j = -1;

                if (!_audioSource.RecordingFormat.Equals(_waveFormat))
                {
                    var ws = new TalkHelperStream(bSrc, totBytes, _audioSource.RecordingFormat);
                    var bDst = new byte[44100];
                    totBytes = 0;
                    using (var helpStm = new WaveFormatConversionStream(_waveFormat, ws))
                    {
                        while (j != 0)
                        {
                            j = helpStm.Read(bDst, totBytes, 10000);
                            totBytes += j;
                        }
                    }
                    bSrc = bDst;
                }

                if (_needsencodeinit)
                {
                    EncodeInit(BitConverter.ToInt16(e.RawData, 0), BitConverter.ToInt16(e.RawData, 2));
                    _needsencodeinit = false;
                }

                var buff = new byte[25000];
                int c;
                unsafe
                {
                    fixed (byte* src = bSrc)
                    {
                        fixed (byte* dst = buff)
                        {
                            c = EncodeFoscam(src, totBytes, dst);
                        }
                    }
                }

                Buffer.BlockCopy(buff, 0, _talkBuffer, _talkDatalen, c);
                _talkDatalen += c;

                var dtms = (int)(DateTime.UtcNow - _dt).TotalMilliseconds;
                int i = 0;
                j = 0;
                try
                {
                    while (j + 160 < _talkDatalen)
                    {
                        //need to write out in 160 byte packets for 40ms
                        byte[] cmd = SInit(TalkData, MoIPAvFlag);
                        cmd = AddNext(cmd, dtms + (i * 40));
                        cmd = AddNext(cmd, _seq);
                        cmd = AddNext(cmd, (int)(DateTime.UtcNow - _dt).TotalSeconds);
                        cmd = AddNext(cmd, (byte)0x0);
                        cmd = AddNext(cmd, 160);

                        var pkt = new byte[160];
                        Buffer.BlockCopy(_talkBuffer, j, pkt, 0, 160);
                        cmd = AddNext(cmd, pkt, 160);
                        Encode(ref cmd);

                        _avstream.Write(cmd, 0, cmd.Length);
                        j += 160;
                        _seq++;
                        i++;
                    }

                    if (j < _talkDatalen)
                    {
                        Buffer.BlockCopy(_talkBuffer, j, _talkBuffer, 0, _talkDatalen - j);
                        _talkDatalen = _talkDatalen - j;
                    }
                }
                catch (SocketException)
                {
                    StopTalk(true);
                }
            }
        }
    }
    catch (Exception ex)
    {
        Logger.LogException(ex, "TalkFoscam");
        StopTalk(true);
    }
}
public byte[] CustomConversionToLinear(byte[] audio)
{
    log4net.ILog log = log4net.LogManager.GetLogger(System.Reflection.MethodBase.GetCurrentMethod().DeclaringType);

    bool resample8KHz16bit = false;
    MemoryStream inMem = new MemoryStream(audio);
    byte[] output;
    int outputLength = 0;

    String traceabilityId = Guid.NewGuid().ToString() + ": ";
    log.Info(traceabilityId + "Start CustomConversionToLinear with audio[" + audio.Length + "]. resample8KHz16bit = " + resample8KHz16bit);
    log.Info(traceabilityId + "First bytes of audio: " + audio[0] + " " + audio[1] + " " + audio[2] + " " + audio[3]);

    switch (AudioType.CheckAudioType(audio))
    {
        case AudioTypeEnum.WAV:
            log.Info(traceabilityId + "File is WAV");
            if (!resample8KHz16bit)
            {
                output = inMem.ToArray();
                outputLength = output.Length;
            }
            else
            {
                byte[] fileBytes;
                using (var wfr = new WaveFileReader(inMem))
                using (var pcmStream = new WaveFormatConversionStream(new WaveFormat(8000, 16, wfr.WaveFormat.Channels), wfr))
                {
                    fileBytes = new byte[pcmStream.Length];
                    pcmStream.Read(fileBytes, 0, Convert.ToInt32(pcmStream.Length));
                    output = fileBytes;
                    outputLength = output.Length;
                }
            }
            break;

        case AudioTypeEnum.MP3:
            log.Info(traceabilityId + "File is MP3");
            output = ConvertMP3toWAV(inMem, resample8KHz16bit);
            outputLength = output.Length;
            break;

        case AudioTypeEnum.WMA:
            log.Info(traceabilityId + "File is WMA");
            output = ConvertWMAtoWAV(inMem, resample8KHz16bit);
            outputLength = output.Length;
            break;

        default:
        case AudioTypeEnum.Unknown:
            log.Warn(traceabilityId + "File is UNKNOWN");
            output = inMem.ToArray();
            outputLength = output.Length;
            break;
    }

    log.Info(traceabilityId + "Finished: output length is " + outputLength);
    return output;
}
private static void SynthToCam(string fileName, CameraWindow cw)
{
    try
    {
        using (var waveStream = new MemoryStream())
        {
            //write some silence to the stream to allow camera to initialise properly
            var silence = new byte[1 * 22050];
            waveStream.Write(silence, 0, silence.Length);

            var newFormat = new WaveFormat(11025, 16, 1);
            try
            {
                if (File.Exists(fileName))
                {
                    //read in and convert the wave stream into our format
                    var reader = new WaveFileReader(fileName);
                    var buff = new byte[22050];
                    using (var conversionStream = new WaveFormatConversionStream(newFormat, reader))
                    {
                        while (true)
                        {
                            var i = conversionStream.Read(buff, 0, buff.Length);
                            waveStream.Write(buff, 0, i);
                            if (i < 22050) { break; }
                        }
                    }
                }
                else
                {
                    //force the fallback path in the catch block below
                    throw null;
                }
            }
            catch
            {
                const int BUFFER_LIMIT = 1024000;
                using (var ar = new AudioReader(newFormat.SampleRate, newFormat.Channels))
                {
                    ar.ReadSamples(fileName, (b, c) =>
                    {
                        waveStream.Write(b, 0, c);
                        return waveStream.Length >= BUFFER_LIMIT;
                    });
                }
            }

            //write some silence to the stream to allow camera to end properly
            waveStream.Write(silence, 0, silence.Length);
            waveStream.Seek(0, SeekOrigin.Begin);

            var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
            var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
            ds.Start();
            talkTarget.Start();

            while (ds.IsRunning)
            {
                Thread.Sleep(100);
            }

            ds.Stop();
            talkTarget.Stop();
            talkTarget = null;
            ds = null;
        }
    }
    catch (Exception ex)
    {
        Logger.LogException(ex, "SynthToCam");
    }
}
public void PlaySoundEffect(User user, Channel ch, string name)
{
    if (string.IsNullOrWhiteSpace(name)) { return; }

    // Ensure voice channel is connected
    ConnectToVoice();

    // Play the sound effect
    Task.Run(() =>
    {
        try
        {
            sending.WaitOne();

            var effect = SoundEffectRepository.FindByName(name);
            if (audio != null && effect != null)
            {
                if (effect.Duration.TotalMilliseconds == 0) { return; }

                SoundboardLoggingService.Instance.Info(
                    string.Format("[{0}] playing <{1}>", user.Name, name));

                // Change "playing" to the sound effect name
                SetStatusMessage(name);

                // Records play statistics
                Statistics.Play(user, effect);

                // Notify users soundbot will begin playing
                SendMessage(ch, string.Format(Properties.Resources.MessagePlayingSound, name));

                // Resample and stream sound effect over the configured voice channel
                var format = new WaveFormat(48000, 16, 2);
                var length = Convert.ToInt32(format.AverageBytesPerSecond / 60.0 * 1000.0);
                var buffer = new byte[length];

                using (var reader = new WaveFileReader(effect.Path))
                using (var resampler = new WaveFormatConversionStream(format, reader))
                {
                    int count = 0;
                    while ((count = resampler.Read(buffer, 0, length)) > 0)
                    {
                        audio.Send(buffer, 0, count);
                    }
                }
                audio.Wait();

                SetStatusMessage(Configuration.Status);
            }
        }
        catch (Exception ex)
        {
            SoundboardLoggingService.Instance.Error(
                string.Format(Properties.Resources.MessagePlayingFailed, name), ex);
        }
        finally
        {
            sending.Set();
        }
    });
}
private static async Task SendRecvRtp(Socket rtpSocket, RTPSession rtpSession, IPEndPoint dstRtpEndPoint, string audioFileName, CancellationTokenSource cts)
{
    try
    {
        SIPSorcery.Sys.Log.Logger.LogDebug($"Sending from RTP socket {rtpSocket.LocalEndPoint} to {dstRtpEndPoint}.");

        // Nothing is being done with the data being received from the client. But the remote rtp socket will
        // be switched if it differs from the one in the SDP. This helps cope with NAT.
        var rtpRecvTask = Task.Run(async () =>
        {
            DateTime lastRecvReportAt = DateTime.Now;
            uint packetReceivedCount = 0;
            uint bytesReceivedCount = 0;
            byte[] buffer = new byte[512];
            EndPoint remoteEP = new IPEndPoint(IPAddress.Any, 0);

            SIPSorcery.Sys.Log.Logger.LogDebug($"Listening on RTP socket {rtpSocket.LocalEndPoint}.");

            var recvResult = await rtpSocket.ReceiveFromAsync(buffer, SocketFlags.None, remoteEP);

            while (recvResult.ReceivedBytes > 0 && !cts.IsCancellationRequested)
            {
                RTPPacket rtpPacket = new RTPPacket(buffer.Take(recvResult.ReceivedBytes).ToArray());

                packetReceivedCount++;
                bytesReceivedCount += (uint)rtpPacket.Payload.Length;

                recvResult = await rtpSocket.ReceiveFromAsync(buffer, SocketFlags.None, remoteEP);

                if (DateTime.Now.Subtract(lastRecvReportAt).TotalSeconds > RTP_REPORTING_PERIOD_SECONDS)
                {
                    lastRecvReportAt = DateTime.Now;
                    dstRtpEndPoint = recvResult.RemoteEndPoint as IPEndPoint;

                    SIPSorcery.Sys.Log.Logger.LogDebug($"RTP recv {rtpSocket.LocalEndPoint}<-{dstRtpEndPoint} pkts {packetReceivedCount} bytes {bytesReceivedCount}");
                }
            }
        });

        string audioFileExt = Path.GetExtension(audioFileName).ToLower();

        switch (audioFileExt)
        {
            case ".g722":
            case ".ulaw":
            {
                uint timestamp = 0;
                using (StreamReader sr = new StreamReader(audioFileName))
                {
                    DateTime lastSendReportAt = DateTime.Now;
                    uint packetReceivedCount = 0;
                    uint bytesReceivedCount = 0;
                    byte[] buffer = new byte[320];
                    int bytesRead = sr.BaseStream.Read(buffer, 0, buffer.Length);

                    while (bytesRead > 0 && !cts.IsCancellationRequested)
                    {
                        packetReceivedCount++;
                        bytesReceivedCount += (uint)bytesRead;

                        if (!dstRtpEndPoint.Address.Equals(IPAddress.Any))
                        {
                            rtpSession.SendAudioFrame(rtpSocket, dstRtpEndPoint, timestamp, buffer);
                        }

                        timestamp += (uint)buffer.Length;

                        if (DateTime.Now.Subtract(lastSendReportAt).TotalSeconds > RTP_REPORTING_PERIOD_SECONDS)
                        {
                            lastSendReportAt = DateTime.Now;
                            SIPSorcery.Sys.Log.Logger.LogDebug($"RTP send {rtpSocket.LocalEndPoint}->{dstRtpEndPoint} pkts {packetReceivedCount} bytes {bytesReceivedCount}");
                        }

                        await Task.Delay(40, cts.Token);

                        bytesRead = sr.BaseStream.Read(buffer, 0, buffer.Length);
                    }
                }
            }
            break;

            case ".mp3":
            {
                DateTime lastSendReportAt = DateTime.Now;
                uint packetReceivedCount = 0;
                uint bytesReceivedCount = 0;

                var pcmFormat = new WaveFormat(8000, 16, 1);
                var ulawFormat = WaveFormat.CreateMuLawFormat(8000, 1);
                uint timestamp = 0;

                using (WaveFormatConversionStream pcmStm = new WaveFormatConversionStream(pcmFormat, new Mp3FileReader(audioFileName)))
                using (WaveFormatConversionStream ulawStm = new WaveFormatConversionStream(ulawFormat, pcmStm))
                {
                    byte[] buffer = new byte[320];
                    int bytesRead = ulawStm.Read(buffer, 0, buffer.Length);

                    while (bytesRead > 0 && !cts.IsCancellationRequested)
                    {
                        packetReceivedCount++;
                        bytesReceivedCount += (uint)bytesRead;

                        byte[] sample = new byte[bytesRead];
                        Array.Copy(buffer, sample, bytesRead);

                        if (dstRtpEndPoint.Address != IPAddress.Any)
                        {
                            rtpSession.SendAudioFrame(rtpSocket, dstRtpEndPoint, timestamp, buffer);
                        }

                        timestamp += (uint)buffer.Length;

                        if (DateTime.Now.Subtract(lastSendReportAt).TotalSeconds > RTP_REPORTING_PERIOD_SECONDS)
                        {
                            lastSendReportAt = DateTime.Now;
                            SIPSorcery.Sys.Log.Logger.LogDebug($"RTP send {rtpSocket.LocalEndPoint}->{dstRtpEndPoint} pkts {packetReceivedCount} bytes {bytesReceivedCount}");
                        }

                        await Task.Delay(40, cts.Token);

                        bytesRead = ulawStm.Read(buffer, 0, buffer.Length);
                    }
                }
            }
            break;

            default:
                throw new NotImplementedException($"The {audioFileExt} file type is not understood by this example.");
        }
    }
    catch (OperationCanceledException)
    {
    }
    catch (Exception excp)
    {
        SIPSorcery.Sys.Log.Logger.LogError($"Exception sending RTP. {excp.Message}");
    }
}