public void CanConvertALawToSuggestedPcm()
{
    // An A-law (8 kHz mono) source must convert to a PCM stream without throwing;
    // successful creation (and disposal) is the whole assertion here.
    var alawSource = new NullWaveStream(WaveFormat.CreateALawFormat(8000, 1), 1000);
    using (WaveStream pcm = WaveFormatConversionStream.CreatePcmStream(alawSource))
    {
    }
}
// Encode G.711: companding the currently loaded 16-bit PCM audio to A-law or µ-law
// and saving the result as a WAV file chosen by the user.
private void button2_Click(object sender, EventArgs e)
{
    try
    {
        if (this.currentAudio == null)
        {
            throw new Exception("Вы не выбрали файл для кодирования.");
        }
        if (codecToEncode.SelectedItem == null)
        {
            throw new Exception("Вы не выбрали кодэк.");
        }
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message, "Ошибка", MessageBoxButtons.OK, MessageBoxIcon.Error);
        return;
    }

    SaveFileDialog save = new SaveFileDialog();
    save.Filter = "Wave File (*.wav)|*.wav;";
    if (save.ShowDialog() != DialogResult.OK)
    {
        return;
    }

    Codecs codec = (codecToEncode.SelectedIndex == 0) ? Codecs.ALAW : Codecs.MULAW;

    // G.711 companding: each 16-bit linear sample maps to one 8-bit compressed sample.
    byte[] samples = new byte[this.currentAudio.ShortSamples.Length];
    for (int i = 0; i < this.currentAudio.ShortSamples.Length; i++)
    {
        samples[i] = (codec == Codecs.ALAW)
            ? ALawEncoder.LinearToALawSample(this.currentAudio.ShortSamples[i])
            : MuLawEncoder.LinearToMuLawSample(this.currentAudio.ShortSamples[i]);
    }

    // Keep the source's sample rate and channel count; only the encoding changes.
    WaveFormat format = (codec == Codecs.ALAW)
        ? WaveFormat.CreateALawFormat(this.currentAudio.SampleRate, this.currentAudio.Stream.WaveFormat.Channels)
        : WaveFormat.CreateMuLawFormat(this.currentAudio.SampleRate, this.currentAudio.Stream.WaveFormat.Channels);

    // FIX: dispose the writer deterministically; the original leaked the file handle
    // (and an unfinalized WAV header) if Write threw before Close().
    using (WaveFileWriter writer = new WaveFileWriter(save.FileName, format))
    {
        writer.Write(samples, 0, samples.Length);
    }

    DialogResult dres = MessageBox.Show("Аудиофайл успешно сохранен. Открыть файл?", "Файл сохранен", MessageBoxButtons.YesNo, MessageBoxIcon.Question);
    if (dres == DialogResult.Yes)
    {
        this.decodeG711(save.FileName, codec);
    }
}
// Reads a WAV file, halves its volume via a sample provider, and writes the
// result out with an A-law (8 kHz mono) target format.
// NOTE(review): WriteSamples() feeding floats into an A-law-format writer looks
// suspect — confirm NAudio performs the expected conversion for non-PCM formats.
public void TestNAudio()
{
    using (var reader = new WaveFileReader("Sound\\CAU1.wav"))
    {
        //WaveFormatConversionStream conv = new WaveFormatConversionStream(WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, 8000, 1, 8000, 1, 8), reader);
        var sp = new VolumeSampleProvider(reader.ToSampleProvider());
        sp.Volume = 0.5f;
        using (var writer = new WaveFileWriter("dv.wav", WaveFormat.CreateALawFormat(8000, 1)))
        {
            int sampleCount = (int)reader.SampleCount;
            var buff = new float[sampleCount];
            // FIX: honor the count actually returned by Read(); the original assumed a
            // full read and would write uninitialized tail samples on a short read.
            int samplesRead = sp.Read(buff, 0, sampleCount);
            writer.WriteSamples(buff, 0, samplesRead);
            // FIX: removed the redundant writer.Close()/reader.Close() calls —
            // the using blocks already dispose both streams.
        }
    }
}
/// <summary>
/// Appends an audio snippet to the open enrollment session, re-wrapping it in the
/// session's codec format (A-law or 16-bit PCM, both 8 kHz mono).
/// </summary>
/// <param name="stream">Source audio; rewound to the start when seekable.</param>
/// <returns>false when no session is open or the codec is unsupported; true otherwise.</returns>
public virtual bool Append(WaveStream stream)
{
    // Guard: audio can only be appended while a session is open.
    if (!IsSessionOpen)
    {
        return false;
    }

    if (stream.CanSeek)
    {
        stream.Seek(0, SeekOrigin.Begin);
    }

    Logger?.LogDebug("SpeechEnrollerBase.Append(): In-Length: " + stream.Length);
    Logger?.LogDebug("SpeechEnrollerBase.Append(): In-WaveFormat: " + stream.WaveFormat);

    SpeechAudio speechAudio;
    switch (Codec)
    {
        case WaveFormatEncoding.ALaw:
            speechAudio = new SpeechAudio(stream, WaveFormat.CreateALawFormat(8000, 1));
            break;
        case WaveFormatEncoding.Pcm:
            speechAudio = new SpeechAudio(stream, new WaveFormat(8000, 16, 1));
            break;
        default:
            // Any other codec is unsupported by this enroller.
            return false;
    }

    Logger?.LogDebug("SpeechEnrollerBase.Append(): Append-Length: " + speechAudio.Stream.Length);
    Logger?.LogDebug("SpeechEnrollerBase.Append(): Append-WaveFormat: " + speechAudio.Stream.WaveFormat);

    speechAudio.FileName = BuildAudioName();
    Content.Add("data", speechAudio);

    // Bookkeeping for session statistics.
    TotalSnippetsSent++;
    TotalAudioBytesSent += speechAudio.Stream.Length;
    return true;
}
/// <summary>
/// ACM-based chat codec converting between 16-bit linear PCM (8 kHz mono)
/// and G.711 A-law (8 kHz mono); all work is delegated to the base class.
/// </summary>
public AcmALawChatCodec() : base(new WaveFormat(8000, 16, 1), WaveFormat.CreateALawFormat(8000, 1)) { }
/// <summary>
/// G.711 A-law codec wrapper: pairs a 16-bit linear PCM source format (8 kHz mono)
/// with an A-law target format (8 kHz mono); behavior lives in the base class.
/// </summary>
public ALaw() : base(new WaveFormat(8000, 16, 1), WaveFormat.CreateALawFormat(8000, 1)) { }
/// <summary>
/// Form startup for a headless TTS service: hides the window, lists installed voices,
/// and wires two timers — one drains the concurrent request queue into an internal
/// speech queue (honoring interrupts), the other synthesizes queued text to raw
/// A-law audio and plays it back through NAudio. Finally starts the HTTP server.
/// </summary>
protected override void OnLoad(EventArgs e)
{
    // Runs headless: hide the form and keep it out of the taskbar.
    Visible = false;
    ShowInTaskbar = false;
    base.OnLoad(e);

    /*
     * Get all installed voices
     * */
    var voices = speech.GetInstalledVoices();
    string voice = "";
    foreach (InstalledVoice v in voices)
    {
        if (v.Enabled)
        {
            //voice = v.VoiceInfo.Name;
            Console.WriteLine(v.VoiceInfo.Name);
        }
    }

    // Every 250 ms: pull one request off the thread-safe queue, handle interrupt
    // semantics (stop playback, clear pending requests), then push a status
    // snapshot to the GUI.
    queuetimer = new System.Timers.Timer(250);
    queuetimer.Elapsed += (object sender, ElapsedEventArgs ev) =>
    {
        TTSRequest r;
        if (Queue.TryDequeue(out r))
        {
            Console.WriteLine("dequeing off of concurrent queue...");
            if (r.Interrupt)
            {
                // stop current TTS
                if (IsSpeaking)
                {
                    //speech.StopSpeaking();
                }
                if (IsSounding)
                {
                    //sound.Stop();
                    if (sound.PlaybackState == PlaybackState.Playing)
                    {
                        sound.Stop();
                    }
                }
                // clear queue
                SpeechQueue.Clear();
            }
            SpeechQueue.Enqueue(r);
            RequestCount++;
        }

        // Status snapshot pushed to the GUI on every tick.
        var eventdata = new Hashtable();
        eventdata.Add("ProcessedRequests", RequestCount);
        eventdata.Add("QueuedRequests", SpeechQueue.Count);
        eventdata.Add("IsSpeaking", IsSounding);
        InstrumentationEvent blam = new InstrumentationEvent();
        blam.EventName = "status";
        blam.Data = eventdata;
        NotifyGui(blam.EventMessage());
    };
    // when this timer fires, it will pull off of the speech queue and speak it
    // the long delay also adds a little pause between tts requests.
    speechtimer = new System.Timers.Timer(1000);
    speechtimer.Elapsed += (object sender, ElapsedEventArgs ev) =>
    {
        if (IsSpeaking.Equals(false))
        {
            if (SpeechQueue.Count > 0)
            {
                TTSRequest r = SpeechQueue.Dequeue();
                Console.WriteLine("dequeuing off of speech queue");
                IsSpeaking = true;
                // Disable the timer while this (blocking) request is processed.
                speechtimer.Enabled = false;
                //speech.SpeakAsync(r.Text);
                //using (speech = new SpeechSynthesizer()) {
                // Fresh synthesizer per request; output captured in-memory as
                // raw A-law, 8000 Hz, 8-bit, mono.
                speech = new SpeechSynthesizer();
                speech.SpeakCompleted += speech_SpeakCompleted;
                format = new SpeechAudioFormatInfo(EncodingFormat.ALaw, 8000, 8, 1, 1, 2, null);
                //format = new SpeechAudioFormatInfo(11025, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
                // var si = speech.GetType().GetMethod("SetOutputStream", BindingFlags.Instance | BindingFlags.NonPublic);
                stream = new MemoryStream();
                //si.Invoke(speech, new object[] { stream, format, true, true });
                //speech.SetOutputToWaveStream(stream);
                speech.SetOutputToAudioStream(stream, format);
                speech.SelectVoice(config.getVoice(r.Language, r.Voice));
                // Maps request speed onto the synthesizer's -10..10 rate scale
                // (presumably r.Speed is 0..10 — TODO confirm at the request source).
                int rate = (r.Speed * 2 - 10);
                Console.WriteLine(rate);
                try
                {
                    speech.Rate = rate;
                }
                catch (ArgumentOutOfRangeException ex)
                {
                    // Out-of-range speed falls back to the default rate.
                    speech.Rate = 0;
                }
                speech.SpeakAsync(r.Text);
                //}
                // Block this callback until speech_SpeakCompleted signals the event.
                synthesis.WaitOne();
                speech.SpeakCompleted -= speech_SpeakCompleted;
                speech.SetOutputToNull();
                speech.Dispose();
                //IsSpeaking = false;
                IsSounding = true;
                stream.Position = 0;
                //WaveFormat.CreateCustomFormat(WaveFormatEncoding.WmaVoice9, 11025, 1, 16000, 2, 16)
                // Wrap the raw A-law bytes, convert to PCM, and play on the configured device.
                using (RawSourceWaveStream reader = new RawSourceWaveStream(stream, WaveFormat.CreateALawFormat(8000, 1)))
                {
                    WaveStream ws = WaveFormatConversionStream.CreatePcmStream(reader);
                    //var waveProvider = new MultiplexingWaveProvider(new IWaveProvider[] { ws }, 4);
                    //waveProvider.ConnectInputToOutput(0, 3);
                    sound = new WaveOutEvent();
                    // set output device *before* init
                    Console.WriteLine("Output Device: " + OutputDeviceId);
                    sound.DeviceNumber = OutputDeviceId;
                    sound.Init(ws);
                    //sound.Init(waveProvider);
                    sound.PlaybackStopped += output_PlaybackStopped;
                    //
                    Console.WriteLine("playing here " + ws.Length);
                    sound.Play();
                }
                // Block until output_PlaybackStopped signals, then re-arm the timer.
                playback.WaitOne();
                //IsSounding = false;
                speechtimer.Enabled = true;
            }
        }
    };

    queuetimer.Enabled = true;
    queuetimer.Start();
    speechtimer.Enabled = true;
    speechtimer.Start();

    InitHTTPServer();
}
// Routes an incoming RTP record chunk to its in-progress recording instance,
// creating a new instance (with a WaveFormat inferred from the first packet's
// RTP payload type) when no recording exists for this extension/peer pair.
private void StackRtp2Instance(RecordInfo_t _recInfo)
{
    var _ingInstance = RecordIngList.FirstOrDefault(x => x.ext == _recInfo.extension && x.peer == _recInfo.peer_number);
    if (_ingInstance == null)
    {
        // Peek at the first RTP packet to determine the stream's codec.
        byte[] rtpbuff = new byte[_recInfo.size];
        Array.Copy(_recInfo.voice, 0, rtpbuff, 0, _recInfo.size);
        WinSound.RTPPacket rtp = new WinSound.RTPPacket(rtpbuff);

        WaveFormat _wavformat;
        switch (rtp.PayloadType)
        {
            case 0:  // PCMU (µ-law)
                _wavformat = WaveFormat.CreateMuLawFormat(8000, 1);
                break;
            case 8:  // PCMA (A-law)
                _wavformat = WaveFormat.CreateALawFormat(8000, 1);
                break;
            case 4:  // G.723
                _wavformat = WaveFormat.CreateCustomFormat(WaveFormatEncoding.G723, 8000, 1, 8000 * 1, 1, 8);
                break;
            case 18: // G.729
                _wavformat = WaveFormat.CreateCustomFormat(WaveFormatEncoding.G729, 8000, 1, 8000 * 1, 1, 8);
                break;
            default: // Unknown payload types fall back to A-law.
                _wavformat = WaveFormat.CreateALawFormat(8000, 1);
                break;
        }

        DateTime now = DateTime.Now;
        // NOTE(review): epoch delta is computed against a *local-time* 1970-01-01,
        // so idx shifts with the machine's timezone — confirm that is intended.
        TimeSpan ts = now - new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Local);
        string _header = string.Format("{0:0000}{1:00}{2:00}{3:00}{4:00}{5:00}{6:000}", now.Year, now.Month, now.Day, now.Hour, now.Minute, now.Second, now.Millisecond);
        string _datepath = string.Format("{0:0000}-{1:00}-{2:00}", now.Year, now.Month, now.Day);
        string _fileName = string.Format("{0}_{1}_{2}.wav", _header, _recInfo.extension, _recInfo.peer_number);

        // FIX: compute the save directory once instead of re-formatting the same
        // string three times, and drop the unused _wavFileName local.
        string _path = string.Format(@"{0}\{1}", _option.SaveDirectory, _datepath);
        if (!Directory.Exists(_path))
        {
            Directory.CreateDirectory(_path);
        }

        RtpRecordInfo RecInstance = new RtpRecordInfo(_wavformat, _path, _fileName)
        {
            ext = _recInfo.extension,
            peer = _recInfo.peer_number,
            codec = _wavformat,
            idx = ts.TotalMilliseconds,
            savepath = _path,
            filename = _fileName
        };
        RecInstance.EndOfRtpStreamEvent += RecInstance_EndOfRtpStreamEvent;
        RecInstance.Add(_recInfo);
        lock (RecordIngList)
        {
            RecordIngList.Add(RecInstance);
        }
    }
    else
    {
        _ingInstance.Add(_recInfo);
    }
}
// Dispatches an RTP record chunk to the matching in-progress recording, or starts
// a new recording whose wave format is derived from the chunk's RTP payload type.
private void StackRtp2Instance(RecordInfo_t recInfo, byte[] buffer)
{
    var ingInstance = RecordIngList.FirstOrDefault(x => x.ext == recInfo.extension && x.peer == recInfo.peer_number);
    if (ingInstance != null)
    {
        // A recording for this extension/peer pair already exists — just append.
        // (A commented-out diagnostic comparing the packet codec against the
        // structure codec used to live here.)
        ingInstance.Add(recInfo);
        return;
    }

    // Inspect the first RTP packet to choose the codec for the new recording.
    byte[] packetBytes = new byte[recInfo.size];
    Array.Copy(recInfo.voice, 0, packetBytes, 0, recInfo.size);
    var packet = new WinSound.RTPPacket(packetBytes);

    WaveFormat wavformat;
    if (packet.PayloadType == 0)
    {
        wavformat = WaveFormat.CreateMuLawFormat(8000, 1);
    }
    else if (packet.PayloadType == 8)
    {
        wavformat = WaveFormat.CreateALawFormat(8000, 1);
    }
    else if (packet.PayloadType == 4)
    {
        wavformat = WaveFormat.CreateCustomFormat(WaveFormatEncoding.G723, 8000, 1, 8000, 1, 8);
    }
    else if (packet.PayloadType == 18)
    {
        wavformat = WaveFormat.CreateCustomFormat(WaveFormatEncoding.G729, 8000, 1, 8000, 1, 8);
    }
    else
    {
        // Unknown payload types fall back to A-law.
        wavformat = WaveFormat.CreateALawFormat(8000, 1);
    }

    DateTime now = DateTime.Now;
    TimeSpan ts = now - new DateTime(1970, 1, 1, 0, 0, 0, 0, DateTimeKind.Local);
    string header = string.Format("{0:0000}{1:00}{2:00}{3:00}{4:00}{5:00}{6:000}", now.Year, now.Month, now.Day, now.Hour, now.Minute, now.Second, now.Millisecond);
    string datepath = string.Format("{0:0000}-{1:00}-{2:00}", now.Year, now.Month, now.Day);
    string fileName = string.Format("{0}_{1}_{2}.wav", header, recInfo.extension, recInfo.peer_number);
    string path = string.Format(@"{0}\{1}", Options.savedir, datepath);
    if (!Directory.Exists(path))
    {
        Directory.CreateDirectory(path);
    }

    RtpRecordInfo RecInstance = new RtpRecordInfo(wavformat, path, fileName)
    {
        ext = recInfo.extension,
        peer = recInfo.peer_number,
        codec = wavformat,
        idx = ts.TotalMilliseconds,
        savepath = path,
        filename = fileName
    };
    RecInstance.EndOfRtpStreamEvent += RecInstance_EndOfRtpStreamEvent;
    RecInstance.chkcount++;
    RecInstance.firstIsExtension = recInfo.isExtension;
    RecInstance.Add(recInfo);
    lock (RecordIngList)
    {
        RecordIngList.Add(RecInstance);
    }
}