// Synthesizes the configured text to a WAV file with the requested format.
// Text comes from options.Text when set, otherwise from options.InputFile (UTF-8).
// Returns true on success, false on any synthesis/IO failure (best-effort API).
static bool GenerateFile(Options options)
{
    // options.BitRate == 1 selects 8-bit output; anything else is 16-bit.
    AudioBitsPerSample bitsPerSample = options.BitRate == 1
        ? AudioBitsPerSample.Eight
        : AudioBitsPerSample.Sixteen;
    // options.Channel == 1 selects stereo; anything else is mono.
    AudioChannel channel = options.Channel == 1 ? AudioChannel.Stereo : AudioChannel.Mono;
    var format = new SpeechAudioFormatInfo(options.SampleRate, bitsPerSample, channel);

    // Inline text takes precedence over the input file.
    string text = String.IsNullOrEmpty(options.Text)
        ? File.ReadAllText(options.InputFile, Encoding.UTF8)
        : options.Text;

    // Original built "folder/path" by string formatting; Path.Combine handles
    // separators correctly on all platforms.
    string path = String.IsNullOrEmpty(options.Folder)
        ? options.Path
        : Path.Combine(options.Folder, options.Path);

    try
    {
        // BUG FIX: SpeechSynthesizer is IDisposable and was leaked on every
        // call — the wave file also stays open until the synthesizer is disposed.
        using (SpeechSynthesizer speaker = new SpeechSynthesizer())
        {
            speaker.SelectVoice(options.VoiceName);
            speaker.SetOutputToWaveFile(path, format);
            speaker.Speak(text);
        }
    }
    catch (Exception)
    {
        // Deliberate best-effort contract: any failure (unknown voice, missing
        // input file, IO error) is reported as false rather than thrown.
        return false;
    }
    return true;
}
// Collects (name, text) pairs from the grid, reads the optional format
// overrides from the text boxes, and hands everything to the Progress form
// for batch WAV generation. Rows with an empty name or text cell are skipped.
private void button1_Click(object sender, EventArgs e)
{
    Progress t_progressForm = new Progress();
    t_progressForm.Show();

    List<string> m_aryLines = new List<string>();
    List<string> m_aryNames = new List<string>();
    DataGridViewRowCollection t_rows = dataGrid.Rows;
    foreach (DataGridViewRow t_row in t_rows)
    {
        if (t_row.Cells[0].Value == null || t_row.Cells[1].Value == null)
        {
            continue;
        }
        m_aryNames.Add(t_row.Cells[0].Value.ToString());
        m_aryLines.Add(t_row.Cells[1].Value.ToString());
    }

    // Defaults used when the corresponding text box is left empty.
    // BUG FIX: original default was 41000 Hz — the standard CD rate is 44100.
    int t_nRate = 44100;
    AudioBitsPerSample t_sample = AudioBitsPerSample.Sixteen;
    AudioChannel t_channel = AudioChannel.Stereo;

    // BUG FIX: the original tested CompareTo("") == 0, i.e. it parsed the text
    // boxes only when they were EMPTY — user-entered values were ignored and an
    // empty rate box always aborted (TryParse("") fails). Parse non-empty input.
    if (!string.IsNullOrEmpty(textRate.Text))
    {
        if (!Int32.TryParse(textRate.Text, out t_nRate))
        {
            return; // invalid sample rate — abort
        }
    }
    if (!string.IsNullOrEmpty(textBPS.Text))
    {
        int t_nSample = 0;
        if (Int32.TryParse(textBPS.Text, out t_nSample))
        {
            t_sample = (AudioBitsPerSample)t_nSample;
        }
        else
        {
            return; // invalid bits-per-sample — abort
        }
    }
    if (!string.IsNullOrEmpty(textChannels.Text))
    {
        int t_nChannel = 0;
        if (Int32.TryParse(textChannels.Text, out t_nChannel))
        {
            t_channel = (AudioChannel)t_nChannel;
        }
        // NOTE(review): unlike rate/BPS, an unparsable channel count silently
        // falls back to Stereo — preserved from the original; confirm intent.
    }

    t_progressForm.Generate(m_aryNames, m_aryLines, textPath.Text, t_nRate, t_sample, t_channel);
}
/// <summary>
/// Convenience PCM constructor: delegates to the full constructor with
/// <see cref="EncodingFormat.Pcm"/> and no format-specific extra bytes, then
/// derives the PCM block alignment (bytes per sample frame = channels *
/// bytes-per-sample) and the average byte rate (frames/sec * frame size).
/// </summary>
/// <param name="samplesPerSecond">Sampling rate in Hz.</param>
/// <param name="bitsPerSample">Sample depth; cast to its underlying numeric value.</param>
/// <param name="channel">Channel layout; cast to its channel count.</param>
public SpeechAudioFormatInfo(int samplesPerSecond, AudioBitsPerSample bitsPerSample, AudioChannel channel) : this(EncodingFormat.Pcm, samplesPerSecond, (short)bitsPerSample, (short)channel, null)
{
    // Don't explicitly check these are sensible values - allow flexibility here as some formats may do unexpected things here.
    _blockAlign = (short)(_channelCount * (_bitsPerSample / 8));
    _averageBytesPerSecond = _samplesPerSecond * _blockAlign;
}
// Reads txtText aloud asynchronously with the selected voice; when a file name
// is given, output is redirected to that WAV file using the Hz/bit/channel
// format chosen in the combo boxes. Errors are shown in a message box.
private void cmdSave_Click(object sender, EventArgs e)
{
    lock (_Lock)
    {
        try
        {
            // Guard clause: nothing to speak.
            if (string.IsNullOrEmpty(txtText.Text))
            {
                MessageBox.Show("Type the text that needs to be told", INF, MessageBoxButtons.OK, MessageBoxIcon.Information);
                return;
            }

            setStopPauseEnabled();
            if (!string.IsNullOrEmpty(txtFileName.Text))
            {
                this.Cursor = Cursors.WaitCursor;
            }

            // NOTE(review): Reader is a field; presumably it is disposed in
            // Reader_SpeakCompleted — confirm, otherwise each click leaks a
            // SpeechSynthesizer instance.
            Reader = new SpeechSynthesizer();
            Reader.SelectVoice(cmbVoices.Text);

            AudioBitsPerSample audioBitsPerSample = (AudioBitsPerSample)Enum.Parse(typeof(AudioBitsPerSample), cmbBit.SelectedItem.ToString());
            AudioChannel audioChannel = (AudioChannel)Enum.Parse(typeof(AudioChannel), cmbChannel.SelectedItem.ToString());
            if (!string.IsNullOrEmpty(txtFileName.Text))
            {
                Reader.SetOutputToWaveFile(txtFileName.Text, new SpeechAudioFormatInfo(Convert.ToInt32(cmbHz.SelectedItem), audioBitsPerSample, audioChannel));
            }

            Reader.Rate = tbRate.Value;
            // BUG FIX: subscribe BEFORE starting speech — the original attached
            // the handler after SpeakAsync, so a very fast completion could fire
            // before the handler was wired up and be missed.
            Reader.SpeakCompleted += Reader_SpeakCompleted;
            Reader.SpeakAsync(txtText.Text);
            cmdSave.Enabled = false;
        }
        catch (Exception ex)
        {
            MessageBox.Show(ex.Message, ERR, MessageBoxButtons.OK, MessageBoxIcon.Error);
        }
        // (empty finally block removed — it did nothing)
    }
}
// Debug/PoC helper: synthesizes a short format-identifying phrase to a file
// and plays it back, so each (bits, channel, rate) combination can be heard.
// NOTE(review): the output is WAV data despite the ".mp3" suffix — suffix kept
// byte-for-byte for compatibility with existing callers; confirm before renaming.
public static void wavDevDbgPoc(int samplesPerSecond, AudioBitsPerSample bitsPerSample, AudioChannel channel, string file)
{
    file = $"{file}.{bitsPerSample}.{channel}.{samplesPerSecond}.mp3";
    using (var synth = new SpeechSynthesizer())
    {
        // BUG FIX: the format was hard-coded to 32000/Sixteen/Mono, silently
        // ignoring all three parameters. Use the caller-supplied format.
        synth.SetOutputToWaveFile(file, new SpeechAudioFormatInfo(samplesPerSecond, bitsPerSample, channel));
        var builder = new PromptBuilder();
        builder.AppendText($"{bitsPerSample}.{channel}.{samplesPerSecond}");
        synth.Speak(builder);
    }
    // BUG FIX: playback moved outside the using block — the synthesizer keeps
    // the wave file open (header unfinalized) until it is disposed, so playing
    // it inside the using could read an incomplete/locked file.
    var m_SoundPlayer = new System.Media.SoundPlayer(file);
    m_SoundPlayer.Play();
}
// Synthesizes one WAV file per (name, line) pair into p_strPath using the
// requested audio format, updating the progress bar per file, then closes
// this progress form. Assumes p_aryNames and p_aryLines are the same length.
public void Generate(List<string> p_aryNames, List<string> p_aryLines, string p_strPath, int p_nRate, AudioBitsPerSample p_samples, AudioChannel p_channels)
{
    SpeechAudioFormatInfo t_audioFormatInfo = new SpeechAudioFormatInfo(p_nRate, p_samples, p_channels);
    progressBar1.Maximum = p_aryLines.Count;
    progressBar1.Step = 1;
    // Show "0/N" before any file is produced (original showed "1/N" up front).
    label1.Text = "0/" + p_aryNames.Count;
    // BUG FIX: using-block guarantees the synthesizer (and its open wave file)
    // is disposed even if Speak throws — the original leaked on error.
    using (SpeechSynthesizer t_synth = new SpeechSynthesizer())
    {
        for (int t_i = 0; t_i < p_aryNames.Count; ++t_i)
        {
            // BUG FIX: t_audioFormatInfo was constructed but never used — the
            // no-format overload discarded the caller's rate/bits/channels.
            t_synth.SetOutputToWaveFile(p_strPath + "\\" + p_aryNames[t_i] + ".wav", t_audioFormatInfo);
            t_synth.Speak(p_aryLines[t_i]);
            label1.Text = (t_i + 1) + "/" + p_aryLines.Count;
            progressBar1.PerformStep();
            progressBar1.Refresh();
        }
    }
    Close();
}
/// <summary>
/// PCM convenience constructor: forwards to the full constructor with
/// <see cref="EncodingFormat.Pcm"/> and no extra format bytes, then computes
/// the block alignment (channels * bytes-per-sample) and average bytes/sec.
/// </summary>
/// <param name="samplesPerSecond">Sampling rate in Hz.</param>
/// <param name="bitsPerSample">Sample depth; cast to its underlying numeric value.</param>
/// <param name="channel">Channel layout; cast to its channel count.</param>
public SpeechAudioFormatInfo(int samplesPerSecond, AudioBitsPerSample bitsPerSample, AudioChannel channel) : this(EncodingFormat.Pcm, samplesPerSecond, (short)bitsPerSample, (short)channel, null)
{
    // bytes per sample frame; integer division is exact for 8/16-bit PCM
    _blockAlign = (short)(_channelCount * (_bitsPerSample / 8));
    _averageBytesPerSecond = _samplesPerSecond * _blockAlign;
}
// Batch TTS: writes one WAV file per (name, line) pair into p_strPath with the
// requested audio format, stepping the progress bar per file, then closes the
// form. Assumes p_aryNames and p_aryLines have equal length.
public void Generate(List <string> p_aryNames, List <string> p_aryLines, string p_strPath, int p_nRate, AudioBitsPerSample p_samples, AudioChannel p_channels)
{
    SpeechAudioFormatInfo t_audioFormatInfo = new SpeechAudioFormatInfo(p_nRate, p_samples, p_channels);
    progressBar1.Maximum = p_aryLines.Count;
    progressBar1.Step = 1;
    // Show "0/N" before any file is produced (original showed "1/N" up front).
    label1.Text = "0/" + p_aryNames.Count;
    // BUG FIX: using-block guarantees disposal of the synthesizer (and the
    // wave file it holds open) even when Speak throws — original leaked on error.
    using (SpeechSynthesizer t_synth = new SpeechSynthesizer())
    {
        for (int t_i = 0; t_i < p_aryNames.Count; ++t_i)
        {
            // BUG FIX: t_audioFormatInfo was built but never passed — the
            // no-format overload ignored the caller's rate/bits/channels.
            t_synth.SetOutputToWaveFile(p_strPath + "\\" + p_aryNames[t_i] + ".wav", t_audioFormatInfo);
            t_synth.Speak(p_aryLines[t_i]);
            label1.Text = (t_i + 1) + "/" + p_aryLines.Count;
            progressBar1.PerformStep();
            progressBar1.Refresh();
        }
    }
    Close();
}
/// <summary>
/// Builds an audio format description from a sampling rate, a sample depth,
/// and a channel layout.
/// </summary>
/// <param name="rate">Samples per second.</param>
/// <param name="bits">Bits per sample.</param>
/// <param name="ch">Channel configuration.</param>
public AudioFormat(AudioSamplingRate rate, AudioBitsPerSample bits, AudioChannels ch)
{
    this.AudioSamplingRate = rate;
    this.AudioBitsPerSample = bits;
    this.AudioChannels = ch;
}
/// <summary>
/// Builds an audio format description from a sampling rate and a sample
/// depth, leaving the channel layout at its default.
/// </summary>
/// <param name="rate">Samples per second.</param>
/// <param name="bits">Bits per sample.</param>
public AudioFormat(AudioSamplingRate rate, AudioBitsPerSample bits)
{
    this.AudioSamplingRate = rate;
    this.AudioBitsPerSample = bits;
}