// Handle the SpeechRecognized event of the name grammar: persist the
// recognized audio to a WAV file and verify it on a background thread.
        public static void NameSpeechRecognized(
            object sender, SpeechRecognizedEventArgs e)
        {
            try
            {
                // Audio captured for this recognition result.
                RecognizedAudio audio = e.Result.Audio;

                // NOTE(review): hard-coded output path — assumes C:\temp exists;
                // consider making this configurable.
                string path = @"C:\temp\passwordAudio.wav";
                using (Stream outputStream = new FileStream(path, FileMode.Create))
                {
                    // The using block disposes (and thereby closes) the stream;
                    // the original's explicit Close() was redundant.
                    audio.WriteToWaveStream(outputStream);
                }

                // Verify the saved audio off the recognition callback thread so
                // the recognizer is not blocked.
                Thread testThread =
                    new Thread(new ParameterizedThreadStart(TestAudio));
                testThread.Start(path);
            }
            catch (Exception ex)
            {
                // Event-handler boundary: log and swallow so the recognizer keeps running.
                Console.WriteLine("Exception thrown while processing audio:");
                Console.WriteLine(ex.ToString());
            }
        }
예제 #2
0
        // Handles a recognition result: saves the audio to a WAV file, runs two
        // external Python recognizers (Sphinx, DeepSpeech) on it, and shows all
        // three transcriptions in textBox1.
        void recognizer_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            // Audio captured for this recognition result.
            RecognizedAudio audio = e.Result.Audio;

            // Export the whole result: start at zero, cover the full duration.
            // (The original computed audio.AudioPosition - audio.AudioPosition,
            // which is always TimeSpan.Zero — written explicitly here.)
            TimeSpan start    = TimeSpan.Zero;
            TimeSpan duration = audio.Duration;

            string path = @"E:\Deep Learning\Understanding Simple Speech Commands\nameAudio.wav";

            using (Stream outputStream = new FileStream(path, FileMode.Create))
            {
                // The using block disposes (closes) the stream; no explicit Close() needed.
                RecognizedAudio nameAudio = audio.GetRange(start, duration);
                nameAudio.WriteToWaveStream(outputStream);
            }

            // --- Run the CMU Sphinx recognizer over the saved file. ---
            python_runner.python_path = @"C:\Users\admin\AppData\Local\Programs\Python\Python36\python.exe";
            python_runner.script_path = @"E:\Deep Learning\Understanding Simple Speech Commands\SpeechRecognition\from_file.py";

            // NOTE(review): assumes python_runner.arguments supports Clear() — confirm its type.
            python_runner.arguments.Clear();
            python_runner.arguments.Add(path);

            python_runner.run();

            string sphinx_result = python_runner.results;

            // --- Run DeepSpeech over the same file. ---
            python_runner.script_path = @"E:\Deep Learning\Understanding Simple Speech Commands\deepspeech\client_from_file.py";

            // BUG FIX: the original added 'path' a second time without clearing,
            // so the DeepSpeech script received the path argument twice.
            python_runner.arguments.Clear();
            python_runner.arguments.Add(path);

            python_runner.run();

            // NOTE(review): this reads 'errors' (stderr) while Sphinx above reads
            // 'results' — preserved as-is; the DeepSpeech client may print there.
            string deep_speech_result = python_runner.errors;

            textBox1.Text = e.Result.Text + "----------------" + sphinx_result + "----------------" + deep_speech_result;
        }
예제 #3
0
        // Writes the retained audio to the first unused "RetainedAudio_<n>.wav"
        // file in the working directory; does nothing when audio is null.
        private static void DumpRecordedAudio( RecognizedAudio audio )
        {
            if ( audio == null )
                return;

            // Probe sequentially for a file name that is not taken yet.
            string filename = "RetainedAudio_0.wav";
            for ( int fileId = 0; File.Exists( filename ); )
            {
                fileId++;
                filename = "RetainedAudio_" + fileId + ".wav";
            }

            Console.WriteLine( "\nWriting file: {0}", filename );
            using ( var file = new FileStream( filename, System.IO.FileMode.CreateNew ) )
            {
                audio.WriteToWaveStream( file );
            }
        }
        // Publishes a recognition result as an InternalSpeechRecognitionResultActivity
        // and, when saveAudioPath is set, persists the recognized audio to an
        // incrementally numbered WAV file ("0000.wav", "0001.wav", ...).
        public void handleSpeechRecognizedResult(float confidence, string textResult,
                                                 string grammarName, string ruleName, KeyValuePair <string, SemanticValue>[] kvp,
                                                 double audioDuration, RecognizedAudio audio)
        {
            string fileP   = null;
            string relPath = null;

            // Only build a file path when audio persistence is enabled.
            if (saveAudioPath != null)
            {
                // Zero-pad the running index to 4 digits, e.g. "0042".
                string indexerStr = waveFileNameIndexer.ToString().PadLeft(4, '0');

                fileP = saveAudioPath + "\\" + indexerStr + ".wav";

                relPath = EBookUtil.convertAudioToRelativePath(fileP);
            }

            ActivityExecutor.add(new InternalSpeechRecognitionResultActivity(confidence, textResult, false,
                                                                             kvp, grammarName, ruleName, audioDuration, relPath));

            // Only write the audio file when a path was produced above.
            if (fileP != null)
            {
                // BUG FIX: the stream was not wrapped in 'using', leaking the file
                // handle if WriteToWaveStream threw; Dispose also flushes and closes.
                using (FileStream stream = new FileStream(fileP, FileMode.Create))
                {
                    audio.WriteToWaveStream(stream);
                }
                unconfirmSaveAudioList.Add(fileP);
                Trace.WriteLine("write to file " + fileP);
                waveFileNameIndexer++;
            }
            String timeStamp = EBookUtil.GetTimestamp();

            // BUG FIX: 'kvp.ToArray()' concatenated into a string printed the array
            // type name, not its contents; join the pairs so the trace is informative.
            string text = "\n" + confidence + "\t" + textResult + "(complete)\t\t" +
                          string.Join(",", kvp) + "\t" + grammarName + "\t" + timeStamp;

            Trace.WriteLine(text);
        }
예제 #5
0
        // Saves the retained audio into the first free "RetainedAudio_<n>.wav";
        // a null argument is silently ignored.
        private static void DumpRecordedAudio(RecognizedAudio audio)
        {
            if (audio == null)
            {
                return;
            }

            // Find the lowest index whose file name is not already on disk.
            string filename;
            int index = -1;
            do
            {
                index++;
                filename = "RetainedAudio_" + index + ".wav";
            }
            while (File.Exists(filename));

            Console.WriteLine("\nWriting file: {0}", filename);
            using (var outFile = new FileStream(filename, System.IO.FileMode.CreateNew))
            {
                audio.WriteToWaveStream(outFile);
            }
        }
예제 #6
0
        // Marked "Useless" by the original author. Dumps the retained audio to
        // the first free "RetainedAudio_<n>.wav" and reports the file name
        // through the remote operation channel; null audio is ignored.
        private void DumpRecordedAudio(RecognizedAudio audio)
        {
            if (audio == null)
            {
                return;
            }

            // Scan forward until an unused file name is found.
            string filename = "RetainedAudio_0.wav";
            for (int id = 0; File.Exists(filename); )
            {
                id++;
                filename = "RetainedAudio_" + id + ".wav";
            }

            _remoteOperation.message("\nWriting file: " + filename);
            using (var waveFile = new FileStream(filename, System.IO.FileMode.CreateNew))
            {
                audio.WriteToWaveStream(waveFile);
            }
        }
예제 #7
0
        // Saves the recognized audio to "save_<count>.wav", advancing the shared
        // counter past any names already taken on disk; null audio is ignored.
        static void SaveRecordedAudio(RecognizedAudio audio)
        {
            if (audio == null)
            {
                return;
            }

            // Advance 'count' until the derived file name is free.
            string filename;
            while (File.Exists(filename = "save_" + count + ".wav"))
            {
                count++;
            }

            Console.WriteLine("寫入檔案: " + filename);
            using (var wave = new FileStream(filename, FileMode.CreateNew))
            {
                audio.WriteToWaveStream(wave);
            }
        }
        // Loads the recognizedAudio into a memory stream and creates a SoundPlayer
        // for playback. Passing null clears the stream/player and disables the button.
        private void SetRecognizedAudio(RecognizedAudio recognizedAudio)
        {
            bool hasAudio = recognizedAudio != null;

            if (!hasAudio)
            {
                _recognizedAudioStream = null;
                _recognizedAudioPlayer = null;
            }
            else
            {
                var stream = new MemoryStream();
                recognizedAudio.WriteToWaveStream(stream);
                stream.Position = 0;   // rewind so playback starts at the beginning

                _recognizedAudioStream = stream;
                _recognizedAudioPlayer = new System.Media.SoundPlayer(stream);
            }

            _buttonRecognizedAudio.IsEnabled = hasAudio;
        }
        // Saves the recognized audio to "save_<count>.wav", skipping names that
        // already exist on disk; null audio is ignored.
        static void SaveRecordedAudio(RecognizedAudio audio)
        {
            if (audio == null)
                return;

            string filename = "save_" + count + ".wav";
            while (File.Exists(filename))
            {
                count++;
                filename = "save_" + count + ".wav";
            }

            // BUG FIX: the console message was mojibake (mis-encoded Big5 bytes);
            // restored to "寫入檔案: " ("write file"), matching the identical
            // sibling method elsewhere in this file.
            Console.WriteLine("寫入檔案: " + filename);
            using (var file = new FileStream(filename, FileMode.CreateNew))
            {
                audio.WriteToWaveStream(file);
            }
        }
        // Handles one training-command recognition: when the recognized text matches
        // the expected command (siguiente_comando), the audio is written to a temp
        // WAV file, fed to the native trainer via AV.avf_agregar_muestra_WAV, and the
        // UI grid is advanced to the next command (or full training is started once
        // every row has been recognized). Error codes from the trainer are mapped to
        // Spanish user-facing messages in errorlabel.
        public override void manejar_comando_entrenamiento(SpeechRecognizedEventArgs e)
        {
            // Only react when the recognized text is exactly the command we expect next.
            if (e.Result.Text.ToUpperInvariant() == siguiente_comando)
            {
                RecognizedAudio audio    = e.Result.Audio;
                TimeSpan        duration = audio.Duration;
                int             resultado;

                // Temp file to hand the recorded audio to the native trainer.
                string path = Path.GetTempFileName();

                using (Stream outputStream = new FileStream(path, FileMode.Create))
                {
                    RecognizedAudio nameAudio = audio;
                    nameAudio.WriteToWaveStream(outputStream);
                    outputStream.Close();
                }

                // Column 2 of the grid encodes the sample kind after '_': "3" means
                // validation-only; anything else is used for both training and validation.
                resultado = AV.avf_agregar_muestra_WAV(entrenador, 0,
                                                       (dataGridView1[2, fila].Value.ToString().Split('_')[1] == "3") ? AV.AVP_MUESTRA_VALIDACION :
                                                       (AV.AVP_MUESTRA_ENTRENAMIENTO | AV.AVP_MUESTRA_VALIDACION), path);

                #if DEBUG
                // Keep a copy of each sample for inspection in debug builds.
                File.Copy(path, dataGridView1[2, fila].Value.ToString() + ".wav", true);
                #endif
                File.Delete(path);

                // Map trainer status codes to user-facing error messages; any error
                // shows the error panel and aborts processing of this sample.
                switch (resultado)
                {
                case AV.AVS_SIN_MEMORIA:
                    // Out of memory in the native library: unrecoverable, exit.
                    Environment.Exit(1);
                    return;

                case AV.AVS_FORMATO_ARCHIVO_NO_VALIDO:
                    errorlabel.Text    = "La grabación está dañada. \nPor favor, reintente la operación.";
                    errorlabel.Visible = true;
                    errorpanel.Visible = true;
                    return;

                case AV.AVS_ARCHIVO_INACCESIBLE:
                    errorlabel.Text    = "No se pudo acceder a la voz grabada. \nPor favor, verifique que se pueda escribir en el disco \ny reintente la operación.";
                    errorlabel.Visible = true;
                    errorpanel.Visible = true;
                    return;

                case AV.AVS_MUESTREO_DEMASIADO_BAJO:
                case AV.AVS_MUESTREO_NO_ES_MULTIPLO_DE_4_HZ:
                    errorlabel.Text    = "La grabación no puede ser utilizada por la aplicación. \n Por favor, utilice otro micrófono y \nreinicie el proceso de entrenamiento.";
                    errorlabel.Visible = true;
                    errorpanel.Visible = true;
                    return;

                case AV.AVS_DURACION_MENOR_A_MEDIO_SEGUNDO:
                    errorlabel.Text    = "La grabación es demasiado corta. \nSe necesita una grabación de al menos medio segundo. \nPor favor, grabe el comando nuevamente, hablando lento y claro.";
                    errorlabel.Visible = true;
                    errorpanel.Visible = true;
                    return;

                default:
                    // Non-negative results are success; anything else negative is an
                    // unexpected failure reported generically.
                    if (resultado >= 0)
                    {
                        break;
                    }
                    errorlabel.Text    = "Ocurrió un error inesperado, por favor reintente.";
                    errorlabel.Visible = true;
                    errorpanel.Visible = true;
                    return;
                }

                // Sample accepted: clear any prior error state.
                errorlabel.Visible = false;
                errorpanel.Visible = false;

                dataGridView1[1, fila].Value = "Reconocido";
                dataGridView1.ClearSelection();
                if (dataGridView1.RowCount == (fila + 1))
                {
                    // Last row recognized: switch the UI into training mode and run it.
                    lblTitle.Text         = "Entrenando";
                    label1.Text           = "El sistema se está entrenando para reconocer tu voz";
                    label2.Visible        = true;
                    label2.Text           = "La operación tardará aproximadamente 20 minutos";
                    dataGridView1.Visible = false;
                    pausaBtn.Visible      = false;
                    cafe.Visible          = true;
                    G.comando_form.Close();
                    entrenar();
                }
                else
                {
                    // Advance to the next row and expect its command (column 0) next.
                    dataGridView1.Rows[++fila].Selected           = true;
                    dataGridView1.FirstDisplayedScrollingRowIndex = fila;
                    siguiente_comando = dataGridView1[0, fila].Value.ToString().ToUpperInvariant();
                }
            }
        }