        /// <summary>
        /// Handles the Recognized event and records the recognized text,
        /// verified keyword, or no-match details in the trace log.
        /// </summary>
        private void SpeechBotConnector_Recognized(object sender, SpeechRecognitionEventArgs e)
        {
            if (e.Result.Reason == ResultReason.RecognizedSpeech)
            {
                this.RecognizedText = e.Result.Text;
                this.speechDuration = (int)e.Result.Duration.TotalMilliseconds;

                Trace.TraceInformation($"[{DateTime.Now.ToString("h:mm:ss tt", CultureInfo.CurrentCulture)}] Recognized event received. SessionId = {e.SessionId}, Speech duration = {this.speechDuration}, Recognized text = {this.RecognizedText}");
            }
            else if (e.Result.Reason == ResultReason.RecognizedKeyword)
            {
                this.RecognizedKeyword = e.Result.Text;

                Trace.TraceInformation($"[{DateTime.Now.ToString("h:mm:ss tt", CultureInfo.CurrentCulture)}] Recognized event received. SessionId = {e.SessionId}");
                Trace.TraceInformation($"Keyword Recognition Verified : {e.Result.Text}");
            }
            else if (e.Result.Reason == ResultReason.NoMatch)
            {
                Trace.TraceInformation($"[{DateTime.Now.ToString("h:mm:ss tt", CultureInfo.CurrentCulture)}] Recognized event received. Speech could not be recognized. SessionId = {e.SessionId}");
                Trace.TraceInformation($"No match details = {NoMatchDetails.FromResult(e.Result)}");
            }
            else
            {
                Trace.TraceInformation($"[{DateTime.Now.ToString("h:mm:ss tt", CultureInfo.CurrentCulture)}] Recognized event received. e.Result.Reason = {e.Result.Reason}. SessionId = {e.SessionId}");
            }
        }
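
For context, a minimal wiring sketch for the handler above. The connector type and configuration values are assumptions based on the handler's signature (SpeechBotConnector was the preview-era name of what the Speech SDK now calls DialogServiceConnector), not part of the original example:

        // Hypothetical wiring: key/region are placeholders, connector type is assumed.
        var botConfig = BotFrameworkConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
        var connector = new DialogServiceConnector(botConfig);
        connector.Recognized += SpeechBotConnector_Recognized; // handler defined above
        await connector.ConnectAsync();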
Example #2
        /// <summary>
        /// Performs one single-shot recognition (blocking on the async call)
        /// and invokes the handler only when an intent is recognized.
        /// </summary>
        public void RecogniseIntent(Action<IntentRecognitionResult> handler)
        {
            var result = recognizer.RecognizeOnceAsync().Result;

            switch (result.Reason)
            {
                case ResultReason.RecognizedIntent:
                    Logger.OnIntentRecognised(result);
                    handler(result);
                    break;

                case ResultReason.RecognizedSpeech:
                    Logger.OnSpeechRecognised(result);
                    break;

                case ResultReason.NoMatch:
                    Logger.OnSpeechUnrecognised();
                    var details = NoMatchDetails.FromResult(result);
                    Console.WriteLine(details.Reason);
                    break;

                default:
                    Logger.OnSpeechUnrecognised();
                    break;
            }
        }
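
A brief usage sketch for the method above; the wrapper type name (IntentService) and the handler body are illustrative assumptions, since the class that owns recognizer and Logger is not shown:

        // Hypothetical caller: IntentService is an assumed wrapper class name.
        var service = new IntentService();
        service.RecogniseIntent(result =>
        {
            Console.WriteLine($"Intent: {result.IntentId}");
            Console.WriteLine($"Text: {result.Text}");
        });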
Example #3

        public static async Task<string> RecognizeSpeechAsync()
        {
            // Creates an instance of a speech config with the specified subscription key and
            // service region (placeholder values shown here; substitute your own).
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Creates a speech recognizer.
            using (var recognizer = new SpeechRecognizer(config))
            {
                // Starts speech recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed. The task returns the recognized text as its result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for
                // single-shot recognition like a command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync();

                // Checks the result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    return result.Text;
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    return $"NOMATCH: Reason = {NoMatchDetails.FromResult(result).Reason}";
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        return $"CANCELED: Error code = {cancellation.ErrorCode}";
                    }
                    else
                    {
                        return $"CANCELED: {cancellation.Reason}";
                    }
                }
                else
                {
                    return "Unknown error";
                }
            }
        }
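
A minimal caller for the method above, assuming it lives in a console program (the Main wrapper is illustrative):

        // Hypothetical entry point demonstrating RecognizeSpeechAsync().
        public static async Task Main()
        {
            var text = await RecognizeSpeechAsync();
            Console.WriteLine($"Result: {text}");
        }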
Example #4
        // Returns Task (rather than async void) so callers can await it and observe exceptions.
        public async Task testFunc()
        {
            // Define Output Path and Create a new WaveInEvent
            var outputFilePath = @"C:\Users\Joe\source\repos\Monotone\Monotone\bin\x86\Debug x86\audio.wav";
            var waveInEvent    = new WaveInEvent();

            waveInEvent.DeviceNumber = 0;

            var bufferedWaveProvider = new BufferedWaveProvider(new WaveFormat(8000, 1));

            // Prepare the fileWriter
            WaveFileWriter fileWriter = new WaveFileWriter(outputFilePath, waveInEvent.WaveFormat);

            // Set up the Azure speech configuration with our subscription info (placeholder key
            // shown; substitute your own) and enable dictation capabilities
            var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "westus");

            speechConfig.EnableDictation();
            speechConfig.SetProfanity(ProfanityOption.Raw);
            speechConfig.OutputFormat = OutputFormat.Detailed;

            // Set-Up Audio Configuration using Audio Callback method for NAudio Capture
            NAudioCompatibileAudioCallback1 audioCallback = new NAudioCompatibileAudioCallback1(ref bufferedWaveProvider);
            //var audioStreamCallback = AudioInputStream.CreatePullStream(audioCallback, AudioStreamFormat.GetDefaultInputFormat());
            var audioStreamCallback = AudioInputStream.CreatePullStream(audioCallback,
                                                                        AudioStreamFormat.GetWaveFormatPCM((uint)waveInEvent.WaveFormat.SampleRate,
                                                                                                           (byte)waveInEvent.WaveFormat.BitsPerSample,
                                                                                                           (byte)waveInEvent.WaveFormat.Channels));

            var audioConfig = AudioConfig.FromStreamInput(audioStreamCallback);

            // Initialize the SpeechRecognizer API
            var recognizer = new SpeechRecognizer(speechConfig, audioConfig);

            // Declare a TaskCompletionSource to help shut down the continuous speech processing later
            var stopRecognition = new TaskCompletionSource<int>();

            // Recognizer event-based handling
            #region Recognizer Event-Based Handling

            recognizer.Recognizing += (s, e) =>
            {
                Console.WriteLine($"RECOGNIZING: Text ={e.Result.Text}");
            };

            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text ={e.Result.Text}");
                }
                else if (e.Result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    Console.WriteLine($"NOMATCH: Detail ={NoMatchDetails.FromResult(e.Result).Reason}");
                    Console.WriteLine($"NOMATCH: Duration ={e.Result.Duration}");
                }
            };

            recognizer.Canceled += (s, e) =>
            {
                if (e.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELLED: ErrorCode ={e.ErrorCode}");
                    Console.WriteLine($"CANCELLED: ErrorDetails ={e.ErrorDetails}");
                    Console.WriteLine($"CANCELLED: Did you update your subscription info?");
                }

                stopRecognition.TrySetResult(0);
            };

            recognizer.SessionStopped += (s, e) =>
            {
                Console.WriteLine("\n   Session Stopped Event.");

                stopRecognition.TrySetResult(0);
            };
            #endregion

            // NAudio WaveInEvent event-based handling
            #region NAudio WaveInEvent Event-Based Handling
            waveInEvent.DataAvailable += (s, a) =>
            {
                // Use callback to send recorded data for analysis via Recognizer
                //audioCallback.Read(a.Buffer, (uint)a.BytesRecorded);
                bufferedWaveProvider.AddSamples(a.Buffer, 0, a.BytesRecorded);

                // Then Write the data
                fileWriter.Write(a.Buffer, 0, a.BytesRecorded);

                // Force Stop Recording after 30 seconds
                //if (fileWriter.Position > waveInEvent.WaveFormat.AverageBytesPerSecond * 30)
                //{
                //    waveInEvent.StopRecording();
                //}
            };

            waveInEvent.RecordingStopped += (s, a) =>
            {
                fileWriter?.Dispose();
                fileWriter = null;
            };
            #endregion

            // Start Recording
            waveInEvent.StartRecording();

            Console.WriteLine("Begin Recording... ");
            Console.WriteLine("Press Any Key to Stop Recording.");

            // Start Continuous Recognition
            await recognizer.StartContinuousRecognitionAsync();

            // Wait until either a key is pressed or recognition stops on its own
            // (error or session end). Task.WaitAny keeps the tasks rooted.
            Task.WaitAny(stopRecognition.Task, Task.Run(() => Console.ReadKey()));

            // Stops Recognition
            await recognizer.StopContinuousRecognitionAsync();

            // Stop Recording and dispose object
            waveInEvent.StopRecording();
            waveInEvent.Dispose();

            Console.WriteLine("Recording Stopped.");
        }
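
The NAudioCompatibileAudioCallback1 class used above is not shown in the example. Here is a minimal sketch of what it plausibly looks like, assuming it simply drains the shared BufferedWaveProvider (only the constructor signature is taken from the call site; the body is an assumption):

        // Hypothetical sketch: a PullAudioInputStreamCallback backed by the
        // BufferedWaveProvider that the WaveInEvent handler fills above.
        public class NAudioCompatibileAudioCallback1 : PullAudioInputStreamCallback
        {
            private readonly BufferedWaveProvider provider;

            public NAudioCompatibileAudioCallback1(ref BufferedWaveProvider provider)
            {
                this.provider = provider;
            }

            // The Speech SDK calls Read whenever it needs more audio; returning the
            // requested byte count keeps the stream alive (BufferedWaveProvider pads
            // with silence by default when its buffer runs dry).
            public override int Read(byte[] dataBuffer, uint size)
            {
                return this.provider.Read(dataBuffer, 0, (int)size);
            }
        }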