static SpeechRecognizer user_config_to_speech_recognizer(SpeechConfig speech_config, AudioConfig audio_config, USER_CONFIG user_config)
        {
            SpeechRecognizer speech_recognizer;

            if (user_config.language_id_enabled)
            {
                /* Note: Continuous language identification is supported only in C#, C++, and Python.
                 * See:
                 * https://docs.microsoft.com/azure/cognitive-services/speech-service/how-to-automatic-language-detection?pivots=programming-language-cpp#language-identification-with-speech-to-text
                 */
                AutoDetectSourceLanguageConfig detect_language_config = AutoDetectSourceLanguageConfig.FromLanguages(user_config.language_id_languages);
                speech_recognizer = new SpeechRecognizer(speech_config, detect_language_config, audio_config);
            }
            else
            {
                speech_recognizer = new SpeechRecognizer(speech_config, audio_config);
            }

            if (user_config.phrase_list_enabled)
            {
                PhraseListGrammar grammar = PhraseListGrammar.FromRecognizer(speech_recognizer);
                grammar.AddPhrase(user_config.phrase_list);
            }

            return(speech_recognizer);
        }
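A minimal sketch of how this helper might be called; the subscription values below are placeholders and the USER_CONFIG initializer is an assumption about that type, not part of the original sample.

        // Hypothetical caller for user_config_to_speech_recognizer.
        // "YourSubscriptionKey" / "YourServiceRegion" are placeholders.
        static async Task RecognizeOnceFromUserConfig()
        {
            SpeechConfig speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
            AudioConfig audioConfig = AudioConfig.FromDefaultMicrophoneInput();

            // Assumed USER_CONFIG shape, based on the fields read by the helper above.
            USER_CONFIG userConfig = new USER_CONFIG
            {
                language_id_enabled = true,
                language_id_languages = new string[] { "en-US", "de-DE" },
                phrase_list_enabled = true,
                phrase_list = "Contoso"
            };

            SpeechRecognizer recognizer = user_config_to_speech_recognizer(speechConfig, audioConfig, userConfig);
            SpeechRecognitionResult result = await recognizer.RecognizeOnceAsync();
            Console.WriteLine($"RECOGNIZED: {result.Text}");
        }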
Example #2
        public async void AudioStart()
        {
            var audioStream  = new VoiceAudioStream();
            var audioFormat  = AudioStreamFormat.GetWaveFormatPCM(16000, 16, 1);
            var audioConfig  = AudioConfig.FromStreamInput(audioStream, audioFormat);
            var speechConfig = SpeechConfig.FromSubscription(_config["SpeechApiKey"], _config["SpeechRegion"]);
            var speechClient = new SpeechRecognizer(speechConfig, audioConfig);
            var phraseList   = PhraseListGrammar.FromRecognizer(speechClient);

            foreach (var phrase in phrases)
            {
                phraseList.AddPhrase(phrase);
            }

            speechClient.Recognized += _speechClient_Recognized;

            string sessionId = speechClient.Properties.GetProperty(PropertyId.Speech_SessionId);

            var conn = new ConnectionInfo()
            {
                SessionId    = sessionId,
                AudioStream  = audioStream,
                SpeechClient = speechClient,
            };

            _connections.Add(Context.ConnectionId, conn);

            await speechClient.StartContinuousRecognitionAsync();

            Debug.WriteLine("Audio start message.");
        }
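The _speechClient_Recognized handler is not part of this snippet; a minimal sketch, assuming it only logs final results (any forwarding to SignalR clients in the real hub is omitted):

        private void _speechClient_Recognized(object sender, SpeechRecognitionEventArgs e)
        {
            // Hypothetical handler body; logs final recognition results only.
            if (e.Result.Reason == ResultReason.RecognizedSpeech)
            {
                Debug.WriteLine($"Recognized ({e.SessionId}): {e.Result.Text}");
            }
        }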
Example #3
        private static async Task Main(string[] args)
        {
            CreateHostBuilder(args).Build();

            Initialize();

            s_twitchBot = new TwitchBot(s_twitchBotSettings);

            s_twitchBot.Connect();

            PhraseListGrammar phraseList =
                PhraseListGrammar.FromRecognizer(s_speechRecognizer);

            phraseList.AddPhrase(s_twitchBotSettings.BotName);
            phraseList.AddPhrase("RPG");
            phraseList.AddPhrase("GitHub");
            phraseList.AddPhrase("Discord");

            s_speechRecognizer.Recognized     += OnSpeechRecognizedAsync;
            s_speechRecognizer.Canceled       += SpeechRecognizerOnCanceled;
            s_speechRecognizer.SessionStopped += SpeechRecognizerOnSessionStopped;

            System.Console.WriteLine("Speak into your microphone.");

            await s_speechRecognizer.StartContinuousRecognitionAsync();

            Task.WaitAny(s_stopRecognition.Task);

            DisposeAll();
        }
Example #4
        public RunningBackGround()
        {
            InitializeVoiceControl();

            var phraseList = PhraseListGrammar.FromRecognizer(recognizer);

            phraseList.AddPhrase("hey jarvis");

            recognizer.Recognized += Recognizer_Recognized;
            recognizer.Canceled   += Recognizer_Canceled;
            speechConfig.EnableDictation();
        }
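InitializeVoiceControl is not shown above; a possible sketch, assuming speechConfig and recognizer are fields of this class and using placeholder credentials. Note that SpeechConfig settings such as EnableDictation() only apply to recognizers created afterwards, so the sketch enables dictation before constructing the recognizer.

        // Hypothetical implementation; key and region are placeholders.
        private void InitializeVoiceControl()
        {
            speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
            speechConfig.EnableDictation();
            recognizer = new SpeechRecognizer(speechConfig, AudioConfig.FromDefaultMicrophoneInput());
        }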
Example #5
 private void LoadMyGrammarList()
 {
     phraseList = PhraseListGrammar.FromRecognizer(recognizer);
     phraseList.AddPhrase(Orders.Hide1);
     phraseList.AddPhrase(Orders.Hide2);
     phraseList.AddPhrase(Orders.Hide3);
     phraseList.AddPhrase(Orders.Show1);
     phraseList.AddPhrase(Orders.Show2);
     phraseList.AddPhrase(Orders.OpenChrome1);
     phraseList.AddPhrase(Orders.OpenChrome2);
     phraseList.AddPhrase(Orders.OpenExplorer1);
     phraseList.AddPhrase(Orders.OpenExplorer2);
     phraseList.AddPhrase(Orders.OpenNotepad1);
     phraseList.AddPhrase(Orders.OpenNotepad2);
     phraseList.AddPhrase(Orders.Time1);
     phraseList.AddPhrase(Orders.Time2);
 }
Example #6
        //
        // Create SpeechRecognizer
        //
        private SpeechRecognizer SpeechRecognizerFromUserConfig()
        {
            AudioConfig      audioConfig  = AudioConfigFromUserConfig();
            SpeechConfig     speechConfig = SpeechConfigFromUserConfig();
            SpeechRecognizer speechRecognizer;

            if (userConfig.languageIDLanguages is string[] languageIDLanguagesValue)
            {
                var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(languageIDLanguagesValue);
                speechRecognizer = new SpeechRecognizer(speechConfig, autoDetectSourceLanguageConfig, audioConfig);
            }
            else
            {
                speechRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
            }

            if (this.userConfig.phraseList is string phraseListValue)
            {
                var grammar = PhraseListGrammar.FromRecognizer(speechRecognizer);
                grammar.AddPhrase(phraseListValue);
            }

            return(speechRecognizer);
        }
Example #7
        // Continuous speech recognition assisted with a phrase list.
        public static async Task ContinuousRecognitionWithFileAndPhraseListsAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            var stopRecognition = new TaskCompletionSource <int>();

            // Creates a speech recognizer using file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput(@"wreck-a-nice-beach.wav"))
            {
                using (var recognizer = new SpeechRecognizer(config, audioInput))
                {
                    // Subscribes to events.
                    recognizer.Recognizing += (s, e) =>
                    {
                        Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                    };

                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }

                        stopRecognition.TrySetResult(0);
                    };

                    recognizer.SessionStarted += (s, e) =>
                    {
                        Console.WriteLine("\n    Session started event.");
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        Console.WriteLine("\n    Session stopped event.");
                        Console.WriteLine("\nStop recognition.");
                        stopRecognition.TrySetResult(0);
                    };

                    // Before starting recognition, add a phrase list to help recognition.
                    PhraseListGrammar phraseListGrammar = PhraseListGrammar.FromRecognizer(recognizer);
                    phraseListGrammar.AddPhrase("Wreck a nice beach");

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopRecognition.Task });

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
        }
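Example #8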
        public async Task Start(SpeechRecognitionOptions options)
        {
            SpeechRecognizer recognizer = null;

            try
            {
                Logger.LogInformation("Starting speech recognition");

                var credentials = this.Credentials;

                var speechConfig = SpeechConfig.FromEndpoint(new Uri($"wss://{credentials.Region}.stt.speech.microsoft.com/speech/universal/v2"), credentials.SubscriptionKey);
                speechConfig.SetProfanity(ProfanityOption.Raw);

                if (options.Languages.Count > 1)
                {
                    //enable continuous language detection when we have more than 1 language
                    //this seems kind of buggy though, at times the speech recognition just simply doesn't work at all when this is enabled
                    speechConfig.SetProperty(PropertyId.SpeechServiceConnection_ContinuousLanguageIdPriority, "Latency");
                }

                var languageConfig = AutoDetectSourceLanguageConfig.FromLanguages(options.Languages.Select(lang =>
                {
                    //convert language selections
                    if (lang.Length == 2)
                    {
                        //two-letter code. select some default five-letter code instead.
                        if (lang == "en")
                        {
                            lang = "en-US";
                        }
                        else
                        {
                            lang = lang + "-" + lang.ToUpperInvariant();
                        }
                    }
                    return(lang);
                }).ToArray());

                recognizer = new SpeechRecognizer(speechConfig, languageConfig, AudioConfig);

                //set up the special phrases if any
                if (options.Phrases?.Count > 0)
                {
                    var phrases = PhraseListGrammar.FromRecognizer(recognizer);
                    foreach (var phrase in options.Phrases)
                    {
                        phrases.AddPhrase(phrase);
                    }
                }

                //prepare events
                recognizer.Canceled += (sender, e) =>
                {
                    SpeechRecognizer = null;
                    Dispose(Disposables);

                    if (e.ErrorCode == CancellationErrorCode.Forbidden || e.ErrorCode == CancellationErrorCode.AuthenticationFailure)
                    {
                        //out of quota (or invalid key, try the next one anyway)
                        int credentialsIndexCurrent = CredentialsIndex;
                        if (NextCredentials())
                        {
                            Logger.LogInformation($"Out of quota for credentials {credentialsIndexCurrent}. Restarting with {CredentialsIndex}");

                            Threading.Tasks.FireAndForget(() => Start(options));
                            return;
                        }
                    }

                    if (e.Reason != CancellationReason.EndOfStream && e.Reason != CancellationReason.CancelledByUser)
                    {
                        Logger.LogWarning($"Recognition stopped. reason={e.Reason}, erroCode={e.ErrorCode}, details={e.ErrorDetails}");
                    }

                    Stopped?.Invoke(this, new SpeechRecognitionStoppedEvent()
                    {
                        Message = $"{e.ErrorCode}: {e.ErrorDetails}"
                    });
                };
                recognizer.Recognizing += (sender, e) =>
                {
                    OnSpeechEvent(e, false);
                };
                recognizer.Recognized += (sender, e) =>
                {
                    OnSpeechEvent(e, true);
                };
                recognizer.SpeechEndDetected += (sender, e) =>
                {
                    StreamAudioNoiseGate?.OnAudioStop();
                };

                //start recognizing
                await recognizer.StartContinuousRecognitionAsync();

                //start our audio source
                if (!IsRunning && StreamAudioSource != null)
                {
                    await StreamAudioSource.Start();
                }
            }
            catch (Exception e)
            {
                Logger.LogError(e, "Could not start continuous recognition");

                recognizer?.Dispose();
                throw;
            }

            SpeechRecognizer = recognizer;
            IsRunning        = true;

            Disposables.Add(recognizer);
        }
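OnSpeechEvent is referenced above but not included; a rough sketch under the assumption that it simply logs interim and final text (the real implementation is not part of this sample):

        // Hypothetical helper; "final" is true for Recognized events and false for Recognizing events.
        private void OnSpeechEvent(SpeechRecognitionEventArgs e, bool final)
        {
            if (!string.IsNullOrEmpty(e.Result.Text))
            {
                Logger.LogInformation($"{(final ? "Final" : "Partial")}: {e.Result.Text}");
            }
        }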
Example #9
        // Starts an infinite loop that repeatedly flips a coin and decides whether or not the user has won, based on their spoken input.
        public async Task Play()
        {
            // Loading the subscription key and service region from the settings file.
            Settings settings = JsonConvert.DeserializeObject <Settings>(File.ReadAllText(Path.Combine(Environment.CurrentDirectory, "settings.json")));

            var config = SpeechConfig.FromSubscription(settings.SubscriptionKey, settings.ServiceRegion);

            // Using the created config to create a new speech recognizer that can be used to convert speech to text.
            using var recognizer = new SpeechRecognizer(config);

            // Adding the words that the speech recognizer should listen for to the grammar list. This ensures that the recognizer hears "Tails" and not "Tales".
            var phraseList = PhraseListGrammar.FromRecognizer(recognizer);

            phraseList.AddPhrase("Heads");
            phraseList.AddPhrase("Tails");
            phraseList.AddPhrase("Stop");

            string userChoice = "";

            while (true)
            {
                Console.WriteLine("Say heads or tails:");

                // Recognizing a single input from the microphone; the recognizer stops listening after a single utterance is recognized.
                var result = await recognizer.RecognizeOnceAsync();

                // Going through the possible reasons a recognition result might be generated.
                switch (result.Reason)
                {
                case ResultReason.RecognizedSpeech:
                    userChoice = result.Text.Replace(".", "");
                    Console.WriteLine($"You said \"{userChoice}\"");
                    break;

                case ResultReason.NoMatch:
                    Console.WriteLine("Speech could not be recognized.\n");
                    break;

                default:
                    Console.WriteLine("There was a problem with the speech recognizer");
                    break;
                }

                // If the user said heads or tails then we flip the coin and report the result of the coin toss to the user, while also updating the statistics.
                if (userChoice == "Heads" || userChoice == "Tails")
                {
                    string flippedCoin = FlipCoin();

                    if (flippedCoin == userChoice)
                    {
                        winCounter += 1;
                        Console.WriteLine($"The coin landed {flippedCoin}, You won!\n");
                    }
                    else
                    {
                        lossCounter += 1;
                        Console.WriteLine($"The coin landed {flippedCoin}, You lost!\n");
                    }

                    Console.WriteLine(GetStatistics());
                }

                // Stopping the application if the user said "Stop".
                if (userChoice == "Stop")
                {
                    Environment.Exit(0);
                }
            }
        }
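Example #10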
        public async IAsyncEnumerable<RecognitionWord> ContinuousRecognition(IWaveProvider audioSource, [EnumeratorCancellation] CancellationToken cancellation, IAsyncEnumerable<string>? sourceLangs, IAsyncEnumerable<string>? phrases)
        {
            var config      = SpeechConfig.FromSubscription(_key, _region);
            var audioConfig = AudioConfig.FromStreamInput(new PullAdapter(audioSource, 24000), AudioStreamFormat.GetWaveFormatPCM(24000, 16, 1));

            using var recogniser = new SpeechRecognizer(config,
                                                        AutoDetectSourceLanguageConfig.FromLanguages(await (sourceLangs ?? Array.Empty<string>().ToAsyncEnumerable()).Append("en-GB").ToArrayAsync(cancellation)),
                                                        audioConfig
                                                        );

            // Add some likely words to the phrase dictionary
            var phraseList = PhraseListGrammar.FromRecognizer(recogniser);

            phraseList.AddPhrase("mute");
            phraseList.AddPhrase("discord");
            phraseList.AddPhrase("stop");
            if (phrases != null)
            {
                await foreach (var phrase in phrases.WithCancellation(cancellation))
                {
                    phraseList.AddPhrase(phrase);
                }
            }

            // Subscribe to recogniser results
            var results = new ConcurrentQueue <RecognitionWord>();

            recogniser.Recognized += (_, e) =>
            {
                if (e.Result.Reason == ResultReason.RecognizedSpeech)
                {
                    results.Enqueue(new RecognitionWord(e.Result.Text));
                }
                else if (e.Result.Reason == ResultReason.NoMatch)
                {
                    results.Enqueue(new RecognitionWord(null));
                }
            };

            recogniser.Canceled += (s, e) =>
            {
                Console.WriteLine($"CANCELED: Reason={e.Reason}");

                if (e.Reason == CancellationReason.Error)
                {
                    results.Enqueue(new RecognitionWord($"CANCELED: ErrorCode={e.ErrorCode}"));
                    results.Enqueue(new RecognitionWord($"CANCELED: ErrorDetails={e.ErrorDetails}"));
                    results.Enqueue(new RecognitionWord($"CANCELED: Did you update the subscription info?"));
                }
            };

            recogniser.SessionStarted += (_, e) =>
            {
                results.Enqueue(new RecognitionWord("Session_started_event."));
            };

            var stopped = false;

            recogniser.SessionStopped += (_, e) =>
            {
                results.Enqueue(new RecognitionWord("Session_stopped_event."));
                stopped = true;
            };

            // Return recognised results until cancelled
            await recogniser.StartContinuousRecognitionAsync();

            while (!cancellation.IsCancellationRequested && !stopped)
            {
                if (results.TryDequeue(out var r))
                {
                    yield return(r);
                }
                else
                {
                    // Yield briefly so the loop does not busy-spin while no results are queued
                    await Task.Delay(10);
                }
            }

            // Stop receiving further results
            await recogniser.StopContinuousRecognitionAsync();

            // Finish sending remaining results
            foreach (var result in results)
            {
                yield return(result);
            }
        }