Example #1
        public static async Task RecognizeSpeech()
        {
            var speechConfig = SpeechConfig.FromSubscription("0b3abd1c585b4403a373e6b862099949", "uksouth");

            using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            using var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

            var result = await recognizer.RecognizeOnceAsync();

            if (result.Reason == ResultReason.RecognizedSpeech)
            {
                if (result.Text == "Red")
                {
                    Console.BackgroundColor = ConsoleColor.Red;
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                }
                else if (result.Text == "Blue")
                {
                    Console.BackgroundColor = ConsoleColor.Blue;
                    Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                }
            }
        }
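Note that RecognizeOnceAsync usually returns capitalized, punctuated text (for example "Red." rather than "Red"), so the exact comparisons above can silently fail. A minimal normalization sketch; the helper name is ours, not from the original example:

        // Hypothetical helper: strip trailing punctuation and lower-case the
        // recognized text before comparing it against expected commands.
        private static string NormalizeRecognizedText(string text) =>
            text.Trim().TrimEnd('.', '!', '?').ToLowerInvariant();

        // Usage: if (NormalizeRecognizedText(result.Text) == "red") { ... }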
Example #2
        private async Task VerificationEnroll(SpeechConfig config, Dictionary <string, string> profileMapping)
        {
            using (var client = new VoiceProfileClient(config))
                using (var profile = await client.CreateProfileAsync(VoiceProfileType.TextDependentVerification, "en-us"))
                {
                    using (var audioInput = AudioConfig.FromDefaultMicrophoneInput())
                    {
                        Console.WriteLine($"Enrolling profile id {profile.Id}.");
                        // give the profile a human-readable display name
                        profileMapping.Add(profile.Id, "Your Name");

                        VoiceProfileEnrollmentResult result = null;
                        while (result is null || result.RemainingEnrollmentsCount > 0)
                        {
                            Console.WriteLine("Speak the passphrase, \"My voice is my passport, verify me.\"");
                            result = await client.EnrollProfileAsync(profile, audioInput);

                            Console.WriteLine($"Remaining enrollments needed: {result.RemainingEnrollmentsCount}");
                            Console.WriteLine("");
                        }

                        if (result.Reason == ResultReason.EnrolledVoiceProfile)
                        {
                            await SpeakerVerify(config, profile, profileMapping);
                        }
                        else if (result.Reason == ResultReason.Canceled)
                        {
                            var cancellation = VoiceProfileEnrollmentCancellationDetails.FromResult(result);
                            Console.WriteLine($"CANCELED {profile.Id}: ErrorCode={cancellation.ErrorCode} ErrorDetails={cancellation.ErrorDetails}");
                        }
                    }
                }
        }
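A hypothetical caller for the enrollment method above; the key and region are placeholders, and profileMapping maps profile IDs to display names:

        private async Task RunEnrollment()
        {
            // Sketch only: substitute real credentials before running.
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
            var profileMapping = new Dictionary<string, string>();
            await VerificationEnroll(config, profileMapping);
        }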
Example #3
        public static async Task KeywordRecognizer()
        {
            Console.WriteLine("say something ...");
            using (var audioInput = AudioConfig.FromDefaultMicrophoneInput())
            {
                using (var recognizer = new KeywordRecognizer(audioInput))
                {
                    var model  = KeywordRecognitionModel.FromFile("YourKeywordModelFilename.");
                    var result = await recognizer.RecognizeOnceAsync(model).ConfigureAwait(false);

                    Console.WriteLine($"got result reason as {result.Reason}");
                    if (result.Reason == ResultReason.RecognizedKeyword)
                    {
                        var stream = AudioDataStream.FromResult(result);

                        await Task.Delay(2000);

                        stream.DetachInput();
                        await stream.SaveToWaveFileAsync("AudioFromRecognizedKeyword.wav");
                    }
                    else
                    {
                        Console.WriteLine($"got result reason as {result.Reason}. You can't get audio when no keyword is recognized.");
                    }
                }
            }
        }
Example #4
        /// <summary>
        /// Create a DialogServiceConnector from the user-provided input
        /// </summary>
        public void InitDialogServiceConnector()
        {
            DialogServiceConfig dialogServiceConfig = BotFrameworkConfig.FromSubscription(SubscriptionTB.Text, RegionTB.Text);

            if (dialogServiceConnector != null)
            {
                dialogServiceConnector.SessionStarted   -= DialogServiceConnector_SessionStarted;
                dialogServiceConnector.SessionStopped   -= DialogServiceConnector_SessionStopped;
                dialogServiceConnector.Recognizing      -= DialogServiceConnector_Recognizing;
                dialogServiceConnector.Recognized       -= DialogServiceConnector_Recognized;
                dialogServiceConnector.ActivityReceived -= DialogServiceConnector_ActivityReceived;
                dialogServiceConnector.Canceled         -= DialogServiceConnector_Canceled;
            }

            var audioConfig = AudioConfig.FromDefaultMicrophoneInput();

            dialogServiceConnector = new DialogServiceConnector(dialogServiceConfig, audioConfig);
            dialogServiceConnector.SessionStarted   += DialogServiceConnector_SessionStarted;
            dialogServiceConnector.SessionStopped   += DialogServiceConnector_SessionStopped;
            dialogServiceConnector.Recognizing      += DialogServiceConnector_Recognizing;
            dialogServiceConnector.Recognized       += DialogServiceConnector_Recognized;
            dialogServiceConnector.ActivityReceived += DialogServiceConnector_ActivityReceived;
            dialogServiceConnector.Canceled         += DialogServiceConnector_Canceled;

            SendActivityButton.IsEnabled = true;
            StartButton.IsEnabled        = true;
        }
Example #5
    /// <summary>
    /// Uses the provided properties to create a connector from config and register callbacks
    /// </summary>
    private void CreateDialogServiceConnector()
    {
        Debug.Log($"CreateDialogServiceConnector enter");

        if (dialogServiceConnector == null)
        {
            if (string.IsNullOrEmpty(subscriptionKey) || string.IsNullOrEmpty(region))
            {
                Debug.Log($"One or more input fields weren't provided. Check the fields in the Canvas object or in the script source");
                throw new InvalidOperationException("DialogServiceConfig creation failed");
            }

            // Creates an instance of a DialogServiceConfig with your bot connection ID, subscription key, and service region.
            // Replace in the editor on the Canvas object OR directly in the code, above in the member declarations
            dialogServiceConfig = BotFrameworkConfig.FromSubscription(subscriptionKey, region);
            if (dialogServiceConfig == null)
            {
                Debug.Log($"One or more input fields weren't provided. Check the fields in the Canvas object or in the script source");
                throw new InvalidOperationException("DialogServiceConfig creation failed");
            }

            AudioConfig audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            dialogServiceConnector = new DialogServiceConnector(dialogServiceConfig, audioConfig);

            dialogServiceConnector.ActivityReceived += DialogServiceConnector_ActivityReceived;
            dialogServiceConnector.Canceled         += DialogServiceConnector_Canceled;
            dialogServiceConnector.Recognized       += DialogServiceConnector_Recognized;
        }

        stateIndicatorString = "DialogServiceConnector created";

        ttsAudio = GetComponent <AudioSource>();

        Debug.Log($"CreateDialogServiceConnector exit");
    }
Example #6
 public async Task <string> CreateRecognitionModelFromMicrophoneAsync(SpeechConfig config)
 {
     using (var audioInput = AudioConfig.FromDefaultMicrophoneInput())
     {
         return(await EnrollProfileAsync(config, audioInput, VoiceProfileType.TextIndependentIdentification));
     }
 }
Example #7
        private async void speechButton_ClickAsync(object sender, RoutedEventArgs e)
        {
            bool isChecked = (sender as ToggleButton).IsChecked ?? false;

            if (isChecked)
            {
                string region = "westeurope";
                string key    = "10b3abeb322347b69ae151bb56a618cd";

                var speechConfig = SpeechConfig.FromSubscription(key, region);
                speechConfig.SpeechRecognitionLanguage = "de-DE";
                using (var audioConfig = AudioConfig.FromDefaultMicrophoneInput())
                {
                    using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
                    {
                        var result = await recognizer.RecognizeOnceAsync();

                        contentRichTextbox.Document.Blocks.Add(new Paragraph(new Run(result.Text)));   // Add the recognized text to the RichTextBox
                    }
                }
            }
            else
            {
            }
        }
Example #8
        private async void OnRecognitionButtonClicked(object sender, EventArgs e)
        {
            bool locationAccessGranted = await DependencyService.Get <ILocationService>().GetPermissionsAsync();

            if (!locationAccessGranted)
            {
                UpdateUI("Please give location access.");
            }

            if (_recognizer == null)
            {
                _recognizer = new KeywordRecognizer(AudioConfig.FromDefaultMicrophoneInput());
            }
            if (_model == null)
            {
                var kwsModelDir = DependencyService.Get <IFileSystemService>().GetWakeWordModelPath(kwsModelFile);
                _model = KeywordRecognitionModel.FromFile(kwsModelDir);
            }

            UpdateUI("Say wakeword to start recording speech.");
            _result = await _recognizer.RecognizeOnceAsync(_model).ConfigureAwait(false);

            var locationResult = await DependencyService.Get <ILocationService>().GetCurrentGPSCoordinatesAsync();

            string message = $"Detected keyword at TIME: {DateTime.Now} and LOCATION: {locationResult}";

            UpdateUI(message);

//            UpdateUI("Got a keyword, now you can keep talking...");
//            await DoSpeechRecognition().ConfigureAwait(false);
        }
Example #9
        public async Task StartAsync(string fileName = null)
        {
            var speechConfig = SpeechConfig.FromSubscription(this.settings.SubscriptionKey, this.settings.Region);

            speechConfig.SpeechRecognitionLanguage = "de-de";
            speechConfig.OutputFormat = OutputFormat.Detailed;

            using (var audioInput = fileName == null ? AudioConfig.FromDefaultMicrophoneInput() : AudioConfig.FromWavFileInput(fileName))
            {
                using (var intentRecognizer = new IntentRecognizer(speechConfig, audioInput))
                {
                    stopRecognition = new TaskCompletionSource <int>();

                    var model = LanguageUnderstandingModel.FromAppId(this.settings.LuisAppId);

                    intentRecognizer.AddAllIntents(model);

                    intentRecognizer.SessionStarted      += IntentRecognizer_SessionStarted;
                    intentRecognizer.Recognized          += IntentRecognizer_Recognized;
                    intentRecognizer.Recognizing         += IntentRecognizer_Recognizing;
                    intentRecognizer.SessionStopped      += IntentRecognizer_SessionStopped;
                    intentRecognizer.SpeechEndDetected   += IntentRecognizer_SpeechEndDetected;
                    intentRecognizer.SpeechStartDetected += IntentRecognizer_SpeechStartDetected;
                    intentRecognizer.Canceled            += IntentRecognizer_Canceled;

                    await intentRecognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    Task.WaitAny(stopRecognition.Task);

                    await intentRecognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
        }
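The event handlers above are registered by name but their bodies are not shown. A hedged sketch of the two that release the Task.WaitAny gate, assuming they simply complete stopRecognition:

        // Sketch only: the original handler bodies are not part of this example.
        // Completing the TaskCompletionSource lets Task.WaitAny(stopRecognition.Task) return.
        private void IntentRecognizer_SessionStopped(object sender, SessionEventArgs e)
        {
            Console.WriteLine("Session stopped.");
            stopRecognition.TrySetResult(0);
        }

        private void IntentRecognizer_Canceled(object sender, IntentRecognitionCanceledEventArgs e)
        {
            Console.WriteLine($"CANCELED: Reason={e.Reason}");
            stopRecognition.TrySetResult(0);
        }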
Example #10
        private static void Initialize()
        {
            SpeechConfig speechConfig =
                SpeechConfig.FromSubscription(s_twitchBotSettings.SpeechKey, s_twitchBotSettings.SpeechRegion);

            s_audioConfig       = AudioConfig.FromDefaultMicrophoneInput();
            s_speechRecognizer  = new SpeechRecognizer(speechConfig, s_audioConfig);
            s_speechSynthesizer = new SpeechSynthesizer(speechConfig, s_audioConfig);
        }
Example #11
        public async Task RecognizeSpeech()
        {
            var audioConfig  = AudioConfig.FromDefaultMicrophoneInput();
            var speechConfig = SpeechConfig.FromSubscription(key, "westus2");

            // Creates an intent recognizer.
            using (var recognizer = new IntentRecognizer(speechConfig, audioConfig))
            {
                // Hide user secrets later
                var model = LanguageUnderstandingModel.FromAppId(Environment.GetEnvironmentVariable("LUIS_APP_ID"));
                recognizer.AddAllIntents(model);

                var stopRecognition = new TaskCompletionSource <int>();
                // Can add logic to exit using voice command, "Thanks see you at the window" etc.
                // Subscribe to appropriate events
                recognizer.Recognizing += (s, e) =>
                {
                    // Use this to send partial responses
                    Console.WriteLine($"Partial: {e.Result.Text}");
                };

                recognizer.Recognized += (s, e) =>
                {
                    var exit = ProcessRecognizedText(s, e);
                    if (exit)
                    {
                        recognizer.StopContinuousRecognitionAsync().Wait(); //ConfigureAwait(false);
                    }
                };

                recognizer.SessionStarted += (s, e) =>
                {
                    Console.WriteLine("Session started event.");
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    Console.WriteLine("Session stopped event.");
                    stopRecognition.TrySetResult(0);
                };

                recognizer.Canceled += (s, e) =>
                {
                    Console.WriteLine(e.ErrorDetails);
                    stopRecognition.TrySetResult(0);
                };

                // Instantiate new Order object
                _order = new Order();

                Console.WriteLine("Say something to get started, or \"Exit\" to quit.");
                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Block until a SessionStopped or Canceled event completes stopRecognition.
                Task.WaitAny(new[] { stopRecognition.Task });
            }
        }
Example #12
    async static Task FromMic(SpeechConfig speechConfig)
    {
        var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
        var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

        Console.WriteLine("Speak into your microphone.");
        var result = await recognizer.RecognizeOnceAsync();

        Debug.Log(result.Text);
    }
Example #13
        async static Task FromMic(SpeechConfig speechConfig)
        {
            using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            using var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

            Console.WriteLine("Speak into your microphone.");
            var result = await recognizer.RecognizeOnceAsync();

            Console.WriteLine($"RECOGNIZED: Text={result.Text}");
        }
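A minimal caller for the method above; the subscription key and region are placeholders:

        // Sketch only: substitute your own key and region.
        static async Task Main()
        {
            var speechConfig = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");
            await FromMic(speechConfig);
        }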
Example #14
        public static async Task SpeakerVerify(SpeechConfig config, VoiceProfile profile, Dictionary <string, string> profileMapping)
        {
            var speakerRecognizer = new SpeakerRecognizer(config, AudioConfig.FromDefaultMicrophoneInput());
            var model             = SpeakerVerificationModel.FromProfile(profile);

            Console.WriteLine("Speak the passphrase to verify: \"My voice is my passport, please verify me.\"");
            var result = await speakerRecognizer.RecognizeOnceAsync(model);

            Console.WriteLine($"Verified voice profile for speaker {profileMapping[result.ProfileId]}, score is {result.Score}");
        }
Example #15
        //////////////////////////////////////////////////////////////   LISTENING COMMANDS   ////////////////////////////////////////////////////////////////////////////////////
        public static async Task RecognizeSpeechAsync()
        {
            Console.Clear();
            Console.WriteLine("Please Say 'Hey Rosita' to begin");
            var keywordModel = KeywordRecognitionModel.FromFile("C:\\Users\\Johnny\\Documents\\GitHub\\Rosita\\Rosita\\827f85af-e8cd-44ad-8d48-1963414c3bde.table");

            using var audioConfig10     = AudioConfig.FromDefaultMicrophoneInput();
            using var keywordRecognizer = new KeywordRecognizer(audioConfig10);
            KeywordRecognitionResult keyresult = await keywordRecognizer.RecognizeOnceAsync(keywordModel);

            var config =
                SpeechConfig.FromSubscription(
                    "aabb8086039843e7b4339dd4928f2de1",
                    "eastus");

            using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            using var recognizer  = new SpeechRecognizer(config, audioConfig);

            Console.WriteLine("Say something...");
            var result = await recognizer.RecognizeOnceAsync();

            string command = result.Text;

            switch (result.Reason)
            {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                break;

            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
            }
            //////////////////////////////////////////////////////////////   LISTENING COMMANDS END   /////////////////////////////////////////////////////////////////////////

            /////////////////////////////////////////////////////////////////////   LINK TO KEY PHRASES   ////////////////////////////////////////////////////////////////////////////

            await Speech.TalkingAsync(command.ToLower());

            /////////////////////////////////////////////////////////////////////   LINK TO KEY PHRASES END  ////////////////////////////////////////////////////////////////////////////
        }
Example #16
        async static Task FromMic(SpeechConfig speechConfig)
        {
            using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            using var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

            Console.WriteLine("Habla al microfono");
            var result = await recognizer.RecognizeOnceAsync();

            Console.WriteLine($"Esto es lo que reconozco: {result.Text}");
            Console.ReadKey();
        }
Example #17
/*
 * Main functions
 */

        static AudioConfig args_to_audio_config(string[] args)
        {
            if (get_cmd_option(args, "-i") is string audio_input_file)
            {
                return(AudioConfig.FromWavFileInput(audio_input_file));
            }
            else
            {
                return(AudioConfig.FromDefaultMicrophoneInput());
            }
        }
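args_to_audio_config relies on a get_cmd_option helper that the example does not show. A minimal sketch, assuming it returns the token that follows the flag, or null when the flag is absent:

        // Sketch of the missing helper, matching its use above: the null return
        // makes the `is string` pattern in args_to_audio_config fall through.
        static string get_cmd_option(string[] args, string option)
        {
            int index = Array.IndexOf(args, option);
            return (index >= 0 && index + 1 < args.Length) ? args[index + 1] : null;
        }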
        /// <summary>
        /// Constructs an <see cref="AudioConfig"/> from <see cref="Config"/>.
        /// Depending on the available services, this may either use the audio features built into the Speech SDK (such as <see cref="AudioConfig.FromDefaultMicrophoneInput"/>),
        /// or it may construct a <see cref="IStreamAudioSource"/> that accesses the requested <see cref="AudioDevice"/> with resampling and noise gates as required.
        /// </summary>
        /// <returns></returns>
        protected AudioConfig GetAudioConfig()
        {
            var streamSource = GetStreamAudioSource(Config.AudioSource);

            if (streamSource != null)
            {
                //use this stream source and convert to an Azure audio stream
                try
                {
                    var azureInput = AudioInputStream.CreatePushStream(AudioStreamFormat.GetWaveFormatPCM(
                                                                           (uint)streamSource.Format.SampleRate,
                                                                           (byte)streamSource.Format.BitsPerSample,
                                                                           (byte)streamSource.Format.ChannelCount));

                    byte[] bufferOptional = null;
                    streamSource.DataAvailable += (s, e) =>
                    {
                        azureInput.Write(e.Buffer.GetArray(ref bufferOptional), e.Buffer.Count);
                    };
                    streamSource.Stopped += (s, e) =>
                    {
                        if (e.Cause == StreamAudioSourceStoppedCause.Stopped)
                        {
                            //signal end-of-stream to Azure
                            azureInput.Close();
                        }
                    };

                    this.StreamAudioSource = streamSource;
                    return(AudioConfig.FromStreamInput(azureInput));
                }
                catch (Exception ex)
                {
                    Logger.LogError(ex, $"Error while creating an Azure AudioConfig from an IStreamAudioSource. Format: SampleRate={streamSource.Format.SampleRate}, BitsPerSample={streamSource.Format.BitsPerSample}, Channels={streamSource.Format.ChannelCount}");
                    streamSource.Dispose();
                }
            }

            this.StreamAudioSource    = null;
            this.StreamAudioNoiseGate = null;

            //try and use the built-in audio engine
            if (Config.AudioSource is AudioDevice audioDevice)
            {
                if (audioDevice.UseDefaultAudioInputDevice)
                {
                    return(AudioConfig.FromDefaultMicrophoneInput());
                }
            }

            return(null);
        }
Example #19
 protected virtual void MicConfiguration()
 {
     try
     {
         var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
         _speechRecognizer = new SpeechRecognizer(_speechConfig, audioConfig);
         AddEvents();
     }
     catch (Exception)
     {
         // Rethrow with "throw;" to preserve the original stack trace ("throw ex;" resets it).
         throw;
     }
 }
Example #20
        //Speech Button
        private async void SpeechButton_Click(object sender, RoutedEventArgs e)
        {
            var speechConfig =
                SpeechConfig.FromSubscription(SecretsHelper.GetAzureServiceApiKey(), SecretsHelper.GetAzureRegion());

            using (var audioConfig = AudioConfig.FromDefaultMicrophoneInput())
                using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
                {
                    var result = await recognizer.RecognizeOnceAsync();

                    contentRichTextBox.Document.Blocks.Add(new Paragraph(new Run(result.Text)));
                }
        }
Example #21
        private AudioConfig GetAudioConfig(InputSourceType inputSource, string wavFilename)
        {
            switch (inputSource)
            {
            case InputSourceType.Microphone:
                return(AudioConfig.FromDefaultMicrophoneInput());

            case InputSourceType.WavFile:
                return(AudioConfig.FromWavFileInput(wavFilename));

            default:
                throw new ArgumentException($"Unhandled InputSourceType: {inputSource}");
            }
        }
Example #22
        protected override async Task OnMessageActivityAsync(ITurnContext <IMessageActivity> turnContext, CancellationToken cancellationToken)
        {
            await turnContext.SendActivityAsync(MessageFactory.Text($"Echo: {turnContext.Activity.Text}"), cancellationToken);

            var config = SpeechConfig.FromSubscription("----------------------------", "northeurope");

            using (var recognizer = new SpeechRecognizer(config, AudioConfig.FromDefaultMicrophoneInput()))
            {
                await turnContext.SendActivityAsync(MessageFactory.Text($"Echo: Please say some words."), cancellationToken);

                var result = await recognizer.RecognizeOnceAsync();

                await turnContext.SendActivityAsync(MessageFactory.Text($"Echo: {result.Text}"), cancellationToken);
            }
        }
Example #23
        private async void BtnSpeech_OnClick(object sender, RoutedEventArgs e)
        {
            string region       = "westeurope";
            string key          = "cdc0886a26494b6ea32cb41cc17165c5";
            var    speechConfig = SpeechConfig.FromSubscription(key, region);


            using (var audioConfig = AudioConfig.FromDefaultMicrophoneInput())
            {
                using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
                {
                    var result = await recognizer.RecognizeOnceAsync();

                    RichTextContent.Document.Blocks.Add(new Paragraph(new Run(result.Text)));
                }
            }
        }
Example #24
        private async void speechButton_Click(object sender, RoutedEventArgs e)
        {
            string region = "westus";
            string key    = "4e1418a74a6a457e83faabc6451fe62d";

            var speechConfig = SpeechConfig.FromSubscription(key, region);

            using (var audioConfig = AudioConfig.FromDefaultMicrophoneInput())
            {
                using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
                {
                    var result = await recognizer.RecognizeOnceAsync();

                    contentRichTextBox.Document.Blocks.Add(new Paragraph(new Run(result.Text)));
                }
            }
        }
Example #25
        public static async Task <string> VozATexto()
        {
            // Create the required configuration
            var speechConfig = SpeechConfig.FromSubscription(SPEECH_SUBSCRIPTION_KEY, SPEECH_REGION);

            using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
            // To recognize speech from an audio file instead of the microphone, use the following line
            //using var audioConfig = AudioConfig.FromWavFileInput("PathToFile.wav");

            // Create the client
            using var cliente = new SpeechRecognizer(speechConfig, "es-ES", audioConfig);

            // Call the API
            SpeechRecognitionResult resultado = await cliente.RecognizeOnceAsync();

            // Return the result
            return(resultado.Text);
        }
Example #26
File: Program.cs Project: petefield/speech
        async static Task Main(string[] args)
        {
            const string WAKE_WORD    = "hey computer";
            var          speechConfig = SpeechConfig.FromSubscription("e073d2855d604ddda74ba6518ab2e6b3", "westeurope");
            var          Intentconfig = SpeechConfig.FromSubscription("9051c66d5ba949ac84e32b01c37eb9b4", "westus");
            var          audioConfig  = AudioConfig.FromDefaultMicrophoneInput();

            var model = LanguageUnderstandingModel.FromAppId("7f7a9344-69b6-4582-a01d-19ffa3c9bed8");

            var continuousRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
            var intentRecognizer     = new IntentRecognizer(Intentconfig, audioConfig);

            intentRecognizer.AddAllIntents(model);

            var  synthesizer        = new SpeechSynthesizer(speechConfig);
            bool _waitingForCommand = false;

            continuousRecognizer.Recognized += async (s, e) =>
            {
                if (!_waitingForCommand)
                {
                    if (e.Result.Reason == ResultReason.RecognizedSpeech)
                    {
                        Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");

                        if (e.Result.Text.Contains(WAKE_WORD, StringComparison.CurrentCultureIgnoreCase))
                        {
                            Console.WriteLine($"RECOGNIZED: {WAKE_WORD}");
                            _waitingForCommand = true;
                            await ParseCommand(synthesizer, await awaitCommand(intentRecognizer, synthesizer));

                            _waitingForCommand = false;
                            Console.WriteLine("Listening for wake word.");
                        }
                    }
                }
            };

            await continuousRecognizer.StartContinuousRecognitionAsync();

            Console.Write("Press any key!");
            Console.Read();
        }
Example #27
        private async void RecognizeKeywordButton_Click(object sender, RoutedEventArgs e)
        {
            RecognizeKeywordButton.IsEnabled = false;

            if (recognizer == null)
            {
                recognizer = new KeywordRecognizer(AudioConfig.FromDefaultMicrophoneInput());
            }
            if (model == null)
            {
                await InitializeKeywordModel();
            }

            NotifyUser("Say \"Computer\"", NotifyType.StatusMessage);
            result = await recognizer.RecognizeOnceAsync(model);

            NotifyUser("Got a keyword, now you can keep talking...", NotifyType.StatusMessage);
            SaveToFileButton.IsEnabled = true;
        }
Example #28
        // Speech recognition from the microphone
        public static async Task RecognitionWithMicrophoneAsync()
        {
            // Create the speech recognition configuration; an Azure Cognitive Services subscription key and service region are required
            var config = SpeechConfig.FromSubscription(YourSubscriptionKey, YourServiceRegion);

            // Use US English (en-us) as the default recognition language
            config.SpeechRecognitionLanguage = "en-us";

            // Create the speech recognizer, using the machine's default microphone as the audio source
            using (var recognizer = new SpeechRecognizer(config, AudioConfig.FromDefaultMicrophoneInput()))
            {
                Console.WriteLine("Say something...");

                // Start recognition; a result is returned once the end of an utterance is detected.
                // The silence that follows a sentence marks its end; speech longer than 15 seconds is also broken into sentences.
                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                // Print the recognition result
                switch (result.Reason)
                {
                case ResultReason.RecognizedSpeech:
                    Console.WriteLine($"RECOGNIZED: {result.Text}");
                    break;

                case ResultReason.NoMatch:
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    break;

                case ResultReason.Canceled:
                default:
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                    break;
                }
            }
        }
Example #29
        async static Task FromMic(SpeechConfig speechConfig)
        {
            // Loop until the user says "end"
            while (true)
            {
                using var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
                using var recognizer  = new SpeechRecognizer(speechConfig, audioConfig);

                Console.WriteLine("Speak into your microphone.");
                var result = await recognizer.RecognizeOnceAsync();

                Console.WriteLine($"RECOGNIZED: Text={result.Text}");

                if (result.Text == "end." || result.Text == "End.")
                {
                    break;
                }
            }
        }
Example #30
 //
 // Create AudioConfig
 //
 private AudioConfig AudioConfigFromUserConfig()
 {
     if (this.userConfig.inputFilePath is string inputFilePathValue)
     {
         if (!this.userConfig.useCompressedAudio)
         {
             return(Helper.OpenWavFile(inputFilePathValue, AudioProcessingOptions.Create(0)));
         }
         else
         {
             var reader = new BinaryReader(File.OpenRead(inputFilePathValue));
             var format = AudioStreamFormat.GetCompressedFormat(userConfig.compressedAudioFormat);
             var stream = new PullAudioInputStream(new BinaryAudioStreamReader(reader), format);
             return(AudioConfig.FromStreamInput(stream));
         }
     }
     else
     {
         return(AudioConfig.FromDefaultMicrophoneInput());
     }
 }
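The BinaryAudioStreamReader used above is not shown. A sketch consistent with the Azure Speech SDK samples, assuming it adapts a BinaryReader to the SDK's pull-stream callback interface:

 // Sketch only: adapts a BinaryReader to PullAudioInputStreamCallback so the
 // compressed-audio bytes can be pulled by the Speech SDK.
 public class BinaryAudioStreamReader : PullAudioInputStreamCallback
 {
     private readonly System.IO.BinaryReader _reader;

     public BinaryAudioStreamReader(System.IO.BinaryReader reader)
     {
         _reader = reader;
     }

     // Called by the SDK to pull up to `size` bytes; returning 0 signals end-of-stream.
     public override int Read(byte[] dataBuffer, uint size)
     {
         return _reader.Read(dataBuffer, 0, (int)size);
     }

     protected override void Dispose(bool disposing)
     {
         if (disposing)
         {
             _reader.Dispose();
         }
         base.Dispose(disposing);
     }
 }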