Example No. 1
        protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
        {
            base.OnActivityResult(requestCode, resultCode, data);

            if (requestCode == SpeechRecognitionAndroidService.SpeakRequestCode)
            {
                if (resultCode == Result.Ok)
                {
                    var matches = data.GetStringArrayListExtra(RecognizerIntent.ExtraResults);

                    if (matches != null && matches.Count != 0)
                    {
                        Console.WriteLine(matches[0]);

                        SpeechRecognized?.Invoke(
                            this, new SpeechRecognizedEvent(recognizedText: matches[0]));
                    }
                    else
                    {
                        ReturnEmptyResponse();
                    }
                }
                else
                {
                    ReturnEmptyResponse();
                }
            }
        }
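The handler above assumes a request-code constant, an event, and an event-args type that this page does not show. A minimal sketch of those declarations, with every name inferred from the call sites (the real project may define them differently):

        // Hypothetical declarations inferred from the handler above.
        public class SpeechRecognizedEvent : EventArgs
        {
            public SpeechRecognizedEvent(string recognizedText)
            {
                RecognizedText = recognizedText;
            }

            public string RecognizedText { get; }
        }

        // Inside SpeechRecognitionAndroidService:
        public const int SpeakRequestCode = 42; // any value unique among the activity's request codes
        public event EventHandler<SpeechRecognizedEvent> SpeechRecognized;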
Example No. 2
 private void InvokeSpeechRecognized(String recognizedText)
 {
     if (SpeechRecognized != null)
     {
         SpeechRecognized.Invoke(this, recognizedText);
     }
 }
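Since C# 6 the explicit null check can be collapsed into a null-conditional invocation, the pattern most of the later examples on this page use. Besides being shorter, it reads the delegate field only once, so it cannot throw if the last subscriber detaches between the check and the call:

 private void InvokeSpeechRecognized(String recognizedText)
 {
     SpeechRecognized?.Invoke(this, recognizedText);
 }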
Example No. 3
        /// <summary>
        /// Speech recognized event handler
        /// </summary>
        /// <param name="sender">Event source (unused)</param>
        /// <param name="eventArgs">Recognition data for the recognized phrase</param>
        private void SpeechRecognizedHandler(object sender, SpeechRecognizedEventArgs eventArgs)
        {
            RecognitionResult result = eventArgs.Result;
            SpeechRecognized  msg    = new SpeechRecognized();

            SpeechRecognizedNotification notification = new SpeechRecognizedNotification();

            if (result.Audio != null)
            {
                notification.StartTime = result.Audio.StartTime;
                notification.Duration  = result.Audio.Duration;
            }
            else
            {
                // If the engine's audio input is set to null, no audio information is available
                notification.StartTime = new DateTime(0);
                notification.Duration  = new TimeSpan(0);
            }
            notification.Confidence = result.Confidence;
            notification.Text       = result.Text;
            notification.Semantics  = new RecognizedSemanticValue(null, result.Semantics);

            msg.Body = notification;
            SendNotification(_subMgrPort, msg);
        }
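Example No. 7 below calls a PopulateCommonSpeechNotificationInformation helper instead of inlining this audio check. A plausible reconstruction of that helper, based only on the inline logic above (the actual implementation is not shown on this page):

        // Hypothetical reconstruction of the helper used in Example No. 7.
        private static void PopulateCommonSpeechNotificationInformation(
            RecognitionResult result, SpeechRecognizedNotification notification)
        {
            if (result.Audio != null)
            {
                notification.StartTime = result.Audio.StartTime;
                notification.Duration  = result.Audio.Duration;
            }
            else
            {
                // No audio information is available when the engine's input is null.
                notification.StartTime = new DateTime(0);
                notification.Duration  = new TimeSpan(0);
            }
        }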
Example No. 4
        protected void RecEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            VoiceCommand voiceCommand = _listCommands.FirstOrDefault(x => x.Commands.Contains(e.Result.Text))?.Copy();

            if (voiceCommand == null)
            {
                return;
            }
            voiceCommand.DoSayResponse = false;
            if (e.Result.Confidence < ConfidenceThreshold)
            {
                voiceCommand.ActionToPerform = VoiceCommandAction.DidntGetThat;
                voiceCommand.Response        = "I didn't get that";
            }
            switch (voiceCommand.ActionToPerform)
            {
            case VoiceCommandAction.GiveFeedback:
                if (IsListening)
                {
                    _isGivingFeedback = true;
                    SayResponseAsync(voiceCommand.Response);
                }
                break;

            case VoiceCommandAction.StopGivingFeedback:
                if (IsListening)
                {
                    SayResponseAsync(voiceCommand.Response);
                    _isGivingFeedback = false;
                }
                break;

            case VoiceCommandAction.DidntGetThat:
                if (IsListening)
                {
                    SayResponseAsync(voiceCommand.Response);
                }
                break;

            case VoiceCommandAction.StartListening:
                IsListening = true;
                SayResponseAsync(voiceCommand.Response);
                break;

            case VoiceCommandAction.StopListening:
                IsListening = false;
                SayResponseAsync(voiceCommand.Response);
                break;

            default:
                voiceCommand.DoSayResponse = true;
                break;
            }
            if (!IsListening && voiceCommand.ActionToPerform != VoiceCommandAction.StopListening)
            {
                return;
            }
            SpeechRecognized?.Invoke(this, new ODSpeechRecognizedEventArgs(voiceCommand));
        }
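The handler relies on a VoiceCommand type that this page does not include. A sketch of the shape implied by the call sites (the property and method names come from the usage above; types and defaults are guesses):

        // Hypothetical shape inferred from RecEngine_SpeechRecognized.
        public class VoiceCommand
        {
            public List<string> Commands { get; set; } = new List<string>();
            public VoiceCommandAction ActionToPerform { get; set; }
            public string Response { get; set; }
            public bool DoSayResponse { get; set; }

            // Shallow copy, so the handler can mutate its own instance.
            public VoiceCommand Copy() => (VoiceCommand)MemberwiseClone();
        }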
Example No. 5
 public Listening(SpeechRecognized recDelegate)
 {
     // Create a recognizer for the current culture and load its grammars.
     ear = new SpeechRecognitionEngine(System.Globalization.CultureInfo.CurrentCulture);
     createGrammars();
     State = ListenerState.Default;

     // Forward recognition events to the caller-supplied delegate.
     ear.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(recDelegate);
     ear.SpeechDetected += new EventHandler<SpeechDetectedEventArgs>(ear_SpeechDetected);

     // Listen on the default microphone and keep recognizing until stopped.
     ear.SetInputToDefaultAudioDevice();
     ear.RecognizeAsync(RecognizeMode.Multiple);
 }
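createGrammars() is not shown. With System.Speech it must load at least one Grammar before RecognizeAsync starts; a minimal sketch with placeholder phrases:

 private void createGrammars()
 {
     // Placeholder phrases; the real grammars are not shown on this page.
     var commands = new Choices("start", "stop", "pause");
     ear.LoadGrammar(new Grammar(new GrammarBuilder(commands)));
     ear.LoadGrammar(new DictationGrammar()); // optional free-form dictation
 }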
Example No. 6
        void _rg_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            if (e.Result == null)
            {
                return;
            }

            var ev = new RecognitionEventArgs(e.Result.Confidence, e.Result.Text);

            SpeechRecognized?.Invoke(sender, ev);
        }
Example No. 7
        /// <summary>
        /// Speech recognized event handler
        /// </summary>
        /// <param name="sender">Event source (unused)</param>
        /// <param name="eventArgs">Recognition data for the recognized phrase</param>
        private void SpeechRecognizedHandler(object sender, SpeechRecognizedEventArgs eventArgs)
        {
            RecognitionResult result = eventArgs.Result;
            SpeechRecognized  msg    = new SpeechRecognized();

            SpeechRecognizedNotification notification = new SpeechRecognizedNotification();

            PopulateCommonSpeechNotificationInformation(result, notification);
            notification.Confidence = result.Confidence;
            notification.Text       = result.Text;
            notification.Semantics  = new RecognizedSemanticValue(null, result.Semantics);

            msg.Body = notification;
            SendNotification(subMgrPort, msg);
        }
Example No. 8
 private void HandleSpeechCompleted(object sender, RecognizeCompletedEventArgs eventArgs)
 {
     if (eventArgs.Result is null)
     {
         SpeechNotRecognized?.Invoke(this, EventArgs.Empty);
     }
     else
     {
         var answer = new AnswerSelectedEventArgs
         {
             Result         = true,
             FieldName      = currentAnswer.FieldName,
             SelectedAnswer = GetProperResult(eventArgs),
         };
         SpeechRecognized?.Invoke(this, answer);
     }
 }
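AnswerSelectedEventArgs is not defined on this page; the object initializer above implies something like the following (the type of SelectedAnswer depends on GetProperResult, string is assumed):

 // Hypothetical shape inferred from the object initializer above.
 public class AnswerSelectedEventArgs : EventArgs
 {
     public bool Result { get; set; }
     public string FieldName { get; set; }
     public string SelectedAnswer { get; set; }
 }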
Example No. 9
        public void Dispose()
        {
            if (Sensor != null)
            {
                Sensor.Dispose();
            }
            if (SpeechRecognizer != null)
            {
                SpeechRecognizer.Dispose();
            }
            SpeechRecognizer = null;
            Sensor           = null;

            skeletons = null;

            // A field-like event with no subscribers is null, so guard before
            // calling GetInvocationList.
            if (SpeechRecognized != null)
            {
                foreach (var item in SpeechRecognized.GetInvocationList())
                {
                    SpeechRecognized -= (EventHandler<VoiceCommandEventArgs>)item;
                }
            }
            if (GestureDetected != null)
            {
                foreach (var item in GestureDetected.GetInvocationList())
                {
                    GestureDetected -= (EventHandler<GestureDetectedEventArgs>)item;
                }
            }
        }
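Because Dispose runs inside the class that declares these field-like events, the unsubscription loops can be replaced by plain null assignments, which drop every subscriber at once:

            // Equivalent inside the declaring class:
            SpeechRecognized = null;
            GestureDetected  = null;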
Example No. 10
        // Method called from the recognizer when a recognition has occurred.
        // Only called for SpeechRecognition events, not SpeechRecognitionRejected.
        internal void OnRecognitionInternal(SpeechRecognizedEventArgs eventArgs)
        {
            Debug.Assert(eventArgs.Result.Grammar == this);

            SpeechRecognized?.Invoke(this, eventArgs);
        }
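This snippet appears to come from the Grammar class itself (note the Debug.Assert that the result's grammar is this instance). On the consuming side, the per-grammar event it raises can be used like this minimal System.Speech sketch:

        var engine  = new SpeechRecognitionEngine();
        var grammar = new Grammar(new GrammarBuilder(new Choices("hello", "goodbye")));

        // Fires only for results produced by this particular grammar.
        grammar.SpeechRecognized += (s, e) => Console.WriteLine(e.Result.Text);

        engine.LoadGrammar(grammar);
        engine.SetInputToDefaultAudioDevice();
        engine.RecognizeAsync(RecognizeMode.Multiple);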
Example No. 11
 private void ReturnEmptyResponse()
 {
     SpeechRecognized?.Invoke(this, new SpeechRecognizedEvent(recognizedText: ""));
 }
Example No. 12
        public async Task TranslateAsync(YouTubeVideo youTubeVideo, string fromLanguage, IEnumerable<string> toLanguages)
        {
            // Declare the necessary directories and files variables
            var outputPath       = Path.Combine("Output", Guid.NewGuid().ToString());
            var downloadFilePath = Path.Combine(outputPath, "input.mp4");

            // StringBuilders for data to be passed to event subscriber
            var tsb  = new StringBuilder();
            var osb  = new StringBuilder();
            var info = new StringBuilder();

            var config = SpeechTranslationConfig.FromSubscription(
                configuration["AzureSpeechTranslation:SubscriptionKey"], configuration["AzureSpeechTranslation:Region"]);

            config.SpeechRecognitionLanguage = fromLanguage;

            foreach (var language in toLanguages)
            {
                config.AddTargetLanguage(language);
            }

            var vidBytes = await youTubeVideo.GetBytesAsync();

            // Before saving the video, create the directory
            CreateOutputDirectory(outputPath);

            // Save the video
            await File.WriteAllBytesAsync(downloadFilePath, vidBytes);

            // Extract the audio from the video to work on it
            var wavAudioFile = await ExtractingWavAudioAsync(downloadFilePath);

            var stopTranslation = new TaskCompletionSource<int>();
            var lineCount       = 1;

            using (var audioInput = AudioConfig.FromWavFileInput(wavAudioFile))
            {
                using (var recognizer = new TranslationRecognizer(config, audioInput))
                {
                    // Subscribes to events.
                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.TranslatedSpeech)
                        {
                            foreach (var element in e.Result.Translations)
                            {
                                var fromTime = TimeSpan.FromTicks(e.Result.OffsetInTicks);
                                var toTime   = fromTime.Add(e.Result.Duration);

                                osb.AppendLine($"{lineCount}");
                                osb.AppendLine($"{fromTime.ToString(@"hh\:mm\:ss\.fff")} --> {toTime.ToString(@"hh\:mm\:ss\.fff")}");
                                osb.AppendLine(e.Result.Text);
                                osb.AppendLine();

                                tsb.AppendLine($"{lineCount}");
                                tsb.AppendLine($"{fromTime.ToString(@"hh\:mm\:ss\.fff")} --> {toTime.ToString(@"hh\:mm\:ss\.fff")}");
                                tsb.AppendLine(element.Value);
                                tsb.AppendLine();

                                var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, element.Key,
                                                                                               osb.ToString(), tsb.ToString());
                                osb.Clear();
                                tsb.Clear();

                                SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                            }

                            lineCount++;
                        }
                        else if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            info.AppendLine($"RECOGNIZED: Text={e.Result.Text}");
                            info.AppendLine($"    Speech not translated.");
                            var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                            info.Clear();

                            SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            info.AppendLine($"NOMATCH: Speech could not be recognized.");
                            var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                            info.Clear();

                            SpeechRecognized?.Invoke(this, speechServicesEventArgs);
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        info.AppendLine($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            info.AppendLine($"CANCELED: ErrorCode={e.ErrorCode}");
                            info.AppendLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            info.AppendLine($"CANCELED: Did you update the subscription info?");
                        }

                        var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                        info.Clear();

                        SpeechCanceled?.Invoke(this, speechServicesEventArgs);
                        stopTranslation.TrySetResult(0);
                    };

                    recognizer.SpeechStartDetected += (s, e) =>
                    {
                        info.AppendLine("Speech start detected event.");
                        var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                        info.Clear();

                        SpeechStartDetected?.Invoke(this, speechServicesEventArgs);
                    };

                    recognizer.SpeechEndDetected += (s, e) =>
                    {
                        info.AppendLine("Speech end detected event.");
                        var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                        info.Clear();

                        SpeechEndDetected?.Invoke(this, speechServicesEventArgs);
                    };

                    recognizer.SessionStarted += (s, e) =>
                    {
                        info.AppendLine("Start translation...");
                        info.AppendLine("Session started event.");
                        var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                        info.Clear();

                        SpeechSessionStarted?.Invoke(this, speechServicesEventArgs);
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        info.AppendLine("Session stopped event.");
                        info.AppendLine("Stop translation.");
                        var speechServicesEventArgs = SetSpeechServicesInformationArgs(fromLanguage, information: info.ToString());
                        info.Clear();

                        SpeechSessionStopped?.Invoke(this, speechServicesEventArgs);

                        stopTranslation.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopTranslation.Task });

                    // Stops translation.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }

            // Housekeeping
            Directory.Delete(outputPath, true);
        }
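One caveat: Task.WaitAny blocks a thread for the duration of the recognition even though the method is async. Since the method can await, the completion source could instead be awaited directly, which waits for the same signal without tying up a thread:

            // Alternative to the blocking Task.WaitAny call above:
            await stopTranslation.Task.ConfigureAwait(false);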
Example No. 13
 protected virtual void FireSpeechRecognized(string sentence)
 {
     SpeechRecognized?.Invoke(this, new SpeechRecognitionEventArgs(sentence));
 }
Example No. 14
 protected void OnSpeechRecognized(SpeechRecognizedEvent e)
 {
     SpeechRecognized?.Invoke(this, e);
 }
Example No. 15
 public IEnumerator<ITask> SpeechRecognizedHandler(SpeechRecognized recognized)
 {
     PostActionNotSupported(recognized);
     yield break;
 }
Example No. 16
 private void SpeechRecognizedProxy(object sender, SpeechRecognizedEventArgs e)
 {
     SpeechRecognized?.Invoke(this, e);
 }
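The proxy re-raises the inner recognizer's event with this as the sender, so subscribers never see the underlying engine. Wiring it up might look like this (the field name is an assumption):

 // Hypothetical wiring; the recognizer field name is assumed.
 _recognizer.SpeechRecognized += SpeechRecognizedProxy;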