public void StartRecordingSession()
        {
            // Start recording
            AudioEngine.InputNode.InstallTapOnBus(
                bus: 0,
                bufferSize: 1024,
                format: AudioEngine.InputNode.GetBusOutputFormat(0),
                tapBlock: (buffer, when) => LiveSpeechRequest?.Append(buffer));
            AudioEngine.Prepare();
            NSError error;

            AudioEngine.StartAndReturnError(out error);

            // Did recording start?
            if (error != null)
            {
                // Handle error and return
                return;
            }

            try
            {
                CheckAndStartRecognition();
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
        public void StartSpeechRecognition()
        {
            listeningLabel.Text = "Listening";
            recognitionRequest  = new SFSpeechAudioBufferRecognitionRequest();

            audioEngine.Prepare();
            NSError error;

            audioEngine.StartAndReturnError(out error);
            if (error != null)
            {
                Console.WriteLine(error.ToString());
                return;
            }
            recognitionTask = speechRecognizer.GetRecognitionTask(recognitionRequest, (SFSpeechRecognitionResult result, NSError err) =>
            {
                if (err != null)
                {
                    Console.WriteLine(err.ToString());
                }
                else
                {
                    if (result.Final == true)
                    {
                        var results = result.BestTranscription.FormattedString;
                        if (results.ToLower() == "left")
                        {
                            CameraCommand(results);
                        }
                        listeningLabel.Text = result.BestTranscription.FormattedString;
                    }
                }
            });
        }
Example #3
    private void initPlayer()
    {
        audioEngine = new AVAudioEngine();
        NSError error = new NSError();

        if (!AVAudioSession.SharedInstance().SetPreferredSampleRate(sampleRate, out error))
        {
            throw new Exception("Error setting preffered sample rate for player: " + error);
        }
        AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.PlayAndRecord, AVAudioSessionCategoryOptions.InterruptSpokenAudioAndMixWithOthers);
        AVAudioSession.SharedInstance().SetActive(true);

        audioPlayer = new AVAudioPlayerNode();
        setVolume(AVAudioSession.SharedInstance().OutputVolume);
        inputAudioFormat = new AVAudioFormat(AVAudioCommonFormat.PCMFloat32, sampleRate, (uint)channels, false);

        audioEngine.AttachNode(audioPlayer);
        audioEngine.Connect(audioPlayer, audioEngine.MainMixerNode, inputAudioFormat);
        audioEngine.Prepare();
        if (!audioEngine.StartAndReturnError(out error))
        {
            throw new Exception("Error starting playback audio engine: " + error);
        }
        audioPlayer.Play();
    }
Example #4
        public IOSAudioDriver( )
        {
            _engine = new AVAudioEngine();
            _mixer  = new AVAudioMixerNode();

            _engine.AttachNode(_mixer);
            _engine.Connect(_mixer, _engine.MainMixerNode, _engine.MainMixerNode.GetBusOutputFormat(0));

            _players = new Player[MaxPlayers];
            for (int i = 0; i < MaxPlayers; i++)
            {
                var player = new Player();
                player.Callback = player.OnPlayedBack;
                player.Done     = true;
                player.Node     = new AVAudioPlayerNode();
                player.PlayId   = 1;
                player.Id       = (uint)i;
                _players[i]     = player;

                _engine.AttachNode(player.Node);
                //_engine.Connect(player.Node, _engine.MainMixerNode, _format);
            }

            _engine.Prepare();
            _engine.StartAndReturnError(out var error);
        }
Example #5
        public void StartRecording(Action <string> callback)
        {
            var audioSession = AVAudioSession.SharedInstance();

            var error = audioSession.SetCategory(AVAudioSessionCategory.Record);

            audioSession.SetMode(AVAudioSession.ModeMeasurement, out error);
            error = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            _speechRecognitionRequest = new SFSpeechAudioBufferRecognitionRequest();

            var inputNode       = _audioEngine.InputNode;
            var recordingFormat = inputNode.GetBusOutputFormat(0);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) =>
            {
                _speechRecognitionRequest?.Append(buffer);
            });

            _audioEngine.Prepare();
            _audioEngine.StartAndReturnError(out error);

            _speechRecognitionTask = _speechRecognizer.GetRecognitionTask(_speechRecognitionRequest, (result, recognitionError) =>
            {
                if (result != null)
                {
                    var transcribedText = result.BestTranscription.FormattedString;
                    callback?.Invoke(transcribedText);
                }
            });
        }
        // ============== Speech Recognition Functions ============
        public void StartRecording()
        {
            Querybox.Placeholder = "Recording";
            recognitionRequest   = new SFSpeechAudioBufferRecognitionRequest();

            audioEngine.Prepare();
            NSError error;

            audioEngine.StartAndReturnError(out error);
            if (error != null)
            {
                Console.WriteLine(error.ToString());
                return;
            }
            recognitionTask = speechRecognizer.GetRecognitionTask(recognitionRequest, (SFSpeechRecognitionResult result, NSError err) =>
            {
                if (err != null)
                {
                    Console.WriteLine(err.ToString());
                }
                else
                {
                    if (result.Final == true)
                    {
                        YouAskedLabel.Text = "You asked: " + result.BestTranscription.FormattedString;
                        final_query        = result.BestTranscription.FormattedString;
                    }
                }
            });
        }
        private void StartRecordingAndRecognizing()
        {
            _timer = NSTimer.CreateRepeatingScheduledTimer(5, delegate
            {
                DidFinishTalk();
            });

            _recognitionTask?.Cancel();
            _recognitionTask = null;

            var     audioSession = AVAudioSession.SharedInstance();
            NSError nsError;

            nsError = audioSession.SetCategory(AVAudioSessionCategory.PlayAndRecord);
            audioSession.SetMode(AVAudioSession.ModeDefault, out nsError);
            nsError = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);
            audioSession.OverrideOutputAudioPort(AVAudioSessionPortOverride.Speaker, out nsError);
            _recognitionRequest = new SFSpeechAudioBufferRecognitionRequest();

            var inputNode = _audioEngine.InputNode;

            if (inputNode == null)
            {
                throw new Exception();
            }

            var recordingFormat = inputNode.GetBusOutputFormat(0);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) =>
            {
                _recognitionRequest?.Append(buffer);
            });

            _audioEngine.Prepare();
            _audioEngine.StartAndReturnError(out nsError);

            _recognitionTask = _speechRecognizer.GetRecognitionTask(_recognitionRequest, (result, error) =>
            {
                var isFinal = false;
                if (result != null)
                {
                    _recognizedString = result.BestTranscription.FormattedString;
                    //MessagingCenter.Send<ISpeechToText, string>(this, "STT", _recognizedString);
                    _timer.Invalidate();
                    _timer = null;
                    _timer = NSTimer.CreateRepeatingScheduledTimer(2, delegate
                    {
                        DidFinishTalk();
                    });
                }
                if (error != null || isFinal)
                {
                    MessagingCenter.Send <ISpeechToText, string>(this, "STT", _recognizedString);

                    MessagingCenter.Send <ISpeechToText>(this, "Final");
                    StopRecordingAndRecognition(audioSession);
                }
            });
        }
        void StartRecording()
        {
            // Cancel the previous task if it's running.
            recognitionTask?.Cancel();
            recognitionTask = null;

            var     audioSession = AVAudioSession.SharedInstance();
            NSError err;

            err = audioSession.SetCategory(AVAudioSessionCategory.Record);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out err);
            err = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            // Configure request so that results are returned before audio recording is finished
            recognitionRequest = new SFSpeechAudioBufferRecognitionRequest {
                ShouldReportPartialResults = true
            };

            var inputNode = audioEngine.InputNode;

            if (inputNode == null)
            {
                throw new InvalidProgramException("Audio engine has no input node");
            }

            // A recognition task represents a speech recognition session.
            // We keep a reference to the task so that it can be cancelled.
            recognitionTask = speechRecognizer.GetRecognitionTask(recognitionRequest, (result, error) => {
                var isFinal = false;
                if (result != null)
                {
                    textView.Text = result.BestTranscription.FormattedString;
                    isFinal       = result.Final;
                }

                if (error != null || isFinal)
                {
                    audioEngine.Stop();
                    inputNode.RemoveTapOnBus(0);
                    recognitionRequest   = null;
                    recognitionTask      = null;
                    recordButton.Enabled = true;
                    recordButton.SetTitle("Start Recording", UIControlState.Normal);
                }
            });

            var recordingFormat = inputNode.GetBusOutputFormat(0);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) => {
                recognitionRequest?.Append(buffer);
            });

            audioEngine.Prepare();
            audioEngine.StartAndReturnError(out err);
            textView.Text  = "(Throw Darts!)";
            textView2.Text = "";
        }
Example #9
        public void StartSpeechRecognition()
        {
            StopSpeechRecognizer(false);
            StopAudio();
            _recognizerText = "";
            AVAudioSession.SharedInstance().SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            var request = new SFSpeechAudioBufferRecognitionRequest();

            request.ShouldReportPartialResults = true;

            var inputNode = engine.InputNode;

            _task = _recognizer.GetRecognitionTask(request, (result, error) =>
            {
                if (result != null)
                {
                    _recognizerText = result.BestTranscription.FormattedString;
                    if (_recognizerTimer == null)
                    {
                        _recognizerTimer = new Timer(RecognizeTimerElapsed, this, TimeSpan.FromSeconds(1), Timeout.InfiniteTimeSpan);
                    }
                    else
                    {
                        _recognizerTimer.Change(TimeSpan.FromSeconds(1), Timeout.InfiniteTimeSpan);
                    }
                    if (result.Final)
                    {
                        SpeechRecognitionResult?.Invoke(this, new SpeechRecognitionEventArgs(_recognizerText));
                    }
                    else
                    {
                        SpeechRecognitionPartialResult?.Invoke(this, new SpeechRecognitionEventArgs(_recognizerText));
                    }
                }
                if (error != null || result?.Final == true)
                {
                    StopSpeechRecognizer(false);
                }
            });

            var recordingFormat = inputNode.GetBusOutputFormat(0);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) =>
            {
                request.Append(buffer);
            });
            engine.Prepare();

            NSError engineError = null;

            if (engine.StartAndReturnError(out engineError))
            {
            }
        }
Example #10
        public void StartRecording(out int warningStatus)
        {
            if (!IsAuthorized())
            {
                warningStatus = (int)Warning.AccessDenied;
                return;
            }

            var node            = AudioEngine.InputNode;
            var recordingFormat = node.GetBusOutputFormat(0);

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) => {
                LiveSpeechRequest.Append(buffer);
            });

            AudioEngine.Prepare();
            NSError error;

            AudioEngine.StartAndReturnError(out error);

            if (error != null)
            {
                Console.WriteLine(strings.speechStartRecordProblem);
                warningStatus = (int)Warning.RecordProblem;
                return;
            }

            // Play start sound
            if (player.IsPlaying)
            {
                player.Stop();
            }
            player.Load("Sounds/siri_start.mp3");
            player.Play();

            RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest, (SFSpeechRecognitionResult result, NSError err) =>
            {
                if (err != null)
                {
                    Console.WriteLine(strings.speechRecordError);
                    viewController.ProcessSpeech(null);
                }
                else
                {
                    if (result.Final)
                    {
                        viewController.ProcessSpeech(result.BestTranscription.FormattedString);
                    }
                }
            });

            warningStatus = -1;
        }
Example #11
        public void Start()
        {
            recognitionTask?.Cancel();
            recognitionTask = null;
            recognizedText  = "";

            var     audioSession = AVAudioSession.SharedInstance();
            NSError err;

            err = audioSession.SetCategory(AVAudioSessionCategory.Record);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out err);
            err = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);


            liveRequest = new SFSpeechAudioBufferRecognitionRequest {
                ShouldReportPartialResults = true
            };

            var node            = audioEngine.InputNode;
            var recordingFormat = node.GetBusOutputFormat(0);

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) => {
                // Append buffer to recognition request
                liveRequest.Append(buffer);
            });

            recognitionTask = speechRecognizer.GetRecognitionTask(liveRequest, (SFSpeechRecognitionResult result, NSError error) => {
                var isFinal = false;
                if (result != null)
                {
                    recognizedText = result.BestTranscription.FormattedString;
                    OnSpeechRecognized(new SpeechRecognizedEvent(recognizedText));
                    isFinal = result.Final;
                }

                if (error != null || isFinal)
                {
                    audioSession.SetCategory(AVAudioSessionCategory.Playback);
                    audioSession.SetMode(AVAudioSession.ModeDefault, out err);
                    node.RemoveTapOnBus(0);
                    audioEngine.Dispose();
                    liveRequest.Dispose();
                    recognitionTask.Dispose();
                    liveRequest     = null;
                    recognitionTask = null;
                }
            });

            audioEngine.Prepare();
            audioEngine.StartAndReturnError(out err);
        }
Example #12
        public void StartRecording()
        {
            // Setup audio session
            var node = AudioEngine.InputNode;

            node.RemoveTapOnBus(0);
            var recordingFormat = node.GetBusOutputFormat(0);

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) => {
                // Append buffer to recognition request
                LiveSpeechRequest.Append(buffer);
            });

            // Start recording
            AudioEngine.Prepare();
            NSError error;

            AudioEngine.StartAndReturnError(out error);

            // Did recording start?
            if (error != null)
            {
                // Handle error and return
                input = "Error: Recording init error!";
            }

            // Start recognition
            RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest, (SFSpeechRecognitionResult result, NSError err) => {
                // Was there an error?
                if (err != null)
                {
                    // Handle error
                    input = "Error: Recognition task";
                }
                else
                {
                    // Is this the final translation?
                    if (result.Final)
                    {
                        input = result.BestTranscription.FormattedString;

                        new UIAlertView("DONE", "You said \"{" + input + "}\".", null, "OK", null).Show();

                        /*var donecontroller = UIAlertController.Create("DONE","You said \"{" + input + "}\".", UIAlertControllerStyle.Alert);
                         * donecontroller.AddAction(UIAlertAction.Create("Ok", UIAlertActionStyle.Default, null));
                         * PresentViewController(donecontroller, true, null);*/
                    }
                }
            });
        }
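Example #12 installs the tap and starts the engine but never tears anything down. A hedged sketch of a matching stop method, using only calls that already appear elsewhere in these examples; the method name StopRecording and the field names are assumed from the example above.

        public void StopRecording()
        {
            // Stop capturing audio and signal the recognizer that no more buffers are coming.
            AudioEngine.Stop();
            AudioEngine.InputNode.RemoveTapOnBus(0);
            LiveSpeechRequest.EndAudio();

            // Cancel the recognition task if a final result is no longer needed.
            RecognitionTask?.Cancel();
            RecognitionTask = null;
        }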
Example #13
        private void StartSpeechRecognizer()
        {
            if (!recording)
            {
                speechRecognizer = new SFSpeechRecognizer();
                node             = audioEngine.InputNode;
                var recordingFormat = node.GetBusOutputFormat(0);
                liveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();

                node.InstallTapOnBus(0, 1024, recordingFormat,
                                     (AVAudioPcmBuffer buffer, AVAudioTime when) =>
                {
                    liveSpeechRequest.Append(buffer);
                });
                recording = true;

                audioEngine.Prepare();
                audioEngine.StartAndReturnError(out NSError error);
                if (error != null)
                {
                    return;
                }

                Timer timer = new Timer(2000);
                timer.Start();
                timer.Elapsed  += EndRecognition;
                RecognitionTask = speechRecognizer.GetRecognitionTask(liveSpeechRequest,
                                                                      (SFSpeechRecognitionResult result, NSError err) =>
                {
                    if (err != null)
                    {
                        Recorded?.Invoke("");
                        return;
                    }
                    else
                    {
                        lastSpokenString = result.BestTranscription.FormattedString;
                        timer.Stop();
                        timer.Interval = 2000;
                        timer.Start();
                    }
                });
            }
            else
            {
                Recorded?.Invoke("");
            }
        }
        public async Task <string> RecognizeAsync()
        {
            var node            = audioEngine.InputNode;
            var recordingFormat = node.GetBusOutputFormat(0);

            var liveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) =>
            {
                // Append buffer to recognition request
                liveSpeechRequest.Append(buffer);
            });

            // Start recording
            audioEngine.Prepare();
            NSError error;

            audioEngine.StartAndReturnError(out error);

            // Did recording start?
            if (error != null)
            {
                return(null);
            }

            var taskCompletionSource = new TaskCompletionSource <string>();
            // Start recognition
            var recognitionTask = speechRecognizer.GetRecognitionTask(liveSpeechRequest, (SFSpeechRecognitionResult recognitionResult, NSError err) =>
            {
                // Was there an error?
                if (err != null)
                {
                    taskCompletionSource?.SetResult(null);
                }
                else
                {
                    taskCompletionSource?.SetResult(recognitionResult.BestTranscription.FormattedString);
                }
            });

            var result = await taskCompletionSource.Task;

            taskCompletionSource = null;
            audioEngine.Stop();
            liveSpeechRequest.EndAudio();
            node.RemoveTapOnBus(0);
            return(result);
        }
Example #15
        public void TranslateSpeachToText(FoodListFromSpeachViewModel model)
        {
            // Setup audio session
            var node            = AudioEngine.InputNode;
            var recordingFormat = node.GetBusOutputFormat(0);

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) => {
                // Append buffer to recognition request
                LiveSpeechRequest.Append(buffer);
            });

            // Start recording
            AudioEngine.Prepare();
            NSError error;

            AudioEngine.StartAndReturnError(out error);

            // Did recording start?
            if (error != null)
            {
                // Handle error and return
                model.FoodItemToAdd = "Error";
            }

            // Start recognition
            RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest,
                                                                  (SFSpeechRecognitionResult result, NSError err) =>
            {
                // Was there an error?
                if (err != null)
                {
                    // Handle error
                }
                else
                {
                    // Is this the final translation?
                    if (result.Final)
                    {
                        model.FoodItemToAdd = result.BestTranscription.FormattedString;
                    }
                }
            });
        }
Example #16
    private void initRecorder()
    {
        audioRecorder = new AVAudioEngine();
        NSError error = new NSError();

        if (!AVAudioSession.SharedInstance().SetPreferredSampleRate(sampleRate, out error))
        {
            throw new Exception("Error setting preffered sample rate for recorder: " + error);
        }
        AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.PlayAndRecord, AVAudioSessionCategoryOptions.InterruptSpokenAudioAndMixWithOthers);
        AVAudioSession.SharedInstance().SetActive(true);
        AVAudioFormat recording_format = new AVAudioFormat(AVAudioCommonFormat.PCMInt16, sampleRate, (uint)channels, false);
        uint          buffer_size      = (uint)CodecTools.getPcmFrameByteSize(sampleRate, bitRate, channels) * 1000;

        audioRecorder.InputNode.InstallTapOnBus(0, buffer_size, recording_format, onDataAvailable);
        audioRecorder.Prepare();
        if (!audioRecorder.StartAndReturnError(out error))
        {
            throw new Exception("Error starting recording audio engine: " + error);
        }
    }
Example #17
        void StartListening()
        {
            /* Setup audio session. */
            var node            = AudioEngine.InputNode;
            var recordingFormat = node.GetBusOutputFormat(0);

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) => {
                /* Append buffer to recognition request. */
                LiveSpeechRequest.Append(buffer);
            });

            /* Start recording */
            AudioEngine.Prepare();
            NSError error;

            AudioEngine.StartAndReturnError(out error);

            /* Did recording start? */
            if (error != null)
            {
                /* Do nothing. */
                return;
            }

            /* Start recognition. */
            RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest, (SFSpeechRecognitionResult result, NSError err) => {
                /* Was there an error? */
                if (err != null)
                {
                    /* Do nothing. */
                }
                else
                {
                    // Is this the final translation?
                    if (result.Final)
                    {
                        UserText.Text = result.BestTranscription.FormattedString;
                    }
                }
            });
        }
Example #18
        ///<Summary>
        /// Load a wave or mp3 audio file from the iOS app bundle
        ///</Summary>
        public bool Load(string fileName)
        {
            DeletePlayer();

            NSError error = new NSError();

            if (!String.IsNullOrWhiteSpace(fileName))
            {
                string directory = Path.GetDirectoryName(fileName);
                string filename  = Path.GetFileNameWithoutExtension(fileName);
                string extension = Path.GetExtension(fileName).Substring(1);
                NSUrl  url       = NSBundle.MainBundle.GetUrlForResource(filename, extension, directory);
                audioFile = new AVAudioFile(url, out error);
            }

            if (audioFile != null)
            {
                componentDescription = new AudioComponentDescription();
                componentDescription.ComponentType    = AudioComponentType.FormatConverter;
                componentDescription.ComponentSubType = (int)AudioUnitSubType.Varispeed;

                engine = new AVAudioEngine();
                player = new AVAudioPlayerNode();
                pitch  = new AVAudioUnitTimePitch(componentDescription);


                engine.AttachNode(player);
                engine.AttachNode(pitch);

                engine.Connect(player, pitch, audioFile.ProcessingFormat);
                engine.Connect(pitch, engine.MainMixerNode, audioFile.ProcessingFormat);

                engine.Prepare();
                NSError startError = new NSError();
                engine.StartAndReturnError(out startError);
            }

            return(true);
        }
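A possible follow-up to the Load method above, assuming the same player and audioFile fields: schedule the loaded file on the player node and start playback. This is a sketch, not part of the original example.

        public void Play()
        {
            if (audioFile == null)
            {
                return;
            }

            // Queue the whole file on the player node; the engine was already
            // prepared and started at the end of Load.
            player.ScheduleFile(audioFile, null, () => { });
            player.Play();
        }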
        private void StartAnalysing()
        {
            Model = GetModel();

            AudioEngine   = new AVAudioEngine();
            AnalysisQueue = new DispatchQueue("com.r2.SoundAnalysis", false);

            var inputFormat = AudioEngine.InputNode.GetBusInputFormat(0);
            var request     = new SNClassifySoundRequest(Model, out var soundRequestError);

            Analyser = new SNAudioStreamAnalyzer(inputFormat);
            Analyser.AddRequest(request, this, out var addRequestError);

            AudioEngine.InputNode.InstallTapOnBus(
                bus: 0,
                bufferSize: 8192,
                format: inputFormat,
                tapBlock: (buffer, when) =>
                AnalysisQueue.DispatchAsync(() =>
                                            Analyser.Analyze(buffer, when.SampleTime)));

            AudioEngine.Prepare();
            AudioEngine.StartAndReturnError(out var initEngineError);
        }
Example #20
        // Main speech recognition processing
        private void startRecognitionSession()
        {
            // Configure the recognition parameters and start listening. These parameter values are standard boilerplate.
            audioEngine.InputNode.InstallTapOnBus(
                bus: 0,
                bufferSize: 1024,
                format: audioEngine.InputNode.GetBusOutputFormat(0),
                tapBlock: (buffer, when) => { recognitionRequest?.Append(buffer); }
                );
            audioEngine?.Prepare();
            NSError error = null;

            audioEngine?.StartAndReturnError(out error);
            if (error != null)
            {
                Console.WriteLine(error);
                return;
            }

            try
            {
                if (recognitionTask?.State == SFSpeechRecognitionTaskState.Running)
                {
                    // If recognition is already running when a new session is requested, cancel the in-flight task.
                    recognitionTask.Cancel();
                }

                recognitionTask = speechRecognizer.GetRecognitionTask(recognitionRequest,
                                                                      (SFSpeechRecognitionResult result, NSError err) =>
                {
                    if (result == null)
                    {
                        // Reached when the device does not support speech recognition (e.g. the iOS Simulator).
                        StopRecognizing();
                        return;
                    }

                    if (err != null)
                    {
                        Console.WriteLine(err);
                        StopRecognizing();
                        return;
                    }

                    if ((result.BestTranscription != null) && (result.BestTranscription.FormattedString != null))
                    {
                        // Update the recognized text whenever speech was successfully recognized.
                        RecognizedText = result.BestTranscription.FormattedString;
                    }

                    if (result.Final)
                    {
                        // Stop recognizing once no speech has been detected for a while.
                        StopRecognizing();
                        return;
                    }
                }
                                                                      );
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
Example #21
            static void StartRecording()
            {
                lock (Lock)
                {
                    if (SpeechRecognizer == null)
                    {
                        SpeechRecognizer  = new SFSpeechRecognizer();
                        LiveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();
                    }

                    var audioSession = AVAudioSession.SharedInstance();

                    audioSession.SetCategory(AVAudioSessionCategory.PlayAndRecord);
                    audioSession.SetMode(AVAudioSession.ModeDefault, out NSError error);
                    audioSession.OverrideOutputAudioPort(AVAudioSessionPortOverride.Speaker, out NSError speakerError);
                    audioSession.SetActive(true);

                    if (LogErrorAndStop(error) || LogErrorAndStop(speakerError))
                    {
                        return;
                    }

                    AudioEngine = new AVAudioEngine();
                    var node = AudioEngine.InputNode;

                    LiveSpeechRequest.ShouldReportPartialResults = true;

                    RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest, (SFSpeechRecognitionResult result, NSError err) =>
                    {
                        if (LogErrorAndStop(err))
                        {
                            return;
                        }

                        var currentText = result.BestTranscription.FormattedString;

                        if (currentText.HasValue())
                        {
                            Listeners?.Invoke(currentText, result.Final);
                        }

                        if (IsContinuous)
                        {
                            Timer = new System.Timers.Timer(20000)
                            {
                                Enabled = true
                            };
                            Timer.Elapsed += (s, ev) =>
                            {
                                StopInstances();
                                StartRecording();
                            };

                            Timer.Start();
                        }
                    });

                    var recordingFormat = node.GetBusOutputFormat(0);
                    node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) =>
                    {
                        LiveSpeechRequest.Append(buffer);
                    });

                    if (AudioEngine == null)
                    {
                        Stop();
                        return;
                    }

                    AudioEngine?.Prepare();
                    AudioEngine?.StartAndReturnError(out error);

                    if (LogErrorAndStop(error))
                    {
                        return;
                    }
                }
            }
Example #22
        public IAsyncOperation <SpeechRecognitionResult> RecognizeAsync()
        {
            _initialSilenceTimeout          = new Timer();
            _initialSilenceTimeout.Interval = Math.Max(Timeouts.InitialSilenceTimeout.TotalMilliseconds, 5000);
            _initialSilenceTimeout.Elapsed += OnTimeout;

            _endSilenceTimeout          = new Timer();
            _endSilenceTimeout.Interval = Math.Max(Timeouts.EndSilenceTimeout.TotalMilliseconds, 150);
            _endSilenceTimeout.Elapsed += OnTimeout;

            // Cancel the previous task if it's running.
            _recognitionTask?.Cancel();
            _recognitionTask = null;

            var     audioSession = AVAudioSession.SharedInstance();
            NSError err;

            err = audioSession.SetCategory(AVAudioSessionCategory.Record);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out err);
            err = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            // Configure request to get partial results
            _recognitionRequest = new SFSpeechAudioBufferRecognitionRequest
            {
                ShouldReportPartialResults = true,
                TaskHint = SFSpeechRecognitionTaskHint.Dictation
            };

            var inputNode = _audioEngine.InputNode;

            if (inputNode == null)
            {
                throw new InvalidProgramException("Audio engine has no input node");
            }

            var tcs = new TaskCompletionSource <SpeechRecognitionResult>();

            // Keep a reference to the task so that it can be cancelled.
            _recognitionTask = _speechRecognizer.GetRecognitionTask(_recognitionRequest, (result, error) =>
            {
                var isFinal   = false;
                var bestMatch = default(SpeechRecognitionResult);

                if (result != null)
                {
                    _initialSilenceTimeout.Stop();
                    _endSilenceTimeout.Stop();
                    _endSilenceTimeout.Start();

                    bestMatch = new SpeechRecognitionResult()
                    {
                        Text       = result.BestTranscription.FormattedString,
                        Alternates = result.Transcriptions?
                                     .Select(t => new SpeechRecognitionResult()
                        {
                            Text = t.FormattedString
                        })
                                     .ToList()
                    };
                    isFinal = result.Final;

                    OnHypothesisGenerated(bestMatch.Text);
                }

                if (error != null || isFinal)
                {
                    _initialSilenceTimeout.Stop();
                    _endSilenceTimeout.Stop();

                    _audioEngine.Stop();

                    inputNode.RemoveTapOnBus(0);
                    inputNode.Reset();

                    audioSession = AVAudioSession.SharedInstance();
                    err          = audioSession.SetCategory(AVAudioSessionCategory.Playback);
                    audioSession.SetMode(AVAudioSession.ModeDefault, out err);
                    err = audioSession.SetActive(false, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

                    _recognitionTask = null;

                    OnStateChanged(SpeechRecognizerState.Idle);

                    if (bestMatch != null)
                    {
                        tcs.TrySetResult(bestMatch);
                    }
                    else
                    {
                        tcs.TrySetException(new Exception($"Error during speech recognition: {error.LocalizedDescription}"));
                    }
                }
            });

            var recordingFormat = new AVAudioFormat(sampleRate: 44100, channels: 1);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) => {
                _recognitionRequest?.Append(buffer);
            });

            _initialSilenceTimeout.Start();

            _audioEngine.Prepare();
            _audioEngine.StartAndReturnError(out err);

            OnStateChanged(SpeechRecognizerState.Capturing);

            return(tcs.Task.AsAsyncOperation());
        }
        public async Task <Tuple <string, Tuple <bool, CMPSpeechError> > > StartRecordingAsync()
        {
            if (IsRecording() == true)
            {
                return(new Tuple <string, Tuple <bool, CMPSpeechError> > (string.Empty, null));
            }

            var authorizationResult = await CheckAuthorizationAsync();

            if (authorizationResult == null)
            {
                return(new Tuple <string, Tuple <bool, CMPSpeechError> >(string.Empty, null));
            }

            if (authorizationResult.Item1 == false)
            {
                return(new Tuple <string, Tuple <bool, CMPSpeechError> >(string.Empty, authorizationResult));
            }

            CMPSpeechError genericError = null;
            var            inputNode    = _speechAudioEngine.InputNode;

            if (inputNode == null)
            {
                var audioEngineError = new NSError(new NSString(string.Empty), nint.Parse(SpeechToTextErrorEnum.
                                                                                          eNoInputNode.ToString()));
                genericError = PrepareAudioEngineError(audioEngineError);
                ResetSpeechToText();
                return(new Tuple <string, Tuple <bool, CMPSpeechError> >(string.Empty, new Tuple <bool, CMPSpeechError>
                                                                             (false, genericError)));
            }

            Tuple <string, Tuple <bool, CMPSpeechError> > recognitionResult = null;
            await Task.Run(() =>
            {
                try
                {
                    _speechRecognitionTask = _speechRecognizer.GetRecognitionTask(_speechRecognitionRequest,
                                                                                  (SFSpeechRecognitionResult result,
                                                                                   NSError speechError) =>
                    {
                        if (speechError != null)
                        {
                            _speechAudioEngine.Stop();

                            genericError      = new CMPSpeechError(speechError);
                            recognitionResult = new Tuple <string, Tuple <bool, CMPSpeechError> >(string.Empty,
                                                                                                  new Tuple <bool, CMPSpeechError>(false,
                                                                                                                                   genericError));
                            ResetSpeechToText();
                            _speechSemaphore.Release();
                            return;
                        }

                        if (result.Final == true)
                        {
                            _speechAudioEngine.Stop();
                            inputNode.RemoveTapOnBus(0);

                            recognitionResult = new Tuple <string, Tuple <bool, CMPSpeechError> >(result.BestTranscription.
                                                                                                  FormattedString,
                                                                                                  new Tuple <bool,
                                                                                                             CMPSpeechError>(true,
                                                                                                                             null));

                            ResetSpeechToText();
                            _speechSemaphore.Release();
                            return;
                        }
                    });

                    var audioFormat = inputNode.GetBusOutputFormat(0);
                    inputNode.InstallTapOnBus(0, 2048, audioFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) =>
                    {
                        var state = _speechRecognitionTask.State;
                        _speechRecognitionRequest.Append(buffer);
                    });

                    _speechAudioEngine.Prepare();

                    NSError audioEngineError = null;
                    bool couldStart          = _speechAudioEngine.StartAndReturnError(out audioEngineError);

                    if (couldStart == false)
                    {
                        genericError      = PrepareAudioEngineError(audioEngineError);
                        recognitionResult = new Tuple <string, Tuple <bool, CMPSpeechError> >(string.Empty,
                                                                                              new Tuple <bool, CMPSpeechError>(false,
                                                                                                                               genericError));
                        ResetSpeechToText();
                        _speechSemaphore.Release();
                        return;
                    }
                }
                catch (Exception exception)
                {
                    System.Diagnostics.Debug.WriteLine(exception.Message);
                    ResetSpeechToText();
                    _speechSemaphore.Release();
                }
            });

            await _speechSemaphore.WaitAsync();

            return(recognitionResult);
        }
        private void StartRecordingAndRecognizing()
        {
            try
            {
                _timer = NSTimer.CreateRepeatingScheduledTimer(5, delegate
                {
                    DidFinishTalk();
                });

                // Cancel the previous task if it's running.
                _recognitionTask?.Cancel();
                _recognitionTask = null;

                var audioSession = AVAudioSession.SharedInstance();
                var nsError      = audioSession.SetCategory(AVAudioSessionCategory.PlayAndRecord);
                audioSession.SetMode(AVAudioSession.ModeDefault, out nsError);
                nsError = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);
                audioSession.OverrideOutputAudioPort(AVAudioSessionPortOverride.Speaker, out nsError);

                // Configure request so that results are returned before audio recording is finished
                _recognitionRequest = new SFSpeechAudioBufferRecognitionRequest();

                var inputNode = _audioEngine.InputNode;
                if (inputNode == null)
                {
                    throw new InvalidProgramException("Audio engine has no input node");
                }

                var recordingFormat = inputNode.GetBusOutputFormat(0);
                inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) =>
                {
                    _recognitionRequest?.Append(buffer);
                });

                _audioEngine.Prepare();
                _audioEngine.StartAndReturnError(out nsError);

                // A recognition task represents a speech recognition session.
                // We keep a reference to the task so that it can be cancelled.
                _recognitionTask = _speechRecognizer.GetRecognitionTask(_recognitionRequest, (result, error) =>
                {
                    if (result != null)
                    {
                        var eventArg = new TextReceivedEventArg
                        {
                            Text = result.BestTranscription.FormattedString
                        };

                        OnTextReceived(eventArg);

                        _timer.Invalidate();
                        _timer = null;
                        _timer = NSTimer.CreateRepeatingScheduledTimer(2, delegate
                        {
                            DidFinishTalk();
                        });
                    }

                    if (error == null)
                    {
                        return;
                    }

                    OnStoppedListening();
                    StopRecordingAndRecognition();
                });
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
Example #25
        public void StartRecording(string voice, bool longTimeout = false)
        {
            if (!SpeechEnabled)
            {
                return;
            }
            // Setup audio session
            LastResult  = "";
            AudioEngine = new AVAudioEngine();
            NSLocale voiceLocale = NSLocale.FromLocaleIdentifier(voice);

            SpeechRecognizer  = new SFSpeechRecognizer(voiceLocale);
            LiveSpeechRequest = new SFSpeechAudioBufferRecognitionRequest();

            NSError error;
            var     audioSession = AVAudioSession.SharedInstance();

            audioSession.SetCategory(AVAudioSessionCategory.Record);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out error);
            if (error != null)
            {
                OnSpeechError?.Invoke("Audio session error: " + error.ToString());
                return;
            }
            audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            LiveSpeechRequest.ShouldReportPartialResults = true;

            var node = AudioEngine.InputNode;

            if (node == null)
            {
                OnSpeechError?.Invoke("Couldn't initialize Speech Input");
                return;
            }

            RecognitionTask = SpeechRecognizer.GetRecognitionTask(LiveSpeechRequest, (SFSpeechRecognitionResult result, NSError err) => {
                if (IsCancelled)
                {
                    node.RemoveTapOnBus(0);
                    return;
                }
                if (err != null)
                {
                    OnSpeechError?.Invoke(err.ToString());
                }
                else if (result != null)
                {
                    LastResult = result.BestTranscription.FormattedString;
                    Console.WriteLine("You said: \"{0}\". Final: {1}",
                                      LastResult, result.Final);
                    m_lastSpeech = DateTime.Now;
                    if (result.Final)// || !IsRecording) {
                    {
                        OnSpeechOK?.Invoke(LastResult);
                    }
                }
                if ((result != null && result.Final) || err != null || !IsRecording)
                {
                    IsRecording = false;
                    //node.RemoveTapOnBus(0);
                    AudioEngine.Stop();
                    m_speechTimer.Close();
                }
            });

            var recordingFormat = node.GetBusOutputFormat(0);

            node.InstallTapOnBus(0, 1024, recordingFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) => {
                //Console.WriteLine("--> {0}: {1} {2}.{3}", buffer.FrameLength, when.HostTime);
                // Append buffer to recognition request
                LiveSpeechRequest.Append(buffer);
            });

            // Start recording
            AudioEngine.Prepare();
            AudioEngine.StartAndReturnError(out error);

            if (error != null)
            {
                OnSpeechError?.Invoke("Speech init error: " + error.ToString());
                IsRecording = false;
                return;
            }
            IsRecording   = true;
            IsCancelled   = false;
            LastResult    = "";
            m_lastSpeech  = DateTime.MaxValue;
            m_startSpeech = DateTime.Now;
            m_timeout     = longTimeout ? m_phraseTimeout : m_wordTimeout;

            m_speechTimer           = new System.Timers.Timer(250);
            m_speechTimer.AutoReset = true;
            m_speechTimer.Elapsed  += (sender, e) => {
                CheckRecording();
            };
            m_speechTimer.Start();
        }
        void StartRecording()
        {
            if (recognitionTask != null)
            {
                recognitionTask.Cancel();
                recognitionTask = null;
            }

            var audioSession = AVAudioSession.SharedInstance();

            try
            {
                NSError err;
                audioSession.SetCategory(AVAudioSessionCategory.PlayAndRecord);
                audioSession.SetMode(AVAudioSession.ModeMeasurement, out err);
                audioSession.SetActive(true);
            }
            catch (Exception ex)
            {
                var s = ex.ToString();
            }

            recognitionRequest = new SFSpeechAudioBufferRecognitionRequest();

            var inputNode = audioEngine.InputNode;

            recognitionRequest.ShouldReportPartialResults = true;

            recognitionTask = speechRecognizer.GetRecognitionTask(recognitionRequest, (arg1, arg2) =>
            {
                var isFinal = false;

                if (arg1 != null)
                {
                    var inputtedText = arg1.BestTranscription.FormattedString;
                    englishText.Text = inputtedText;

                    var fromLanguage = LanguageCodes.English;
                    var toLangague   = AppDelegate.CurrentLanguage.LanguageCode;

                    translator.TranslateText(inputtedText, fromLanguage, toLangague).ContinueWith(async(arg) =>
                    {
                        var translated = await arg;

                        InvokeOnMainThread(() => translatedText.Text = translated);
                    });


                    isFinal = arg1.Final;
                }

                if (arg2 != null || isFinal)
                {
                    audioEngine.Stop();
                    inputNode.RemoveTapOnBus(0);
                    recognitionRequest = null;
                    recognitionTask    = null;

                    askQuestion.Enabled = true;
                }
            });

            var recordingFormat = inputNode.GetBusOutputFormat(0);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) =>
            {
                recognitionRequest?.Append(buffer);
            });

            audioEngine.Prepare();

            try
            {
                NSError err = new NSError();
                audioEngine.StartAndReturnError(out err);

                englishText.Text    = "OK, here we go!";
                translatedText.Text = "";
            }
            catch (Exception) { }
        }
        void StartRecording()
        {
            // Cancel the previous task if it's running.
            recognitionTask?.Cancel();
            recognitionTask = null;


            var     audioSession = AVAudioSession.SharedInstance();
            NSError err;

            err = audioSession.SetCategory(AVAudioSessionCategory.PlayAndRecord, AVAudioSessionCategoryOptions.DefaultToSpeaker);
            audioSession.SetMode(AVAudioSession.ModeMeasurement, out err);
            err = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);

            // Configure request so that results are returned before audio recording is finished
            recognitionRequest = new SFSpeechAudioBufferRecognitionRequest {
                ShouldReportPartialResults = true,
            };

            var inputNode = audioEngine.InputNode;

            if (inputNode == null)
            {
                throw new InvalidProgramException("Audio engine has no input node");
            }


            // A recognition task represents a speech recognition session.
            // We keep a reference to the task so that it can be cancelled.
            recognitionTask = speechRecognizer.GetRecognitionTask(recognitionRequest, (result, error) => {
                var isFinal = false;
                if (result != null)
                {
                    speechIdleTimer.Stop();
                    speechIdleTimer.Start();

                    textView.Text = result.BestTranscription.FormattedString;

                    isFinal = result.Final;
                }

                if (error != null || isFinal)
                {
                    if (result != null)
                    {
                        var intent = nlp.GetMatchingIntent(result.BestTranscription.FormattedString);

                        string resultText;
                        if (intent != null)
                        {
                            textView.Text += "\nAction is " + intent.Action + ".";
                            resultText     = "Action is " + intent.Action + ". ";
                            if (intent.Parameters != null)
                            {
                                intent.Parameters.ForEach(p => {
                                    resultText    += "Parameter " + p.Key + " with values" + string.Join(",", p.Value) + ". ";
                                    textView.Text += "\nParameter " + p.Key + " with values " + string.Join(",", p.Value) + ". ";
                                });
                            }
                        }
                        else
                        {
                            resultText = "Sorry, I did not get that.";
                        }

                        var su = new AVSpeechUtterance(resultText)
                        {
                            Rate            = AVSpeechUtterance.MaximumSpeechRate / 2,
                            Voice           = AVSpeechSynthesisVoice.FromLanguage("en-US"),
                            PitchMultiplier = 1.0f,
                            Volume          = 1
                        };

                        ss.SpeakUtterance(su);
                    }

                    audioEngine.Stop();
                    inputNode.RemoveTapOnBus(0);
                    recognitionRequest   = null;
                    recognitionTask      = null;
                    recordButton.Enabled = true;
                    //recordButton.SetTitle ("Start Recording", UIControlState.Normal);
                    recordButton.Hidden = false;
                    recordStatus.Hidden = true;
                    speechIdleTimer.Stop();
                }
            });

            var recordingFormat = inputNode.GetBusOutputFormat(0);

            inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) => {
                recognitionRequest?.Append(buffer);
            });

            audioEngine.Prepare();
            audioEngine.StartAndReturnError(out err);
        }