Example #1
        private CMPSpeechError PrepareAudioEngineError(NSError recognitionError)
        {
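            // Copy the code, domain and localized description of the native NSError
            // into the cross-platform CMPSpeechError wrapper.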
            var genericError = new CMPSpeechError(recognitionError);

            genericError.Code    = (int)(recognitionError.Code);
            genericError.Domain  = GetErrorDomain();
            genericError.Message = recognitionError.LocalizedDescription;
            return(genericError);
        }
Example #2
        private CMPSpeechError PrepareAuthorizationError(SFSpeechRecognizerAuthorizationStatus status)
        {
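            // Wrap an SFSpeechRecognizerAuthorizationStatus in a CMPSpeechError,
            // using the status value itself as the error code.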
            string errorDomainString  = GetErrorDomain();
            string errorMessageString = GetStatusMessageString(status);

            var genericError = new CMPSpeechError();

            genericError.Code    = (int)(status);
            genericError.Domain  = errorDomainString;
            genericError.Message = errorMessageString;
            return(genericError);
        }
Example #3
        public async Task <Tuple <string, Tuple <bool, CMPSpeechError> > > StartRecordingAsync()
        {
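            // Flow: bail out if a recording is already running, check speech authorization,
            // attach an AVAudioEngine input tap that feeds the recognition request, and block
            // on the semaphore until the recognition callback (or an error path) releases it.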
            if (IsRecording() == true)
            {
                return(new Tuple <string, Tuple <bool, CMPSpeechError> > (string.Empty, null));
            }

            var authorizationResult = await CheckAuthorizationAsync();

            if (authorizationResult == null)
            {
                return(new Tuple <string, Tuple <bool, CMPSpeechError> >(string.Empty, null));
            }

            if (authorizationResult.Item1 == false)
            {
                return(new Tuple <string, Tuple <bool, CMPSpeechError> >(string.Empty, authorizationResult));
            }

            CMPSpeechError genericError = null;
            var            inputNode    = _speechAudioEngine.InputNode;

            if (inputNode == null)
            {
                // Build a synthetic NSError whose numeric code comes from SpeechToTextErrorEnum.eNoInputNode.
                var audioEngineError = new NSError(new NSString(string.Empty),
                                                   (nint)(int)SpeechToTextErrorEnum.eNoInputNode);
                genericError = PrepareAudioEngineError(audioEngineError);
                ResetSpeechToText();
                return(new Tuple <string, Tuple <bool, CMPSpeechError> >(
                           string.Empty, new Tuple <bool, CMPSpeechError>(false, genericError)));
            }

            Tuple <string, Tuple <bool, CMPSpeechError> > recognitionResult = null;
            await Task.Run(() =>
            {
                try
                {
                    _speechRecognitionTask = _speechRecognizer.GetRecognitionTask(_speechRecognitionRequest,
                                                                                  (SFSpeechRecognitionResult result,
                                                                                   NSError speechError) =>
                    {
                        if (speechError != null)
                        {
                            _speechAudioEngine.Stop();

                            genericError      = new CMPSpeechError(speechError);
                            recognitionResult = new Tuple <string, Tuple <bool, CMPSpeechError> >(
                                string.Empty, new Tuple <bool, CMPSpeechError>(false, genericError));
                            ResetSpeechToText();
                            _speechSemaphore.Release();
                            return;
                        }

                        if (result.Final == true)
                        {
                            _speechAudioEngine.Stop();
                            inputNode.RemoveTapOnBus(0);

                            recognitionResult = new Tuple <string, Tuple <bool, CMPSpeechError> >(
                                result.BestTranscription.FormattedString,
                                new Tuple <bool, CMPSpeechError>(true, null));

                            ResetSpeechToText();
                            _speechSemaphore.Release();
                            return;
                        }
                    });

                    var audioFormat = inputNode.GetBusOutputFormat(0);
                    inputNode.InstallTapOnBus(0, 2048, audioFormat, (AVAudioPcmBuffer buffer, AVAudioTime when) =>
                    {
                        // Forward each captured audio buffer to the active recognition request.
                        _speechRecognitionRequest.Append(buffer);
                    });

                    _speechAudioEngine.Prepare();

                    NSError audioEngineError = null;
                    bool couldStart          = _speechAudioEngine.StartAndReturnError(out audioEngineError);

                    if (couldStart == false)
                    {
                        genericError      = PrepareAudioEngineError(audioEngineError);
                        recognitionResult = new Tuple <string, Tuple <bool, CMPSpeechError> >(
                            string.Empty, new Tuple <bool, CMPSpeechError>(false, genericError));
                        ResetSpeechToText();
                        _speechSemaphore.Release();
                        return;
                    }
                }
                catch (Exception exception)
                {
                    System.Diagnostics.Debug.WriteLine(exception.Message);
                    ResetSpeechToText();
                    _speechSemaphore.Release();
                }
            });

            await _speechSemaphore.WaitAsync();

            return(recognitionResult);
        }
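The nested Tuple returned by Example #3 is easiest to consume by checking the inner tuple first. Below is a minimal caller sketch; the SpeechToTextService type name and TranscribeOnceAsync method are assumptions for illustration, since the class that declares StartRecordingAsync is not shown in these snippets.

        public async Task TranscribeOnceAsync(SpeechToTextService speechService)
        {
            // Item1 carries the transcription, Item2 the (success, error) pair; Item2 is null
            // when recording was already in progress, authorization could not be determined,
            // or an exception was swallowed inside StartRecordingAsync.
            Tuple <string, Tuple <bool, CMPSpeechError> > result = await speechService.StartRecordingAsync();

            if ((result == null) || (result.Item2 == null))
            {
                return;
            }

            if (result.Item2.Item1 == true)
            {
                System.Diagnostics.Debug.WriteLine("Transcription: " + result.Item1);
            }
            else
            {
                System.Diagnostics.Debug.WriteLine("Recognition failed: " + result.Item2.Item2?.Message);
            }
        }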