/// <summary>
/// Starts a new live speech-recognition session: configures the shared audio
/// session, taps the audio engine's input node into a buffer recognition
/// request, and schedules silence timers that call <c>DidFinishTalk</c>.
/// All setup failures are logged and swallowed by the outer catch.
/// </summary>
private void StartRecordingAndRecognizing()
{
    try
    {
        // Invalidate any timer left over from a previous session before scheduling
        // a new one; otherwise the old repeating timer keeps firing and leaks.
        _timer?.Invalidate();

        // Auto-stop after 5 seconds of initial silence.
        _timer = NSTimer.CreateRepeatingScheduledTimer(5, delegate { DidFinishTalk(); });

        // Cancel the previous task if it's running.
        _recognitionTask?.Cancel();
        _recognitionTask = null;

        // Activate the shared audio session for recording and route output to the speaker.
        var audioSession = AVAudioSession.SharedInstance();
        var nsError = audioSession.SetCategory(AVAudioSessionCategory.PlayAndRecord);
        audioSession.SetMode(AVAudioSession.ModeDefault, out nsError);
        nsError = audioSession.SetActive(true, AVAudioSessionSetActiveOptions.NotifyOthersOnDeactivation);
        audioSession.OverrideOutputAudioPort(AVAudioSessionPortOverride.Speaker, out nsError);

        // Configure request so that results are returned before audio recording is finished.
        _recognitionRequest = new SFSpeechAudioBufferRecognitionRequest
        {
            // FIX: the original never set this flag even though the comment promised
            // partial results; without it the callback fires only on the final result,
            // so the 2-second "still talking" timer below could never be armed.
            ShouldReportPartialResults = true
        };

        var inputNode = _audioEngine.InputNode;
        if (inputNode == null)
        {
            // InvalidOperationException is the conventional type for "object not in a
            // usable state" (InvalidProgramException is reserved for corrupted IL).
            // Either way it is handled by the catch block below.
            throw new InvalidOperationException("Audio engine has no input node");
        }

        // Remove any tap left by a previous session — installing a second tap on the
        // same bus throws; removing a nonexistent tap is a safe no-op.
        inputNode.RemoveTapOnBus(0);

        var recordingFormat = inputNode.GetBusOutputFormat(0);
        inputNode.InstallTapOnBus(0, 1024, recordingFormat, (buffer, when) =>
        {
            _recognitionRequest?.Append(buffer);
        });

        _audioEngine.Prepare();
        _audioEngine.StartAndReturnError(out nsError);

        // A recognition task represents a speech recognition session.
        // We keep a reference to the task so that it can be cancelled.
        _recognitionTask = _speechRecognizer.GetRecognitionTask(_recognitionRequest, (result, error) =>
        {
            if (result != null)
            {
                OnTextReceived(new TextReceivedEventArg { Text = result.BestTranscription.FormattedString });

                // Restart the silence window: finish 2 seconds after the last recognized
                // text. Null-conditional guards against this async callback racing a
                // stop/teardown that already cleared the timer.
                _timer?.Invalidate();
                _timer = NSTimer.CreateRepeatingScheduledTimer(2, delegate { DidFinishTalk(); });
            }

            if (error == null)
            {
                return;
            }

            // Recognition failed or was cancelled — notify listeners and tear down.
            OnStoppedListening();
            StopRecordingAndRecognition();
        });
    }
    catch (Exception ex)
    {
        // Best-effort: audio/speech setup failures (e.g. missing microphone
        // permission) are logged rather than rethrown so the UI doesn't crash.
        Console.WriteLine(ex.Message);
    }
}
/// <inheritdoc />
public void OnTextReceived(TextReceivedEventArg e)
{
    // Snapshot the delegate, then raise it; no-op when nothing is subscribed.
    var handler = TextReceived;
    if (handler != null)
    {
        handler.Invoke(e);
    }
}
// Event handler: mirrors the most recently recognized text onto the label.
private void Current_TextReceived(TextReceivedEventArg e)
{
    var recognizedText = e.Text;
    RecordLabel.Text = recognizedText;
}