/// <summary>
/// Click handler for the offline-TTS demo buttons: clears the input, speaks the
/// entered text (downloading the offline model first if absent), triggers a model
/// download, or stops playback.
/// </summary>
/// <param name="v">The view that was clicked; dispatched on by <c>v.Id</c>.</param>
/// <remarks>
/// <c>async void</c> is intentional here: this is a top-level UI event handler,
/// the one sanctioned use of async void. Exceptions are caught locally so none
/// escape unobserved.
/// </remarks>
public async void OnClick(View v)
{
    switch (v.Id)
    {
        case Resource.Id.close:
            mEditText.Text = "";
            break;
        case Resource.Id.btn_speak:
            // mEditText.Text is already a string; no ToString() needed.
            string text = mEditText.Text;
            // Check whether the offline model corresponding to the language has been downloaded.
            MLTtsLocalModel model = new MLTtsLocalModel.Factory(MLTtsConstants.TtsSpeakerOfflineEnUsMaleEagle).Create();
            try
            {
                // Await the check directly; after 'await' the task is always
                // completed, so the former IsCompleted/Result checks were redundant.
                bool modelExists = await manager.IsModelExistAsync(model);
                if (modelExists)
                {
                    Speak(text);
                }
                else
                {
                    Log.Error(Tag, "isModelDownload== " + modelExists);
                    ShowToast("The offline model has not been downloaded!");
                    DownloadModel(MLTtsConstants.TtsSpeakerOfflineEnUsMaleEagle);
                }
            }
            catch (Exception e)
            {
                // Surface the failure to the user and the log; swallowing here is
                // deliberate so the UI thread is never crashed by a TTS error.
                Log.Error(Tag, "downloadModel failed: " + e.Message);
                ShowToast(e.Message);
            }
            break;
        case Resource.Id.btn_download_model:
            DownloadModel(MLTtsConstants.TtsSpeakerOfflineEnUsMaleEagle);
            break;
        case Resource.Id.btn_stop_speak:
            mlTtsEngine.Stop();
            break;
        default:
            break;
    }
}
/// <summary>
/// Click handler for the online-TTS demo buttons: clears the input, synthesizes
/// and plays the entered text via the TTS engine, or stops playback.
/// </summary>
/// <param name="v">The view that was clicked; dispatched on by <c>v.Id</c>.</param>
public void OnClick(View v)
{
    switch (v.Id)
    {
        case Resource.Id.close:
            mEditText.Text = "";
            break;
        case Resource.Id.btn_speak:
            // mEditText.Text is already a string; no ToString() needed.
            string text = mEditText.Text;
            /**
             * First parameter sourceText: text information to be synthesized. The value can contain a maximum of 500 characters.
             * Second parameter indicating the synthesis mode: The format is configA | configB | configC.
             * configA:
             * MLTtsEngine.QueueAppend: After an audio synthesis task is generated, the audio synthesis task is processed as follows: If playback is going on, the task is added to the queue for execution in sequence; if playback pauses, the playback is resumed and the task is added to the queue for execution in sequence; if there is no playback, the audio synthesis task is executed immediately.
             * MLTtsEngine.QueueFlush: The ongoing audio synthesis task and playback are stopped immediately, all audio synthesis tasks in the queue are cleared, and the current audio synthesis task is executed immediately and played.
             * configB:
             * MLTtsEngine.OpenStream: The synthesized audio data is output through onAudioAvailable.
             * configC:
             * MLTtsEngine.ExternalPlayback: external playback mode. The player provided by the SDK is shielded. You need to process the audio output by the onAudioAvailable callback API. In this case, the playback-related APIs in the callback APIs become invalid, and only the callback APIs related to audio synthesis can be listened.
             */
            // Use the built-in player of the SDK to play speech in queuing mode.
            // Speak returns a task ID; it was previously stored in an unused local,
            // which is dropped here. Capture it if task tracking is needed.
            mlTtsEngine.Speak(text, MLTtsEngine.QueueAppend);
            // In queuing mode, the synthesized audio stream is output through onAudioAvailable, and the built-in player of the SDK is used to play the speech.
            // string id = mlTtsEngine.Speak(text, MLTtsEngine.QueueAppend | MLTtsEngine.OpenStream);
            // In queuing mode, the synthesized audio stream is output through onAudioAvailable, and the audio stream is not played, but controlled by you.
            // string id = mlTtsEngine.Speak(text, MLTtsEngine.QueueAppend | MLTtsEngine.OpenStream | MLTtsEngine.ExternalPlayback);
            break;
        case Resource.Id.btn_stop_speak:
            mlTtsEngine.Stop();
            break;
        default:
            break;
    }
}