public IEnumerator SynthesizeText(string textToRead, AudioList output)
{
    if (string.IsNullOrEmpty(textToRead))
    {
        textToRead = "Cagin Nicolas Cage in a cage. Yep c**k. Also this is a default message.";
    }
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    tts.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            // Check the error before touching the result: it may be empty on failure.
            if (error != null)
            {
                Debug.Log(error.ErrorMessage);
                return;
            }
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("Narrator_text.wav", synthesizeResponse);
            Debug.Log("clip: " + clip);
        },
        text: textToRead,
        voice: "en-US_MichaelVoice",
        accept: "audio/wav"
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    output.textToSpeechSource.clip = clip;
}

public IEnumerator TestSynthesize()
{
    Log.Debug("TextToSpeechServiceV1IntegrationTests", "Attempting to Synthesize...");
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    service.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            Assert.IsNotNull(synthesizeResponse);
            Assert.IsNull(error);
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            PlayClip(clip);
        },
        text: synthesizeText,
        voice: allisionVoice,
        accept: synthesizeMimeType
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    yield return new WaitForSeconds(clip.length);
}

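Many of these snippets call a PlayClip helper that is not part of the SDK itself. A minimal sketch, modeled on the helper in the official Watson Unity SDK samples (some of the projects below use a coroutine variant instead):

private void PlayClip(AudioClip clip)
{
    if (Application.isPlaying && clip != null)
    {
        // Spawn a throwaway GameObject with an AudioSource, play the clip,
        // and destroy the object once the clip has finished.
        GameObject audioObject = new GameObject("AudioObject");
        AudioSource source = audioObject.AddComponent<AudioSource>();
        source.spatialBlend = 0.0f;
        source.loop = false;
        source.clip = clip;
        source.Play();
        Destroy(audioObject, clip.length);
    }
}
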
private void CallTextToSpeech(string outputText)
{
    Debug.Log("Sent to Watson Text To Speech: " + outputText);
    watsonAnswer.text = outputText;
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    // NEW: added the null check below to prevent a null-text error from occurring in TextToSpeechService
    if (outputText != null)
    {
        _textToSpeech.Synthesize(
            callback: (DetailedResponse<byte[]> response, IBMError error) =>
            {
                synthesizeResponse = response.Result;
                clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
                PlayClip(clip);
            },
            text: outputText,
            voice: "en-US_MichaelVoice",
            accept: "audio/wav"
        );
    }
}

public IEnumerator MyCoroutine()
{
    string text = Text.text;
    while (!authenticator.CanAuthenticate())
    {
        yield return null;
    }
    textToSpeechService = new TextToSpeechService(authenticator);
    textToSpeechService.SetServiceUrl("https://gateway-lon.watsonplatform.net/text-to-speech/api");
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    textToSpeechService.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("hello_world.wav", synthesizeResponse);
            AudioSource audioSource = GetComponent<AudioSource>();
            audioSource.clip = clip;
            audioSource.Play();
        },
        text: text,
        voice: voice,
        accept: "audio/wav"
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
}

private IEnumerator ExampleSynthesize()
{
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    service.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            Log.Debug("ExampleTextToSpeechV1", "Synthesize done!");
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            PlayClip(clip);
        },
        text: synthesizeText,
        voice: allisionVoice,
        accept: synthesizeMimeType
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    yield return new WaitForSeconds(clip.length);
}

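The service, synthesizeText, allisionVoice, and synthesizeMimeType fields these examples rely on come from setup code that isn't shown. A minimal sketch mirroring the SDK sample flow, assuming iamApikey and serviceUrl fields hold your credentials:

private IEnumerator CreateService()
{
    // Authenticate with an IAM API key and wait for the token exchange to finish.
    IamAuthenticator authenticator = new IamAuthenticator(apikey: iamApikey);
    while (!authenticator.CanAuthenticate())
    {
        yield return null;
    }
    service = new TextToSpeechService(authenticator);
    if (!string.IsNullOrEmpty(serviceUrl))
    {
        service.SetServiceUrl(serviceUrl);
    }
    Runnable.Run(ExampleSynthesize());
}
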
public IEnumerator RunSynthesize()
{
    Debug.Log("[" + name + "] Attempting to Synthesize...");
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    service.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            File.WriteAllBytes(audioPath, synthesizeResponse);
            clip = WaveFile.ParseWAV("voice_response", synthesizeResponse);
            LoadingParticles.Stop();
            audioSource.PlayOneShot(clip);
        },
        text: InputText,
        voice: allisionVoice,
        accept: synthesizeMimeType
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    yield return new WaitForSeconds(clip.length);
    ReadyToSend = true;
    SelectableUIElement.ChangeLock(-1);
}

// Synthesizes the sentence
public void SynthesizeSentence(string sentence)
{
    activeFile = Regex.Replace(sentence, @"[^a-zA-Z0-9 -]", "").ToLower();
    activeFile = Regex.Replace(activeFile, @"\s+", string.Empty);
    DialogueHash = activeFile;
    using (MD5 md5Hash = MD5.Create())
    {
        DialogueHash = GetMd5Hash(md5Hash, DialogueHash);
    }
    ttsSynthesing = true;
    if (voiceLines.ContainsKey(DialogueHash))
    {
        associatedSource.clip = WaveFile.ParseWAV(DialogueHash, voiceLines[DialogueHash]);
        ttsSynthesing = false;
        //associatedSource.PlayOneShot(Resources.Load<AudioClip>("Sounds/" + fileNameSaveFile));
    }
    else
    {
        tts.Synthesize(OnSynthesize, sentence, "en-US_AllisonV3Voice", null, "audio/wav");
    }
    if (PlayerPrefs.HasKey(activeFile))
    {
        emotionScore = PlayerPrefs.GetFloat(activeFile);
    }
    else
    {
        ToneInput tempInput = new ToneInput();
        tempInput.Text = sentence;
        tas.Tone(OnToneAnalysis, tempInput, false, tones);
    }
}

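The GetMd5Hash helper used above isn't shown. The method name matches the call site; the body below is the standard .NET pattern (requires System.Security.Cryptography and System.Text):

private static string GetMd5Hash(MD5 md5Hash, string input)
{
    // Hash the input string and format each byte as lowercase hex.
    byte[] data = md5Hash.ComputeHash(System.Text.Encoding.UTF8.GetBytes(input));
    var builder = new System.Text.StringBuilder();
    foreach (byte b in data)
    {
        builder.Append(b.ToString("x2"));
    }
    return builder.ToString();
}
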
public override void TextToSpeech(string outputText)
{
    Debug.Log("Send to TTS Watson: " + outputText);
    if (!string.IsNullOrEmpty(outputText))
    {
        byte[] synthesizeResponse = null;
        AudioClip clip = null;
        _service.Synthesize(
            callback: (DetailedResponse<byte[]> response, IBMError error) =>
            {
                if (response != null && response.Result != null && response.Result.Length > 0)
                {
                    synthesizeResponse = response.Result;
                    clip = WaveFile.ParseWAV("answer", synthesizeResponse);
                    ManageOutput(clip, outputText);
                }
            },
            text: outputText,
            voice: "en-US_AllisonVoice",
            accept: "audio/wav"
        );
    }
}

public static IEnumerator Speech(string text)
{
    Debug.Log("I've been called to speak");
    if (!string.IsNullOrEmpty(text))
    {
        byte[] synthesizeResponse = null;
        AudioClip clip = null;
        tts.Synthesize(
            callback: (DetailedResponse<byte[]> response, IBMError error) =>
            {
                synthesizeResponse = response.Result;
                Debug.Log("ExampleTextToSpeechV1");
                clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
                PlayClip(clip);
            },
            text: text,
            voice: allisionVoice,
            accept: synthesizeMimeType
        );
        while (synthesizeResponse == null)
        {
            yield return null;
        }
        yield return new WaitForSeconds(clip.length);
    }
    yield return null;
}

private IEnumerator ExampleSynthesize(string text)
{
    if (string.IsNullOrEmpty(text))
    {
        text = synthesizeText;
        Log.Debug("ExampleTextToSpeechV1", "Using default text, please enter your own text in dialog box!");
    }
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    service.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            Log.Debug("ExampleTextToSpeechV1", "Synthesize done!");
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            PlayClip(clip);
        },
        text: text,
        voice: allisionVoice,
        accept: synthesizeMimeType
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    yield return new WaitForSeconds(clip.length);
}

private void AudioClipCallback(AudioClip clip)
{
    if (player != null)
    {
        //player.StartPlaying(clip);
    }
    else
    {
        print("***PLAYER MISSING***");
    }
    //AudioData record = new AudioData(clip, Mathf.Max(clip.samples / 2));
    //int midPoint = clip.samples / 2;
    //float[] samples = new float[midPoint];
    //clip.GetData(samples, 0);
    //AudioData record = new AudioData();
    //record.MaxLevel = Mathf.Max(samples);
    //record.Clip = AudioClip.Create("Recording", midPoint, clip.channels, clip.frequency, false);
    //record.Clip.SetData(samples, 0);
    //_speechToText.OnListen(record);
    AudioClip _audioClip = WaveFile.ParseWAV("testClip", AudioClipToByteArray(clip));
    _speechToText.Recognize(_audioClip, OnRecognize);
    print("AUDIO CLIP SENT");
}

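AudioClipToByteArray, used here and in OnRecordingEnd further down, isn't shown in either snippet. A plausible sketch (an assumption, not the original helper) that serializes the clip as 16-bit PCM WAV so WaveFile.ParseWAV can round-trip it; requires System.IO and UnityEngine:

private static byte[] AudioClipToByteArray(AudioClip clip)
{
    float[] samples = new float[clip.samples * clip.channels];
    clip.GetData(samples, 0);
    using (var stream = new MemoryStream())
    using (var writer = new BinaryWriter(stream))
    {
        int byteCount = samples.Length * 2; // 2 bytes per 16-bit sample
        // RIFF/WAVE header
        writer.Write(System.Text.Encoding.ASCII.GetBytes("RIFF"));
        writer.Write(36 + byteCount);
        writer.Write(System.Text.Encoding.ASCII.GetBytes("WAVE"));
        writer.Write(System.Text.Encoding.ASCII.GetBytes("fmt "));
        writer.Write(16);                                 // fmt chunk size
        writer.Write((short)1);                           // PCM format
        writer.Write((short)clip.channels);
        writer.Write(clip.frequency);
        writer.Write(clip.frequency * clip.channels * 2); // byte rate
        writer.Write((short)(clip.channels * 2));         // block align
        writer.Write((short)16);                          // bits per sample
        writer.Write(System.Text.Encoding.ASCII.GetBytes("data"));
        writer.Write(byteCount);
        // Convert each float sample [-1, 1] to a signed 16-bit value.
        foreach (float s in samples)
        {
            writer.Write((short)(Mathf.Clamp(s, -1f, 1f) * short.MaxValue));
        }
        return stream.ToArray();
    }
}
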
public AudioClip getAudio()
{
    if (audioStream != null && audioStream.Length > 0)
    {
        Log.Debug("ExampleTextToSpeech", "Audio stream of {0} bytes received!", audioStream.Length.ToString());
        // Use audioStream and play audio
        _recording = WaveFile.ParseWAV("myClip", audioStream);
    }
    return _recording;
}

private void OnSynthesize(DetailedResponse<byte[]> response, IBMError error)
{
    // Guard: don't touch the result if the request failed.
    if (error != null)
    {
        Log.Debug("SpeechSynthesizer.OnSynthesize()", "error: {0}", error.ErrorMessage);
        return;
    }
    byte[] audioData = response.Result;
    AudioClip clip = WaveFile.ParseWAV("speech", audioData);
    speaking.Invoke(clip.length + 0.25f);
    Log.Debug("SpeechSynthesizer.OnSynthesize()", "called.");
    PlayClip(clip);
}

public void onSynthCompleted(DetailedResponse<byte[]> response, IBMError error)
{
    byte[] synthesizeResponse = response.Result;
    AudioClip clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
    Debug.Log("before playing: " + clip);
    PlayClip(clip);
}

private IEnumerator ProcessText()
{
    Debug.Log("ProcessText");
    // Guard clauses: exit with yield break (not yield return null, which only
    // waits a frame and then falls through) so we never synthesize an empty string.
    if (outputAudioSource.isPlaying || textQueue.Count < 1)
    {
        yield break;
    }
    audioStatus = ProcessingStatus.Processing;
    string nextText = textQueue.Dequeue();
    Debug.Log(nextText);
    if (String.IsNullOrEmpty(nextText))
    {
        audioStatus = ProcessingStatus.Idle;
        yield break;
    }
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    tts_service.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            // Place the new clip into the audio queue.
            audioQueue.Enqueue(clip);
        },
        text: nextText,
        //voice: "en-" + voice,
        voice: "ko-" + voice,
        accept: "audio/wav"
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    // Set status to indicate the text-to-speech processing task completed
    audioStatus = ProcessingStatus.Idle;
}

private IEnumerator ProcessText()
{
    Debug.Log("ProcessText");
    // Guard clauses: exit with yield break (not yield return null, which only
    // waits a frame and then falls through) so we never synthesize an empty string.
    if (_outputAudioSource.isPlaying || textQueue.Count < 1)
    {
        yield break;
    }
    _audioStatus = EProcessingStatus.Processing;
    string nextText = textQueue.Dequeue();
    Debug.Log(nextText);
    if (String.IsNullOrEmpty(nextText))
    {
        _audioStatus = EProcessingStatus.Idle;
        yield break;
    }
    // The method accepts a maximum of 5 KB of input text in the body of the request,
    // and 8 KB for the URL and headers.
    // Doc: https://cloud.ibm.com/apidocs/text-to-speech?code=unity#synthesize
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    _textToSpeechService.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("message.wav", synthesizeResponse);
            // Place the new clip into the audio queue.
            audioQueue.Enqueue(clip);
        },
        text: nextText,
        voice: $"en-{voice}",
        accept: "audio/wav"
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    // Set status to indicate the text-to-speech processing task completed
    _audioStatus = EProcessingStatus.Idle;
}

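As the comment in the previous snippet notes, the Synthesize endpoint accepts at most 5 KB of body text. A hypothetical helper (not from that project) for splitting long input into safely sized pieces before enqueueing; it assumes no single word exceeds the limit:

private static IEnumerable<string> ChunkText(string text, int maxBytes = 4096)
{
    var current = new System.Text.StringBuilder();
    foreach (string word in text.Split(' '))
    {
        // Flush the current chunk if appending this word would cross the limit.
        if (current.Length > 0 &&
            System.Text.Encoding.UTF8.GetByteCount(current.ToString() + " " + word) > maxBytes)
        {
            yield return current.ToString();
            current.Clear();
        }
        if (current.Length > 0)
        {
            current.Append(' ');
        }
        current.Append(word);
    }
    if (current.Length > 0)
    {
        yield return current.ToString();
    }
}
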
private AudioClip ProcessResponse(string textId, byte[] data)
{
    switch (m_AudioFormat)
    {
    case AudioFormatType.WAV:
        return WaveFile.ParseWAV(textId, data);
    default:
        break;
    }
    Log.Error("TextToSpeech", "Unsupported audio format: {0}", m_AudioFormat.ToString());
    return null;
}

void Update()
{
    while (service != null && !service.IsListening)
    {
        if (audioStream != null && audioStream.Length > 0)
        {
            Log.Debug("ExampleTextToSpeech", "Audio stream of {0} bytes received!", audioStream.Length.ToString());
            // Use audioStream and play audio
            _recording = WaveFile.ParseWAV("myClip", audioStream);
            PlayClip(_recording);
        }
        audioStream = null;
        // Reconnect: the service disconnects the websocket after each synthesis.
        // https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-usingWebSocket#WSsend
        StartListening();
    }
}

void Start()
{
    LogSystem.InstallDefaultReactors();
    // Create credential and instantiate service
    Credentials credentials = new Credentials(_username, _password, _url);
    _speechToText = new SpeechToText(credentials);
    _customCorpusFilePath = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-stt-corpus.txt";
    _customWordsFilePath = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-stt-words.json";
    _wavFilePath = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-audio.wav";
    _audioClip = WaveFile.ParseWAV("testClip", File.ReadAllBytes(_wavFilePath));
    Runnable.Run(Examples());
}

private void CallTextToSpeech(string outputText)
{
    Debug.Log("Sent to Watson Text To Speech: " + outputText);
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    _textToSpeech.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            PlayClip(clip);
        },
        text: outputText,
        voice: "en-US_AllisonVoice",
        accept: "audio/wav"
    );
}

private void OnRecordingEnd(AudioClip clip)
{
    if (player != null)
    {
        print("AUDIO PLAYBACK");
        //player.StartPlaying(clip);
    }
    else
    {
        OnPlayingEnd(false);
    }
    AudioClip _audioClip = WaveFile.ParseWAV("testClip", AudioClipToByteArray(clip));
    _speechToText.Recognize(_audioClip, OnRecognize);
    //apiAiUnity.StartVoiceRequestThread(_aiClip);
}

private IEnumerator Synthesize(string synthesizeText, string voice)
{
    Log.Debug("WatsonTTS", "Attempting to Synthesize...");
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    // _service.Synthesize(
    //     callback: (DetailedResponse<byte[]> response, IBMError error) =>
    //     {
    //         synthesizeResponse = response.Result;
    //         Assert.IsNotNull(synthesizeResponse);
    //         Assert.IsNull(error);
    //         clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
    //         PlayClip(clip);
    //     },
    //     text: synthesizeText,
    //     voice: voice,
    //     accept: synthesizeMimeType
    // );
    _service.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            Log.Debug("Watson TTS", "Synthesize done!");
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            StartCoroutine(PlayClip(clip));
        },
        text: synthesizeText,
        voice: voice,
        accept: synthesizeMimeType
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    yield return new WaitForSeconds(clip.length);
    m_callbackMethod(synthesizeText, clip.length);
}

public IEnumerator SynthesizeText(string textToRead, NarratorBehaviour output)
{
    if (string.IsNullOrEmpty(textToRead))
    {
        textToRead = "Cagin Nicolas Cage in a cage. Yep c**k. Also this is a default message.";
    }
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    if (tts != null && isLive)
    {
        tts.Synthesize(
            callback: (DetailedResponse<byte[]> response, IBMError error) =>
            {
                // Check the error before touching the result: it may be empty on failure.
                if (error != null)
                {
                    Debug.Log(error.ErrorMessage);
                    return;
                }
                synthesizeResponse = response.Result;
                clip = WaveFile.ParseWAV("Narrator_text.wav", synthesizeResponse);
                Debug.Log("clip: " + clip);
            },
            text: textToRead,
            voice: "en-US_MichaelVoice",
            accept: "audio/wav"
        );
        while (synthesizeResponse == null)
        {
            yield return null;
        }
        output.textToSpeechClip = clip;
    }
    else
    {
        Debug.LogWarning("Narrator should have spoken, but TTS is null or we are in developer mode, so the mock is used instead");
        Debug.LogWarning("Narrator is reading this: " + textToRead);
        yield return null;
    }
}

// Writes the sound file received from Text to Speech, then plays it.
private void OnSynthesize(DetailedResponse<byte[]> resp, IBMError error)
{
    if (!voiceLines.ContainsKey(DialogueHash))
    {
        voiceLines.Add(DialogueHash, resp.Result);
    }
    else
    {
        Debug.LogWarning("This is not normal; the program shouldn't get here if the hash already exists.");
    }
    //associatedSource.PlayOneShot(Resources.Load<AudioClip>("Sounds/" + fileNameSaveFile));
    BinaryFormatter bf = new BinaryFormatter();
    Debug.Log("DataPath: " + Application.persistentDataPath);
    FileStream file = File.Create(Application.persistentDataPath + "/voiceLines.eld");
    bf.Serialize(file, voiceLines);
    file.Close();
    associatedSource.clip = WaveFile.ParseWAV(DialogueHash, resp.Result);
    ttsSynthesing = false;
}

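A hypothetical counterpart to the save above: restoring the cached voice lines from disk at startup so SynthesizeSentence can reuse them. It assumes voiceLines is a Dictionary<string, byte[]> (consistent with how both snippets use it) and the same file path:

private void LoadVoiceLines()
{
    string path = Application.persistentDataPath + "/voiceLines.eld";
    if (File.Exists(path))
    {
        // Deserialize the dictionary that OnSynthesize serialized.
        BinaryFormatter bf = new BinaryFormatter();
        using (FileStream file = File.Open(path, FileMode.Open))
        {
            voiceLines = (Dictionary<string, byte[]>)bf.Deserialize(file);
        }
    }
    else
    {
        voiceLines = new Dictionary<string, byte[]>();
    }
}
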
void Update()
{
    if (Input.GetKeyDown(KeyCode.Return))
    {
        service.SynthesizeUsingWebsockets(textInput.text);
        textInput.text = waitingText;
    }
    while (service != null && !service.IsListening)
    {
        if (audioStream != null && audioStream.Length > 0)
        {
            Log.Debug("ExampleTextToSpeech", "Audio stream of {0} bytes received!", audioStream.Length.ToString());
            // Use audioStream and play audio
            _recording = WaveFile.ParseWAV("myClip", audioStream);
            PlayClip(_recording);
        }
        textInput.text = placeholderText;
        audioStream = null;
        // Reconnect: the service disconnects the websocket after each synthesis.
        // https://cloud.ibm.com/docs/text-to-speech?topic=text-to-speech-usingWebSocket#WSsend
        StartListening();
    }
}

private void GetTTS()
{
    // Synthesize
    Log.Debug("WatsonTTS", "Attempting synthesize.");
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    TTSService.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            wait = clip.length;
            check = true;
            PlayClip(clip);
        },
        text: TTS_content,
        voice: allisionVoice,
        accept: synthesizeMimeType
    );
}

public IEnumerator CallTextToSpeech(string outputText)
{
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    textToSpeech.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("myClip", synthesizeResponse);
            PlayClip(clip);
        },
        text: outputText,
        voice: "es-LA_SofiaVoice",
        accept: "audio/wav"
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    yield return new WaitForSeconds(clip.length);
}

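Coroutines like the one above are typically driven with the SDK's Runnable utility (also visible in the Start() snippet earlier), which runs an IEnumerator without needing MonoBehaviour.StartCoroutine. A usage sketch; the text is an arbitrary example for the Spanish voice:

// Kick off the synthesize coroutine defined above.
Runnable.Run(CallTextToSpeech("Hola desde Watson"));
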
public IEnumerator CreateService()
{
    if (string.IsNullOrEmpty(Assistant_apikey))
    {
        throw new IBMException("Please provide Watson Assistant IAM ApiKey for the service.");
    }
    // Create credential and instantiate service
    // IamAuthenticator authenticator = new IamAuthenticator(apikey: Assistant_apikey, url: serviceUrl);
    IamAuthenticator authenticator = new IamAuthenticator(apikey: Assistant_apikey);
    // Wait for token data
    while (!authenticator.CanAuthenticate())
    {
        yield return null;
    }
    Assistant_service = new AssistantService(versionDate, authenticator);
    if (!string.IsNullOrEmpty(serviceUrl))
    {
        Assistant_service.SetServiceUrl(serviceUrl);
    }
    if (string.IsNullOrEmpty(tts_apikey))
    {
        throw new IBMException("Please provide Text-to-speech IAM ApiKey for the service.");
    }
    // Create credential and instantiate service
    tts_authenticator = new IamAuthenticator(apikey: tts_apikey);
    // Wait for token data
    while (!tts_authenticator.CanAuthenticate())
    {
        yield return null;
    }
    tts_service = new TextToSpeechService(tts_authenticator);
    if (!string.IsNullOrEmpty(tts_serviceUrl))
    {
        tts_service.SetServiceUrl(tts_serviceUrl);
    }
    // Set speech processing status to "Processing"
    processStatus = "Processing";
    // Create services
    Runnable.Run(speechStreamer.CreateService());
    // Ignore input speech while the bot is speaking.
    speechStreamer.Active = false;
    Assistant_service.CreateSession(OnCreateSession, assistantId);
    while (!createSessionTested)
    {
        yield return null;
    }
    messageTested = false;
    var inputMessage = new MessageInput()
    {
        Text = inputSpeech,
        Options = new MessageInputOptions()
        {
            ReturnContext = true
        }
    };
    Assistant_service.Message(OnMessage, assistantId, sessionId);
    while (!messageTested)
    {
        messageTested = false;
        yield return null;
    }
    //_testString = "I am Bob";
    // if (!String.IsNullOrEmpty(_testString))
    // {
    byte[] synthesizeResponse = null;
    AudioClip clip = null;
    tts_service.Synthesize(
        callback: (DetailedResponse<byte[]> response, IBMError error) =>
        {
            synthesizeResponse = response.Result;
            clip = WaveFile.ParseWAV("myClip" + counter.ToString(), synthesizeResponse);
            PlayClip(clip);
        },
        text: _testString,
        //voice: "en-US_AllisonV3Voice",
        voice: "en-US_MichaelV3Voice",
        //voice: "en-US_MichaelVoice",
        accept: "audio/wav"
    );
    while (synthesizeResponse == null)
    {
        yield return null;
    }
    counter++;
    processStatus = "Finished";
    yield return new WaitForSeconds(clip.length);
    // }
}

private IEnumerator ProcessChat()
{
    if (source.isPlaying)
    {
        yield return null;
    }
    while (processStatus != "Process")
    {
        yield return null;
    }
    // When processing the chat, ignore input speech
    if (processStatus == "Process")
    {
        speechStreamer.Active = false;
    }
    processStatus = "Processing";
    // Nothing to say: exit instead of sending an empty message.
    if (String.IsNullOrEmpty(inputSpeech))
    {
        yield break;
    }
    messageTested = false;
    var inputMessage = new MessageInput()
    {
        Text = inputSpeech,
        Options = new MessageInputOptions()
        {
            ReturnContext = true
        }
    };
    Assistant_service.Message(OnMessage, assistantId, sessionId, input: inputMessage);
    while (!messageTested)
    {
        messageTested = false;
        yield return null;
    }
    //_testString = "I am Bob";
    if (!String.IsNullOrEmpty(_testString))
    {
        byte[] synthesizeResponse = null;
        AudioClip clip = null;
        tts_service.Synthesize(
            callback: (DetailedResponse<byte[]> response, IBMError error) =>
            {
                synthesizeResponse = response.Result;
                clip = WaveFile.ParseWAV("myClip" + counter.ToString(), synthesizeResponse);
                PlayClip(clip);
            },
            text: _testString,
            //voice: "en-US_AllisonV3Voice",
            voice: "en-US_MichaelV3Voice",
            //voice: "en-US_MichaelVoice",
            accept: "audio/wav"
        );
        while (synthesizeResponse == null)
        {
            yield return null;
        }
        counter++;
        // Set the flag to know that speech processing has finished
        processStatus = "Finished";
        yield return new WaitForSeconds(clip.length);
    }
}

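The OnMessage handler and _testString field these two snippets assume aren't shown. A hypothetical handler, assuming the Assistant v2 response shape from the Watson Unity SDK (first generic reply as text):

private void OnMessage(DetailedResponse<MessageResponse> response, IBMError error)
{
    // Pull the assistant's first text reply and store it for the synthesize step.
    if (error == null &&
        response.Result.Output.Generic != null &&
        response.Result.Output.Generic.Count > 0)
    {
        _testString = response.Result.Output.Generic[0].Text;
    }
    // Release the wait loop in the calling coroutine.
    messageTested = true;
}
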
public override IEnumerator RunTest()
{
    LogSystem.InstallDefaultReactors();
    try
    {
        VcapCredentials vcapCredentials = new VcapCredentials();
        fsData data = null;
        // Get credentials from a credential file defined in environmental variables in the VCAP_SERVICES format.
        // See https://www.ibm.com/watson/developercloud/doc/common/getting-started-variables.html.
        var environmentalVariable = Environment.GetEnvironmentVariable("VCAP_SERVICES");
        var fileContent = File.ReadAllText(environmentalVariable);
        // Add in a parent object because Unity does not like to deserialize root level collection types.
        fileContent = Utility.AddTopLevelObjectToJson(fileContent, "VCAP_SERVICES");
        // Convert json to fsResult
        fsResult r = fsJsonParser.Parse(fileContent, out data);
        if (!r.Succeeded)
        {
            throw new WatsonException(r.FormattedMessages);
        }
        // Convert fsResult to VcapCredentials
        object obj = vcapCredentials;
        r = _serializer.TryDeserialize(data, obj.GetType(), ref obj);
        if (!r.Succeeded)
        {
            throw new WatsonException(r.FormattedMessages);
        }
        // Set credentials from imported credentials
        Credential credential = vcapCredentials.VCAP_SERVICES["speech_to_text"][TestCredentialIndex].Credentials;
        _username = credential.Username.ToString();
        _password = credential.Password.ToString();
        _url = credential.Url.ToString();
    }
    catch
    {
        Log.Debug("TestSpeechToText", "Failed to get credentials from VCAP_SERVICES file. Please configure credentials to run this test. For more information, see: https://github.com/watson-developer-cloud/unity-sdk/#authentication");
    }

    // Create credential and instantiate service
    Credentials credentials = new Credentials(_username, _password, _url);
    // Or authenticate using token
    //Credentials credentials = new Credentials(_url)
    //{
    //    AuthenticationToken = _token
    //};
    _speechToText = new SpeechToText(credentials);
    _customCorpusFilePath = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-stt-corpus.txt";
    _customWordsFilePath = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-stt-words.json";
    _wavFilePath = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-audio.wav";
    _audioClip = WaveFile.ParseWAV("testClip", File.ReadAllBytes(_wavFilePath));

    Log.Debug("ExampleSpeechToText", "Attempting to recognize");
    _speechToText.Recognize(_audioClip, HandleOnRecognize);
    while (!_recognizeTested)
    {
        yield return null;
    }

    // Get models
    Log.Debug("ExampleSpeechToText", "Attempting to get models");
    _speechToText.GetModels(HandleGetModels);
    while (!_getModelsTested)
    {
        yield return null;
    }

    // Get model
    Log.Debug("ExampleSpeechToText", "Attempting to get model {0}", _modelNameToGet);
    _speechToText.GetModel(HandleGetModel, _modelNameToGet);
    while (!_getModelTested)
    {
        yield return null;
    }

    // Get customizations
    Log.Debug("ExampleSpeechToText", "Attempting to get customizations");
    _speechToText.GetCustomizations(HandleGetCustomizations);
    while (!_getCustomizationsTested)
    {
        yield return null;
    }

    // Create customization
    Log.Debug("ExampleSpeechToText", "Attempting create customization");
    _speechToText.CreateCustomization(HandleCreateCustomization, "unity-test-customization", "en-US_BroadbandModel", "Testing customization unity");
    while (!_createCustomizationsTested)
    {
        yield return null;
    }

    // Get customization
    Log.Debug("ExampleSpeechToText", "Attempting to get customization {0}", _createdCustomizationID);
    _speechToText.GetCustomization(HandleGetCustomization, _createdCustomizationID);
    while (!_getCustomizationTested)
    {
        yield return null;
    }

    // Get custom corpora
    Log.Debug("ExampleSpeechToText", "Attempting to get custom corpora for {0}", _createdCustomizationID);
    _speechToText.GetCustomCorpora(HandleGetCustomCorpora, _createdCustomizationID);
    while (!_getCustomCorporaTested)
    {
        yield return null;
    }

    // Add custom corpus
    Log.Debug("ExampleSpeechToText", "Attempting to add custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
    _speechToText.AddCustomCorpus(HandleAddCustomCorpus, _createdCustomizationID, _createdCorpusName, true, _customCorpusFilePath);
    while (!_addCustomCorpusTested)
    {
        yield return null;
    }

    // Get custom corpus
    Log.Debug("ExampleSpeechToText", "Attempting to get custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
    _speechToText.GetCustomCorpus(HandleGetCustomCorpus, _createdCustomizationID, _createdCorpusName);
    while (!_getCustomCorpusTested)
    {
        yield return null;
    }

    // Wait for customization
    Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
    while (!_isCustomizationReady)
    {
        yield return null;
    }

    // Get custom words
    Log.Debug("ExampleSpeechToText", "Attempting to get custom words.");
    _speechToText.GetCustomWords(HandleGetCustomWords, _createdCustomizationID);
    while (!_getCustomWordsTested)
    {
        yield return null;
    }

    // Add custom words from path
    Log.Debug("ExampleSpeechToText", "Attempting to add custom words in customization {0} using Words json path {1}", _createdCustomizationID, _customWordsFilePath);
    string customWords = File.ReadAllText(_customWordsFilePath);
    _speechToText.AddCustomWords(HandleAddCustomWordsFromPath, _createdCustomizationID, customWords);
    while (!_addCustomWordsFromPathTested)
    {
        yield return null;
    }

    // Wait for customization
    _isCustomizationReady = false;
    Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
    while (!_isCustomizationReady)
    {
        yield return null;
    }

    // Add custom words from object
    Words words = new Words();
    List<Word> wordList = new List<Word>();
    Word w0 = new Word();
    w0.word = "mikey";
    w0.sounds_like = new string[1];
    w0.sounds_like[0] = "my key";
    w0.display_as = "Mikey";
    wordList.Add(w0);
    Word w1 = new Word();
    w1.word = "charlie";
    w1.sounds_like = new string[1];
    w1.sounds_like[0] = "char lee";
    w1.display_as = "Charlie";
    wordList.Add(w1);
    Word w2 = new Word();
    w2.word = "bijou";
    w2.sounds_like = new string[1];
    w2.sounds_like[0] = "be joo";
    w2.display_as = "Bijou";
    wordList.Add(w2);
    words.words = wordList.ToArray();
    Log.Debug("ExampleSpeechToText", "Attempting to add custom words in customization {0} using Words object", _createdCustomizationID);
    _speechToText.AddCustomWords(HandleAddCustomWordsFromObject, _createdCustomizationID, words);
    while (!_addCustomWordsFromObjectTested)
    {
        yield return null;
    }

    // Wait for customization
    _isCustomizationReady = false;
    Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
    while (!_isCustomizationReady)
    {
        yield return null;
    }

    // Get custom word
    Log.Debug("ExampleSpeechToText", "Attempting to get custom word {1} in customization {0}", _createdCustomizationID, words.words[0].word);
    _speechToText.GetCustomWord(HandleGetCustomWord, _createdCustomizationID, words.words[0].word);
    while (!_getCustomWordTested)
    {
        yield return null;
    }

    // Train customization
    Log.Debug("ExampleSpeechToText", "Attempting to train customization {0}", _createdCustomizationID);
    _speechToText.TrainCustomization(HandleTrainCustomization, _createdCustomizationID);
    while (!_trainCustomizationTested)
    {
        yield return null;
    }

    // Wait for customization
    _isCustomizationReady = false;
    Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
    while (!_isCustomizationReady)
    {
        yield return null;
    }

    // Upgrade customization - not currently implemented in the service
    //Log.Debug("ExampleSpeechToText", "Attempting to upgrade customization {0}", _createdCustomizationID);
    //_speechToText.UpgradeCustomization(HandleUpgradeCustomization, _createdCustomizationID);
    //while (!_upgradeCustomizationTested)
    //    yield return null;

    // Delete custom word
    Log.Debug("ExampleSpeechToText", "Attempting to delete custom word {1} in customization {0}", _createdCustomizationID, words.words[2].word);
    _speechToText.DeleteCustomWord(HandleDeleteCustomWord, _createdCustomizationID, words.words[2].word);
    while (!_deleteCustomWordTested)
    {
        yield return null;
    }

    // Delay
    Log.Debug("ExampleSpeechToText", string.Format("Delaying next operation for {0} sec", _delayTimeInSeconds));
    Runnable.Run(Delay(_delayTimeInSeconds));
    while (!_readyToContinue)
    {
        yield return null;
    }
    _readyToContinue = false;

    // Delete custom corpus
    Log.Debug("ExampleSpeechToText", "Attempting to delete custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
    _speechToText.DeleteCustomCorpus(HandleDeleteCustomCorpus, _createdCustomizationID, _createdCorpusName);
    while (!_deleteCustomCorpusTested)
    {
        yield return null;
    }

    // Delay
    Log.Debug("ExampleSpeechToText", string.Format("Delaying next operation for {0} sec", _delayTimeInSeconds));
    Runnable.Run(Delay(_delayTimeInSeconds));
    while (!_readyToContinue)
    {
        yield return null;
    }
    _readyToContinue = false;

    // Reset customization
    Log.Debug("ExampleSpeechToText", "Attempting to reset customization {0}", _createdCustomizationID);
    _speechToText.ResetCustomization(HandleResetCustomization, _createdCustomizationID);
    while (!_resetCustomizationTested)
    {
        yield return null;
    }

    // Delay
    Log.Debug("ExampleSpeechToText", string.Format("Delaying next operation for {0} sec", _delayTimeInSeconds));
    Runnable.Run(Delay(_delayTimeInSeconds));
    while (!_readyToContinue)
    {
        yield return null;
    }
    _readyToContinue = false;

    // Delete customization
    Log.Debug("ExampleSpeechToText", "Attempting to delete customization {0}", _createdCustomizationID);
    _speechToText.DeleteCustomization(HandleDeleteCustomization, _createdCustomizationID);
    while (!_deleteCustomizationsTested)
    {
        yield return null;
    }

    Log.Debug("ExampleSpeechToText", "Speech to Text examples complete.");
    yield break;
}

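The Delay coroutine polled throughout the test isn't shown; in the SDK samples it is just a timed flag flip, along these lines (the field name matches the call sites above):

private IEnumerator Delay(float delayTime)
{
    // Wait, then release the _readyToContinue polling loop in RunTest.
    yield return new WaitForSeconds(delayTime);
    _readyToContinue = true;
}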