Example #1
    // ------------------------------------------
    //  GOOGLE
    // ------------------------------------------

    protected String ProcessAudioStream(Stream stream, String language, String text) {
      Host.Log(this, "ProcessAudioStream: " + language + " " + text);

      // See: https://github.com/gillesdemey/google-speech-v2
      CultureInfo culture = new System.Globalization.CultureInfo(language);
      var stt = new SpeechToText("https://www.google.com/speech-api/v2/recognize?output=json&xjerr=1&client=chromium&maxresults=2&key=" + GoogleKey, culture);

      using (var audio = new MemoryStream()) {
        stream.Position = 0;
        stream.CopyTo(audio);

        audio.Position = 0;
        return stt.Recognize(audio);
      }
    }
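A minimal caller sketch (assuming the surrounding plugin class supplies Host and this ProcessAudioStream method; the file path is invented and System.IO is assumed to be imported):

    using (var file = File.OpenRead(@"C:\temp\sample.wav")) {
      // Pass the raw audio stream plus a BCP-47 language tag; the third argument is only used for logging above.
      String transcript = ProcessAudioStream(file, "en-US", "sample.wav");
      Host.Log(this, "Transcript: " + transcript);
    }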
Example #2
 private void Awake()
 {
     instance = this;
 }
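This is the common Unity singleton shortcut: Awake() stores the component in a static field so other scripts can reach it. A slightly more defensive sketch, with a hypothetical class name since the original snippet does not show the declaring class:

 public class SpeechManager : MonoBehaviour   // hypothetical name; requires using UnityEngine;
 {
     public static SpeechManager instance;

     private void Awake()
     {
         // Keep only the first instance if the scene accidentally contains duplicates.
         if (instance != null && instance != this)
         {
             Destroy(gameObject);
             return;
         }
         instance = this;
     }
 }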
Example #3
    private void InitializeServices()
    {
        Credentials asst_credentials = null;

        if (!string.IsNullOrEmpty(assistantUsername) && !string.IsNullOrEmpty(assistantPassword))
        {
            //  Authenticate using username and password
            asst_credentials = new Credentials(assistantUsername, assistantPassword, assistantURL);
        }
        else if (!string.IsNullOrEmpty(assistantIamApikey))
        {
            //  Authenticate using IAM API key
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = assistantIamApikey,
                IamUrl    = assistantIamUrl
            };

            asst_credentials = new Credentials(tokenOptions, assistantURL);
        }
        else
        {
            throw new WatsonException("Please provide either username and password or an IAM API key to authenticate the service.");
        }

        //  Instantiate the service once credentials exist (both auth paths) and be sure to give it a version date
        _assistant             = new Assistant(asst_credentials);
        _assistant.VersionDate = "2018-09-20";

        Credentials tts_credentials = null;

        if (!string.IsNullOrEmpty(TextToSpeechUsername) && !string.IsNullOrEmpty(TextToSpeechPassword))
        {
            //  Authenticate using username and password
            tts_credentials = new Credentials(TextToSpeechUsername, TextToSpeechPassword, TextToSpeechURL);
        }
        else if (!string.IsNullOrEmpty(TextToSpeechIamApikey))
        {
            //  Authenticate using IAM API key
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = TextToSpeechIamApikey,
                IamUrl    = TextToSpeechIamUrl
            };

            tts_credentials = new Credentials(tokenOptions, TextToSpeechURL);
        }
        else
        {
            throw new WatsonException("Please provide either username and password or an IAM API key to authenticate the service.");
        }

        //  Instantiate the service once credentials exist and give Watson a voice type
        _textToSpeech       = new TextToSpeech(tts_credentials);
        _textToSpeech.Voice = VoiceType.en_US_Allison;


        Credentials stt_credentials = null;

        if (!string.IsNullOrEmpty(SpeechToTextUsername) && !string.IsNullOrEmpty(SpeechToTextPassword))
        {
            //  Authenticate using username and password
            stt_credentials = new Credentials(SpeechToTextUsername, SpeechToTextPassword, SpeechToTextURL);
        }
        else if (!string.IsNullOrEmpty(SpeechToTextIamApikey))
        {
            //  Authenticate using IAM API key
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = SpeechToTextIamApikey,
                IamUrl    = SpeechToTextIamUrl
            };

            stt_credentials = new Credentials(tokenOptions, SpeechToTextURL);
        }
        else
        {
            throw new WatsonException("Please provide either username and password or an IAM API key to authenticate the service.");
        }

        //  Instantiate the service once credentials exist (both auth paths)
        _speechToText = new SpeechToText(stt_credentials);

        // Send first message, create inputObj w/ no context
        Message0();

        Active = true;

        StartRecording();   // Setup recording
    }
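The username-password-or-IAM decision above is repeated verbatim for all three services. A refactoring sketch that folds it into one helper; Credentials, TokenOptions, and WatsonException come from the example, while the helper itself is hypothetical:

    //  Hypothetical helper: build Credentials from either basic auth or an IAM API key.
    private static Credentials BuildCredentials(string username, string password,
                                                string iamApikey, string iamUrl, string serviceUrl)
    {
        if (!string.IsNullOrEmpty(username) && !string.IsNullOrEmpty(password))
            return new Credentials(username, password, serviceUrl);

        if (!string.IsNullOrEmpty(iamApikey))
        {
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = iamApikey,
                IamUrl    = iamUrl
            };
            return new Credentials(tokenOptions, serviceUrl);
        }

        throw new WatsonException("Please provide either username and password or an IAM API key to authenticate the service.");
    }

With that helper, each block above reduces to a single call, for example _speechToText = new SpeechToText(BuildCredentials(SpeechToTextUsername, SpeechToTextPassword, SpeechToTextIamApikey, SpeechToTextIamUrl, SpeechToTextURL));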
Example #4
    private IEnumerator createServices()
    {
        Credentials stt_credentials = null;

        //  Create credential and instantiate service
        if (!string.IsNullOrEmpty(speechToTextUsername) && !string.IsNullOrEmpty(speechToTextPassword))
        {
            //  Authenticate using username and password
            stt_credentials = new Credentials(speechToTextUsername, speechToTextPassword, speechToTextServiceUrl);
        }
        else if (!string.IsNullOrEmpty(speechToTextIamApikey))
        {
            //  Authenticate using iamApikey
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = speechToTextIamApikey,
                IamUrl    = speechToTextIamUrl
            };

            stt_credentials = new Credentials(tokenOptions, speechToTextServiceUrl);

            while (!stt_credentials.HasIamTokenData())
            {
                yield return(null);
            }
        }
        else
        {
            throw new WatsonException("Please provide either username and password or IAM apikey to authenticate the service.");
        }

        Credentials asst_credentials = null;

        //  Create credential and instantiate service
        if (!string.IsNullOrEmpty(assistantUsername) && !string.IsNullOrEmpty(assistantPassword))
        {
            //  Authenticate using username and password
            asst_credentials = new Credentials(assistantUsername, assistantPassword, assistantServiceUrl);
        }
        else if (!string.IsNullOrEmpty(assistantIamApikey))
        {
            //  Authenticate using iamApikey
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = assistantIamApikey,
                IamUrl    = assistantIamUrl
            };

            asst_credentials = new Credentials(tokenOptions, assistantServiceUrl);

            while (!asst_credentials.HasIamTokenData())
            {
                yield return(null);
            }
        }
        else
        {
            throw new WatsonException("Please provide either username and password or IAM apikey to authenticate the service.");
        }


        _speechToText = new SpeechToText(stt_credentials);
        _conversation = new Conversation(asst_credentials);

        _conversation.VersionDate = assistantVersionDate;
        Active = true;

        StartRecording();
    }
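Because createServices() yields while waiting for HasIamTokenData(), it must run as a coroutine rather than being called directly. A likely entry point, using the SDK's Runnable helper that appears in the other examples here (the Start() wrapper itself is an assumption):

    void Start()
    {
        LogSystem.InstallDefaultReactors();
        //  The IAM token is fetched asynchronously; the coroutine waits for it before creating the services.
        Runnable.Run(createServices());
    }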
        public override IEnumerator RunTest()
        {
            LogSystem.InstallDefaultReactors();

            VcapCredentials vcapCredentials = new VcapCredentials();
            fsData          data            = null;

            string result = null;
            string credentialsFilepath = "../sdk-credentials/credentials.json";

            //  Load credentials file if it exists. If it doesn't exist, don't run the tests.
            if (File.Exists(credentialsFilepath))
            {
                result = File.ReadAllText(credentialsFilepath);
            }
            else
            {
                yield break;
            }

            //  Add in a parent object because Unity does not like to deserialize root level collection types.
            result = Utility.AddTopLevelObjectToJson(result, "VCAP_SERVICES");

            //  Convert json to fsResult
            fsResult r = fsJsonParser.Parse(result, out data);

            if (!r.Succeeded)
            {
                throw new WatsonException(r.FormattedMessages);
            }

            //  Convert fsResult to VcapCredentials
            object obj = vcapCredentials;

            r = _serializer.TryDeserialize(data, obj.GetType(), ref obj);
            if (!r.Succeeded)
            {
                throw new WatsonException(r.FormattedMessages);
            }

            //  Set credentials from imported credentials
            Credential credential = vcapCredentials.GetCredentialByname("speech-to-text-sdk")[0].Credentials;
            //  Create credential and instantiate service
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = credential.IamApikey,
            };

            //  Create credential and instantiate service
            Credentials credentials = new Credentials(tokenOptions, credential.Url);

            //  Wait for tokendata
            while (!credentials.HasIamTokenData())
            {
                yield return(null);
            }

            _speechToText             = new SpeechToText(credentials);
            _customCorpusFilePath     = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/speech-to-text/theJabberwocky-utf8.txt";
            _customWordsFilePath      = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/speech-to-text/test-stt-words.json";
            _grammarFilePath          = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/speech-to-text/confirm.abnf";
            _acousticResourceMimeType = Utility.GetMimeType(Path.GetExtension(_acousticResourceUrl));

            Runnable.Run(DownloadAcousticResource());
            while (!_isAudioLoaded)
            {
                yield return(null);
            }

            //  Recognize
            Log.Debug("TestSpeechToText.Examples()", "Attempting to recognize");
            List <string> keywords = new List <string>();

            keywords.Add("speech");
            _speechToText.KeywordsThreshold = 0.5f;
            _speechToText.InactivityTimeout = 120;
            _speechToText.StreamMultipart   = false;
            _speechToText.Keywords          = keywords.ToArray();
            _speechToText.Recognize(HandleOnRecognize, OnFail, _acousticResourceData, _acousticResourceMimeType);
            while (!_recognizeTested)
            {
                yield return(null);
            }

            //  Get models
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get models");
            _speechToText.GetModels(HandleGetModels, OnFail);
            while (!_getModelsTested)
            {
                yield return(null);
            }

            //  Get model
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get model {0}", _modelNameToGet);
            _speechToText.GetModel(HandleGetModel, OnFail, _modelNameToGet);
            while (!_getModelTested)
            {
                yield return(null);
            }

            //  Get customizations
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get customizations");
            _speechToText.GetCustomizations(HandleGetCustomizations, OnFail);
            while (!_getCustomizationsTested)
            {
                yield return(null);
            }

            //  Create customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting create customization");
            _speechToText.CreateCustomization(HandleCreateCustomization, OnFail, "unity-test-customization", "en-US_BroadbandModel", "Testing customization unity");
            while (!_createCustomizationsTested)
            {
                yield return(null);
            }

            //  Get customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get customization {0}", _createdCustomizationID);
            _speechToText.GetCustomization(HandleGetCustomization, OnFail, _createdCustomizationID);
            while (!_getCustomizationTested)
            {
                yield return(null);
            }

            //  Get custom corpora
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get custom corpora for {0}", _createdCustomizationID);
            _speechToText.GetCustomCorpora(HandleGetCustomCorpora, OnFail, _createdCustomizationID);
            while (!_getCustomCorporaTested)
            {
                yield return(null);
            }

            //  Add custom corpus
            Log.Debug("TestSpeechToText.Examples()", "Attempting to add custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            string corpusData = File.ReadAllText(_customCorpusFilePath);

            _speechToText.AddCustomCorpus(HandleAddCustomCorpus, OnFail, _createdCustomizationID, _createdCorpusName, true, corpusData);
            while (!_addCustomCorpusTested)
            {
                yield return(null);
            }

            //  Get custom corpus
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            _speechToText.GetCustomCorpus(HandleGetCustomCorpus, OnFail, _createdCustomizationID, _createdCorpusName);
            while (!_getCustomCorpusTested)
            {
                yield return(null);
            }

            //  Wait for customization
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Get custom words
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get custom words.");
            _speechToText.GetCustomWords(HandleGetCustomWords, OnFail, _createdCustomizationID);
            while (!_getCustomWordsTested)
            {
                yield return(null);
            }

            //  Add custom words from path
            Log.Debug("TestSpeechToText.Examples()", "Attempting to add custom words in customization {0} using Words json path {1}", _createdCustomizationID, _customWordsFilePath);
            string customWords = File.ReadAllText(_customWordsFilePath);

            _speechToText.AddCustomWords(HandleAddCustomWordsFromPath, OnFail, _createdCustomizationID, customWords);
            while (!_addCustomWordsFromPathTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Add custom words from object
            Words       words    = new Words();
            Word        w0       = new Word();
            List <Word> wordList = new List <Word>();

            w0.word           = "mikey";
            w0.sounds_like    = new string[1];
            w0.sounds_like[0] = "my key";
            w0.display_as     = "Mikey";
            wordList.Add(w0);
            Word w1 = new Word();

            w1.word           = "charlie";
            w1.sounds_like    = new string[1];
            w1.sounds_like[0] = "char lee";
            w1.display_as     = "Charlie";
            wordList.Add(w1);
            Word w2 = new Word();

            w2.word           = "bijou";
            w2.sounds_like    = new string[1];
            w2.sounds_like[0] = "be joo";
            w2.display_as     = "Bijou";
            wordList.Add(w2);
            words.words = wordList.ToArray();

            Log.Debug("TestSpeechToText.Examples()", "Attempting to add custom words in customization {0} using Words object", _createdCustomizationID);
            _speechToText.AddCustomWords(HandleAddCustomWordsFromObject, OnFail, _createdCustomizationID, words);
            while (!_addCustomWordsFromObjectTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Get custom word
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get custom word {1} in customization {0}", _createdCustomizationID, words.words[0].word);
            _speechToText.GetCustomWord(HandleGetCustomWord, OnFail, _createdCustomizationID, words.words[0].word);
            while (!_getCustomWordTested)
            {
                yield return(null);
            }

            //  Train customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to train customization {0}", _createdCustomizationID);
            _speechToText.TrainCustomization(HandleTrainCustomization, OnFail, _createdCustomizationID);
            while (!_trainCustomizationTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Delete custom word
            Log.Debug("TestSpeechToText.Examples()", "Attempting to delete custom word {1} in customization {0}", _createdCustomizationID, words.words[2].word);
            _speechToText.DeleteCustomWord(HandleDeleteCustomWord, OnFail, _createdCustomizationID, words.words[2].word);
            while (!_deleteCustomWordTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("TestSpeechToText.Examples()", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Delete custom corpus
            Log.Debug("TestSpeechToText.Examples()", "Attempting to delete custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            _speechToText.DeleteCustomCorpus(HandleDeleteCustomCorpus, OnFail, _createdCustomizationID, _createdCorpusName);
            while (!_deleteCustomCorpusTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("TestSpeechToText.Examples()", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Reset customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to reset customization {0}", _createdCustomizationID);
            _speechToText.ResetCustomization(HandleResetCustomization, OnFail, _createdCustomizationID);
            while (!_resetCustomizationTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("TestSpeechToText.Examples()", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            //  List Grammars
            Log.Debug("TestSpeechToText.Examples()", "Attempting to list grammars {0}", _createdCustomizationID);
            _speechToText.ListGrammars(OnListGrammars, OnFail, _createdCustomizationID);
            while (!_listGrammarsTested)
            {
                yield return(null);
            }

            //  Add Grammar
            Log.Debug("TestSpeechToText.Examples()", "Attempting to add grammar {0}", _createdCustomizationID);
            string grammarFile = File.ReadAllText(_grammarFilePath);

            _speechToText.AddGrammar(OnAddGrammar, OnFail, _createdCustomizationID, _grammarName, grammarFile, _grammarFileContentType);
            while (!_addGrammarTested)
            {
                yield return(null);
            }

            //  Get Grammar
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get grammar {0}", _createdCustomizationID);
            _speechToText.GetGrammar(OnGetGrammar, OnFail, _createdCustomizationID, _grammarName);
            while (!_getGrammarTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Delete Grammar
            Log.Debug("TestSpeechToText.Examples()", "Attempting to delete grammar {0}", _createdCustomizationID);
            _speechToText.DeleteGrammar(OnDeleteGrammar, OnFail, _createdCustomizationID, _grammarName);
            while (!_deleteGrammarTested)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Delete customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to delete customization {0}", _createdCustomizationID);
            _speechToText.DeleteCustomization(HandleDeleteCustomization, OnFail, _createdCustomizationID);
            while (!_deleteCustomizationsTested)
            {
                yield return(null);
            }

            //  List acoustic customizations
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get acoustic customizations");
            _speechToText.GetCustomAcousticModels(HandleGetCustomAcousticModels, OnFail);
            while (!_getAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Create acoustic customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to create acoustic customization");
            _speechToText.CreateAcousticCustomization(HandleCreateAcousticCustomization, OnFail, _createdAcousticModelName);
            while (!_createAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Get acoustic customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get acoustic customization {0}", _createdAcousticModelId);
            _speechToText.GetCustomAcousticModel(HandleGetCustomAcousticModel, OnFail, _createdAcousticModelId);
            while (!_getAcousticCustomizationTested)
            {
                yield return(null);
            }

            while (!_isAudioLoaded)
            {
                yield return(null);
            }

            //  Create acoustic resource
            Log.Debug("TestSpeechToText.Examples()", "Attempting to create audio resource {1} on {0}", _createdAcousticModelId, _acousticResourceName);
            string mimeType = Utility.GetMimeType(Path.GetExtension(_acousticResourceUrl));

            _speechToText.AddAcousticResource(HandleAddAcousticResource, OnFail, _createdAcousticModelId, _acousticResourceName, mimeType, mimeType, true, _acousticResourceData);
            while (!_addAcousticResourcesTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isAcousticCustomizationReady = false;
            Runnable.Run(CheckAcousticCustomizationStatus(_createdAcousticModelId));
            while (!_isAcousticCustomizationReady)
            {
                yield return(null);
            }

            //  List acoustic resources
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get audio resources {0}", _createdAcousticModelId);
            _speechToText.GetCustomAcousticResources(HandleGetCustomAcousticResources, OnFail, _createdAcousticModelId);
            while (!_getAcousticResourcesTested)
            {
                yield return(null);
            }

            //  Train acoustic customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to train acoustic customization {0}", _createdAcousticModelId);
            _speechToText.TrainAcousticCustomization(HandleTrainAcousticCustomization, OnFail, _createdAcousticModelId, null, true);
            while (!_trainAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Get acoustic resource
            Log.Debug("TestSpeechToText.Examples()", "Attempting to get audio resource {1} from {0}", _createdAcousticModelId, _acousticResourceName);
            _speechToText.GetCustomAcousticResource(HandleGetCustomAcousticResource, OnFail, _createdAcousticModelId, _acousticResourceName);
            while (!_getAcousticResourceTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isAcousticCustomizationReady = false;
            Runnable.Run(CheckAcousticCustomizationStatus(_createdAcousticModelId));
            while (!_isAcousticCustomizationReady)
            {
                yield return(null);
            }

            //  Delete acoustic resource
            DeleteAcousticResource();
            while (!_deleteAcousticResource)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("TestSpeechToText.Examples()", string.Format("Delaying delete acoustic resource for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            //  Reset acoustic customization
            Log.Debug("TestSpeechToText.Examples()", "Attempting to reset acoustic customization {0}", _createdAcousticModelId);
            _speechToText.ResetAcousticCustomization(HandleResetAcousticCustomization, OnFail, _createdAcousticModelId);
            while (!_resetAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("TestSpeechToText.Examples()", string.Format("Delaying delete acoustic customization for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            //  Delete acoustic customization
            DeleteAcousticCustomization();
            while (!_deleteAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("TestSpeechToText.Examples()", string.Format("Delaying complete for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            Log.Debug("TestSpeechToText.RunTest()", "Speech to Text examples complete.");

            yield break;
        }
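RunTest() repeatedly waits on a Delay(...) coroutine that flips _readyToContinue, but that helper is not shown in the snippet. A minimal sketch of what it presumably looks like, based only on how it is used above:

        private IEnumerator Delay(float delayTime)
        {
            yield return new WaitForSeconds(delayTime);
            _readyToContinue = true;   // releases the while (!_readyToContinue) loops above
        }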
Example #6
        public async Task GetResponse()
        {
            try
            {
                string txt = await SpeechToText.RecordSpeechFromMicrophoneAsync();

                if (txt == "")
                {
                    return;
                }
                await AddNewMessage(MessageData.SENDER_ME, txt);

                string response = await TryGetMessageAsync(txt);

                while (response.Contains("COMMAND"))
                {
                    string[] parts = response.Split('-');

                    try
                    {
                        if (parts[1] != "")
                        {
                            await AddNewMessage(MessageData.SENDER_ASSISTANT, parts[1]);
                        }
                    }
                    catch
                    {
                        Debug.WriteLine("Only has COMMAND");
                    }

                    if (parts[0] == MSG_GETDATA)
                    {
                        //RemoteDataGetter.GetInfoFromRemoteDevice();
                        // DEMO:
                        MainPage.Current.PulseDangerZone = new SolidColorBrush(Colors.MistyRose);

                        if (MainPage.Current.PulseDangerZone != new SolidColorBrush(Colors.White))
                        {
                            parts[0] = MainPage.Current.Pulse < 1010 ? MSG_HYPERTENSION : MSG_HYPOTENSION;
                        }

                        else if (MainPage.Current.Temperature < 32 || MainPage.Current.Temperature > 40)
                        {
                            parts[0] = MainPage.Current.Temperature < 32 ? MSG_LOWTEMPERATURE : MSG_HIGHTEMPERATURE;
                        }
                    }

                    if (parts[0] == MSG_CANCELDIAL)
                    {
                        return;
                    }

                    if (parts[0] == MSG_DIALAMBULANCE)
                    {
                        try
                        {
                            await Dial.CallAsync();
                        }
                        catch
                        { }
                        return;
                    }

                    response = await TryGetMessageAsync(parts[0]);
                }

                await AddNewMessage(MessageData.SENDER_ASSISTANT, response);
            }
            catch
            {
                Debug.WriteLine("I DONT HAVE TIME FOR THIS");
            }


            /*if (!lastSent.Contains("COMMAND"))
             * {
             *  txt         = await SpeechToText.RecordSpeechFromMicrophoneAsync(); //MainPage.Current.TB_DEBUG_INPUT.Text;
             *  lastSent    = txt;
             *
             *  MessageData data = new MessageData()
             *  {
             *      Sender      = MessageData.SENDER_ME,
             *      Received    = DateTime.Now,
             *      Message     = txt
             *  };
             *
             *  MainPage.Current.AiMessages.Add(data);
             *  result = await TryGetMessageAsync(txt);
             * }
             *
             * while (true)
             * {
             *  if (result.Contains("COMMAND"))
             *  {
             *      string[] parts = result.Split('-');
             *
             *      if (parts[1] == MSG_CANCELDIAL)
             *      {
             *          /*RESET TO DEFAULT*/
            /*    return;
             * }
             *
             * if (parts[1] == MSG_DIALAMBULANCE)
             * {
             *  await Dial.CallAsync();
             *  return;
             * }
             *
             * MessageData data = new MessageData()
             * {
             *  Sender      = MessageData.SENDER_ASSISTANT,
             *  Received    = DateTime.Now,
             *  Message     = parts[2]
             * };
             *
             * MainPage.Current.AiMessages.Add(data);
             *
             * await SpeechToText.TextToSpeechAsync(MainPage.Current.element, parts[2]);
             *
             * if (parts[1] == "getData")
             * {
             *  /*TRY TO EVALUATE DATA*/

            /*    parts[1] = MSG_LOWTEMPERATURE;
             * }
             *
             * result = await TryGetMessageAsync(parts[1]);
             * }
             * else
             * {
             * MessageData data = new MessageData()
             * {
             *  Sender      = MessageData.SENDER_ASSISTANT,
             *  Received    = DateTime.Now,
             *  Message     = result
             * };
             *
             * MainPage.Current.AiMessages.Add(data);
             * await SpeechToText.TextToSpeechAsync(MainPage.Current.element, result);
             * }
             * }*/
        }
Example #7
/**
**/

using UnityEngine;
using System.Collections;
using IBM.Watson.DeveloperCloud.Logging;
using IBM.Watson.DeveloperCloud.Services.SpeechToText.v1;
using IBM.Watson.DeveloperCloud.Utilities;
using IBM.Watson.DeveloperCloud.DataTypes;
using System.Collections.Generic;
using UnityEngine.UI;

using IBM.Watson.DeveloperCloud.Services.TextToSpeech.v1;
using IBM.Watson.DeveloperCloud.Connection;
using System;

public class ExampleStreaming : MonoBehaviour
{

    // STT - BURNER CREDS - DELETE AFTER RECORDING
    private string _username_STT = "";
    private string _password_STT = "";
    private string _url_STT = "https://stream.watsonplatform.net/speech-to-text/api";
    public Text ResultsField;

    private int _recordingRoutine = 0;
    private string _microphoneID = null;
    private AudioClip _recording = null;
    private int _recordingBufferSize = 1;
    private int _recordingHZ = 22050;

    private SpeechToText _speechToText;

    // TEXT TO SPEECH - BURNER CREDENTIALS FOR PUBLIC DEMO I WILL DELETE AFTER RECORDING
    private string _username_TTS = "";
    private string _password_TTS = "";
    private string _url_TTS = "https://stream.watsonplatform.net/text-to-speech/api";

    TextToSpeech _textToSpeech;

    //string _testString = "<speak version=\"1.0\"><say-as interpret-as=\"letters\">I'm sorry</say-as>. <prosody pitch=\"150Hz\">This is Text to Speech!</prosody><express-as type=\"GoodNews\">I'm sorry. This is Text to Speech!</express-as></speak>";

    /// TEST STRINGS OK

    // Pitch Shifting
    //string _testString = "<speak version=\"1.0\"><prosody pitch=\"150Hz\">This is Text to Speech!</prosody></speak>";
    //string _testString = "<speak version=\"1.0\"><prosody pitch=\"250Hz\">This is Text to Speech!</prosody></speak>";
    //string _testString = "<speak version=\"1.0\"><prosody pitch=\"350Hz\">This is Text to Speech!</prosody></speak>";
    //string _testString = "<speak version=\"1.0\"><prosody pitch=\"350Hz\">hi</prosody></speak>";

    // Good news and sorrow and uncertainty - ref https://console.bluemix.net/docs/services/text-to-speech/SSML-expressive.html#expressive
    // <express-as type="GoodNews">This is Text to Speech!</express-as>
    string _testString = "<speak version=\"1.0\"><express-as type=\"GoodNews\">Hello! Good News! Text to Speech is Working!</express-as></speak>";
    //string _testString = "<speak version=\"1.0\"><express-as type=\"Apology\">I am terribly sorry for the quality of service you have received.</express-as></speak>";
    //string _testString = "<speak version=\"1.0\"><express-as type=\"Uncertainty\">Can you please explain it again? I am not sure I understand.</express-as></speak>";

    //string _testString = "<speak version=\"1.0\"><prosody pitch=\\\"350Hz\\\"><express-as type=\"Uncertainty\">Can you please explain it again? I am confused and I'm not sure I understand.</express-as></prosody></speak>";


    string _createdCustomizationId;
    CustomVoiceUpdate _customVoiceUpdate;
    string _customizationName = "unity-example-customization";
    string _customizationLanguage = "en-US";
    string _customizationDescription = "A text to speech voice customization created within Unity.";
    string _testWord = "Watson";

    private bool _synthesizeTested = false;
    private bool _getVoicesTested = false;
    private bool _getVoiceTested = false;
    private bool _getPronuciationTested = false;
    private bool _getCustomizationsTested = false;
    private bool _createCustomizationTested = false;
    private bool _deleteCustomizationTested = false;
    private bool _getCustomizationTested = false;
    private bool _updateCustomizationTested = false;
    private bool _getCustomizationWordsTested = false;
    private bool _addCustomizationWordsTested = false;
    private bool _deleteCustomizationWordTested = false;
    private bool _getCustomizationWordTested = false;



    void Start()
    {
        LogSystem.InstallDefaultReactors();

        //  Create credential and instantiate service
        Credentials credentials_STT = new Credentials(_username_STT, _password_STT, _url_STT);
        Credentials credentials_TTS = new Credentials(_username_TTS, _password_TTS, _url_TTS);

        _speechToText = new SpeechToText(credentials_STT);
        _textToSpeech = new TextToSpeech(credentials_TTS);

        Active = true;
        StartRecording();

        Runnable.Run(Examples());

    }

    public bool Active
    {
        get { return _speechToText.IsListening; }
        set
        {
            if (value && !_speechToText.IsListening)
            {
                _speechToText.DetectSilence = true;
                _speechToText.EnableWordConfidence = true;
                _speechToText.EnableTimestamps = true;
                _speechToText.SilenceThreshold = 0.01f;
                _speechToText.MaxAlternatives = 0;
                _speechToText.EnableInterimResults = true;
                _speechToText.OnError = OnError;
                _speechToText.InactivityTimeout = -1;
                _speechToText.ProfanityFilter = false;
                _speechToText.SmartFormatting = true;
                _speechToText.SpeakerLabels = false;
                _speechToText.WordAlternativesThreshold = null;
                _speechToText.StartListening(OnRecognize, OnRecognizeSpeaker);
            }
            else if (!value && _speechToText.IsListening)
            {
                _speechToText.StopListening();
            }
        }
    }

 
    private void StartRecording()
    {
        if (_recordingRoutine == 0)
        {
            UnityObjectUtil.StartDestroyQueue();
            _recordingRoutine = Runnable.Run(RecordingHandler());
        }
    }

    private void StopRecording()
    {
        if (_recordingRoutine != 0)
        {
            Microphone.End(_microphoneID);
            Runnable.Stop(_recordingRoutine);
            _recordingRoutine = 0;
        }
    }

    private void OnError(string error)
    {
        Active = false;

        Log.Debug("ExampleStreaming.OnError()", "Error! {0}", error);
    }

    private IEnumerator RecordingHandler()
    {
        Log.Debug("ExampleStreaming.RecordingHandler()", "devices: {0}", Microphone.devices);
        _recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
        yield return null;      // let _recordingRoutine get set..

        if (_recording == null)
        {
            StopRecording();
            yield break;
        }

        bool bFirstBlock = true;
        int midPoint = _recording.samples / 2;
        float[] samples = null;

        while (_recordingRoutine != 0 && _recording != null)
        {
            int writePos = Microphone.GetPosition(_microphoneID);
            if (writePos > _recording.samples || !Microphone.IsRecording(_microphoneID))
            {
                Log.Error("ExampleStreaming.RecordingHandler()", "Microphone disconnected.");

                StopRecording();
                yield break;
            }

            if ((bFirstBlock && writePos >= midPoint)
              || (!bFirstBlock && writePos < midPoint))
            {
                // front block is recorded, make a RecordClip and pass it onto our callback.
                samples = new float[midPoint];
                _recording.GetData(samples, bFirstBlock ? 0 : midPoint);

                AudioData record = new AudioData();
                record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
                record.Clip = AudioClip.Create("Recording", midPoint, _recording.channels, _recordingHZ, false);
                record.Clip.SetData(samples, 0);

                _speechToText.OnListen(record);

                bFirstBlock = !bFirstBlock;
            }
            else
            {
                // calculate the number of samples remaining until we're ready for a block of audio,
                // and wait the amount of time it will take to record them.
                int remaining = bFirstBlock ? (midPoint - writePos) : (_recording.samples - writePos);
                float timeRemaining = (float)remaining / (float)_recordingHZ;

                yield return new WaitForSeconds(timeRemaining);
            }

        }

        yield break;
    }

    private void OnRecognize(SpeechRecognitionEvent result, Dictionary<string, object> customData)
    {
        if (result != null && result.results.Length > 0)
        {
            foreach (var res in result.results)
            {
                foreach (var alt in res.alternatives)
                {
                    string text = string.Format("{0} ({1}, {2:0.00})\n", alt.transcript, res.final ? "Final" : "Interim", alt.confidence);
                    Log.Debug("ExampleStreaming.OnRecognize()", text);
                    ResultsField.text = text;

                    if (alt.transcript.Contains("inhabitants") && ResultsField.text.Contains("Final")) // needs to be final or ECHO happens
                    {
                        _testString = "<speak version=\"1.0\"><express-as type=\"GoodNews\">The original inhabitants of Gippsland, the Kurnai Aboriginal people, have lived in this region for 20,000 years. </express-as></speak>";
                        Runnable.Run(Examples());

                    }
                    if (alt.transcript.Contains("immigration") && ResultsField.text.Contains("Final")) // needs to be final or ECHO happens
                    {
                        _testString = "<speak version=\"1.0\"><prosody pitch=\\\"350Hz\\\"><express-as type=\"GoodNews\">Oh The next big immigration wave was the selectors who moved in from 1875 onwards to set up small dairy farms all over Gippsland, but mainly in the Strzelecki ranges of west and south Gippsland</express-as></prosody></speak>";
                        Runnable.Run(Examples());
                    }  // Cannot echo the trigger condition, or the recognizer would hear its own output and loop

                    if (alt.transcript.Contains("happy") && ResultsField.text.Contains("Final")) // needs to be final or ECHO happens
                    {
                        _testString = "<speak version=\"1.0\"><prosody pitch=\\\"250Hz\\\"><express-as type=\"GoodNews\">It is so glad to hear that!</express-as></prosody></speak>";
                        Runnable.Run(Examples());
                    }  // Cannot echo the trigger condition, or the recognizer would hear its own output and loop

                }

                if (res.keywords_result != null && res.keywords_result.keyword != null)
                {
                    foreach (var keyword in res.keywords_result.keyword)
                    {
                        Log.Debug("ExampleStreaming.OnRecognize()", "keyword: {0}, confidence: {1}, start time: {2}, end time: {3}", keyword.normalized_text, keyword.confidence, keyword.start_time, keyword.end_time);
                    }
                }

                if (res.word_alternatives != null)
                {
                    foreach (var wordAlternative in res.word_alternatives)
                    {
                        Log.Debug("ExampleStreaming.OnRecognize()", "Word alternatives found. Start time: {0} | EndTime: {1}", wordAlternative.start_time, wordAlternative.end_time);
                        foreach (var alternative in wordAlternative.alternatives)
                            Log.Debug("ExampleStreaming.OnRecognize()", "\t word: {0} | confidence: {1}", alternative.word, alternative.confidence);
                    }
                }
            }
        }
    }

    private void OnRecognizeSpeaker(SpeakerRecognitionEvent result, Dictionary<string, object> customData)
    {
        //throw new NotImplementedException();
        if (result != null)
        {
            foreach (SpeakerLabelsResult labelResult in result.speaker_labels)
            {
                Log.Debug("ExampleStreaming.OnRecognize()", string.Format("speaker result: {0} | confidence: {3} | from: {1} | to: {2}", labelResult.speaker, labelResult.from, labelResult.to, labelResult.confidence));
            }
        }
    }


    // TTS CODE
    private IEnumerator Examples()
    {
        //  Synthesize
        Log.Debug("ExampleTextToSpeech.Examples()", "Attempting synthesize.");
        _textToSpeech.Voice = VoiceType.en_US_Allison;
        _textToSpeech.ToSpeech(HandleToSpeechCallback, OnFail, _testString, true);
        while (!_synthesizeTested)
            yield return null;

        //  Get Voices
        Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to get voices.");
        _textToSpeech.GetVoices(OnGetVoices, OnFail);
        while (!_getVoicesTested)
            yield return null;

        //  Get Voice
        Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to get voice {0}.", VoiceType.en_US_Allison);
        _textToSpeech.GetVoice(OnGetVoice, OnFail, VoiceType.en_US_Allison);
        while (!_getVoiceTested)
            yield return null;

        //  Get Pronunciation
        Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to get pronunciation of {0}", _testWord);
        _textToSpeech.GetPronunciation(OnGetPronunciation, OnFail, _testWord, VoiceType.en_US_Allison);
        while (!_getPronuciationTested)
            yield return null;

        //  Get Customizations
        //      Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to get a list of customizations");
        //      _textToSpeech.GetCustomizations(OnGetCustomizations, OnFail);
        //      while (!_getCustomizationsTested)
        //          yield return null;

        //  Create Customization
        //      Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to create a customization");
        //      _textToSpeech.CreateCustomization(OnCreateCustomization, OnFail, _customizationName, _customizationLanguage, _customizationDescription);
        //      while (!_createCustomizationTested)
        //          yield return null;

        //  Get Customization
        //      Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to get a customization");
        //      if (!_textToSpeech.GetCustomization(OnGetCustomization, OnFail, _createdCustomizationId))
        //          Log.Debug("ExampleTextToSpeech.Examples()", "Failed to get custom voice model!");
        //      while (!_getCustomizationTested)
        //          yield return null;

        //  Update Customization
        //      Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to update a customization");
        //      Word[] wordsToUpdateCustomization =
        //      {
        //          new Word()
        //          {
        //              word = "hello",
        //              translation = "hullo"
        //          },
        //          new Word()
        //          {
        //              word = "goodbye",
        //              translation = "gbye"
        //          },
        //          new Word()
        //          {
        //              word = "hi",
        //              translation = "ohioooo"
        //          }
        //      };

        //      _customVoiceUpdate = new CustomVoiceUpdate()
        //      {
        //          words = wordsToUpdateCustomization,
        //          description = "My updated description",
        //          name = "My updated name"
        //      };
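        //  NOTE: the calls below rely on _createdCustomizationId and _customVoiceUpdate being set by the
        //  CreateCustomization / CustomVoiceUpdate steps that are commented out above; with those steps
        //  disabled they will fail with a null customization id, so re-enable them before running this part.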

        if (!_textToSpeech.UpdateCustomization(OnUpdateCustomization, OnFail, _createdCustomizationId, _customVoiceUpdate))
            Log.Debug("ExampleTextToSpeech.Examples()", "Failed to update customization!");
        while (!_updateCustomizationTested)
            yield return null;

        //  Get Customization Words
        //      Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to get a customization's words");
        //      if (!_textToSpeech.GetCustomizationWords(OnGetCustomizationWords, OnFail, _createdCustomizationId))
        //          Log.Debug("ExampleTextToSpeech.GetCustomizationWords()", "Failed to get {0} words!", _createdCustomizationId);
        //      while (!_getCustomizationWordsTested)
        //          yield return null;

        //  Add Customization Words
        //      Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to add words to a customization");
        //      Word[] wordArrayToAddToCustomization =
        //      {
        //          new Word()
        //          {
        //              word = "bananna",
        //              translation = "arange"
        //          },
        //          new Word()
        //          {
        //              word = "orange",
        //              translation = "gbye"
        //          },
        //          new Word()
        //          {
        //              word = "tomato",
        //              translation = "tomahto"
        //          }
        //      };

        //      Words wordsToAddToCustomization = new Words()
        //      {
        //          words = wordArrayToAddToCustomization
        //      };

        //      if (!_textToSpeech.AddCustomizationWords(OnAddCustomizationWords, OnFail, _createdCustomizationId, wordsToAddToCustomization))
        //          Log.Debug("ExampleTextToSpeech.AddCustomizationWords()", "Failed to add words to {0}!", _createdCustomizationId);
        //      while (!_addCustomizationWordsTested)
        //          yield return null;

        //  Get Customization Word
        //      Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to get the translation of a custom voice model's word.");
        //      string customIdentifierWord = wordsToUpdateCustomization[0].word;
        //      if (!_textToSpeech.GetCustomizationWord(OnGetCustomizationWord, OnFail, _createdCustomizationId, customIdentifierWord))
        //          Log.Debug("ExampleTextToSpeech.GetCustomizationWord()", "Failed to get the translation of {0} from {1}!", customIdentifierWord, _createdCustomizationId);
        //      while (!_getCustomizationWordTested)
        //          yield return null;

        //  Delete Customization Word
        Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to delete customization word from custom voice model.");
        string wordToDelete = "goodbye";
        if (!_textToSpeech.DeleteCustomizationWord(OnDeleteCustomizationWord, OnFail, _createdCustomizationId, wordToDelete))
            Log.Debug("ExampleTextToSpeech.DeleteCustomizationWord()", "Failed to delete {0} from {1}!", wordToDelete, _createdCustomizationId);
        while (!_deleteCustomizationWordTested)
            yield return null;

        //  Delete Customization
        Log.Debug("ExampleTextToSpeech.Examples()", "Attempting to delete a customization");
        if (!_textToSpeech.DeleteCustomization(OnDeleteCustomization, OnFail, _createdCustomizationId))
            Log.Debug("ExampleTextToSpeech.DeleteCustomization()", "Failed to delete custom voice model!");
        while (!_deleteCustomizationTested)
            yield return null;

        Log.Debug("ExampleTextToSpeech.Examples()", "Text to Speech examples complete.");
    }






    void HandleToSpeechCallback(AudioClip clip, Dictionary<string, object> customData = null)
    {
        PlayClip(clip);
    }

    private void PlayClip(AudioClip clip)
    {
        if (Application.isPlaying && clip != null)
        {
            GameObject audioObject = new GameObject("AudioObject");
            AudioSource source = audioObject.AddComponent<AudioSource>();
            source.spatialBlend = 0.0f;
            source.loop = false;
            source.clip = clip;
            source.Play();

            Destroy(audioObject, clip.length);

            _synthesizeTested = true;
        }
    }

    private void OnGetVoices(Voices voices, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnGetVoices()", "Text to Speech - Get voices response: {0}", customData["json"].ToString());
        _getVoicesTested = true;
    }

    private void OnGetVoice(Voice voice, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnGetVoice()", "Text to Speech - Get voice  response: {0}", customData["json"].ToString());
        _getVoiceTested = true;
    }

    private void OnGetPronunciation(Pronunciation pronunciation, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnGetPronunciation()", "Text to Speech - Get pronunciation response: {0}", customData["json"].ToString());
        _getPronuciationTested = true;
    }

    //  private void OnGetCustomizations(Customizations customizations, Dictionary<string, object> customData = null)
    //  {
    //      Log.Debug("ExampleTextToSpeech.OnGetCustomizations()", "Text to Speech - Get customizations response: {0}", customData["json"].ToString());
    //      _getCustomizationsTested = true;
    //  }

    //  private void OnCreateCustomization(CustomizationID customizationID, Dictionary<string, object> customData = null)
    //  {
    //      Log.Debug("ExampleTextToSpeech.OnCreateCustomization()", "Text to Speech - Create customization response: {0}", customData["json"].ToString());
    //      _createdCustomizationId = customizationID.customization_id;
    //      _createCustomizationTested = true;
    //  }

    private void OnDeleteCustomization(bool success, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnDeleteCustomization()", "Text to Speech - Delete customization response: {0}", customData["json"].ToString());
        _createdCustomizationId = null;
        _deleteCustomizationTested = true;
    }

    //  private void OnGetCustomization(Customization customization, Dictionary<string, object> customData = null)
    //  {
    //      Log.Debug("ExampleTextToSpeech.OnGetCustomization()", "Text to Speech - Get customization response: {0}", customData["json"].ToString());
    //      _getCustomizationTested = true;
    //  }

    private void OnUpdateCustomization(bool success, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnUpdateCustomization()", "Text to Speech - Update customization response: {0}", customData["json"].ToString());
        _updateCustomizationTested = true;
    }

    //  private void OnGetCustomizationWords(Words words, Dictionary<string, object> customData = null)
    //  {
    //      Log.Debug("ExampleTextToSpeech.OnGetCustomizationWords()", "Text to Speech - Get customization words response: {0}", customData["json"].ToString());
    //      _getCustomizationWordsTested = true;
    //  }

    private void OnAddCustomizationWords(bool success, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnAddCustomizationWords()", "Text to Speech - Add customization words response: {0}", customData["json"].ToString());
        _addCustomizationWordsTested = true;
    }

    private void OnDeleteCustomizationWord(bool success, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnDeleteCustomizationWord()", "Text to Speech - Delete customization word response: {0}", customData["json"].ToString());
        _deleteCustomizationWordTested = true;
    }

    private void OnGetCustomizationWord(Translation translation, Dictionary<string, object> customData = null)
    {
        Log.Debug("ExampleTextToSpeech.OnGetCustomizationWord()", "Text to Speech - Get customization word response: {0}", customData["json"].ToString());
        _getCustomizationWordTested = true;
    }

    private void OnFail(RESTConnector.Error error, Dictionary<string, object> customData)
    {
        Log.Error("ExampleTextToSpeech.OnFail()", "Error received: {0}", error.ToString());
    }

}
Example #8
        private void listBox_SelectionChanged(object sender, SelectionChangedEventArgs e)

        {
            SpeechToText.ConverSpeechToText();
        }
        // Please set the following connection strings in app.config for this WebJob to run:
        // AzureWebJobsDashboard and AzureWebJobsStorage
        static void Main()
        {
            try
            {
                Console.WriteLine("*** STT stating ***");
                DBConnector     dbConnector     = new DBConnector();
                GoogleConnector googleConnector = new GoogleConnector();

                string AZURE_TEMP_FOLDER_PATH = ConfigurationManager.AppSettings["AZURE_TEMP_FOLDER_PATH"];
                var    groups = dbConnector.AllBlobNotProcessed((int)MediaExtensions.WAV, ApiSourceId.Both).GroupBy(i => i.InterviewID).ToList();
                Console.WriteLine("Not processed wavs groups count : " + groups.Count());
                if (groups.Count() > 0)
                {
                    AzureConnector azureConnector = new AzureConnector();
                    foreach (var group in groups)
                    {
                        if (group.Count() > 0)
                        {
                            var tempPath = String.Empty;
                            foreach (var item in group)
                            {
                                Console.WriteLine("-----------------------------------------------------");
                                Console.WriteLine(item.MediaURL);
                                List <Microsoft.CognitiveServices.Speech.SpeechRecognitionResult> recognizedItems = new List <Microsoft.CognitiveServices.Speech.SpeechRecognitionResult>();

                                var tempFolderName = Guid.NewGuid().ToString();
                                tempPath = System.IO.Path.Combine(AZURE_TEMP_FOLDER_PATH, item.InterviewID.ToString());

                                Console.WriteLine("Interviw LanguageID :" + item.LanguageID);
                                Console.WriteLine("InterviwID : " + item.InterviewID);
                                Console.WriteLine("MediaShortName : ", item.MediaShortName);

                                var apisource = dbConnector.GetInterviewApiSource(item.InterviewID, item.LanguageID);
                                if (apisource != null)
                                {
                                    List <SpeechToText> list = new List <SpeechToText>();
                                    if (apisource.SourceID.Equals(ApiSourceId.Azure))
                                    {
                                        if (item.ApiSourceID == ApiSourceId.Azure)
                                        {
                                            #region STT Microsoft
                                            //Only select Question Order
                                            var questionOrder = item.MediaShortName.Substring(item.MediaShortName.Length - 5, 1);

                                            Stream stream = azureConnector.GetMediaBlob(item.MediaURL);

                                            var temp_audio_file = SharedHelper.CreateFileLocaly(stream, tempFolderName, tempPath);

                                            SpeechRecognitionMS.ContinuousRecognitionWithFileAsync(temp_audio_file, item.LanguageID, recognizedItems).Wait();

                                            foreach (var recognitionResult in recognizedItems)
                                            {
                                                var text   = recognitionResult.Text;
                                                var result = SharedHelper.BuildSentimentKeyWordsScoreAsync(text, item.LanguageID);
                                                var sTT    = new SpeechToText()
                                                {
                                                    Confidence     = "Microsoft",
                                                    Text           = text,
                                                    SentimentScore = result.Result.Score,
                                                    KeyWords       = result.Result.KeyWords,
                                                };
                                                list.Add(sTT);
                                            }

                                            var jsonResult = Newtonsoft.Json.JsonConvert.SerializeObject(list);

                                            int quest = 0;
                                            if (int.TryParse(questionOrder, out quest))
                                            {
                                                dbConnector.AddSpeechToText(item.InterviewID, quest, jsonResult);
                                            }
                                            #endregion
                                        }
                                    }
                                    else
                                    {
                                        if (item.ApiSourceID == ApiSourceId.Google)
                                        {
                                            #region STT Google
                                            // Google STT still to be tested; afterwards delete the old web job
                                            //Only select Question Order

                                            var questionOrder = item.MediaShortName.Substring(item.MediaShortName.Length - 5, 1);
                                            Console.WriteLine("QuestionOrder : " + questionOrder);
                                            Console.WriteLine("Media Url : " + item.MediaURL);

                                            var response = SpeechRecognitionGoogle.stt_google(item.MediaShortName, item.LanguageID);
                                            if (response != null)
                                            {
                                                foreach (var result in response.Results)
                                                {
                                                    foreach (var alternative in result.Alternatives)
                                                    {
                                                        var text      = alternative.Transcript;
                                                        var result_ms = SharedHelper.BuildSentimentKeyWordsScoreAsync(text, String.IsNullOrEmpty(item.LanguageID) ? System.Configuration.ConfigurationManager.AppSettings["LanguageCode"] : item.LanguageID);
                                                        Console.WriteLine($"currrent google speech to text : {alternative.Transcript}");
                                                        var sTT = new SpeechToText()
                                                        {
                                                            Confidence     = "google",
                                                            Text           = text,
                                                            SentimentScore = result_ms.Result.Score,
                                                            Order          = int.Parse(questionOrder),
                                                            KeyWords       = result_ms.Result.KeyWords,
                                                        };
                                                        list.Add(sTT);
                                                    }
                                                }
                                            }

                                            var jsonResult = Newtonsoft.Json.JsonConvert.SerializeObject(list);
                                            Console.WriteLine($"Json google speech: {jsonResult}");
                                            dbConnector.AddSpeechToText(item.InterviewID, int.Parse(questionOrder), jsonResult, (int)ApiSourceId.Google);
                                            #endregion
                                        }
                                    }
                                }

                                dbConnector.UpdateInterviewMediaStatus(item.MediaURL, 1);
                                // TODO: Delete wavs from Google storage at the end of the process
                                googleConnector.DeleteObject(item.MediaShortName);

                                var _result = dbConnector.AllBlobProcessed(item.InterviewID, (int)MediaExtensions.WAV);
                                if (_result == "TRUE")
                                {
                                    dbConnector.InsertOrUpdateAVTaskTracker(DBOperationType.Update.ToString(), "", item.InterviewID, TaskLabel.texte.ToString(), (int)StatusId.Finished, 0);
                                }
                            }
                            DeleteTempFiles_If_All_Files_Are_treated(dbConnector, ApiSourceId.Azure, group.FirstOrDefault().InterviewID, tempPath);
                        }
                    }
                }
            }
            catch (Exception e)
            {
                SharedHelper.GetFullException(e);
            }
            Console.WriteLine("***GoogleRecognizeSST Finished ***");
        }
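
For reference, a minimal app.config sketch for this WebJob, assuming only the settings actually read above: the two connection-string names come from the comment before Main(), and the app-setting keys come from the ConfigurationManager calls; all values are placeholders, not the original configuration.

<configuration>
  <connectionStrings>
    <!-- Required by the Azure WebJobs SDK, per the comment above Main() -->
    <add name="AzureWebJobsDashboard" connectionString="DefaultEndpointsProtocol=https;AccountName=YOUR_ACCOUNT;AccountKey=YOUR_KEY" />
    <add name="AzureWebJobsStorage" connectionString="DefaultEndpointsProtocol=https;AccountName=YOUR_ACCOUNT;AccountKey=YOUR_KEY" />
  </connectionStrings>
  <appSettings>
    <!-- Local folder where downloaded WAV blobs are staged before recognition -->
    <add key="AZURE_TEMP_FOLDER_PATH" value="D:\local\Temp\stt" />
    <!-- Fallback language code used when item.LanguageID is empty (Google branch) -->
    <add key="LanguageCode" value="en-US" />
  </appSettings>
</configuration>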
Exemplo n.º 10
0
        /// <summary>
        /// Converts microphone speech to text.
        /// </summary>
        /// <returns>A Task&lt;string&gt; containing the recognized text.</returns>
        private static async Task <string> StartSpeechToText()
        {
            SpeechToText obj = new SpeechToText();

            return(await obj.ConvertSpeechtoTextFromMicrophone());
        }
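
A minimal usage sketch for the helper above: the button handler name and the output TextBlock are illustrative assumptions, not part of the original example.

        // Hypothetical caller: await the helper and show the recognized text.
        private async void RecognizeButton_Click(object sender, RoutedEventArgs e)
        {
            // StartSpeechToText() is the helper defined above; it wraps
            // ConvertSpeechtoTextFromMicrophone() on a SpeechToText instance.
            string recognizedText = await StartSpeechToText();

            // OutputTextBlock is an assumed UI element used here only for illustration.
            OutputTextBlock.Text = recognizedText;
        }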
Exemplo n.º 11
0
        public override IEnumerator RunTest()
        {
            LogSystem.InstallDefaultReactors();

            try
            {
                VcapCredentials vcapCredentials = new VcapCredentials();
                fsData          data            = null;

                //  Get credentials from a credential file defined in environmental variables in the VCAP_SERVICES format.
                //  See https://www.ibm.com/watson/developercloud/doc/common/getting-started-variables.html.
                var environmentalVariable = Environment.GetEnvironmentVariable("VCAP_SERVICES");
                var fileContent           = File.ReadAllText(environmentalVariable);

                //  Add in a parent object because Unity does not like to deserialize root level collection types.
                fileContent = Utility.AddTopLevelObjectToJson(fileContent, "VCAP_SERVICES");

                //  Convert json to fsResult
                fsResult r = fsJsonParser.Parse(fileContent, out data);
                if (!r.Succeeded)
                {
                    throw new WatsonException(r.FormattedMessages);
                }

                //  Convert fsResult to VcapCredentials
                object obj = vcapCredentials;
                r = _serializer.TryDeserialize(data, obj.GetType(), ref obj);
                if (!r.Succeeded)
                {
                    throw new WatsonException(r.FormattedMessages);
                }

                //  Set credentials from imported credentials
                Credential credential = vcapCredentials.VCAP_SERVICES["speech_to_text"][TestCredentialIndex].Credentials;
                _username = credential.Username.ToString();
                _password = credential.Password.ToString();
                _url      = credential.Url.ToString();
            }
            catch
            {
                Log.Debug("TestSpeechToText", "Failed to get credentials from VCAP_SERVICES file. Please configure credentials to run this test. For more information, see: https://github.com/watson-developer-cloud/unity-sdk/#authentication");
            }

            //  Create credential and instantiate service
            Credentials credentials = new Credentials(_username, _password, _url);

            //  Or authenticate using token
            //Credentials credentials = new Credentials(_url)
            //{
            //    AuthenticationToken = _token
            //};

            _speechToText         = new SpeechToText(credentials);
            _customCorpusFilePath = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-stt-corpus.txt";
            _customWordsFilePath  = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-stt-words.json";
            _wavFilePath          = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-audio.wav";
            _audioClip            = WaveFile.ParseWAV("testClip", File.ReadAllBytes(_wavFilePath));

            Log.Debug("ExampleSpeechToText", "Attempting to recognize");
            _speechToText.Recognize(_audioClip, HandleOnRecognize);
            while (!_recognizeTested)
            {
                yield return(null);
            }

            //  Get models
            Log.Debug("ExampleSpeechToText", "Attempting to get models");
            _speechToText.GetModels(HandleGetModels);
            while (!_getModelsTested)
            {
                yield return(null);
            }

            //  Get model
            Log.Debug("ExampleSpeechToText", "Attempting to get model {0}", _modelNameToGet);
            _speechToText.GetModel(HandleGetModel, _modelNameToGet);
            while (!_getModelTested)
            {
                yield return(null);
            }

            //  Get customizations
            Log.Debug("ExampleSpeechToText", "Attempting to get customizations");
            _speechToText.GetCustomizations(HandleGetCustomizations);
            while (!_getCustomizationsTested)
            {
                yield return(null);
            }

            //  Create customization
            Log.Debug("ExampleSpeechToText", "Attempting create customization");
            _speechToText.CreateCustomization(HandleCreateCustomization, "unity-test-customization", "en-US_BroadbandModel", "Testing customization unity");
            while (!_createCustomizationsTested)
            {
                yield return(null);
            }

            //  Get customization
            Log.Debug("ExampleSpeechToText", "Attempting to get customization {0}", _createdCustomizationID);
            _speechToText.GetCustomization(HandleGetCustomization, _createdCustomizationID);
            while (!_getCustomizationTested)
            {
                yield return(null);
            }

            //  Get custom corpora
            Log.Debug("ExampleSpeechToText", "Attempting to get custom corpora for {0}", _createdCustomizationID);
            _speechToText.GetCustomCorpora(HandleGetCustomCorpora, _createdCustomizationID);
            while (!_getCustomCorporaTested)
            {
                yield return(null);
            }

            //  Add custom corpus
            Log.Debug("ExampleSpeechToText", "Attempting to add custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            _speechToText.AddCustomCorpus(HandleAddCustomCorpus, _createdCustomizationID, _createdCorpusName, true, _customCorpusFilePath);
            while (!_addCustomCorpusTested)
            {
                yield return(null);
            }

            //  Get custom corpus
            Log.Debug("ExampleSpeechToText", "Attempting to get custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            _speechToText.GetCustomCorpus(HandleGetCustomCorpus, _createdCustomizationID, _createdCorpusName);
            while (!_getCustomCorpusTested)
            {
                yield return(null);
            }

            //  Wait for customization
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Get custom words
            Log.Debug("ExampleSpeechToText", "Attempting to get custom words.");
            _speechToText.GetCustomWords(HandleGetCustomWords, _createdCustomizationID);
            while (!_getCustomWordsTested)
            {
                yield return(null);
            }

            //  Add custom words from path
            Log.Debug("ExampleSpeechToText", "Attempting to add custom words in customization {0} using Words json path {1}", _createdCustomizationID, _customWordsFilePath);
            string customWords = File.ReadAllText(_customWordsFilePath);

            _speechToText.AddCustomWords(HandleAddCustomWordsFromPath, _createdCustomizationID, customWords);
            while (!_addCustomWordsFromPathTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Add custom words from object
            Words       words    = new Words();
            Word        w0       = new Word();
            List <Word> wordList = new List <Word>();

            w0.word           = "mikey";
            w0.sounds_like    = new string[1];
            w0.sounds_like[0] = "my key";
            w0.display_as     = "Mikey";
            wordList.Add(w0);
            Word w1 = new Word();

            w1.word           = "charlie";
            w1.sounds_like    = new string[1];
            w1.sounds_like[0] = "char lee";
            w1.display_as     = "Charlie";
            wordList.Add(w1);
            Word w2 = new Word();

            w2.word           = "bijou";
            w2.sounds_like    = new string[1];
            w2.sounds_like[0] = "be joo";
            w2.display_as     = "Bijou";
            wordList.Add(w2);
            words.words = wordList.ToArray();

            Log.Debug("ExampleSpeechToText", "Attempting to add custom words in customization {0} using Words object", _createdCustomizationID);
            _speechToText.AddCustomWords(HandleAddCustomWordsFromObject, _createdCustomizationID, words);
            while (!_addCustomWordsFromObjectTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Get custom word
            Log.Debug("ExampleSpeechToText", "Attempting to get custom word {1} in customization {0}", _createdCustomizationID, words.words[0].word);
            _speechToText.GetCustomWord(HandleGetCustomWord, _createdCustomizationID, words.words[0].word);
            while (!_getCustomWordTested)
            {
                yield return(null);
            }

            //  Train customization
            Log.Debug("ExampleSpeechToText", "Attempting to train customization {0}", _createdCustomizationID);
            _speechToText.TrainCustomization(HandleTrainCustomization, _createdCustomizationID);
            while (!_trainCustomizationTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Upgrade customization - not currently implemented in service
            //Log.Debug("ExampleSpeechToText", "Attempting to upgrade customization {0}", _createdCustomizationID);
            //_speechToText.UpgradeCustomization(HandleUpgradeCustomization, _createdCustomizationID);
            //while (!_upgradeCustomizationTested)
            //    yield return null;

            //  Delete custom word
            Log.Debug("ExampleSpeechToText", "Attempting to delete custom word {1} in customization {0}", _createdCustomizationID, words.words[2].word);
            _speechToText.DeleteCustomWord(HandleDeleteCustomWord, _createdCustomizationID, words.words[2].word);
            while (!_deleteCustomWordTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleDiscovery", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Delete custom corpus
            Log.Debug("ExampleSpeechToText", "Attempting to delete custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            _speechToText.DeleteCustomCorpus(HandleDeleteCustomCorpus, _createdCustomizationID, _createdCorpusName);
            while (!_deleteCustomCorpusTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleDiscovery", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Reset customization
            Log.Debug("ExampleSpeechToText", "Attempting to reset customization {0}", _createdCustomizationID);
            _speechToText.ResetCustomization(HandleResetCustomization, _createdCustomizationID);
            while (!_resetCustomizationTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleDiscovery", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Delete customization
            Log.Debug("ExampleSpeechToText", "Attempting to delete customization {0}", _createdCustomizationID);
            _speechToText.DeleteCustomization(HandleDeleteCustomization, _createdCustomizationID);
            while (!_deleteCustomizationsTested)
            {
                yield return(null);
            }

            Log.Debug("ExampleSpeechToText", "Speech to Text examples complete.");

            yield break;
        }
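
The test above repeatedly runs a Delay coroutine that is not shown; a minimal sketch consistent with how it is used (wait, then flip the _readyToContinue flag polled by RunTest) could look like this — the body is an assumption, not the SDK example's actual helper.

        // Minimal sketch of the Delay helper run via Runnable.Run(Delay(_delayTimeInSeconds)).
        // Assumes _readyToContinue is the flag polled by RunTest after each delete/reset call.
        private IEnumerator Delay(float delayTime)
        {
            // Wait for the requested number of seconds, then let the test continue.
            yield return new WaitForSeconds(delayTime);

            _readyToContinue = true;
        }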
Exemplo n.º 12
0
 public void TranscribeObject()
 {
     speechToText = new SpeechToText(this, awsServices);
 }
Exemplo n.º 13
0
    private IEnumerator createServices()
    {
        Credentials stt_credentials = null;

        //  Create credential and instantiate service
        if (!string.IsNullOrEmpty(speechToTextUsername) && !string.IsNullOrEmpty(speechToTextPassword))
        {
            //  Authenticate using username and password
            stt_credentials = new Credentials(speechToTextUsername, speechToTextPassword, speechToTextServiceUrl);
        }
        else if (!string.IsNullOrEmpty(speechToTextIamApikey))
        {
            //  Authenticate using iamApikey
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = speechToTextIamApikey,
                IamUrl    = speechToTextIamUrl
            };

            stt_credentials = new Credentials(tokenOptions, speechToTextServiceUrl);

            while (!stt_credentials.HasIamTokenData())
            {
                yield return(null);
            }
        }
        else
        {
            throw new WatsonException("Please provide either username and password or IAM apikey to authenticate the service.");
        }

        Credentials asst_credentials = null;

        //  Create credential and instantiate service
        if (!string.IsNullOrEmpty(assistantUsername) && !string.IsNullOrEmpty(assitantPassword))
        {
            //  Authenticate using username and password
            asst_credentials = new Credentials(assistantUsername, assitantPassword, assistantServiceUrl);
        }
        else if (!string.IsNullOrEmpty(assistantIamApikey))
        {
            Log.Debug("createServices()", "IAM key", "key {0}", assistantIamApikey);
            //  Authenticate using iamApikey
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = assistantIamApikey,
                IamUrl    = assistantIamUrl
            };

            asst_credentials = new Credentials(tokenOptions, assistantServiceUrl);

            while (!asst_credentials.HasIamTokenData())
            {
                yield return(null);
            }
        }
        else
        {
            throw new WatsonException("Please provide either username and password or IAM apikey to authenticate the service.");
        }

        Credentials lang_credentials = null;

        //  Create credential and instantiate service
        if (!string.IsNullOrEmpty(translatorUsername) && !string.IsNullOrEmpty(translatorPassword))
        {
            //  Authenticate using username and password
            lang_credentials = new Credentials(translatorUsername, translatorPassword, translatorServiceUrl);
        }
        else if (!string.IsNullOrEmpty(translatorIamApikey))
        {
            Log.Debug("createServices()", "IAM key", "key {0}", translatorIamApikey);

            //  Authenticate using iamApikey
            TokenOptions tokenOptions = new TokenOptions()
            {
                IamApiKey = translatorIamApikey,
                IamUrl    = translatorIamUrl
            };

            lang_credentials = new Credentials(tokenOptions, translatorServiceUrl);

            while (!lang_credentials.HasIamTokenData())
            {
                yield return(null);
            }

            Log.Debug("createServices()", "lang_creds", " {0}", lang_credentials);
        }
        else
        {
            throw new WatsonException("Please provide either username and password or IAM apikey to authenticate the service.");
        }

        _speechToText        = new SpeechToText(stt_credentials);
        _conversation        = new Conversation(asst_credentials);
        _language_translator = new LanguageTranslator(translatorVersionDate, lang_credentials);

        _speechToText.RecognizeModel = "ja-JP_BroadbandModel";
        _conversation.VersionDate    = assistantVersionDate;

        Active = true;

        StartRecording();
    }
    void Start()
    {
        LogSystem.InstallDefaultReactors();


        #region http custom headers
        //  Create credential and instantiate Assistant service
        Credentials assistantCredentials = new Credentials(_assistantUsername, _assistantPassword, _assistantUrl);

        _assistant             = new Assistant(assistantCredentials);
        _assistant.VersionDate = _assistantVersionDate;

        Dictionary <string, object> input = new Dictionary <string, object>();
        input.Add("text", _inputString);
        MessageRequest messageRequest = new MessageRequest()
        {
            Input            = input,
            AlternateIntents = true
        };

        //  Create customData object
        Dictionary <string, object> assistantCustomData = new Dictionary <string, object>();
        //  Create a dictionary of custom headers
        Dictionary <string, string> assistantCustomHeaders = new Dictionary <string, string>();
        //  Add to the header dictionary
        assistantCustomHeaders.Add("X-Watson-Metadata", "customer_id=some-assistant-customer-id");
        //  Add the header dictionary to the custom data object
        assistantCustomData.Add(Constants.String.CUSTOM_REQUEST_HEADERS, assistantCustomHeaders);

        //  Logging what we will send
        if (assistantCustomData.ContainsKey(Constants.String.CUSTOM_REQUEST_HEADERS))
        {
            Log.Debug("ExampleCustomHeader.Start()", "Assistant custom request headers:");
            foreach (KeyValuePair <string, string> kvp in assistantCustomData[Constants.String.CUSTOM_REQUEST_HEADERS] as Dictionary <string, string> )
            {
                Log.Debug("ExampleCustomHeader.Start()", "\t{0}: {1}", kvp.Key, kvp.Value);
            }
        }

        //  Call service using custom data object
        _assistant.Message(OnMessage, OnFail, _assistantWorkspaceId, messageRequest, customData: assistantCustomData);
        #endregion

        #region websocket custom headers
        //  Websocket custom headers
        //  Create credential and instantiate Speech to Text service
        Credentials speechToTextCredentials = new Credentials(_speechToTextUsername, _speechToTextPassword, _speechToTextUrl);

        _speechToText = new SpeechToText(speechToTextCredentials);

        //  Create customData object
        speechToTextCustomData = new Dictionary <string, object>();
        //  Create a dictionary of custom headers
        Dictionary <string, string> speechToTextCustomHeaders = new Dictionary <string, string>();
        //  Add header to the dictionary
        speechToTextCustomHeaders.Add("X-Watson-Metadata", "customer_id=some-speech-to-text-customer-id");
        //  Add the header dictionary to the custom data object
        speechToTextCustomData.Add(Constants.String.CUSTOM_REQUEST_HEADERS, speechToTextCustomHeaders);

        //  Logging what we will send
        if (speechToTextCustomData.ContainsKey(Constants.String.CUSTOM_REQUEST_HEADERS))
        {
            Log.Debug("ExampleCustomHeader.Start()", "Speech to text custom request headers:");
            foreach (KeyValuePair <string, string> kvp in speechToTextCustomData[Constants.String.CUSTOM_REQUEST_HEADERS] as Dictionary <string, string> )
            {
                Log.Debug("ExampleCustomHeader.Start()", "\t{0}: {1}", kvp.Key, kvp.Value);
            }
        }

        //  Call service using custom data object (see Active)
        Active = true;

        StartRecording();
        #endregion
    }
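
The Assistant call in Start() passes an OnMessage success callback that is not shown; a minimal sketch, assuming the same response/customData callback shape used by the other handlers in these examples, would just log the raw JSON.

    // Minimal sketch of the OnMessage success callback passed to _assistant.Message().
    // The response type and body are assumptions; here the raw JSON from customData is logged,
    // mirroring the other callbacks in these examples.
    private void OnMessage(object response, Dictionary<string, object> customData)
    {
        Log.Debug("ExampleCustomHeader.OnMessage()", "Assistant response: {0}", customData["json"].ToString());
    }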
Exemplo n.º 15
0
 private void Listen_Click(object sender, RoutedEventArgs e)
 {
     SpeechToText.ConverSpeechToText();
 }
Exemplo n.º 16
0
        public override IEnumerator RunTest()
        {
            LogSystem.InstallDefaultReactors();

            VcapCredentials vcapCredentials = new VcapCredentials();
            fsData          data            = null;

            string result = null;

            var vcapUrl      = Environment.GetEnvironmentVariable("VCAP_URL");
            var vcapUsername = Environment.GetEnvironmentVariable("VCAP_USERNAME");
            var vcapPassword = Environment.GetEnvironmentVariable("VCAP_PASSWORD");

            using (SimpleGet simpleGet = new SimpleGet(vcapUrl, vcapUsername, vcapPassword))
            {
                while (!simpleGet.IsComplete)
                {
                    yield return(null);
                }

                result = simpleGet.Result;
            }

            //  Add in a parent object because Unity does not like to deserialize root level collection types.
            result = Utility.AddTopLevelObjectToJson(result, "VCAP_SERVICES");

            //  Convert json to fsResult
            fsResult r = fsJsonParser.Parse(result, out data);

            if (!r.Succeeded)
            {
                throw new WatsonException(r.FormattedMessages);
            }

            //  Convert fsResult to VcapCredentials
            object obj = vcapCredentials;

            r = _serializer.TryDeserialize(data, obj.GetType(), ref obj);
            if (!r.Succeeded)
            {
                throw new WatsonException(r.FormattedMessages);
            }

            //  Set credentials from imported credentials
            Credential credential = vcapCredentials.VCAP_SERVICES["speech_to_text"];

            _username = credential.Username.ToString();
            _password = credential.Password.ToString();
            _url      = credential.Url.ToString();

            //  Create credential and instantiate service
            Credentials credentials = new Credentials(_username, _password, _url);

            //  Or authenticate using token
            //Credentials credentials = new Credentials(_url)
            //{
            //    AuthenticationToken = _token
            //};

            _speechToText             = new SpeechToText(credentials);
            _customCorpusFilePath     = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/theJabberwocky-utf8.txt";
            _customWordsFilePath      = Application.dataPath + "/Watson/Examples/ServiceExamples/TestData/test-stt-words.json";
            _acousticResourceMimeType = Utility.GetMimeType(Path.GetExtension(_acousticResourceUrl));

            Runnable.Run(DownloadAcousticResource());
            while (!_isAudioLoaded)
            {
                yield return(null);
            }

            //  Recognize
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to recognize");
            List <string> keywords = new List <string>();

            keywords.Add("speech");
            _speechToText.KeywordsThreshold = 0.5f;
            _speechToText.InactivityTimeout = 120;
            _speechToText.StreamMultipart   = false;
            _speechToText.Keywords          = keywords.ToArray();
            _speechToText.Recognize(HandleOnRecognize, OnFail, _acousticResourceData, _acousticResourceMimeType);
            while (!_recognizeTested)
            {
                yield return(null);
            }

            //  Get models
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get models");
            _speechToText.GetModels(HandleGetModels, OnFail);
            while (!_getModelsTested)
            {
                yield return(null);
            }

            //  Get model
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get model {0}", _modelNameToGet);
            _speechToText.GetModel(HandleGetModel, OnFail, _modelNameToGet);
            while (!_getModelTested)
            {
                yield return(null);
            }

            //  Get customizations
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get customizations");
            _speechToText.GetCustomizations(HandleGetCustomizations, OnFail);
            while (!_getCustomizationsTested)
            {
                yield return(null);
            }

            //  Create customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting create customization");
            _speechToText.CreateCustomization(HandleCreateCustomization, OnFail, "unity-test-customization", "en-US_BroadbandModel", "Testing customization unity");
            while (!_createCustomizationsTested)
            {
                yield return(null);
            }

            //  Get customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get customization {0}", _createdCustomizationID);
            _speechToText.GetCustomization(HandleGetCustomization, OnFail, _createdCustomizationID);
            while (!_getCustomizationTested)
            {
                yield return(null);
            }

            //  Get custom corpora
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get custom corpora for {0}", _createdCustomizationID);
            _speechToText.GetCustomCorpora(HandleGetCustomCorpora, OnFail, _createdCustomizationID);
            while (!_getCustomCorporaTested)
            {
                yield return(null);
            }

            //  Add custom corpus
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to add custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            string corpusData = File.ReadAllText(_customCorpusFilePath);

            _speechToText.AddCustomCorpus(HandleAddCustomCorpus, OnFail, _createdCustomizationID, _createdCorpusName, true, corpusData);
            while (!_addCustomCorpusTested)
            {
                yield return(null);
            }

            //  Get custom corpus
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            _speechToText.GetCustomCorpus(HandleGetCustomCorpus, OnFail, _createdCustomizationID, _createdCorpusName);
            while (!_getCustomCorpusTested)
            {
                yield return(null);
            }

            //  Wait for customization
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Get custom words
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get custom words.");
            _speechToText.GetCustomWords(HandleGetCustomWords, OnFail, _createdCustomizationID);
            while (!_getCustomWordsTested)
            {
                yield return(null);
            }

            //  Add custom words from path
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to add custom words in customization {0} using Words json path {1}", _createdCustomizationID, _customWordsFilePath);
            string customWords = File.ReadAllText(_customWordsFilePath);

            _speechToText.AddCustomWords(HandleAddCustomWordsFromPath, OnFail, _createdCustomizationID, customWords);
            while (!_addCustomWordsFromPathTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Add custom words from object
            Words       words    = new Words();
            Word        w0       = new Word();
            List <Word> wordList = new List <Word>();

            w0.word           = "mikey";
            w0.sounds_like    = new string[1];
            w0.sounds_like[0] = "my key";
            w0.display_as     = "Mikey";
            wordList.Add(w0);
            Word w1 = new Word();

            w1.word           = "charlie";
            w1.sounds_like    = new string[1];
            w1.sounds_like[0] = "char lee";
            w1.display_as     = "Charlie";
            wordList.Add(w1);
            Word w2 = new Word();

            w2.word           = "bijou";
            w2.sounds_like    = new string[1];
            w2.sounds_like[0] = "be joo";
            w2.display_as     = "Bijou";
            wordList.Add(w2);
            words.words = wordList.ToArray();

            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to add custom words in customization {0} using Words object", _createdCustomizationID);
            _speechToText.AddCustomWords(HandleAddCustomWordsFromObject, OnFail, _createdCustomizationID, words);
            while (!_addCustomWordsFromObjectTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Get custom word
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get custom word {1} in customization {0}", _createdCustomizationID, words.words[0].word);
            _speechToText.GetCustomWord(HandleGetCustomWord, OnFail, _createdCustomizationID, words.words[0].word);
            while (!_getCustomWordTested)
            {
                yield return(null);
            }

            //  Train customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to train customization {0}", _createdCustomizationID);
            _speechToText.TrainCustomization(HandleTrainCustomization, OnFail, _createdCustomizationID);
            while (!_trainCustomizationTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isCustomizationReady = false;
            Runnable.Run(CheckCustomizationStatus(_createdCustomizationID));
            while (!_isCustomizationReady)
            {
                yield return(null);
            }

            //  Upgrade customization - not currently implemented in service
            //Log.Debug("ExampleSpeechToText.Examples()", "Attempting to upgrade customization {0}", _createdCustomizationID);
            //_speechToText.UpgradeCustomization(HandleUpgradeCustomization, _createdCustomizationID);
            //while (!_upgradeCustomizationTested)
            //    yield return null;

            //  Delete custom word
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to delete custom word {1} in customization {0}", _createdCustomizationID, words.words[2].word);
            _speechToText.DeleteCustomWord(HandleDeleteCustomWord, OnFail, _createdCustomizationID, words.words[2].word);
            while (!_deleteCustomWordTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleSpeechToText.Examples()", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Delete custom corpus
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to delete custom corpus {1} in customization {0}", _createdCustomizationID, _createdCorpusName);
            _speechToText.DeleteCustomCorpus(HandleDeleteCustomCorpus, OnFail, _createdCustomizationID, _createdCorpusName);
            while (!_deleteCustomCorpusTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleSpeechToText.Examples()", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Reset customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to reset customization {0}", _createdCustomizationID);
            _speechToText.ResetCustomization(HandleResetCustomization, OnFail, _createdCustomizationID);
            while (!_resetCustomizationTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleSpeechToText.Examples()", string.Format("Delaying delete environment for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            _readyToContinue = false;
            //  Delete customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to delete customization {0}", _createdCustomizationID);
            _speechToText.DeleteCustomization(HandleDeleteCustomization, OnFail, _createdCustomizationID);
            while (!_deleteCustomizationsTested)
            {
                yield return(null);
            }

            //  List acoustic customizations
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get acoustic customizations");
            _speechToText.GetCustomAcousticModels(HandleGetCustomAcousticModels, OnFail);
            while (!_getAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Create acoustic customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to create acoustic customization");
            _speechToText.CreateAcousticCustomization(HandleCreateAcousticCustomization, OnFail, _createdAcousticModelName);
            while (!_createAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Get acoustic customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get acoustic customization {0}", _createdAcousticModelId);
            _speechToText.GetCustomAcousticModel(HandleGetCustomAcousticModel, OnFail, _createdAcousticModelId);
            while (!_getAcousticCustomizationTested)
            {
                yield return(null);
            }

            while (!_isAudioLoaded)
            {
                yield return(null);
            }

            //  Create acoustic resource
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to create audio resource {1} on {0}", _createdAcousticModelId, _acousticResourceName);
            string mimeType = Utility.GetMimeType(Path.GetExtension(_acousticResourceUrl));

            _speechToText.AddAcousticResource(HandleAddAcousticResource, OnFail, _createdAcousticModelId, _acousticResourceName, mimeType, mimeType, true, _acousticResourceData);
            while (!_addAcousticResourcesTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isAcousticCustomizationReady = false;
            Runnable.Run(CheckAcousticCustomizationStatus(_createdAcousticModelId));
            while (!_isAcousticCustomizationReady)
            {
                yield return(null);
            }

            //  List acoustic resources
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get audio resources {0}", _createdAcousticModelId);
            _speechToText.GetCustomAcousticResources(HandleGetCustomAcousticResources, OnFail, _createdAcousticModelId);
            while (!_getAcousticResourcesTested)
            {
                yield return(null);
            }

            //  Train acoustic customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to train acoustic customization {0}", _createdAcousticModelId);
            _speechToText.TrainAcousticCustomization(HandleTrainAcousticCustomization, OnFail, _createdAcousticModelId, null, true);
            while (!_trainAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Get acoustic resource
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to get audio resource {1} from {0}", _createdAcousticModelId, _acousticResourceName);
            _speechToText.GetCustomAcousticResource(HandleGetCustomAcousticResource, OnFail, _createdAcousticModelId, _acousticResourceName);
            while (!_getAcousticResourceTested)
            {
                yield return(null);
            }

            //  Wait for customization
            _isAcousticCustomizationReady = false;
            Runnable.Run(CheckAcousticCustomizationStatus(_createdAcousticModelId));
            while (!_isAcousticCustomizationReady)
            {
                yield return(null);
            }

            //  Delete acoustic resource
            DeleteAcousticResource();
            while (!_deleteAcousticResource)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleSpeechToText.Examples()", string.Format("Delaying delete acoustic resource for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            //  Reset acoustic customization
            Log.Debug("ExampleSpeechToText.Examples()", "Attempting to reset acoustic customization {0}", _createdAcousticModelId);
            _speechToText.ResetAcousticCustomization(HandleResetAcousticCustomization, OnFail, _createdAcousticModelId);
            while (!_resetAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleSpeechToText.Examples()", string.Format("Delaying delete acoustic customization for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            //  Delete acoustic customization
            DeleteAcousticCustomization();
            while (!_deleteAcousticCustomizationsTested)
            {
                yield return(null);
            }

            //  Delay
            Log.Debug("ExampleSpeechToText.Examples()", string.Format("Delaying complete for {0} sec", _delayTimeInSeconds));
            Runnable.Run(Delay(_delayTimeInSeconds));
            while (!_readyToContinue)
            {
                yield return(null);
            }

            Log.Debug("TestSpeechToText.RunTest()", "Speech to Text examples complete.");

            yield break;
        }
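
The second test above also depends on a DownloadAcousticResource coroutine that is not shown; a minimal sketch, assuming it simply fetches _acousticResourceUrl into _acousticResourceData and then sets _isAudioLoaded, is given below (the real helper in the SDK examples may differ).

        // Minimal sketch of the DownloadAcousticResource coroutine run before Recognize().
        // Assumes UnityEngine.Networking is available; fetches the audio referenced by
        // _acousticResourceUrl and stores the raw bytes for AddAcousticResource() and Recognize().
        private IEnumerator DownloadAcousticResource()
        {
            Log.Debug("ExampleSpeechToText.DownloadAcousticResource()", "Downloading acoustic resource from {0}", _acousticResourceUrl);

            using (UnityWebRequest www = UnityWebRequest.Get(_acousticResourceUrl))
            {
                yield return www.SendWebRequest();

                _acousticResourceData = www.downloadHandler.data;
                _isAudioLoaded        = true;
            }
        }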
        protected async override void OnNavigatedTo(NavigationEventArgs e)
        {
            // Set the page to standby mode to start off with
            this.GoToVisualState("Standby");

            _ctsVideoMonitor = new CancellationTokenSource();

            // Initialize the text-to-speech class instance. It uses the MediaElement on the page to play sounds, so it needs a reference to it.
            _tts = new TextToSpeech(this.media, this.Dispatcher);

            // Initialize the speech-to-text class instance which is used to recognize speech commands by the customer
            _speechToText = new SpeechToText(
                _SpeechToTextLanguageName,
                _SpeechToTextInitialSilenceTimeoutInSeconds,
                _SpeechToTextBabbleTimeoutInSeconds,
                _SpeechToTextEndSilenceTimeoutInSeconds);
            await _speechToText.InitializeRecognizerAsync();

            _speechToText.OnHypothesis     += _speechToText_OnHypothesis;
            _speechToText.CapturingStarted += _speechToText_CapturingStarted;
            _speechToText.CapturingEnded   += _speechToText_CapturingEnded;

            // Lock the display orientation preference to landscape
            DisplayInformation.AutoRotationPreferences = DisplayOrientations.Landscape;

            // Keep the screen alive, i.e. prevent it from going to sleep
            _displayRequest = new DisplayRequest();
            _displayRequest.RequestActive();

            // Find all the video cameras on the device
            var cameras = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);

            // Choose the first externally plugged in camera found
            var preferredCamera = cameras.FirstOrDefault(deviceInfo => deviceInfo.EnclosureLocation == null);

            // If there is no external camera, prefer the front-facing camera; otherwise take the first camera found
            if (preferredCamera == null)
            {
                preferredCamera = cameras.FirstOrDefault(deviceInfo => deviceInfo.EnclosureLocation?.Panel == Windows.Devices.Enumeration.Panel.Front) ?? cameras.FirstOrDefault();
            }

            //  No camera found on device
            if (preferredCamera == null)
            {
                Debug.WriteLine("No camera found on device!");
                return;
            }

            // Initialize and start the camera video stream into the app preview window
            _mediaCapture = new MediaCapture();
            await _mediaCapture.InitializeAsync(new MediaCaptureInitializationSettings()
            {
                StreamingCaptureMode = StreamingCaptureMode.Video,
                VideoDeviceId        = preferredCamera.Id
            });

            videoPreview.Source = _mediaCapture;
            await _mediaCapture.StartPreviewAsync();

            // Ensure state is clear
            await this.EndSessionAsync();

            // Initiate monitoring of the video stream for faces
            _faceMonitoringTask = this.FaceMonitoringAsync(_ctsVideoMonitor.Token);

            base.OnNavigatedTo(e);
        }
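
The page above subscribes to OnHypothesis, CapturingStarted and CapturingEnded, but the handlers are not shown. The sketch below uses assumed delegate signatures (the real SpeechToText wrapper may differ) and simply surfaces the recognition state; the "Listening" visual state name is illustrative, only "Standby" appears in the original.

        // Hypothetical handler signatures; the actual delegates on this SpeechToText wrapper
        // are not shown in the example, so the parameter shapes are assumptions.
        private void _speechToText_OnHypothesis(string hypothesis)
        {
            // Log the partial recognition result while the customer is still speaking.
            Debug.WriteLine("Hypothesis: " + hypothesis);
        }

        private void _speechToText_CapturingStarted()
        {
            // Reflect "listening" in the UI while audio is being captured ("Listening" is an assumed state name).
            this.GoToVisualState("Listening");
        }

        private void _speechToText_CapturingEnded()
        {
            // Return to standby once speech capture has finished.
            this.GoToVisualState("Standby");
        }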