/// <summary>
/// Sends <paramref name="clip"/> to the Google Cloud speech endpoint selected by the
/// current config's recognitionType and dispatches the request through _networking.
/// </summary>
/// <param name="clip">Recorded audio to recognize; must not be null.</param>
/// <param name="contexts">Optional phrase hints (may be null); forwarded into the request body.</param>
/// <param name="language">Language code embedded in the request body.</param>
/// <exception cref="NotImplementedException">Thrown when no config was set or the clip is null (preserved from the original contract).</exception>
/// <exception cref="NotSupportedException">Thrown for an unsupported recognitionType.</exception>
public void Recognize(AudioClip clip, List <string[]> contexts, Enumerators.LanguageCode language)
        {
            if (_currentConfig == null)
            {
                throw new NotImplementedException("Config isn't set! Use SetConfig method!");
            }

            if (clip == null)
            {
                throw new NotImplementedException("AudioClip isn't set!");
            }

            // Both recognition types post an identical JSON payload; only the endpoint
            // differs, so resolve the base URL first and build the body exactly once.
            string baseUrl;

            switch (_currentConfig.recognitionType)
            {
            case Enumerators.GoogleNetworkType.SPEECH_RECOGNIZE:
                baseUrl = Constants.RECOGNIZE_REQUEST_URL;
                break;

            case Enumerators.GoogleNetworkType.SPEECH_LONGRECOGNIZE:
                baseUrl = Constants.LONG_RECOGNIZE_REQUEST_URL;
                break;

            default:
                throw new NotSupportedException(_currentConfig.recognitionType + " isn't supported!");
            }

            // Prefer the key configured on the prefab when that flag is enabled;
            // otherwise fall back to the compile-time constant key.
            string apiKey = _gcSpeechRecognition.isUseAPIKeyFromPrefab
                ? _gcSpeechRecognition.apiKey
                : Constants.GC_API_KEY;

            string uri = baseUrl + Constants.API_KEY_PARAM + apiKey;

            string postData = JsonUtility.ToJson(GenerateRecognizeRequest(
                                                     AudioConvert.Convert(clip, _currentConfig.audioEncoding,
                                                                          _currentConfig.useVolumeMultiplier,
                                                                          _currentConfig.audioVolumeMultiplier), contexts, language));

            _networking.SendRequest(uri, postData, _currentConfig.recognitionType);
        }
// ===== Example #2 =====
        /// <summary>
        /// Sends <paramref name="clip"/> to the sync or async Google Cloud speech endpoint
        /// (chosen by the current config's recognitionType) via _networking.
        /// </summary>
        /// <param name="clip">Recorded audio to recognize; must not be null.</param>
        /// <param name="contexts">Optional phrase hints (may be null); forwarded into the request body.</param>
        /// <param name="language">Language code embedded in the request body.</param>
        /// <exception cref="NotImplementedException">Thrown when no config was set or the clip is null (preserved from the original contract).</exception>
        /// <exception cref="NotSupportedException">Thrown for an unsupported recognitionType.</exception>
        public void Recognize(AudioClip clip, List <string[]> contexts, Enumerators.LanguageCode language)
        {
            if (_currentConfig == null)
            {
                throw new NotImplementedException("Config isn't set! Use SetConfig method!");
            }

            if (clip == null)
            {
                throw new NotImplementedException("AudioClip isn't set!");
            }

            // Both branches build the same request body; only the target URL differs,
            // so pick the URL in the switch and generate the payload exactly once.
            string uri;

            switch (_currentConfig.recognitionType)
            {
            case Enumerators.SpeechRecognitionType.SYNC:
                uri = Constants.RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + Constants.GC_API_KEY;
                break;

            case Enumerators.SpeechRecognitionType.ASYNC:
                // Kept from the original implementation: async mode is only partially supported.
                Debug.Log("Async(Long) speech recognition isn't fully implemented!");

                uri = Constants.LONG_RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + Constants.GC_API_KEY;
                break;

            default:
                throw new NotSupportedException(_currentConfig.recognitionType + " isn't supported!");
            }

            string postData = JsonUtility.ToJson(GenerateSyncRequest(
                                                     AudioConvert.Convert(clip, _currentConfig.audioEncoding,
                                                                          _currentConfig.useVolumeMultiplier,
                                                                          _currentConfig.audioVolumeMultiplier), contexts, language));

            _networking.SendRequest(uri, postData);
        }
 /// <summary>
 /// Converts the enum value's name into API form by replacing underscores with
 /// hyphens (e.g. an en_US member becomes "en-US").
 /// </summary>
 /// <param name="lang">Language code to convert.</param>
 /// <returns>The hyphenated language string.</returns>
 public string PrepareLanguage(Enumerators.LanguageCode lang) => lang.ToString().Replace("_", "-");
        /// <summary>
        /// Builds the JSON-serializable recognition request: copies the recognition
        /// settings from the active config, attaches optional phrase contexts, and
        /// embeds the already-converted audio payload.
        /// </summary>
        /// <param name="content">Audio payload produced by AudioConvert.Convert.</param>
        /// <param name="contexts">Optional phrase groups; null means no speech contexts are attached.</param>
        /// <param name="language">Language code; underscores are rewritten to hyphens for the request.</param>
        /// <returns>A populated <see cref="RecognitionRequest"/>.</returns>
        private RecognitionRequest GenerateRecognizeRequest(string content, List <string[]> contexts, Enumerators.LanguageCode language)
        {
            RecognitionRequest recognitionRequest = new RecognitionRequest();

            recognitionRequest.config.encoding        = _currentConfig.audioEncoding.ToString();
            recognitionRequest.config.languageCode    = language.ToString().Replace("_", "-");
            recognitionRequest.config.sampleRateHertz = _currentConfig.sampleRate;
            recognitionRequest.config.maxAlternatives = _currentConfig.maxAlternatives;
            recognitionRequest.config.profanityFilter = _currentConfig.isEnabledProfanityFilter;

            if (contexts != null)
            {
                int contextCount = contexts.Count;
                recognitionRequest.config.speechContexts = new SpeechContext[contextCount];

                for (int index = 0; index < contextCount; index++)
                {
                    SpeechContext speechContext = new SpeechContext();
                    speechContext.phrases = contexts[index];
                    recognitionRequest.config.speechContexts[index] = speechContext;
                }
            }

            recognitionRequest.audio.content = content;

            return recognitionRequest;
        }
// ===== Example #5 =====
 /// <summary>
 /// Stores <paramref name="language"/> as the default language on the
 /// speech recognition manager's current config.
 /// </summary>
 public void SetLanguage(Enumerators.LanguageCode language) =>
     _speechRecognitionManager.CurrentConfig.defaultLanguage = language;
// ===== Example #6 =====
 /// <summary>
 /// Thin pass-through: delegates recognition to the speech recognition manager.
 /// </summary>
 /// <returns>The long value returned by the manager's Recognize call
 /// (presumably a request identifier — confirm against the manager).</returns>
 public long Recognize(AudioClip clip, List <string[]> contexts, Enumerators.LanguageCode language) =>
     _speechRecognitionManager.Recognize(clip, contexts, language);
 /// <summary>
 /// Thin pass-through: delegates language-string preparation to the
 /// text-to-speech manager.
 /// </summary>
 public string PrepareLanguage(Enumerators.LanguageCode lang) =>
     _textToSpeechManager.PrepareLanguage(lang);