Example #1
        public void StopRecord()
        {
            if (!IsRecording || !ReadyToRecord())
            {
                return;
            }

            IsRecording = false;

            CustomMicrophone.End(MicrophoneDevice);

            if (!DetectVoice && _currentRecordingVoice != null)
            {
                LastRecordedRaw  = _currentRecordingVoice.ToArray();
                LastRecordedClip = AudioConvert.Convert(LastRecordedRaw, _microphoneWorkingAudioClip.channels);
            }

            if (_currentRecordingVoice != null)
            {
                _currentRecordingVoice.Clear();
            }

            _currentAudioSamples   = null;
            _currentRecordingVoice = null;

#if NET_2_0 || NET_2_0_SUBSET
            if (RecordEndedEvent != null)
            {
                RecordEndedEvent(LastRecordedClip, LastRecordedRaw);
            }
#else
            RecordEndedEvent?.Invoke(LastRecordedClip, LastRecordedRaw);
#endif
        }
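
        // Illustrative sketch, not part of the original plugin code: one way a caller
        // could consume the end-of-record callback before stopping. The handler
        // signature (AudioClip, float[]) is an assumption based on how the event is
        // invoked above.
        private void StopAndHandleRecording()
        {
            RecordEndedEvent += OnRecordEnded;
            StopRecord();
        }

        private void OnRecordEnded(AudioClip clip, float[] rawSamples)
        {
            RecordEndedEvent -= OnRecordEnded;
            Debug.Log("Recording finished with " + rawSamples.Length + " samples.");
        }
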
        public void Recognize(AudioClip clip, List<string[]> contexts, Enumerators.LanguageCode language)
        {
            if (_currentConfig == null)
            {
                throw new NotImplementedException("Config isn't seted! Use SetConfig method!");
            }

            if (clip == null)
            {
                throw new NotImplementedException("AudioClip isn't seted!");
            }

            string postData = string.Empty;
            string uri      = string.Empty;

            switch (_currentConfig.recognitionType)
            {
            case Enumerators.GoogleNetworkType.SPEECH_RECOGNIZE:
            {
                if (!_gcSpeechRecognition.isUseAPIKeyFromPrefab)
                {
                    uri = Constants.RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + Constants.GC_API_KEY;
                }
                else
                {
                    uri = Constants.RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + _gcSpeechRecognition.apiKey;
                }

                postData = JsonUtility.ToJson(GenerateRecognizeRequest(
                                                  AudioConvert.Convert(clip, _currentConfig.audioEncoding,
                                                                       _currentConfig.useVolumeMultiplier,
                                                                       _currentConfig.audioVolumeMultiplier), contexts, language));
            }
            break;

            case Enumerators.GoogleNetworkType.SPEECH_LONGRECOGNIZE:
            {
                if (!_gcSpeechRecognition.isUseAPIKeyFromPrefab)
                {
                    uri = Constants.LONG_RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + Constants.GC_API_KEY;
                }
                else
                {
                    uri = Constants.LONG_RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + _gcSpeechRecognition.apiKey;
                }

                postData = JsonUtility.ToJson(GenerateRecognizeRequest(
                                                  AudioConvert.Convert(clip, _currentConfig.audioEncoding,
                                                                       _currentConfig.useVolumeMultiplier,
                                                                       _currentConfig.audioVolumeMultiplier), contexts, language));
            }
            break;

            default:
                throw new NotSupportedException(_currentConfig.recognitionType + " isn't supported!");
            }

            _networking.SendRequest(uri, postData, _currentConfig.recognitionType);
        }
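
The _networking helper that actually issues the HTTP call is not shown in these examples. As a rough sketch (assuming a coroutine-based implementation on top of UnityWebRequest, which the source does not confirm), the POST it sends could look like this:

        // Illustrative sketch only; requires System.Collections, System.Text and
        // UnityEngine.Networking. The real _networking.SendRequest is not shown here.
        private IEnumerator PostRecognizeRequest(string uri, string postData)
        {
            using (var request = new UnityWebRequest(uri, UnityWebRequest.kHttpVerbPOST))
            {
                request.uploadHandler   = new UploadHandlerRaw(Encoding.UTF8.GetBytes(postData));
                request.downloadHandler = new DownloadHandlerBuffer();
                request.SetRequestHeader("Content-Type", "application/json");

                yield return request.SendWebRequest();

                if (!string.IsNullOrEmpty(request.error))
                {
                    Debug.LogError("Recognition request failed: " + request.error);
                }
                else
                {
                    Debug.Log("Recognition response: " + request.downloadHandler.text);
                }
            }
        }
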
Example #3
        public void Recognize(AudioClip clip, List<string[]> contexts, Enumerators.LanguageCode language)
        {
            if (_currentConfig == null)
            {
                throw new NotImplementedException("Config isn't seted! Use SetConfig method!");
            }

            if (clip == null)
            {
                throw new NotImplementedException("AudioClip isn't seted!");
            }

            string postData = string.Empty;
            string uri      = string.Empty;

            switch (_currentConfig.recognitionType)
            {
            case Enumerators.SpeechRecognitionType.SYNC:
            {
                uri = Constants.RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + Constants.GC_API_KEY;

                postData = JsonUtility.ToJson(GenerateSyncRequest(
                                                  AudioConvert.Convert(clip, _currentConfig.audioEncoding,
                                                                       _currentConfig.useVolumeMultiplier,
                                                                       _currentConfig.audioVolumeMultiplier), contexts, language));
            }
            break;

            case Enumerators.SpeechRecognitionType.ASYNC:
            {
                Debug.Log("Async(Long) speech recognition isn't fully implemented!");

                uri = Constants.LONG_RECOGNIZE_REQUEST_URL + Constants.API_KEY_PARAM + Constants.GC_API_KEY;

                postData = JsonUtility.ToJson(GenerateSyncRequest(
                                                  AudioConvert.Convert(clip, _currentConfig.audioEncoding,
                                                                       _currentConfig.useVolumeMultiplier,
                                                                       _currentConfig.audioVolumeMultiplier), contexts, language));
            }
            break;

            default:
                throw new NotSupportedException(_currentConfig.recognitionType + " isn't supported!");
            }

            _networking.SendRequest(uri, postData);
        }
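
For reference, GenerateSyncRequest ultimately has to produce JSON in the shape the Google Cloud Speech "speech:recognize" endpoint expects: a config block describing the audio plus an audio.content field carrying base64-encoded samples. The classes below only sketch that shape; the actual types used by the plugin are not shown in these examples, and the names here are assumptions.

        // Illustrative sketch of the request body shape; field names follow the public
        // Google Cloud Speech-to-Text v1 REST API, class names are assumed.
        [System.Serializable]
        public class RecognizeRequestSketch
        {
            public RecognitionConfigSketch config;
            public RecognitionAudioSketch  audio;
        }

        [System.Serializable]
        public class RecognitionConfigSketch
        {
            public string encoding        = "LINEAR16";
            public int    sampleRateHertz = 16000;
            public string languageCode    = "en-US";
            public SpeechContextSketch[] speechContexts; // built from the "contexts" parameter
        }

        [System.Serializable]
        public class SpeechContextSketch
        {
            public string[] phrases;
        }

        [System.Serializable]
        public class RecognitionAudioSketch
        {
            public string content; // base64-encoded audio, e.g. the output of AudioConvert.Convert
        }
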
Example #4
        public void Update()
        {
            if (IsRecording)
            {
                _currentSamplePosition = CustomMicrophone.GetPosition(MicrophoneDevice);
                CustomMicrophone.GetRawData(ref _currentAudioSamples, _microphoneWorkingAudioClip);

                if (DetectVoice)
                {
                    bool isTalking = _voiceDetectionManager.HasDetectedVoice(AudioClip2ByteConverter.FloatToByte(_currentAudioSamples));

                    if (isTalking)
                    {
                        _endTalkingDelay = 0f;
                    }
                    else
                    {
                        _endTalkingDelay += Time.deltaTime;
                    }

                    if (!_isTalking && isTalking)
                    {
                        _isTalking = true;

#if NET_2_0 || NET_2_0_SUBSET
                        if (TalkBeganEvent != null)
                        {
                            TalkBeganEvent();
                        }
#else
                        TalkBeganEvent?.Invoke();
#endif
                    }
                    else if (_isTalking && !isTalking && _endTalkingDelay >= _speechRecognitionManager.CurrentConfig.voiceDetectionEndTalkingDelay)
                    {
                        _isTalking = false;

                        LastRecordedRaw  = _currentRecordingVoice.ToArray();
                        LastRecordedClip = AudioConvert.Convert(LastRecordedRaw, _microphoneWorkingAudioClip.channels);

                        _currentRecordingVoice.Clear();
#if NET_2_0 || NET_2_0_SUBSET
                        if (TalkEndedEvent != null)
                        {
                            TalkEndedEvent(LastRecordedClip, LastRecordedRaw);
                        }
#else
                        TalkEndedEvent?.Invoke(LastRecordedClip, LastRecordedRaw);
#endif
                    }
                    else if (_isTalking && isTalking)
                    {
                        AddAudioSamplesIntoBuffer();
                    }
                }
                else
                {
                    AddAudioSamplesIntoBuffer();
                }

                _previousSamplePosition = _currentSamplePosition;
            }
        }
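
The voice-activity check behind HasDetectedVoice is not shown in these examples. A minimal sketch of the usual approach (an RMS-energy threshold over the latest microphone samples; the threshold value below is an assumption) looks like this:

        // Illustrative sketch only: simple RMS-energy voice detection over a sample buffer.
        // The real _voiceDetectionManager logic is not shown here.
        private static bool IsAboveRmsThreshold(float[] samples, float threshold = 0.02f)
        {
            if (samples == null || samples.Length == 0)
            {
                return false;
            }

            double sumOfSquares = 0.0;

            for (int i = 0; i < samples.Length; i++)
            {
                sumOfSquares += samples[i] * samples[i];
            }

            float rms = (float)System.Math.Sqrt(sumOfSquares / samples.Length);

            return rms >= threshold;
        }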