Code Example #1
        private void FinishedRecordEventHandler(AudioClip clip)
        {
            if (_startRecordButton.interactable)
            {
                _speechRecognitionState.color = Color.yellow;
            }

            if (clip == null)
            {
                return;
            }

            RecognitionConfig config = RecognitionConfig.GetDefault();

            config.languageCode      = ((Enumerators.LanguageCode)_languageDropdown.value).Parse();
            config.audioChannelCount = clip.channels;
            // configure other parameters of the config if needed

            GeneralRecognitionRequest recognitionRequest = new GeneralRecognitionRequest()
            {
                audio = new RecognitionAudioContent()
                {
                    content = clip.ToBase64()
                },
                //audio = new RecognitionAudioUri() // for Google Cloud Storage object
                //{
                //	uri = "gs://bucketName/object_name"
                //},
                config = config
            };

            _speechRecognition.Recognize(recognitionRequest);
        }
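
A note on wiring: Example #1 is only the event handler, so something else must subscribe it to the plugin and start a recording. The sketch below shows one plausible setup; GCSpeechRecognition.Instance and the FinishedRecordEvent name are assumptions inferred from the handler name and the plugin's usual singleton pattern, while StartRecord(bool)/StopRecord() are the calls that appear in Example #3. Check the event signature of your plugin version (newer versions also pass the raw float[] samples, as in Example #2).

using FrostweepGames.Plugins.GoogleCloud.SpeechRecognition;
using UnityEngine;

public class RecognitionBootstrap : MonoBehaviour
{
    private GCSpeechRecognition _speechRecognition;

    private void Start()
    {
        // Assumption: the plugin exposes a singleton and a "finished record" event
        // matching the handler signature of Example #1.
        _speechRecognition = GCSpeechRecognition.Instance;
        _speechRecognition.FinishedRecordEvent += FinishedRecordEventHandler;
    }

    // Hooked up to UI buttons; StartRecord(true)/StopRecord() are taken from Example #3.
    public void OnStartButtonPressed() => _speechRecognition.StartRecord(true);
    public void OnStopButtonPressed()  => _speechRecognition.StopRecord();

    private void FinishedRecordEventHandler(AudioClip clip)
    {
        // Build a GeneralRecognitionRequest and call Recognize(...) as in Example #1.
    }
}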
Code Example #2
        private void FinishedRecordEventHandler(AudioClip clip, float[] raw)
        {
            if (!_voiceDetectionToggle.isOn && _startRecordButton.interactable)
            {
                _speechRecognitionState.color = Color.yellow;
            }

            if (clip == null || !_recognizeDirectlyToggle.isOn)
            {
                return;
            }

            RecognitionConfig config = RecognitionConfig.GetDefault();

            //config.languageCode = ((Enumerators.LanguageCode)_languageDropdown.value).Parse();
            config.languageCode   = "ko-KR";
            config.speechContexts = new SpeechContext[]
            {
                new SpeechContext()
                {
                    phrases = _contextPhrasesInputField.text.Replace(" ", string.Empty).Split(',')
                }
            };

            config.audioChannelCount = clip.channels;
            // configure other parameters of the config if needed

            GeneralRecognitionRequest recognitionRequest = new GeneralRecognitionRequest()
            {
                audio = new RecognitionAudioContent()
                {
                    content = raw.ToBase64()
                },
                //audio = new RecognitionAudioUri() // for Google Cloud Storage object
                //{
                //	uri = "gs://bucketName/object_name"
                //},
                config = config
            };

            if (_longRunningRecognizeToggle.isOn)
            {
                _speechRecognition.LongRunningRecognize(recognitionRequest);
            }
            else
            {
                _speechRecognition.Recognize(recognitionRequest);
            }
        }
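
One detail worth noting in Example #2: Replace(" ", string.Empty) deletes every space before the comma split, so a multi-word hint such as "open the door" becomes "openthedoor". If that is not the intent, trimming each phrase keeps the hints intact. The helper below is a hypothetical alternative, not part of the plugin; it only relies on the SpeechContext.phrases field already used above.

using System.Linq;
using FrostweepGames.Plugins.GoogleCloud.SpeechRecognition;

public static class SpeechContextHelper
{
    // Turns "open the door, close the window" into { "open the door", "close the window" }.
    public static SpeechContext FromCommaSeparated(string input)
    {
        return new SpeechContext()
        {
            phrases = input.Split(',')
                           .Select(phrase => phrase.Trim())
                           .Where(phrase => phrase.Length > 0)
                           .ToArray()
        };
    }
}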
Code Example #3
        // Recognize speech by GCSR
        public async Task<string> RecognizeOnceAsync()
        {
            // Update RecognitionId
            var currentRecognitionId = Guid.NewGuid().ToString();

            recognitionId = currentRecognitionId;

            // For debugging and testing
            if (UseDummy)
            {
                await Task.Delay(1000);

                return DummyText;
            }

            try
            {
                // Start recording
                speechRecognition.StartRecord(true);
                nowRecording = true;

                // Wait for talking ends or timeout
                var startTime = Time.time;
                while (nowRecording)
                {
                    if (Time.time - startTime > Timeout)
                    {
                        Debug.Log($"Recording timeout");
                        speechRecognition.StopRecord();
                        return(string.Empty);
                    }
                    await Task.Delay(50);
                }

                // Stop recording just after voice detected
                speechRecognition.StopRecord();

                // Exit if RecognitionId is updated by another request
                if (recognitionId != currentRecognitionId)
                {
                    Debug.Log($"Id was updated by another request: Current {currentRecognitionId} / Global {recognitionId}");
                    return string.Empty;
                }

                // Exit if audio clip to recognize is empty
                if (recordedClip == null)
                {
                    Debug.LogError("No audio clip to recognize");
                    return string.Empty;
                }

                // Set config for each request
                var config = RecognitionConfig.GetDefault();
                config.languageCode      = Language;
                config.audioChannelCount = recordedClip.channels;

                // Compose request
                var recognitionRequest = new GeneralRecognitionRequest()
                {
                    audio = new RecognitionAudioContent()
                    {
                        content = recordedRawData.ToBase64()
                    },
                    config = config
                };
                string postData = JsonConvert.SerializeObject(recognitionRequest);
                var    content  = new StringContent(postData, Encoding.UTF8, "application/json");

                // Post to recognize
                using (var client = new HttpClient())
                {
                    var response = await client.PostAsync("https://speech.googleapis.com/v1/speech:recognize?key=" + speechRecognition.apiKey, content);

                    if (!response.IsSuccessStatusCode)
                    {
                        throw new HttpRequestException($"Error occurred while calling GCSR ({response.StatusCode})");
                    }
                    var responseContent = await response.Content.ReadAsStringAsync();

                    var recognitionResponse = JsonConvert.DeserializeObject<RecognitionResponse>(responseContent);

                    // Return empty when nothing recognized
                    if (recognitionResponse.results == null || recognitionResponse.results.Length == 0 ||
                        recognitionResponse.results[0].alternatives == null || recognitionResponse.results[0].alternatives.Length == 0)
                    {
                        Debug.Log("Nothing recognized by GCSR");
                        return string.Empty;
                    }

                    // Return recognized text
                    return recognitionResponse.results[0].alternatives[0].transcript;
                }
            }
            catch (Exception ex)
            {
                Debug.LogError($"Error occured in recognizing by GCSR: {ex.Message}\n{ex.StackTrace}");
            }

            return string.Empty;
        }
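
For reference, the v1 speech:recognize endpoint that Example #3 posts to returns JSON of the form { "results": [ { "alternatives": [ { "transcript": ..., "confidence": ... } ] } ] }, and an empty object when nothing was recognized (which is why the null check on results matters). The plugin's own RecognitionResponse type is what the example deserializes into; the classes below are only a minimal sketch of that shape for illustration, using the same Newtonsoft.Json package.

using Newtonsoft.Json;

// Minimal stand-in for the response shape read by Example #3.
public class SpeechRecognizeResponse
{
    public SpeechRecognitionResult[] results;
}

public class SpeechRecognitionResult
{
    public SpeechRecognitionAlternative[] alternatives;
}

public class SpeechRecognitionAlternative
{
    public string transcript;
    public float confidence;
}

public static class ResponseParsingExample
{
    public static string FirstTranscriptOrEmpty(string responseJson)
    {
        var response = JsonConvert.DeserializeObject<SpeechRecognizeResponse>(responseJson);

        // "results" is absent (null after deserialization) when nothing was recognized.
        if (response?.results == null || response.results.Length == 0 ||
            response.results[0].alternatives == null || response.results[0].alternatives.Length == 0)
        {
            return string.Empty;
        }

        return response.results[0].alternatives[0].transcript;
    }
}

Unrelated to parsing, note that Example #3 creates a new HttpClient per request; for frequent recognitions it is usually better to reuse a single shared client.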