Code Example #1
        public void Update()
        {
            if (IsRecording)
            {
                // Read the microphone's current write position and copy the latest samples from the working AudioClip.
                _currentSamplePosition = Microphone.GetPosition(_microphoneDevice);
                _microphoneWorkingAudioClip.GetData(_currentAudioSamples, 0);

                if (IsEnabledVoiceDetection)
                {
                    // Run voice activity detection on the newest samples.
                    bool isTalking = _voiceDetectionManager.CheckVoice(AudioClip2ByteConverter.FloatToByte(_currentAudioSamples));

                    // Silence -> speech transition: raise the begin-talking event.
                    if (!_isTalking && isTalking)
                    {
                        _isTalking = true;

                        if (BeginTalkigEvent != null)
                        {
                            BeginTalkigEvent();
                        }
                    }
                    // Speech -> silence transition: build an AudioClip from the buffered samples and raise the end-talking event.
                    else if (_isTalking && !isTalking)
                    {
                        _isTalking = false;

                        _latestVoiceAudioClip = MakeAudioClipFromSamples(_currentRecordingVoice.ToArray());

                        if (EndTalkigEvent != null)
                        {
                            EndTalkigEvent(_latestVoiceAudioClip);
                        }

                        _currentRecordingVoice.Clear();
                    }
                    else if (_isTalking)
                    {
                        AddVoiceSamples();
                    }
                }
                else
                {
                    // Voice detection disabled: buffer every frame while recording.
                    AddVoiceSamples();
                }

                _previousSamplePosition = _currentSamplePosition;
            }
        }
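
AddVoiceSamples is not shown above. A minimal sketch of what such a helper could look like, assuming _currentRecordingVoice is a List<float> and the working AudioClip is written as a ring buffer, so the newly recorded range can wrap around the end of the array (the method body below is an illustration, not the plugin's actual implementation):

        // Hypothetical sketch: copy the samples written since the previous frame into the voice buffer.
        // Handles the wrap-around case where the microphone ring buffer has looped back to index 0.
        private void AddVoiceSamples()
        {
            if (_currentSamplePosition == _previousSamplePosition)
            {
                return; // nothing new was recorded this frame
            }

            if (_currentSamplePosition > _previousSamplePosition)
            {
                // Contiguous range: previous position up to the current position.
                for (int i = _previousSamplePosition; i < _currentSamplePosition; i++)
                {
                    _currentRecordingVoice.Add(_currentAudioSamples[i]);
                }
            }
            else
            {
                // Wrapped range: read to the end of the buffer, then from the start.
                for (int i = _previousSamplePosition; i < _currentAudioSamples.Length; i++)
                {
                    _currentRecordingVoice.Add(_currentAudioSamples[i]);
                }

                for (int i = 0; i < _currentSamplePosition; i++)
                {
                    _currentRecordingVoice.Add(_currentAudioSamples[i]);
                }
            }
        }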
Code Example #2
        public static string Convert(float[] raw, Enumerators.AudioEncoding encoding, bool increaseVolume = false, float volume = 1f)
        {
            byte[] audioArray;

            switch (encoding)
            {
            case Enumerators.AudioEncoding.LINEAR16:
            {
                if (increaseVolume)
                {
                    // Optionally boost the volume by round-tripping the samples through the byte converter.
                    raw = AudioClip2ByteConverter.ByteToFloat(AudioClipRaw2ByteConverter.AudioClipRawToByte(raw, increaseVolume, volume));
                }

                // Convert the float samples to LINEAR16 PCM bytes.
                audioArray = AudioClipRaw2PCMConverter.AudioClipRaw2PCM(raw);
            }
            break;

            default:
                throw new System.NotSupportedException(encoding + " is not supported for conversion!");
            }

            return System.Convert.ToBase64String(audioArray);
        }
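
For reference, the LINEAR16 branch ultimately produces Base64-encoded 16-bit PCM. A self-contained illustration of that encoding, assuming the converter clamps each float sample to [-1, 1] and writes it as a little-endian 16-bit integer (the class and method names below are made up for the example):

        public static class Linear16Example
        {
            // Illustrative only: clamp each float sample, scale it to a signed 16-bit value,
            // write it little-endian, and Base64-encode the resulting PCM byte array.
            public static string FloatsToBase64Pcm16(float[] samples)
            {
                byte[] pcm = new byte[samples.Length * 2];

                for (int i = 0; i < samples.Length; i++)
                {
                    float clamped = System.Math.Max(-1f, System.Math.Min(1f, samples[i]));
                    short value = (short)(clamped * short.MaxValue);

                    pcm[i * 2] = (byte)(value & 0xFF);
                    pcm[i * 2 + 1] = (byte)((value >> 8) & 0xFF);
                }

                return System.Convert.ToBase64String(pcm);
            }
        }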
Code Example #3
        public void Update()
        {
            if (IsRecording)
            {
                // Read the microphone's current write position and copy the latest raw samples from the working AudioClip.
                _currentSamplePosition = CustomMicrophone.GetPosition(MicrophoneDevice);
                CustomMicrophone.GetRawData(ref _currentAudioSamples, _microphoneWorkingAudioClip);

                if (DetectVoice)
                {
                    // Run voice activity detection on the newest samples.
                    bool isTalking = _voiceDetectionManager.HasDetectedVoice(AudioClip2ByteConverter.FloatToByte(_currentAudioSamples));

                    // Track how long the user has been silent; ending the talk session is deferred by this delay.
                    if (isTalking)
                    {
                        _endTalkingDelay = 0f;
                    }
                    else
                    {
                        _endTalkingDelay += Time.deltaTime;
                    }

                    // Silence -> speech transition: raise the talk-began event.
                    if (!_isTalking && isTalking)
                    {
                        _isTalking = true;

#if NET_2_0 || NET_2_0_SUBSET
                        if (TalkBeganEvent != null)
                        {
                            TalkBeganEvent();
                        }
#else
                        TalkBeganEvent?.Invoke();
#endif
                    }
                    // Speech -> silence transition, once the configured end-talking delay has elapsed.
                    else if (_isTalking && !isTalking && _endTalkingDelay >= _speechRecognitionManager.CurrentConfig.voiceDetectionEndTalkingDelay)
                    {
                        _isTalking = false;

                        LastRecordedRaw  = _currentRecordingVoice.ToArray();
                        LastRecordedClip = AudioConvert.Convert(LastRecordedRaw, _microphoneWorkingAudioClip.channels);

                        _currentRecordingVoice.Clear();
#if NET_2_0 || NET_2_0_SUBSET
                        if (TalkEndedEvent != null)
                        {
                            TalkEndedEvent(LastRecordedClip, LastRecordedRaw);
                        }
                        }
#else
                        TalkEndedEvent?.Invoke(LastRecordedClip, LastRecordedRaw);
#endif
                    }
                    else if (_isTalking && isTalking)
                    {
                        AddAudioSamplesIntoBuffer();
                    }
                }
                else
                {
                    // Voice detection disabled: buffer every frame while recording.
                    AddAudioSamplesIntoBuffer();
                }

                _previousSamplePosition = _currentSamplePosition;
            }
        }
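
A minimal usage sketch for the events above, assuming the Update loop belongs to a recorder component, here called VoiceRecorder (the type name and field are assumptions), whose TalkBeganEvent takes no arguments and whose TalkEndedEvent passes the recorded AudioClip and raw samples, as in the snippet:

        public class VoiceRecorderListener : UnityEngine.MonoBehaviour
        {
            // Hypothetical reference to the component that runs the Update() loop shown above.
            public VoiceRecorder recorder;

            private void OnEnable()
            {
                recorder.TalkBeganEvent += OnTalkBegan;
                recorder.TalkEndedEvent += OnTalkEnded;
            }

            private void OnDisable()
            {
                recorder.TalkBeganEvent -= OnTalkBegan;
                recorder.TalkEndedEvent -= OnTalkEnded;
            }

            private void OnTalkBegan()
            {
                UnityEngine.Debug.Log("Voice detected - recording started.");
            }

            private void OnTalkEnded(UnityEngine.AudioClip clip, float[] raw)
            {
                UnityEngine.Debug.Log("Voice ended - recorded " + raw.Length + " samples.");
                // The clip could now be handed off to a speech recognition request.
            }
        }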