コード例 #1
    /// <summary>
    /// OnAudioFilterRead is used to capture live microphone audio when recording or recognizing.
    /// When OnAudioFilterRead is implemented, Unity inserts a custom filter into the audio DSP chain.
    /// The filter is inserted in the same order as the MonoBehaviour script is shown in the inspector.
    /// OnAudioFilterRead is called every time a chunk of audio is sent to the filter (this happens
    /// frequently, every ~20ms depending on the sample rate and platform).
    /// </summary>
    /// <param name="data">The audio data is an array of floats ranging from [-1.0f;1.0f]. Here it contains
    /// audio from AudioClip on the AudioSource, which itself receives data from the microphone.</param>
    /// <param name="channels">Number of interleaved audio channels in <paramref name="data"/>.</param>
    void OnAudioFilterRead(float[] data, int channels)
    {
        // NOTE(review): this callback runs on the audio DSP thread every ~20ms. An unconditional
        // Debug.Log here allocates and floods the console on a latency-sensitive thread, so it is
        // left disabled; re-enable only for short debugging sessions.
        //Debug.Log($"Received audio data of size: {data.Length} - First sample: {data[0]}");
        //Debug.Log($"Received audio data: {channels} channel(s), size {data.Length} samples.");

        if (isRecording || isRecognizing)
        {
            // Convert the float samples to 16-bit PCM bytes before the buffer is muted below.
            byte[] audiodata = ConvertAudioClipDataToInt16ByteArray(data);

            // Mute all the samples to avoid audio feedback into the microphone.
            Array.Clear(data, 0, data.Length);

            if (isRecording) // We only need to keep all audio data when persisting to a file.
            {
                recordingData.AddRange(audiodata);
                // NOTE(review): audiodata.Length is a BYTE count (2 bytes per Int16 sample);
                // confirm consumers of recordingSamples expect bytes rather than sample frames.
                recordingSamples += audiodata.Length;
            }
            else // If we're not recording, then we're in recognition mode: stream to the service.
            {
                recoServiceClient.SendAudioPacket(requestId, audiodata);
            }
        }
    }
コード例 #2
    /// <summary>
    /// OnAudioFilterRead is used to capture live microphone audio when recording or recognizing.
    /// When OnAudioFilterRead is implemented, Unity inserts a custom filter into the audio DSP chain.
    /// The filter is inserted in the same order as the MonoBehaviour script is shown in the inspector.
    /// OnAudioFilterRead is called every time a chunk of audio is sent to the filter (this happens
    /// frequently, every ~20ms depending on the sample rate and platform).
    /// </summary>
    /// <param name="data">The audio data is an array of floats ranging from [-1.0f;1.0f]. Here it contains
    /// audio from AudioClip on the AudioSource, which itself receives data from the microphone.</param>
    /// <param name="channels">Number of interleaved audio channels in <paramref name="data"/>.</param>
    void OnAudioFilterRead(float[] data, int channels)
    {
        try
        {
            // NOTE(review): runs on the audio DSP thread; avoid per-callback logging here.
            //Debug.Log($"Received audio data: {channels} channel(s), size {data.Length} samples.");

            float maxAudio = 0f;

            if (isRecording || isRecognizing)
            {
                // Convert the float samples to 16-bit PCM bytes before the buffer is muted below.
                byte[] audiodata = ConvertAudioClipDataToInt16ByteArray(data);
                for (int i = 0; i < data.Length; i++)
                {
                    if (UseClientSideSilenceDetection)
                    {
                        // Track the peak absolute amplitude of this buffer for silence detection.
                        maxAudio = Mathf.Max(maxAudio, Mathf.Abs(data[i]));
                    }

                    // Mute all the samples to avoid audio feedback into the microphone
                    data[i] = 0.0f;
                }

                if (UseClientSideSilenceDetection)
                {
                    UpdateSilenceState(maxAudio);
                }

                if (isRecording) // We only need to keep all audio data when persisting to a file.
                {
                    recordingData.AddRange(audiodata);
                    // NOTE(review): audiodata.Length is a byte count (2 bytes per Int16 sample);
                    // confirm consumers of recordingSamples expect bytes rather than sample frames.
                    recordingSamples += audiodata.Length;
                }
                else // if we're not recording, then we're in recognition mode
                {
                    recoServiceClient.SendAudioPacket(requestId, audiodata);
                }
            }
        }
        catch (Exception ex)
        {
            // ex.ToString() already includes the message and stack trace, so don't append
            // ex.Message a second time (the old format string duplicated it).
            string msg = $"Error: Something went wrong when reading live audio data from the microphone. See error details below:{Environment.NewLine}{ex}";
            Debug.LogError(msg);
            // NOTE(review): this callback runs on the audio DSP thread — verify that
            // UpdateUICanvasLabel marshals to the main thread before touching Unity UI objects.
            UpdateUICanvasLabel(msg, FontStyle.Normal);
        }
    }

    /// <summary>
    /// Tracks silence-state transitions for the current audio buffer and raises OnSpeechEnded
    /// exactly once when silence has lasted at least SilenceTimeout seconds.
    /// </summary>
    /// <param name="maxAudio">Peak absolute amplitude observed in the current audio buffer.</param>
    private void UpdateSilenceState(float maxAudio)
    {
        // Was THIS buffer silent?
        bool silentThisSample = maxAudio <= SilenceThreshold;

        if (!silentThisSample)
        {
            // Not silent: if we had been in silence, note that it just ended.
            if (isSilent)
            {
                Debug.Log($"Silence Ended ({maxAudio})");
                isSilent = false;
            }
            return;
        }

        if (!isSilent)
        {
            // Entering silence: start the timeout clock.
            Debug.Log($"Silence Starting... ({maxAudio})");
            isSilent        = true;
            // UtcNow is immune to DST/wall-clock adjustments (Now is not); ticks are used because
            // Unity's Time class can't be used on this thread.
            silenceStarted  = DateTime.UtcNow.Ticks;
            silenceNotified = false;
        }
        else if (!silenceNotified)
        {
            // Still silent and not yet notified: check whether the silence timeout has elapsed.
            TimeSpan duration = TimeSpan.FromTicks(DateTime.UtcNow.Ticks - silenceStarted);
            if (duration.TotalSeconds >= SilenceTimeout)
            {
                Debug.Log("Silence Timeout");

                // Mark notified so OnSpeechEnded fires only once per silence period.
                silenceNotified = true;

                // Notify
                OnSpeechEnded();
            }
        }
    }