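        // NOTE: The declarations below are assumed for context (a sketch inferred from
        // usage in the methods that follow, not taken from the original source).
        // MemoryStream and BinaryWriter require `using System.IO;`.
        private MemoryStream audioMemoryStream = null;
        private BinaryWriter audioStreamWriter = null;
        private double audioStartTime = 0.0;
        private int numCachedAudioFrames = 0;
        // Number of OnAudioFilterRead callbacks to buffer before sending audio to the
        // compositor; the exact value here is an assumption.
        private const int MAX_NUM_CACHED_AUDIO_FRAMES = 5;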
        /// <summary>
        /// Stops audio recording by immediately flushing any buffered audio data to the compositor.
        /// </summary>
        private void StopRecordingAudio()
        {
            //Send any leftover stream data
            if (audioMemoryStream != null)
            {
                audioStreamWriter.Flush();
                byte[] outBytes = audioMemoryStream.ToArray();
                UnityCompositorInterface.SetAudioData(outBytes, outBytes.Length, audioStartTime);
                audioMemoryStream = null;
            }
        }
        // Note: this callback is not (or at least not always) invoked on the main thread;
        // Unity runs OnAudioFilterRead on the audio thread.
        private void OnAudioFilterRead(float[] data, int channels)
        {
            if (!UnityCompositorInterface.IsRecording())
            {
                return;
            }

            //Create new stream
            if (audioMemoryStream == null)
            {
                audioMemoryStream = new MemoryStream();
                audioStreamWriter = new BinaryWriter(audioMemoryStream);
                // Audio time in seconds; more accurate than Time.time.
                double audioSettingsTime = AudioSettings.dspTime;
                // Capture frame time in seconds (frame index times per-frame duration in 100 ns units).
                double captureFrameTime = UnityCompositorInterface.GetCaptureFrameIndex() * UnityCompositorInterface.GetColorDuration() / 10000000.0;
                DebugLog($"Obtained Audio Sample, AudioSettingsTime:{audioSettingsTime}, CaptureFrameTime:{captureFrameTime}");
                audioStartTime       = captureFrameTime;
                numCachedAudioFrames = 0;
            }

            //Put data into stream
            for (int i = 0; i < data.Length; i++)
            {
                // Rescale float to short range for encoding (assumes samples stay within
                // [-1, 1]; values outside that range would overflow the cast).
                short audioEntry = (short)(data[i] * short.MaxValue);
                audioStreamWriter.Write(audioEntry);
            }

            numCachedAudioFrames++;

            //Send to compositor (buffer a few calls to reduce potential timing errors between packages)
            if (numCachedAudioFrames >= MAX_NUM_CACHED_AUDIO_FRAMES)
            {
                audioStreamWriter.Flush();
                byte[] outBytes = audioMemoryStream.ToArray();
                audioMemoryStream = null;

                // The Unity compositor assumes that audioStartTime is in capture frame sample time,
                // which is why we use the capture frame time above rather than AudioSettings.dspTime.
                // Any interpolation between these two time sources needs to be done in the editor
                // before handing sample time values to the compositor.
                UnityCompositorInterface.SetAudioData(outBytes, outBytes.Length, audioStartTime);
            }
        }
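
        // Helper sketch (an assumption, not part of the original source): spells out the
        // unit conversion used above. GetColorDuration() appears to return the per-frame
        // duration in 100-nanosecond units, so dividing by 10,000,000.0 converts the
        // capture frame index into seconds. E.g. CaptureFrameIndexToSeconds(60, 166667)
        // is roughly 1.0 second at 60 fps.
        private static double CaptureFrameIndexToSeconds(int frameIndex, long colorDuration100ns)
        {
            const double TicksPerSecond = 10000000.0; // 100 ns units per second
            return frameIndex * colorDuration100ns / TicksPerSecond;
        }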