} // Audio duration.

//  Copies the full interleaved audio buffer (all channels) from the native decoder
//  in a single call. Only meaningful when isAllAudioChEnabled is true; otherwise
//  every output is zeroed and a warning is logged.
//  data              - interleaved samples, length = samplesPerChannel * audioChannels,
//                      or null when no samples were available.
//  time              - native presentation time reported for the returned samples.
//  samplesPerChannel - sample count per channel (0 when no data was available).
public void getAllAudioChannelData(out float[] data, out double time, out int samplesPerChannel) {
    if (!isAllAudioChEnabled) {
        // Message fixed: the flag is named isAllAudioChEnabled (old text said isAllAudioEnabled).
        print(LOG_TAG + " this function only works for isAllAudioChEnabled == true.");
        data = null;
        time = 0;
        samplesPerChannel = 0;
        return;
    }

    var dataPtr = IntPtr.Zero;  // IntPtr.Zero instead of new IntPtr() — same value, clearer intent.
    var lengthPerChannel = 0;
    double audioNativeTime = NativeClass.nativeGetAudioData(decoderID, ref dataPtr, ref lengthPerChannel);

    float[] buff = null;
    if (lengthPerChannel > 0) {
        // Native side reports samples per channel; the buffer itself is interleaved
        // across all channels, hence the multiplication.
        buff = new float[lengthPerChannel * audioChannels];
        Marshal.Copy(dataPtr, buff, 0, buff.Length);
        // Release the native-side frame once the managed copy is complete.
        NativeClass.nativeFreeAudioData(decoderID);
    }

    data = buff;
    time = audioNativeTime;
    samplesPerChannel = lengthPerChannel;
}
//  BackgroundWorker DoWork handler: continuously pulls decoded audio frames from
//  the native plugin into the shared audioDataBuff until the decoder leaves the
//  started state. Runs on a worker thread; all access to audioDataBuff is
//  serialized through _lock.
private void pullAudioData(object sender, DoWorkEventArgs e) {
    var dataPtr = IntPtr.Zero;     // Native buffer pointer filled by nativeGetAudioData.
    var tempBuff = new float[0];   // Managed staging buffer, grown/shrunk on demand.
    var audioFrameLength = 0;
    double lastTime = -1.0;        // Last scheduled timestamp; avoids queuing the same frame twice.
                                   // (Was "-1.0f" — a float literal assigned to a double.)
    lock (_lock) {
        // Publish the shared buffer under the lock so a concurrent consumer never
        // observes it between allocation and assignment.
        audioDataBuff = new List<float>();
    }

    while (decoderState >= NativeClass.DecoderState.START) {
        if (decoderState != NativeClass.DecoderState.SEEK_FRAME) {
            double audioNativeTime = NativeClass.nativeGetAudioData(decoderID, ref dataPtr, ref audioFrameLength);
            // SEEK_FRAME is re-checked here on purpose: another thread may have
            // started a seek while the native call was running.
            if (0 < audioNativeTime &&
                lastTime != audioNativeTime &&
                decoderState != NativeClass.DecoderState.SEEK_FRAME &&
                audioFrameLength != 0) {
                if (firstAudioFrameTime == -1.0) {
                    firstAudioFrameTime = audioNativeTime;
                }

                lastTime = audioNativeTime;
                audioFrameLength *= audioChannels;  // Native length is per channel; data is interleaved.
                if (tempBuff.Length != audioFrameLength) {
                    // For dynamic audio data length, reallocate the memory if needed.
                    tempBuff = new float[audioFrameLength];
                }
                Marshal.Copy(dataPtr, tempBuff, 0, audioFrameLength);

                lock (_lock) {
                    audioDataBuff.AddRange(tempBuff);
                }
            }

            if (audioNativeTime != -1.0) {
                // Free the native frame whether or not it was scheduled above.
                NativeClass.nativeFreeAudioData(decoderID);
            }
        }

        // Sleep on EVERY iteration. The original slept only in the non-seek branch,
        // so the loop busy-spun at full CPU for the whole duration of a seek.
        Thread.Sleep(2);
    }

    lock (_lock) {
        audioDataBuff.Clear();
        audioDataBuff = null;
    }
}