/// <summary>
/// Media extraction/decoding work loop: feeds encoded samples from <c>mediaExtractor</c> into
/// <c>MediaDecoder</c> and drains decoded output buffers, updating <c>MediaCurrentTime</c> and
/// <c>isEOF</c> as it goes. Runs until a Dispose command is observed via
/// <c>ProcessCommandsAndUpdateCurrentState()</c>.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when the decoder or the media has not been initialized before this loop starts.
/// </exception>
private void ExtractMedia()
{
    if (MediaDecoder == null)
    {
        throw new InvalidOperationException("The Media Codec Extractor has not been initialized");
    }
    if (!isInitialized)
    {
        throw new InvalidOperationException("The Media Codec has not been initialized for a media");
    }

    var bufferInfo = new MediaCodec.BufferInfo();
    var waitDefaultTime = TimeSpan.FromMilliseconds(10);

    MediaDecoder.Start();
    while (true)
    {
        var waitTime = waitDefaultTime; // time to wait at the end of the loop iteration

        //Process the commands
        if (ProcessCommandsAndUpdateCurrentState())
        {
            waitTime = TimeSpan.Zero;
        }

        // terminate the thread on disposal
        if (currentState == SchedulerAsyncCommandEnum.Dispose)
        {
            return;
        }

        //=================================================================================================
        //Extract video inputs
        if (!inputExtractionDone)
        {
            // Non-blocking dequeue (timeout 0) so the loop stays responsive to commands.
            int inputBufIndex = MediaDecoder.DequeueInputBuffer(0);
            if (inputBufIndex >= 0)
            {
                waitTime = TimeSpan.Zero;
                var inputBuffer = MediaDecoder.GetInputBuffer(inputBufIndex);

                // Read the sample data into the ByteBuffer. This neither respects nor updates inputBuf's position, limit, etc.
                int chunkSize = mediaExtractor.ReadSampleData(inputBuffer, 0);
                if (chunkSize > 0)
                {
                    if (mediaExtractor.SampleTrackIndex != mediaTrackIndex)
                    {
                        throw new Exception($"Got media sample from track {mediaExtractor.SampleTrackIndex}, track expected {mediaTrackIndex}");
                    }

                    MediaDecoder.QueueInputBuffer(inputBufIndex, 0, chunkSize, mediaExtractor.SampleTime, 0);
                    mediaExtractor.Advance();
                }
                else // End of stream -- send empty frame with EOS flag set.
                {
                    MediaDecoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L, MediaCodecBufferFlags.EndOfStream);
                    inputExtractionDone = true;
                }
            }
            else
            {
                //do nothing: the input buffer queue is full (we need to output them first)
            }
        }

        //=================================================================================================
        // Process the output buffers
        if (ShouldProcessDequeueOutput(ref waitTime))
        {
            int indexOutput = MediaDecoder.DequeueOutputBuffer(bufferInfo, 0);
            switch (indexOutput)
            {
                case (int)MediaCodecInfoState.TryAgainLater: // decoder not ready yet (haven't processed input yet)
                case (int)MediaCodecInfoState.OutputBuffersChanged: //deprecated: we just ignore it
                    break;
                case (int)MediaCodecInfoState.OutputFormatChanged:
                    Logger.Verbose("decoder output format changed: " + MediaDecoder.OutputFormat.ToString());
                    break;
                default: // the index of the output buffer
                    if (indexOutput < 0)
                    {
                        // Unknown negative status: treat as end of stream rather than crashing.
                        Logger.Warning("unexpected index from decoder.dequeueOutputBuffer: " + indexOutput);
                        isEOF = true;
                        break;
                    }

                    if ((bufferInfo.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                    {
                        // Decoder signalled end of stream; release the buffer without rendering.
                        isEOF = true;
                        MediaDecoder.ReleaseOutputBuffer(indexOutput, false);
                        break;
                    }

                    MediaCurrentTime = TimeSpanExtensions.FromMicroSeconds(bufferInfo.PresentationTimeUs);
                    ProcessOutputBuffer(bufferInfo, indexOutput);
                    break;
            }
        }

        if (waitTime > TimeSpan.Zero)
        {
            // sleep required time to avoid active looping
            // Note: do not sleep more than 'waitDefaultTime' to continue processing play commands
            Utilities.Sleep(TimeSpanExtensions.Min(waitDefaultTime, waitTime));
        }
    }
}
/**
 * Work loop: drains encoded samples from the extractor into the decoder, renders each decoded
 * frame to the given output surface, and runs Mobile Vision face detection on the rendered
 * frame bitmap (via CreateFaceframes). Frame presentation timestamps are recorded on the input
 * side (frameTimestamps) and looked up by decode order on the output side.
 * NOTE(review): frameTimestamps is indexed by decodeCount, which assumes the decoder outputs
 * exactly one frame per queued sample, in order — confirm for the codecs in use.
 */
private void doExtract(MediaExtractor extractor, int trackIndex, MediaCodec decoder, CodecOutputSurface outputSurface)
{
    Stopwatch stopWatch = new Stopwatch();
    const int TIMEOUT_USEC = 10000;
    ByteBuffer[] decoderInputBuffers = decoder.GetInputBuffers();
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    int inputChunk = 0;
    int decodeCount = 0;
    var frameTimestamps = new List<long>();
    bool outputDone = false;
    bool inputDone = false;

    //speed vs accuracy tradeoffs https://stackoverflow.com/questions/34132444/google-mobile-vision-poor-facedetector-performance-without-camerasource
    //reducing bitmap resolution helps the most and thats ok because i'm not using them after
    var detector = new FaceDetector.Builder(Application.Context)
        .SetTrackingEnabled(true) //tracking enables false makes it much slow wtf?!?!
        .SetClassificationType(ClassificationType.All)
        .SetProminentFaceOnly(true) // no diff
        //.SetMinFaceSize((float)0.1) //small performance gain when removed
        .SetMode(FaceDetectionMode.Fast) // tiny small performance gain
        .Build();

    while (!outputDone)
    {
        // Start is a no-op when the stopwatch is already running; total elapsed time is logged at the end.
        stopWatch.Start();

        // Feed more data to the decoder.
        if (!inputDone)
        {
            int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
            if (inputBufIndex >= 0)
            {
                ByteBuffer inputBuf = decoderInputBuffers[inputBufIndex];
                // Read the sample data into the ByteBuffer. This neither respects nor
                // updates inputBuf's position, limit, etc.
                int chunkSize = extractor.ReadSampleData(inputBuf, 0);
                if (chunkSize < 0)
                {
                    // End of stream -- send empty frame with EOS flag set.
                    decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L, MediaCodec.BufferFlagEndOfStream);
                    inputDone = true;
                    //if (VERBOSE) Log.d(TAG, "sent input EOS");
                }
                else
                {
                    if (extractor.SampleTrackIndex != trackIndex)
                    {
                        // Sample from an unexpected track is still queued below (original behavior).
                        //Log.w(TAG, "WEIRD: got sample from track " + extractor.getSampleTrackIndex() + ", expected " + trackIndex);
                    }
                    frameTimestamps.Add(extractor.SampleTime); //might need to play with offset here to get right sync from decoder
                    decoder.QueueInputBuffer(inputBufIndex, 0, chunkSize, extractor.SampleTime, 0 /*flags*/);
                    //if (VERBOSE) {
                    //    Log.d(TAG, "submitted frame " + inputChunk + " to dec, size=" + chunkSize);
                    //}
                    inputChunk++;
                    extractor.Advance();
                }
            }
            else
            {
                //if (VERBOSE) Log.d(TAG, "input buffer not available");
            }
        }

        if (!outputDone)
        {
            int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // no output available yet
                //if (VERBOSE) Log.d(TAG, "no output from decoder available");
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                // not important for us, since we're using Surface
                //if (VERBOSE) Log.d(TAG, "decoder output buffers changed");
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                //MediaFormat newFormat = decoder.OutputFormat;
                //if (VERBOSE) Log.d(TAG, "decoder output format changed: " + newFormat);
            }
            else if (decoderStatus < 0)
            {
                // Any other negative status is unexpected and aborts the loop.
                //fail("unexpected result from decoder.dequeueOutputBuffer: " + decoderStatus);
                throw new InvalidOperationException();
            }
            else
            {
                //if (VERBOSE) Log.d(TAG, "surface decoder given buffer " + decoderStatus + " (size=" + info.size + ")");
                if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                {
                    //if (VERBOSE) Log.d(TAG, "output EOS");
                    outputDone = true;
                }
                bool doRender = (info.Size != 0); //could not get this working!!!

                // As soon as we call releaseOutputBuffer, the buffer will be forwarded
                // to SurfaceTexture to convert to a texture. The API doesn't guarantee
                // that the texture will be available before the call returns, so we
                // need to wait for the onFrameAvailable callback to fire.
                decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                if (doRender)
                {
                    //outputSurface.awaitNewImage(); //could not get callback to work and even so do not want to wait 2.5 seconds for each frame, might need to revist
                    outputSurface.mTextureRender.checkGlError("before updateTexImage");
                    outputSurface.mSurfaceTexture.UpdateTexImage();
                    outputSurface.drawImage(true);
                    //Log.Info("innerSTOPWATCH_begin!!!!:", stopWatch.ElapsedMilliseconds.ToString());
                    //can't call face detector this way its too slow or maybe there is a busy loop???
                    //_FaceFetchDataTasks.Add(Task.Run(() => CreateFaceframes(detector, outputSurface.GetFramebitmap(), decodeCount, frameTimestamps[decodeCount])));
                    //if (decodeCount % 2 ==0) //doesn't help that much and messes with rating algo
                    CreateFaceframes(detector, outputSurface.GetFramebitmap(), frameTimestamps[decodeCount]);
                    //Log.Info("innerSTOPWATCH_end!!!!:", stopWatch.ElapsedMilliseconds.ToString());
                    decodeCount++;
                }
            }
        }
    }

    stopWatch.Stop();
    Log.Info("inner STOPWATCH!!!!:", string.Format("numberofframes = {0}, totaltime = {1}", decodeCount, stopWatch.ElapsedMilliseconds));
    // NOTE(review): detector.Release() is skipped if the loop throws — consider try/finally.
    detector.Release();
}
/// <summary>
/// Pumps one round of the audio decoder: queues at most one encoded sample from
/// <c>audioMediaExtractor</c> and drains at most one decoded buffer into <c>StorageBuffer</c>.
/// Intended to be called repeatedly until <paramref name="endOfFile"/> becomes true.
/// </summary>
/// <param name="endOfFile">Set to true once the decoder has emitted its end-of-stream buffer.</param>
/// <returns>True when decoded bytes were appended to <c>StorageBuffer</c> during this call.</returns>
/// <exception cref="InvalidOperationException">Thrown on an unexpected decoder output status.</exception>
private bool ExtractSomeAudioData(out bool endOfFile)
{
    endOfFile = extractionOutputDone;
    if (endOfFile)
    {
        return (false);
    }

    var hasExtractedData = false;
    int TimeoutUs = 20000;
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();

    if (!extractionInputDone)
    {
        int inputBufIndex = audioMediaDecoder.DequeueInputBuffer(TimeoutUs);
        if (inputBufIndex >= 0)
        {
            Java.Nio.ByteBuffer inputBuffer = audioMediaDecoder.GetInputBuffer(inputBufIndex);

            //Read the sample data into the ByteBuffer. This neither respects nor updates inputBuf's position, limit, etc.
            int chunkSize = audioMediaExtractor.ReadSampleData(inputBuffer, 0);
            if (chunkSize < 0)
            {
                //End of stream: send empty frame with EOS flag set
                audioMediaDecoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L, MediaCodecBufferFlags.EndOfStream);
                extractionInputDone = true;
                //Logger.Verbose("sent input EOS");
            }
            else
            {
                if (audioMediaExtractor.SampleTrackIndex != trackIndexAudio)
                {
                    // Unexpected track: warn but still queue the sample (original behavior).
                    Logger.Warning(string.Format("got audio sample from track {0}, expected {1}", audioMediaExtractor.SampleTrackIndex, trackIndexAudio));
                }

                var presentationTimeMicroSeconds = audioMediaExtractor.SampleTime;
                audioMediaDecoder.QueueInputBuffer(inputBufIndex, 0, chunkSize, presentationTimeMicroSeconds, 0);
                audioMediaExtractor.Advance();
            }
        }
        else
        {
            //do nothing: the input buffer queue is full (we need to output them first)
            //continue;
        }
    }

    int decoderStatus = audioMediaDecoder.DequeueOutputBuffer(info, TimeoutUs);
    switch (decoderStatus)
    {
        case (int)MediaCodecInfoState.TryAgainLater:
        {
            Logger.Verbose("no output from decoder available");
            break;
        }
        case (int)MediaCodecInfoState.OutputFormatChanged:
        {
            MediaFormat newFormat = audioMediaDecoder.OutputFormat;
            string newFormatStr = newFormat.ToString();
            Logger.Verbose("audio decoder output format changed: " + newFormatStr);
            break;
        }
        case (int)MediaCodecInfoState.OutputBuffersChanged:
        {
            //deprecated: we just ignore it
            break;
        }
        default:
        {
            if (decoderStatus < 0)
            {
                throw new InvalidOperationException(string.Format("unexpected result from audio decoder.DequeueOutputBuffer: {0}", decoderStatus));
            }

            if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
            {
                Logger.Verbose("audio: output EOS");
                extractionOutputDone = true;
            }

            if (info.Size > 0)
            {
                hasExtractedData = true;
                var buffer = audioMediaDecoder.GetOutputBuffer(decoderStatus);
                var presentationTime = TimeSpanExtensions.FromMicroSeconds(info.PresentationTimeUs);

                // Append the decoded bytes only if they fit; otherwise the chunk is dropped.
                if (StorageBuffer.CountDataBytes + info.Size <= StorageBuffer.Data.Length)
                {
                    buffer.Get(StorageBuffer.Data, StorageBuffer.CountDataBytes, info.Size); // Read the buffer all at once
                    buffer.Clear(); // MUST DO!!! OTHERWISE THE NEXT TIME YOU GET THIS SAME BUFFER BAD THINGS WILL HAPPEN
                    buffer.Position(0);

                    // First chunk of the storage buffer defines its presentation time.
                    if (StorageBuffer.CountDataBytes == 0)
                    {
                        StorageBuffer.PresentationTime = presentationTime;
                    }
                    StorageBuffer.CountDataBytes += info.Size;
                }
                else
                {
                    Logger.Error("The storage buffer has reached full capacity. Current data will be dropped");
                }
            }
            audioMediaDecoder.ReleaseOutputBuffer(decoderStatus, false);
            break;
        }
    }

    endOfFile = extractionOutputDone;
    return (hasExtractedData);
}
/// <summary>
/// Decodes the audio track of a media region (descriptor + offset/length) to raw PCM bytes.
/// </summary>
/// <param name="descriptor">File descriptor of the media container.</param>
/// <param name="offset">Byte offset of the media data within the descriptor.</param>
/// <param name="length">Byte length of the media data.</param>
/// <returns>The decoded PCM data together with sample rate, channel count and mime type.</returns>
/// <exception cref="ContentLoadException">
/// Thrown when no audio track exists, decoding stalls past the timeout budget, or the decoded
/// byte count does not match the duration-derived expectation.
/// </exception>
public static Result DecodeAudio(FileDescriptor descriptor, long offset, long length)
{
    using (var extractor = new MediaExtractor())
    {
        extractor.SetDataSource(descriptor, offset, length);

        // Select the FIRST audio track and stop scanning. The original kept looping, so a
        // later non-audio track could overwrite 'format'/'mime' and make the null-check below
        // throw even though an audio track had already been selected.
        MediaFormat format = null;
        string mime = null;
        for (int i = 0; i < extractor.TrackCount; i++)
        {
            var trackFormat = extractor.GetTrackFormat(i);
            var trackMime = trackFormat.GetString(MediaFormat.KeyMime);
            if (trackMime.StartsWith("audio/"))
            {
                format = trackFormat;
                mime = trackMime;
                extractor.SelectTrack(i);
                break;
            }
        }
        if (format == null)
        {
            throw new ContentLoadException("Could not find any audio track.");
        }

        int sampleRate = format.GetInteger(MediaFormat.KeySampleRate);
        long duration = format.GetLong(MediaFormat.KeyDuration);
        int channels = format.GetInteger(MediaFormat.KeyChannelCount);

        // Duration is in microseconds; output assumes 16-bit (2-byte) samples.
        int samples = (int)(sampleRate * duration / 1000000d);
        var output = new byte[samples * 2];

        int timeoutsLeft = 1000;

        var decoder = MediaCodecPool.RentDecoder(mime);
        try
        {
            decoder.Configure(format, null, null, MediaCodecConfigFlags.None);
            decoder.Start();

            ByteBuffer[] inputBuffers = decoder.GetInputBuffers();
            ByteBuffer[] outputBuffers = decoder.GetOutputBuffers();

            var bufferInfo = new MediaCodec.BufferInfo();
            int totalOffset = 0;
            bool inputDone = false;
            while (true)
            {
                if (!inputDone)
                {
                    int inputBufIndex = decoder.DequeueInputBuffer(5000);
                    if (inputBufIndex >= 0)
                    {
                        int size = extractor.ReadSampleData(inputBuffers[inputBufIndex], 0);
                        if (size >= 0)
                        {
                            decoder.QueueInputBuffer(
                                inputBufIndex, 0, size, extractor.SampleTime, MediaCodecBufferFlags.None);
                            extractor.Advance();
                        }
                        else
                        {
                            // No more samples: send an empty buffer carrying the EOS flag.
                            // (The original could queue the SAME buffer index twice — once with
                            // data, once with EOS — when the last sample and end-of-stream
                            // coincided, which is illegal for MediaCodec.)
                            decoder.QueueInputBuffer(
                                inputBufIndex, 0, 0, 0, MediaCodecBufferFlags.EndOfStream);
                            inputDone = true;
                        }
                    }
                }

                int decoderStatus = decoder.DequeueOutputBuffer(bufferInfo, 5000);
                if (decoderStatus >= 0)
                {
                    int size = bufferInfo.Size;
                    // Guard the copy so an over-long stream raises a load error instead of
                    // an ArgumentOutOfRangeException from Marshal.Copy.
                    if (totalOffset + size > output.Length)
                    {
                        throw new ContentLoadException(
                            "Decoded more data than the expected amount of samples.");
                    }

                    IntPtr bufferPtr = outputBuffers[decoderStatus].GetDirectBufferAddress();
                    IntPtr offsetPtr = bufferPtr + bufferInfo.Offset;
                    Marshal.Copy(offsetPtr, output, totalOffset, size);
                    decoder.ReleaseOutputBuffer(decoderStatus, render: false);
                    totalOffset += size;

                    // Test the EOS bit instead of exact equality so EOS combined with other
                    // flags (e.g. a partial frame) is still recognized.
                    if ((bufferInfo.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                    {
                        if (totalOffset != output.Length)
                        {
                            throw new ContentLoadException(
                                "Reached end of stream before reading expected amount of samples.");
                        }
                        break;
                    }
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
                {
                    outputBuffers = decoder.GetOutputBuffers();
                }
                else if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
                {
                    if (timeoutsLeft-- <= 0)
                    {
                        break;
                    }
                }
            }
        }
        finally
        {
            decoder.Stop();
            MediaCodecPool.ReturnDecoder(mime, decoder);
        }

        if (timeoutsLeft <= 0)
        {
            throw new ContentLoadException("Could not load sound effect in designated time frame.");
        }
        return (new Result(output, sampleRate, channels, mime));
    }
}
/// <summary>
/// Trims the range [startMicroSeconds, endMicroSeconds] from the source file into a new MP4 by
/// copying samples straight from a MediaExtractor to a MediaMuxer (no re-encode).
/// </summary>
/// <param name="srcPath">Path of the source media file.</param>
/// <param name="dstPath">Path of the trimmed MP4 to create.</param>
/// <param name="startMicroSeconds">Trim start in microseconds (0 = from the beginning).</param>
/// <param name="endMicroSeconds">Trim end in microseconds (&lt;= 0 = to the end of the source).</param>
/// <param name="useAudio">Whether audio tracks are copied.</param>
/// <param name="useVideo">Whether video tracks are copied.</param>
/// <returns>True on success; false when the source file is malformed.</returns>
/// <exception cref="InvalidParameterException">Thrown when start equals end.</exception>
private static bool genVideoUsingMuxer(String srcPath, String dstPath, long startMicroSeconds, long endMicroSeconds, bool useAudio, bool useVideo)
{
    if (startMicroSeconds == endMicroSeconds)
    {
        throw new InvalidParameterException("The end time has to be greater than the start time.");
    }

    // Set up MediaExtractor to read from the source.
    MediaExtractor extractor = new MediaExtractor();
    try
    {
        extractor.SetDataSource(srcPath);
        int trackCount = extractor.TrackCount;

        // Set up MediaMuxer for the destination.
        var muxer = new MediaMuxer(dstPath, MediaMuxer.OutputFormat.MuxerOutputMpeg4);

        // Set up the tracks and retrieve the max buffer size for selected tracks.
        Dictionary<int, int> indexMap = new Dictionary<int, int>(trackCount);
        int bufferSize = -1;
        for (int i = 0; i < trackCount; i++)
        {
            MediaFormat format = extractor.GetTrackFormat(i);
            String mime = format.GetString(MediaFormat.KeyMime);
            bool selectCurrentTrack = false;
            if (mime.StartsWith("audio/") && useAudio)
            {
                selectCurrentTrack = true;
            }
            else if (mime.StartsWith("video/") && useVideo)
            {
                selectCurrentTrack = true;
            }
            if (selectCurrentTrack)
            {
                extractor.SelectTrack(i);
                int dstIndex = muxer.AddTrack(format);
                indexMap.Add(i, dstIndex);
                if (format.ContainsKey(MediaFormat.KeyMaxInputSize))
                {
                    int newSize = format.GetInteger(MediaFormat.KeyMaxInputSize);
                    bufferSize = newSize > bufferSize ? newSize : bufferSize;
                }
            }
        }
        if (bufferSize < 0)
        {
            bufferSize = DEFAULT_BUFFER_SIZE;
        }

        // Set up the orientation and starting time for extractor.
        MediaMetadataRetriever retrieverSrc = new MediaMetadataRetriever();
        retrieverSrc.SetDataSource(srcPath);
        String degreesString = retrieverSrc.ExtractMetadata(MediaMetadataRetriever.MetadataKeyVideoRotation);
        retrieverSrc.Release(); // fix: the retriever was never released in the original
        if (degreesString != null)
        {
            int degrees = Integer.ParseInt(degreesString);
            if (degrees >= 0)
            {
                muxer.SetOrientationHint(degrees);
            }
        }
        if (startMicroSeconds > 0)
        {
            extractor.SeekTo(startMicroSeconds, MediaExtractor.SeekToClosestSync);
        }

        // Copy the samples from MediaExtractor to MediaMuxer. We will loop
        // for copying each sample and stop when we get to the end of the source
        // file or exceed the end time of the trimming.
        int offset = 0;
        int trackIndex = -1;
        ByteBuffer dstBuf = ByteBuffer.Allocate(bufferSize);
        MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
        try
        {
            muxer.Start();
            while (true)
            {
                bufferInfo.Offset = offset;
                bufferInfo.Size = extractor.ReadSampleData(dstBuf, offset);
                if (bufferInfo.Size < 0)
                {
                    Log.Info(LOGTAG, "Saw input EOS.");
                    bufferInfo.Size = 0;
                    break;
                }

                bufferInfo.PresentationTimeUs = extractor.SampleTime;
                if (endMicroSeconds > 0 && bufferInfo.PresentationTimeUs > endMicroSeconds)
                {
                    Log.Info(LOGTAG, "The current sample is over the trim end time.");
                    break;
                }

                // NOTE(review): extractor sample flags are mapped to codec buffer flags via
                // GetSyncsampleflags — verify the mapping covers all flag combinations.
                bufferInfo.Flags = GetSyncsampleflags(extractor.SampleFlags);
                trackIndex = extractor.SampleTrackIndex;
                muxer.WriteSampleData(indexMap[trackIndex], dstBuf, bufferInfo);
                extractor.Advance();
            }
            muxer.Stop();
        }
        catch (IllegalStateException)
        {
            // Swallow the exception due to malformed source.
            Log.Info(LOGTAG, "The source video file is malformed");
            return (false);
        }
        finally
        {
            muxer.Release();
        }
        return (true);
    }
    finally
    {
        extractor.Release(); // fix: the extractor was never released in the original
    }
}
/// <summary>
/// Trims the audio track of the input media and writes its samples into the supplied muxer.
/// If both inputPath and inputUri are non-null, the Uri is used; they cannot both be null.
/// </summary>
/// <param name="startMs">the start ms for trimming</param>
/// <param name="endMs">the final ms for trimming</param>
/// <param name="inputPath">optional input path string</param>
/// <param name="muxer">the muxer to use for writing bytes</param>
/// <param name="trackIndexOverride">the track index for muxer read/write to; -1 means use FileToMp4.LatestAudioTrackIndex</param>
/// <param name="bufferInfo">an input bufferinfo to get properties from; created when null</param>
/// <param name="outputPath">the output path for method to check after finished encoding</param>
/// <param name="ptOffset">the presentation time offset for audio, used in syncing audio and video</param>
/// <param name="inputUri">optional inputUri to read from</param>
/// <returns>The output path when the output file exists afterwards; otherwise null.</returns>
public async Task<string> HybridMuxingTrimmer(int startMs, int endMs, string inputPath, MediaMuxer muxer, int trackIndexOverride = -1, BufferInfo bufferInfo = null, string outputPath = null, long ptOffset = 0, Android.Net.Uri inputUri = null)
{
    var tio = trackIndexOverride;
    await Task.Run(() =>
    {
        if (outputPath == null)
        {
            outputPath = FileToMp4.LatestOutputPath;
        }
        MediaExtractor ext = new MediaExtractor();
        // Prefer the Uri data source when both inputs are supplied.
        if (inputUri != null)
        {
            ext.SetDataSource(Android.App.Application.Context, inputUri, null);
        }
        else
        {
            ext.SetDataSource(inputPath);
        }
        int trackCount = ext.TrackCount;
        Dictionary<int, int> indexDict = new Dictionary<int, int>(trackCount);
        int bufferSize = -1;
        for (int i = 0; i < trackCount; i++)
        {
            MediaFormat format = ext.GetTrackFormat(i);
            string mime = format.GetString(MediaFormat.KeyMime);
            bool selectCurrentTrack = false;
            if (mime.StartsWith("audio/"))
            {
                selectCurrentTrack = true;
            }
            else if (mime.StartsWith("video/"))
            {
                selectCurrentTrack = false;
            } /*rerouted to gl video encoder*/
            if (selectCurrentTrack)
            {
                ext.SelectTrack(i);
                if (tio != -1)
                {
                    indexDict.Add(i, i);
                }
                if (format.ContainsKey(MediaFormat.KeyMaxInputSize))
                {
                    int newSize = format.GetInteger(MediaFormat.KeyMaxInputSize);
                    bufferSize = newSize > bufferSize ? newSize : bufferSize;
                }
            }
        }
        MediaMetadataRetriever retrieverSrc = new MediaMetadataRetriever();
        if (!System.String.IsNullOrWhiteSpace(inputPath))
        {
            retrieverSrc.SetDataSource(inputPath);
        }
        else
        {
            retrieverSrc.SetDataSource(Android.App.Application.Context, inputUri);
        }
        string degreesString = retrieverSrc.ExtractMetadata(MetadataKey.VideoRotation);
        // NOTE(review): the brace placement in this rotation block looks mangled —
        // 'if (degrees >= 0)' appears to have no body; confirm against the original source.
        if (degreesString != null) // unused ATM but will be useful for stabilized videoview in streaming
        {
            int degrees = int.Parse(degreesString);
            if (degrees >= 0) /* muxer.SetOrientationHint(degrees); */ }
        {
            //muxer won't accept this param once started
        }
        if (startMs > 0)
        {
            ext.SeekTo(startMs * 1000, MediaExtractorSeekTo.ClosestSync);
        }
        int offset = 0;
        if (bufferInfo == null)
        {
            bufferInfo = new MediaCodec.BufferInfo();
        }
        ByteBuffer dstBuf = ByteBuffer.Allocate(bufferSize);
        long us = endMs * 1000;   // trim end in microseconds
        long uo = us + ptOffset;  // trim end shifted by the presentation-time offset
        int cf = 0;               // frames since the last progress notification
        try
        {
            FileToMp4.AudioEncodingInProgress = true;
            while (true)
            {
                bufferInfo.Offset = offset;
                bufferInfo.Size = ext.ReadSampleData(dstBuf, offset);
                if (bufferInfo.Size < 0)
                {
                    bufferInfo.Size = 0;
                    break;
                }
                else
                {
                    cf++;
                    bufferInfo.PresentationTimeUs = ext.SampleTime + ptOffset;
                    if (ext.SampleTime >= us) { break; } //out of while
                    else
                    {
                        bufferInfo.Flags = MFlags2MCodecBuff(ext.SampleFlags);
                        if (tio == -1)
                        {
                            muxer.WriteSampleData(FileToMp4.LatestAudioTrackIndex, dstBuf, bufferInfo);
                        }
                        else
                        {
                            muxer.WriteSampleData(tio, dstBuf, bufferInfo);
                        }
                        if (cf >= 240) //only send the muxer eventargs once every x frames to reduce CPU load
                        {
                            Notify(ext.SampleTime, us);
                            cf = 0;
                        }
                    }
                    ext.Advance();
                }
            }
        }
        catch (Java.Lang.IllegalStateException e)
        {
            // Malformed source: report completion with error to listeners.
            this.Progress.Invoke(new MuxerEventArgs(ext.SampleTime, us, null, true, true));
            Console.WriteLine("The source video file is malformed");
        }
        catch (Java.Lang.Exception ex)
        {
            this.Progress.Invoke(new MuxerEventArgs(ext.SampleTime, us, null, true, true));
            Console.WriteLine(ex.Message);
        }
        if (AppSettings.Logging.SendToConsole)
        {
            System.Console.WriteLine($"DrainEncoder audio finished @ {bufferInfo.PresentationTimeUs}");
        }
    });
    FileToMp4.AudioEncodingInProgress = false;
    try
    {
        // Only finalize the muxer when the video side is no longer writing to it.
        if (!FileToMp4.VideoEncodingInProgress)
        {
            muxer.Stop();
            muxer.Release();
            muxer = null;
        }
    }
    catch (Java.Lang.Exception ex)
    {
        Log.Debug("MuxingEncoder", ex.Message);
    }
    if (outputPath != null)
    {
        var success = System.IO.File.Exists(outputPath);
        if (success)
        {
            this.Progress.Invoke(new MuxerEventArgs(endMs * 1000, endMs, outputPath, true));
            return (outputPath);
        }
    }
    return (null); //nothing to look for
}
/// <summary>
/// Trims [startMS, startMS + lengthMS) from the input file into outputPath by remuxing samples
/// (no re-encode), on a background thread.
/// </summary>
/// <param name="startMS">Trim start, in milliseconds.</param>
/// <param name="lengthMS">Trim length, in milliseconds (&lt;= 0 = to the end).</param>
/// <param name="inputPath">Path of the source media file.</param>
/// <param name="outputPath">Path of the trimmed MP4 to create.</param>
/// <returns>A task producing true when the trim completed successfully.</returns>
public Task<bool> TrimAsync(int startMS, int lengthMS, string inputPath, string outputPath)
{
    return (Task.Run<bool>(() =>
    {
        try
        {
            bool didOperationSucceed = false;

            MediaExtractor extractor = new MediaExtractor();
            try
            {
                extractor.SetDataSource(inputPath);
                int trackCount = extractor.TrackCount;

                // Set up MediaMuxer for the destination.
                MediaMuxer muxer = new MediaMuxer(outputPath, MuxerOutputType.Mpeg4);

                // Set up the tracks and retrieve the max buffer size for selected tracks.
                Dictionary<int, int> indexDict = new Dictionary<int, int>(trackCount);
                int bufferSize = -1;
                for (int i = 0; i < trackCount; i++)
                {
                    MediaFormat format = extractor.GetTrackFormat(i);
                    string mime = format.GetString(MediaFormat.KeyMime);
                    bool selectCurrentTrack = false;
                    if (mime.StartsWith("audio/"))
                    {
                        selectCurrentTrack = true;
                    }
                    else if (mime.StartsWith("video/"))
                    {
                        selectCurrentTrack = true;
                    }
                    if (selectCurrentTrack)
                    {
                        extractor.SelectTrack(i);
                        int dstIndex = muxer.AddTrack(format);
                        indexDict.Add(i, dstIndex);
                        if (format.ContainsKey(MediaFormat.KeyMaxInputSize))
                        {
                            int newSize = format.GetInteger(MediaFormat.KeyMaxInputSize);
                            bufferSize = newSize > bufferSize ? newSize : bufferSize;
                        }
                    }
                }
                if (bufferSize < 0)
                {
                    bufferSize = 1337; //TODO: I don't know what to put here tbh, it will most likely be above 0 at this point anyways :)
                }

                // Set up the orientation and starting time for extractor.
                MediaMetadataRetriever retrieverSrc = new MediaMetadataRetriever();
                retrieverSrc.SetDataSource(inputPath);
                string degreesString = retrieverSrc.ExtractMetadata(MetadataKey.VideoRotation);
                retrieverSrc.Release(); // fix: the retriever was never released in the original
                if (degreesString != null)
                {
                    int degrees = int.Parse(degreesString);
                    if (degrees >= 0)
                    {
                        muxer.SetOrientationHint(degrees);
                    }
                }
                if (startMS > 0)
                {
                    extractor.SeekTo(startMS * 1000, MediaExtractorSeekTo.ClosestSync);
                }

                // Copy the samples from MediaExtractor to MediaMuxer. We will loop
                // for copying each sample and stop when we get to the end of the source
                // file or exceed the end time of the trimming.
                int offset = 0;
                int trackIndex = -1;
                ByteBuffer dstBuf = ByteBuffer.Allocate(bufferSize);
                MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
                try
                {
                    muxer.Start();
                    while (true)
                    {
                        bufferInfo.Offset = offset;
                        bufferInfo.Size = extractor.ReadSampleData(dstBuf, offset);
                        if (bufferInfo.Size < 0)
                        {
                            bufferInfo.Size = 0;
                            break;
                        }

                        bufferInfo.PresentationTimeUs = extractor.SampleTime;
                        if (lengthMS > 0 && bufferInfo.PresentationTimeUs > ((startMS + lengthMS - 1) * 1000))
                        {
                            Console.WriteLine("The current sample is over the trim end time.");
                            break;
                        }

                        bufferInfo.Flags = ConvertMediaExtractorSampleFlagsToMediaCodecBufferFlags(extractor.SampleFlags);
                        trackIndex = extractor.SampleTrackIndex;
                        muxer.WriteSampleData(indexDict[trackIndex], dstBuf, bufferInfo);
                        extractor.Advance();
                    }
                    muxer.Stop();
                    didOperationSucceed = true;
                }
                catch (IllegalStateException)
                {
                    // Swallow the exception due to malformed source.
                    Console.WriteLine("The source video file is malformed");
                }
                finally
                {
                    muxer.Release();
                }
            }
            finally
            {
                extractor.Release(); // fix: the extractor was never released in the original
            }
            return didOperationSucceed;
        }
        catch (System.Exception xx)
        {
            // fix: log instead of silently swallowing; still report failure to the caller.
            Console.WriteLine(xx.Message);
            return false;
        }
    }));
}
/// <summary>
/// Transcodes the clip's video: feeds samples from the extractor into the decoder, renders each
/// decoded frame through mOutputSurface/mInputSurface, and drains the encoder into mMuxer.
/// The encoder is always drained before more decoder output is pulled.
/// </summary>
/// <param name="extractor">Source of encoded samples, already positioned on the video track.</param>
/// <param name="decoder">Configured and started video decoder.</param>
/// <param name="clip">Clip bounds (start/end time in ms) used to trim and re-stamp frames.</param>
private void ResampleVideo(MediaExtractor extractor, MediaCodec decoder, SamplerClip clip)
{
    ByteBuffer[] decoderInputBuffers = decoder.GetInputBuffers();
    ByteBuffer[] encoderOutputBuffers = mEncoder.GetOutputBuffers();
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    int inputChunk = 0;
    int outputCount = 0;
    long endTime = clip.getEndTime();
    // -1 means "no explicit end": run to the end of the video.
    if (endTime == -1)
    {
        endTime = clip.getVideoDuration();
    }
    // Set when the decoder sees EOS; the encoder gets one more drain pass before we stop.
    bool outputDoneNextTimeWeCheck = false;
    bool outputDone = false;
    bool inputDone = false;
    bool decoderDone = false;
    while (!outputDone)
    {
        // Feed more data to the decoder.
        if (!inputDone)
        {
            int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
            if (inputBufIndex >= 0)
            {
                // SampleTime is in microseconds, endTime in milliseconds.
                if (extractor.SampleTime / 1000 >= endTime)
                {
                    // End of stream -- send empty frame with EOS flag set.
                    decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L, MediaCodecBufferFlags.EndOfStream);
                    inputDone = true;
                }
                else
                {
                    // Copy a chunk of input to the decoder. The first chunk should have
                    // the BUFFER_FLAG_CODEC_CONFIG flag set.
                    ByteBuffer inputBuf = decoderInputBuffers[inputBufIndex];
                    inputBuf.Clear();
                    int sampleSize = extractor.ReadSampleData(inputBuf, 0);
                    if (sampleSize < 0)
                    {
                        decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0, MediaCodecBufferFlags.EndOfStream);
                    }
                    else
                    {
                        decoder.QueueInputBuffer(inputBufIndex, 0, sampleSize, extractor.SampleTime, 0);
                        extractor.Advance();
                    }
                    inputChunk++;
                }
            }
        }
        // Assume output is available. Loop until both assumptions are false.
        bool decoderOutputAvailable = !decoderDone;
        bool encoderOutputAvailable = true;
        while (decoderOutputAvailable || encoderOutputAvailable)
        {
            // Start by draining any pending output from the encoder. It's important to
            // do this before we try to stuff any more data in.
            int encoderStatus = mEncoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (encoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                encoderOutputAvailable = false;
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                encoderOutputBuffers = mEncoder.GetOutputBuffers();
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                // The muxer can only be started once the encoder reports its real format.
                MediaFormat newFormat = mEncoder.OutputFormat;
                mTrackIndex = mMuxer.AddTrack(newFormat);
                mMuxer.Start();
                mMuxerStarted = true;
            }
            else if (encoderStatus < 0)
            {
                // fail( "unexpected result from encoder.dequeueOutputBuffer: " + encoderStatus );
            }
            else
            {
                // encoderStatus >= 0
                ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
                if (encodedData == null)
                {
                    // fail( "encoderOutputBuffer " + encoderStatus + " was null" );
                }
                // Write the data to the output "file".
                if (info.Size != 0)
                {
                    encodedData.Position(info.Offset);
                    encodedData.Limit(info.Offset + info.Size);
                    outputCount++;
                    mMuxer.WriteSampleData(mTrackIndex, encodedData, info);
                }
                outputDone = (info.Flags & MediaCodecBufferFlags.EndOfStream) != 0;
                mEncoder.ReleaseOutputBuffer(encoderStatus, false);
            }
            if (outputDoneNextTimeWeCheck)
            {
                outputDone = true;
            }
            if (encoderStatus != (int)MediaCodecInfoState.TryAgainLater)
            {
                // Continue attempts to drain output.
                continue;
            }
            // Encoder is drained, check to see if we've got a new frame of output from
            // the decoder. (The output is going to a Surface, rather than a ByteBuffer,
            // but we still get information through BufferInfo.)
            if (!decoderDone)
            {
                int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
                if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
                {
                    decoderOutputAvailable = false;
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
                {
                    // decoderOutputBuffers = decoder.GetOutputBuffers();
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
                {
                    // expected before first buffer of data
                    MediaFormat newFormat = decoder.OutputFormat;
                }
                else if (decoderStatus < 0)
                {
                    // fail( "unexpected result from decoder.dequeueOutputBuffer: " + decoderStatus );
                }
                else
                {
                    // decoderStatus >= 0
                    // The ByteBuffers are null references, but we still get a nonzero
                    // size for the decoded data.
                    bool doRender = (info.Size != 0);
                    // As soon as we call ReleaseOutputBuffer, the buffer will be forwarded
                    // to SurfaceTexture to convert to a texture. The API doesn't
                    // guarantee that the texture will be available before the call
                    // returns, so we need to wait for the onFrameAvailable callback to
                    // fire. If we don't wait, we risk rendering from the previous frame.
                    decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                    if (doRender)
                    {
                        mOutputSurface.AwaitNewImage(true);
                        mOutputSurface.DrawImage();
                        // Send it to the encoder.
                        // Re-stamp the frame relative to the clip start; times in nanoseconds.
                        long nSecs = info.PresentationTimeUs * 1000;
                        if (clip.getStartTime() != -1)
                        {
                            nSecs = (info.PresentationTimeUs - (clip.getStartTime() * 1000)) * 1000;
                        }
                        nSecs = Java.Lang.Math.Max(0, nSecs);
                        mEncoderPresentationTimeUs += (nSecs - mLastSampleTime);
                        mLastSampleTime = nSecs;
                        mInputSurface.SetPresentationTime(mEncoderPresentationTimeUs);
                        mInputSurface.SwapBuffers();
                    }
                    if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                    {
                        // mEncoder.signalEndOfInputStream();
                        outputDoneNextTimeWeCheck = true;
                    }
                }
            }
        }
    }
    if (inputChunk != outputCount)
    {
        // throw new RuntimeException( "frame lost: " + inputChunk + " in, " + outputCount + " out" );
    }
}
/// <summary>
/// Reads every encoded sample of the selected track of the file at <paramref name="path"/> into
/// memory, keyed by presentation time in microseconds. Returns null on any failure
/// (preserving the original best-effort contract).
/// </summary>
/// <param name="context">Android context (currently unused; kept for interface compatibility).</param>
/// <param name="path">Path of the media file to read.</param>
/// <returns>Sample buffers keyed by presentation time, or null on error.</returns>
public static SortedList<long, ByteBuffer> GetFrames(Context context, String path)
{
    MediaExtractor extractor = new MediaExtractor();
    try
    {
        var bArray = new SortedList<long, ByteBuffer>();
        extractor.SetDataSource(path);
        int trackIndex = selectTrack(extractor);
        if (trackIndex < 0)
        {
            // Caught below and surfaced as a null return.
            throw new InvalidParameterException("No suitable track found in: " + path);
        }
        extractor.SelectTrack(trackIndex);

        // One freshly-allocated buffer per sample; each returned entry is a Duplicate()
        // view sharing the underlying storage of its source buffer.
        // NOTE(review): 1280*720 bytes per sample is an assumption — confirm it is large
        // enough for the biggest (key)frames in the sources this is used with.
        List<ByteBuffer> buffers = new List<ByteBuffer>();
        buffers.Add(ByteBuffer.Allocate(1280 * 720));

        int i = 0;
        // NOTE(review): this Advance() runs before the first ReadSampleData, so sample 0
        // is skipped — confirm that is intentional.
        extractor.Advance();
        while (extractor.ReadSampleData(buffers[i], 0) >= 0)
        {
            var buffcopy = buffers[i].Duplicate();
            bArray.Add(extractor.SampleTime, buffcopy);
            buffers.Add(ByteBuffer.Allocate(1280 * 720));
            i++;
            extractor.Advance();
        }
        return (bArray);
    }
    catch (Exception)
    {
        // Best-effort contract: any failure (bad file, duplicate timestamps, ...) yields null.
        return (null);
    }
    finally
    {
        extractor.Release(); // fix: the extractor was never released in the original
    }
}
/// <summary>
/// Runs the full decode loop: feeds encoded samples from <paramref name="extractor"/>
/// into <paramref name="_Decoder"/> and drains decoded output buffers, converting each
/// non-empty frame to YUV, until the end-of-stream flag is observed on the output side.
/// </summary>
/// <param name="_Decoder">A configured and started MediaCodec decoder.</param>
/// <param name="extractor">A MediaExtractor positioned on the track to decode.</param>
/// <exception cref="RuntimeException">If the codec hands back a null output buffer.</exception>
public void Decode(MediaCodec _Decoder, MediaExtractor extractor)
{
    var stopwatch = Stopwatch.StartNew();
    const int TIMEOUT_USEC = 10000; // dequeue timeout, µs

    ByteBuffer[] inputBuffers = _Decoder.GetInputBuffers();
    ByteBuffer[] outputBuffers = _Decoder.GetOutputBuffers();
    var bufferInfo = new MediaCodec.BufferInfo();
    bool inputDone = false;
    int frameIndex = 0; // count of successfully converted output frames

    try
    {
        while (true)
        {
            // ---- Feed input side ----
            if (!inputDone)
            {
                int inputBufIndex = _Decoder.DequeueInputBuffer(TIMEOUT_USEC);
                if (inputBufIndex >= 0)
                {
                    ByteBuffer buffer = inputBuffers[inputBufIndex];
                    int sampleSize = extractor.ReadSampleData(buffer, 0);
                    if (sampleSize < 0)
                    {
                        // Send an empty frame with the end-of-stream flag set. If we set EOS
                        // on a frame with data, that frame's data would be ignored and the
                        // output would be short one frame.
                        // Consistency fix: use MediaCodecBufferFlags.EndOfStream like the
                        // rest of the file, not the obsolete MediaCodec.BufferFlagEndOfStream.
                        _Decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0, MediaCodecBufferFlags.EndOfStream);
                        inputDone = true;
                        Log.Info(TAG, "sent input EOS (with zero-length frame)");
                    }
                    else
                    {
                        Log.Info(TAG, "adding encoded video to decoder input ");
                        _Decoder.QueueInputBuffer(inputBufIndex, 0, sampleSize, extractor.SampleTime, 0);
                        extractor.Advance();
                    }
                }
                else
                {
                    // Either all input buffers are in use, or we timed out during setup.
                    Log.Warn(TAG, "input buffer not available");
                }
            }

            // ---- Drain output side ----
            int outputStatus = _Decoder.DequeueOutputBuffer(bufferInfo, TIMEOUT_USEC);
            if (outputStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                Log.Info(TAG, "no output available, spinning to await EOS");
            }
            else if (outputStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                // Deprecated on modern APIs, but must refresh our cached array if it fires.
                Log.Warn(TAG, "not expected OutputBuffersChanged happened");
                outputBuffers = _Decoder.GetOutputBuffers();
            }
            else if (outputStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                // Expected once before the first output buffer; nothing to do here
                // since no muxer is attached (old muxer wiring removed as dead code).
            }
            else if (outputStatus < 0)
            {
                Log.Warn(TAG, "unexpected but lets ignore");
            }
            else
            {
                // outputStatus is a valid output buffer index.
                ByteBuffer encodedData = outputBuffers[outputStatus];
                if (encodedData == null)
                {
                    Log.Error(TAG, string.Format("encoderOutputBuffer {0} was null!!", outputStatus));
                    throw new RuntimeException(string.Format("encoderOutputBuffer {0} was null!!", outputStatus));
                }

                if ((bufferInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
                {
                    // Codec-config data is not an image frame; skip conversion below.
                    bufferInfo.Size = 0;
                }

                if (bufferInfo.Size != 0)
                {
                    try
                    {
                        // NOTE(review): the YuvImage result is currently unused — the call
                        // appears to exist to validate conversion; confirm before removing.
                        YuvImage yuv = Utils.GetYUVImage(encodedData, _CameraColorFormat, _Width, _Height);
                        frameIndex++;
                    }
                    catch (Exception e)
                    {
                        // Bug fix: the original catch block swallowed failures with no log at all.
                        Log.Warn(TAG, "Error while creating bitmap: " + e.Message);
                    }
                }

                // Bug fix: ReleaseOutputBuffer was inside the Size != 0 branch, so
                // zero-size buffers (e.g. codec-config) were never returned to the
                // codec, starving the output buffer queue.
                _Decoder.ReleaseOutputBuffer(outputStatus, false);

                if ((bufferInfo.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                {
                    Log.Info(TAG, "End of Stream Reached!!");
                    break;
                }
            }
        }

        stopwatch.Stop();
        Log.Info("inner STOPWATCH!!!!:",
            string.Format("numberofframes = {0}, totaltime = {1}", frameIndex, stopwatch.ElapsedMilliseconds));
    }
    catch (Exception e)
    {
        Log.Error(TAG, "Decode or Muxer failed", e, e.Message);
        throw; // rethrow preserving the stack trace
    }
}