// Builds the inline-media view-models for this status: one per attached media
// entity, plus any media that can be extracted from expanded URLs.
protected override async Task LoadInlineMedias()
{
    // Inline media display is opt-in via configuration.
    if (Config?.Visual?.InlineMedia != true)
    {
        return;
    }

    // Attached media entities (deduplicated) become media view-models directly.
    var mediaEntities = Model?.Entities?.MediaEntities ?? Enumerable.Empty<MediaEntity>();
    foreach (var entity in mediaEntities.Distinct(TwitterComparers.MediaEntityComparer))
    {
        var viewModel = new StatusMediaViewModel(entity, Context.UserId);
        viewModel.OpenRequested += Image_OpenRequested;
        _InlineMedias.Add(viewModel);
    }

    // URL entities may point at external media; try to extract a preview.
    var urlEntities = Model?.Entities?.UrlEntities ?? Enumerable.Empty<UrlEntity>();
    var expandedUrls = urlEntities
        .Distinct(TwitterComparers.UrlEntityComparer)
        .Select(e => e.ExpandedUrl);
    foreach (var expandedUrl in expandedUrls)
    {
        var extracted = await MediaExtractor.ExtractMedia(expandedUrl);
        if (extracted == null)
        {
            continue;
        }
        var viewModel = new StatusMediaViewModel(extracted);
        viewModel.OpenRequested += Image_OpenRequested;
        _InlineMedias.Add(viewModel);
    }

    RaisePropertyChanged(nameof(InlineMedias));
}
// Constructor: wires up a MediaExtractor/MediaCodec pair used to determine the
// real length of a video delivered through a stream.
// NOTE(review): this looks like work in progress — 'naturalWidth',
// 'naturalHeight' and 'info' are computed but never used, and several
// statements were left commented out. Confirm intent before cleaning up.
internal VideoLengthExtractor(System.IO.Stream stream, long reportedLength)
{
    // Length reported by the container/transport; kept for comparison with
    // what is actually extracted.
    _reportedLength = reportedLength;

    extractor = new MediaExtractor();
    // Adapt the .NET stream to an Android media data source.
    extractor.SetDataSource(new ReadSeekStreamMediaSource(stream));

    var format = SelectFirstVideoTrack() ?? throw new Exception("Stream has no video track");

    // Get the original video size (currently unused).
    var naturalWidth = format.GetInteger(MediaFormat.KeyWidth);
    var naturalHeight = format.GetInteger(MediaFormat.KeyHeight);

    // Create a decoder matching the video track's MIME type.
    decoder = MediaCodec.CreateDecoderByType(format.GetString(MediaFormat.KeyMime));

    // Currently unused; presumably intended for the decode loop.
    var info = new MediaCodec.BufferInfo();
}
// Opens the media file region, locates its first audio track and caches that
// track's basic parameters (channel count, sample rate, duration).
partial void InitializeImpl()
{
    using (var inputFile = new Java.IO.File(mediaDataUrl))
    {
        if (!inputFile.CanRead())
        {
            throw new Exception(string.Format("Unable to read: {0} ", inputFile.AbsolutePath));
        }

        using (var inputFileStream = new Java.IO.FileInputStream(inputFile.AbsolutePath))
        {
            var extractor = new MediaExtractor();
            extractor.SetDataSource(inputFileStream.FD, startPosition, length);

            var audioTrackIndex = StreamedBufferSoundSource.FindAudioTrack(extractor);
            if (audioTrackIndex < 0)
            {
                // No audio track; nothing to initialize.
                return;
            }

            extractor.SelectTrack(audioTrackIndex);
            var audioFormat = extractor.GetTrackFormat(audioTrackIndex);

            // Cache the audio settings.
            // Should we override the settings (channels, sampleRate, ...) from DynamicSoundSource?
            Channels = audioFormat.GetInteger(MediaFormat.KeyChannelCount);
            SampleRate = audioFormat.GetInteger(MediaFormat.KeySampleRate);
            MediaDuration = TimeSpanExtensions.FromMicroSeconds(audioFormat.GetLong(MediaFormat.KeyDuration));
        }
    }
}
/// <summary>
/// Reads an integer property (e.g. width/height) from the AVC track of the
/// media at <paramref name="uri"/>, returning <paramref name="defaultValue"/>
/// when the source cannot be opened, has no AVC track, or lacks the key.
/// </summary>
public static int GetMediaFormatPropertyInteger(Uri uri, string key, int defaultValue)
{
    MediaExtractor extractor = new MediaExtractor();
    try
    {
        extractor.SetDataSource(uri.ToString());

        MediaFormat format = GetTrackFormat(extractor, MIME_TYPE_AVC);
        // BUGFIX: GetTrackFormat returns null when no track matches the MIME
        // type; the original dereferenced it unconditionally (NRE).
        if (format != null && format.ContainsKey(key))
        {
            return format.GetInteger(key);
        }
    }
    catch (System.Exception)
    {
        // Source could not be opened; fall through to the default.
    }
    finally
    {
        // BUGFIX: always release the native extractor — the original leaked it
        // whenever SetDataSource threw.
        extractor.Release();
    }
    return defaultValue;
}
/**
 * Writes one media sample into the next available decoder input buffer.
 *
 * A "sample" here refers to a single atomic access unit in the media stream.
 * The definition of "access unit" depends on the type of encoding used, but
 * it typically refers to a single frame of video or a few seconds of audio.
 * {@link android.media.MediaExtractor} extracts data one sample at a time.
 *
 * @param extractor Extractor (positioned at the sample to submit) wrapping the media.
 * @param isSecure Whether to submit through the secure (DRM) input path.
 * @param presentationTimeUs The time, relative to the beginning of the media
 *        stream, at which this buffer should be rendered, in microseconds.
 * @param flags Flags to pass to the decoder. See {@link MediaCodec#queueInputBuffer(int,
 *        int, int, long, int)}
 * @return true if a sample was submitted; false if no input buffer was free.
 *
 * @throws MediaCodec.CryptoException
 */
public bool writeSample(MediaExtractor extractor, bool isSecure, long presentationTimeUs, int flags)
{
    bool result = false;

    if (mAvailableInputBuffers.Any())
    {
        int index = mAvailableInputBuffers.Dequeue();
        ByteBuffer buffer = mInputBuffers[index];

        // reads the sample from the file using extractor into the buffer
        int size = extractor.ReadSampleData(buffer, 0);
        if (size <= 0)
        {
            // End of stream: tell the decoder no more input is coming.
            // NOTE(review): 'size' may be -1 here and is passed unchanged to
            // QueueInputBuffer below — confirm the binding accepts a negative
            // size together with the EOS flag.
            flags |= (int)MediaCodec.BufferFlagEndOfStream;
        }

        // Submit the buffer to the codec for decoding. The presentationTimeUs
        // indicates the position (play time) for the current sample.
        if (!isSecure)
        {
            mDecoder.QueueInputBuffer(index, 0, size, presentationTimeUs, (MediaCodecBufferFlags)flags);
        }
        else
        {
            // Secure path: fetch per-sample crypto metadata first.
            extractor.GetSampleCryptoInfo(sCryptoInfo);
            mDecoder.QueueSecureInputBuffer(index, 0, sCryptoInfo, presentationTimeUs, (MediaCodecBufferFlags)flags);
        }

        result = true;
    }
    return(result);
}
/// <summary>
/// Decodes one clip through an AVC decoder onto the shared output surface so
/// it can be resampled/re-encoded. Cleans up decoder, surface and extractor
/// regardless of outcome.
/// </summary>
private void FeedClipToEncoder(SamplerClip clip)
{
    mLastSampleTime = 0;

    MediaCodec decoder = null;
    MediaExtractor extractor = SetupExtractorForClip(clip);
    if (extractor == null)
    {
        return;
    }

    int trackIndex = GetVideoTrackIndex(extractor);
    if (trackIndex < 0)
    {
        // BUGFIX: guard against sources without an AVC video track —
        // SelectTrack(-1) would throw. Release the extractor before bailing.
        extractor.Release();
        return;
    }
    extractor.SelectTrack(trackIndex);

    MediaFormat clipFormat = extractor.GetTrackFormat(trackIndex);

    // Seek to the clip start (ms -> us) and record where the extractor
    // actually landed (previous sync frame).
    if (clip.getStartTime() != -1)
    {
        extractor.SeekTo(clip.getStartTime() * 1000, MediaExtractorSeekTo.PreviousSync);
        clip.setStartTime(extractor.SampleTime / 1000);
    }

    try
    {
        decoder = MediaCodec.CreateDecoderByType(MediaHelper.MIME_TYPE_AVC);
        mOutputSurface = new OutputSurface();

        decoder.Configure(clipFormat, mOutputSurface.Surface, null, 0);
        decoder.Start();

        ResampleVideo(extractor, decoder, clip);
    }
    catch (System.Exception e)
    {
        // Best-effort: log instead of silently swallowing, then fall through
        // to cleanup so the pipeline can continue with the next clip.
        System.Diagnostics.Debug.WriteLine("FeedClipToEncoder failed: " + e.Message);
    }
    finally
    {
        if (mOutputSurface != null)
        {
            mOutputSurface.Release();
        }
        if (decoder != null)
        {
            decoder.Stop();
            decoder.Release();
        }
        if (extractor != null)
        {
            extractor.Release();
            extractor = null;
        }
    }
}
/// <summary>
/// Opens the media file, selects the requested track (audio or video), and
/// configures a decoder with the extractor's format (which carries the
/// CSD-0/CSD-1 codec-specific data). On failure, releases all resources and
/// rethrows.
/// </summary>
public void Initialize(IServiceRegistry services, string url, long startPosition, long length)
{
    if (isInitialized)
    {
        return;
    }

    try
    {
        inputFile = new Java.IO.File(url);
        if (!inputFile.CanRead())
        {
            throw new Exception(string.Format("Unable to read: {0} ", inputFile.AbsolutePath));
        }

        inputFileDescriptor = new Java.IO.FileInputStream(inputFile);

        // ===================================================================================================
        // Initialize the audio media extractor
        mediaExtractor = new MediaExtractor();
        mediaExtractor.SetDataSource(inputFileDescriptor.FD, startPosition, length);

        var videoTrackIndex = FindTrack(mediaExtractor, MediaType.Video);
        var audioTrackIndex = FindTrack(mediaExtractor, MediaType.Audio);
        HasAudioTrack = audioTrackIndex >= 0;

        mediaTrackIndex = MediaType == MediaType.Audio ? audioTrackIndex : videoTrackIndex;
        if (mediaTrackIndex < 0)
        {
            throw new Exception($"No {MediaType} track found in: {inputFile.AbsolutePath}");
        }

        mediaExtractor.SelectTrack(mediaTrackIndex);

        var trackFormat = mediaExtractor.GetTrackFormat(mediaTrackIndex);
        MediaDuration = TimeSpanExtensions.FromMicroSeconds(trackFormat.GetLong(MediaFormat.KeyDuration));

        ExtractMediaMetadata(trackFormat);

        // Create a MediaCodec mediadecoder, and configure it with the MediaFormat from the mediaExtractor
        // It's very important to use the format from the mediaExtractor because it contains a copy of the CSD-0/CSD-1 codec-specific data chunks.
        var mime = trackFormat.GetString(MediaFormat.KeyMime);
        MediaDecoder = MediaCodec.CreateDecoderByType(mime);
        MediaDecoder.Configure(trackFormat, decoderOutputSurface, null, 0);

        isInitialized = true;

        StartWorker();
    }
    catch (Exception)
    {
        Release();
        // BUGFIX: rethrow with 'throw;' — the original 'throw e;' reset the
        // exception's stack trace.
        throw;
    }
}
// Populates a SourceMedia descriptor: overall size/duration plus one track
// entry per video, audio or other track found in the container.
protected void updateSourceMedia(SourceMedia sourceMedia, Android.Net.Uri uri)
{
    sourceMedia.uri = uri;
    sourceMedia.size = TranscoderUtils.GetSize(this, uri);
    sourceMedia.duration = getMediaDuration(uri) / 1000f;

    try
    {
        var extractor = new MediaExtractor();
        extractor.SetDataSource(this, uri, null);

        int count = extractor.TrackCount;
        sourceMedia.tracks = new List<MediaTrackFormat>(count);

        for (int index = 0; index < count; index++)
        {
            MediaFormat trackFormat = extractor.GetTrackFormat(index);
            var mime = trackFormat.GetString(MediaFormat.KeyMime);
            if (mime == null)
            {
                continue;
            }

            if (mime.StartsWith("video"))
            {
                var video = new VideoTrackFormat(index, mime);
                video.width = getInt(trackFormat, MediaFormat.KeyWidth);
                video.height = getInt(trackFormat, MediaFormat.KeyHeight);
                video.duration = getLong(trackFormat, MediaFormat.KeyDuration);
                video.frameRate = MediaFormatUtils.GetFrameRate(trackFormat, new Java.Lang.Integer(-1)).IntValue();
                video.keyFrameInterval = MediaFormatUtils.GetIFrameInterval(trackFormat, new Java.Lang.Integer(-1)).IntValue();
                video.rotation = getInt(trackFormat, TrackMetadataUtil.KEY_ROTATION, 0);
                video.bitrate = getInt(trackFormat, MediaFormat.KeyBitRate);
                sourceMedia.tracks.Add(video);
            }
            else if (mime.StartsWith("audio"))
            {
                var audio = new AudioTrackFormat(index, mime);
                audio.channelCount = getInt(trackFormat, MediaFormat.KeyChannelCount);
                audio.samplingRate = getInt(trackFormat, MediaFormat.KeySampleRate);
                audio.duration = getLong(trackFormat, MediaFormat.KeyDuration);
                audio.bitrate = getInt(trackFormat, MediaFormat.KeyBitRate);
                sourceMedia.tracks.Add(audio);
            }
            else
            {
                // Unknown track kind: record it without details.
                sourceMedia.tracks.Add(new GenericTrackFormat(index, mime));
            }
        }
    }
    catch (IOException ex)
    {
        System.Diagnostics.Debug.WriteLine($"Failed to extract sourceMedia: {ex.Message}");
    }

    sourceMedia.NotifyChange();
}
// Resolves the uri into a media item, enqueues it and starts playback.
public override async Task<IMediaItem> Play(string uri)
{
    var item = await MediaExtractor.CreateMediaItem(uri);
    var queued = await AddMediaItemsToQueue(new List<IMediaItem> { item }, true);

    await MediaPlayer.Play(queued);
    return item;
}
// Refreshes metadata for a single queued file and notifies listeners.
private async Task ExtractMediaInformation(IMediaFile mediaFile)
{
    var queuePosition = MediaQueue.IndexOf(mediaFile);
    await MediaExtractor.ExtractMediaInfo(mediaFile);

    // Write the refreshed file back into the queue when it is still queued.
    if (queuePosition >= 0)
    {
        MediaQueue[queuePosition] = mediaFile;
    }

    OnMediaFileChanged(CurrentPlaybackManager, new MediaFileChangedEventArgs(mediaFile));
}
// Builds the inline-media view-models for this status, combining classic and
// extended entities. Videos/GIFs are ordered after photos; duplicates are skipped.
protected override async Task LoadInlineMedias()
{
    if (Config?.Visual?.InlineMedia != true)
    {
        return;
    }

    // Registers a media view-model unless an equal one is already present.
    void AddMedia(StatusMediaViewModel viewModel)
    {
        if (_InlineMedias.Contains(viewModel))
        {
            return;
        }
        viewModel.OpenRequested += Image_OpenRequested;
        _InlineMedias.Add(viewModel);
    }

    // Animated GIFs and videos are pulled out so they can be appended last.
    var videos = (Model?.ExtendedEntities?.MediaEntities?.Where(e => e.Type == "animated_gif" || e.Type == "video")
                  ?? Enumerable.Empty<MediaEntity>()).ToArray();

    var mediaEntities = Model?.Entities?.MediaEntities ?? Enumerable.Empty<MediaEntity>();
    var extendedEntities = Model?.ExtendedEntities?.MediaEntities ?? Enumerable.Empty<MediaEntity>();
    var photos = mediaEntities.Concat(extendedEntities)
        .Distinct(TwitterComparers.MediaEntityComparer)
        .Except(videos, TwitterComparers.MediaEntityComparer);

    foreach (var entity in photos.Concat(videos))
    {
        AddMedia(new StatusMediaViewModel(entity));
    }

    // Expanded URLs may reference external media; try to extract a preview.
    var urlEntities = Model?.Entities?.UrlEntities ?? Enumerable.Empty<UrlEntity>();
    var extendedUrlEntities = Model?.ExtendedEntities?.UrlEntities ?? Enumerable.Empty<UrlEntity>();
    var expandedUrls = urlEntities.Concat(extendedUrlEntities)
        .Distinct(TwitterComparers.UrlEntityComparer)
        .Select(e => e.ExpandedUrl);

    foreach (var url in expandedUrls)
    {
        var extracted = await MediaExtractor.ExtractMedia(url);
        if (extracted != null)
        {
            AddMedia(new StatusMediaViewModel(extracted, new Uri(url)));
        }
    }

    RaisePropertyChanged(nameof(InlineMedias));
}
// Scans every track and returns the format of the first one whose MIME type
// matches exactly, or null when none does.
public static MediaFormat GetTrackFormat(MediaExtractor extractor, string mimeType)
{
    int trackCount = extractor.TrackCount;
    for (int track = 0; track < trackCount; track++)
    {
        MediaFormat candidate = extractor.GetTrackFormat(track);
        if (mimeType.Equals(candidate.GetString(MediaFormat.KeyMime)))
        {
            return candidate;
        }
    }
    return null;
}
/**
 * Opens the given file, selects its first audio track and starts the AAC
 * decode/playback thread.
 *
 * @param path absolute path of the media file to play
 */
public void startPlay(string path)
{
    eosReceived = false;
    mExtractor = new MediaExtractor();
    try
    {
        mExtractor.SetDataSource(path);
    }
    catch (IOException e)
    {
        e.PrintStackTrace();
    }

    int channel = 0;
    for (int i = 0; i < mExtractor.TrackCount; i++)
    {
        MediaFormat format = mExtractor.GetTrackFormat(i);
        string mime = format.GetString(MediaFormat.KeyMime);
        if (mime.StartsWith("audio/"))
        {
            mExtractor.SelectTrack(i);
            Log.Debug("TAG", "format : " + format);

            // Dump the codec-specific data (CSD) for debugging.
            ByteBuffer csd = format.GetByteBuffer("csd-0");
            for (int k = 0; k < csd.Capacity(); ++k)
            {
                Log.Error("TAG", "csd : " + csd.ToArray<Byte>()[k]);
            }

            mSampleRate = format.GetInteger(MediaFormat.KeySampleRate);
            channel = format.GetInteger(MediaFormat.KeyChannelCount);
            break;
        }
    }

    MediaFormat format2 = makeAACCodecSpecificData(MediaCodecInfo.CodecProfileLevel.AACObjectLC, mSampleRate, channel);
    if (format2 == null)
    {
        return;
    }

    // BUGFIX: use the C# binding names (CreateDecoderByType/Configure/Start —
    // the original Java-cased calls do not compile), check the decoder for
    // null BEFORE using it, and configure with the AAC-specific format built
    // above ('format2'; the original referenced the loop-local 'format',
    // which is out of scope here).
    mDecoder = MediaCodec.CreateDecoderByType("audio/mp4a-latm");
    if (mDecoder == null)
    {
        Log.Error("DecodeActivity", "Can't find video info!");
        return;
    }
    mDecoder.Configure(format2, null, null, 0);
    mDecoder.Start();

    new Thread(AACDecoderAndPlayRunnable).Start();
}
// Tears down the decoder/extractor pair and the underlying file handles.
// Release order matters: stop and free the codec before releasing the
// extractor that feeds it, then dispose the stream before its parent file.
// Safe to call repeatedly — all accesses are null-conditional.
partial void ReleaseMediaInternal()
{
    // Stop the codec first so it no longer pulls data, then free its resources.
    audioMediaDecoder?.Stop();
    audioMediaDecoder?.Release();
    audioMediaDecoder = null;

    audioMediaExtractor?.Release();
    audioMediaExtractor = null;

    InputFileStream?.Dispose();
    InputFileStream = null;

    InputFile?.Dispose();
    InputFile = null;
}
/// <summary>
/// Creates a MediaExtractor for the clip's source uri; returns null (after
/// releasing the extractor) when the source cannot be opened.
/// </summary>
private MediaExtractor SetupExtractorForClip(SamplerClip clip)
{
    MediaExtractor extractor = new MediaExtractor();
    try
    {
        extractor.SetDataSource(clip.getUri().ToString());
    }
    catch (System.Exception)
    {
        // BUGFIX: release the native resources instead of leaking the
        // half-initialized extractor on failure.
        extractor.Release();
        return null;
    }
    return extractor;
}
// Resolves every uri into a media item (preserving order), enqueues the batch
// and starts playback; returns the resulting queue.
public override async Task<IEnumerable<IMediaItem>> Play(IEnumerable<string> items)
{
    var mediaItems = new List<IMediaItem>();
    foreach (var uri in items)
    {
        mediaItems.Add(await MediaExtractor.CreateMediaItem(uri));
    }

    var firstToPlay = await AddMediaItemsToQueue(mediaItems, true);
    await MediaPlayer.Play(firstToPlay);

    return MediaQueue;
}
/**
 * Selects the video track, if any.
 *
 * @return the track index, or -1 if no video track is found.
 */
private int selectTrack(MediaExtractor extractor)
{
    // Pick the first "video/*" track; any further tracks are ignored.
    int trackCount = extractor.TrackCount;
    for (int track = 0; track < trackCount; track++)
    {
        String mime = extractor.GetTrackFormat(track).GetString(MediaFormat.KeyMime);
        if (mime.StartsWith("video/"))
        {
            return track;
        }
    }
    return -1;
}
// Opens the media file region, selects its first audio track, configures a
// matching decoder, caches the audio parameters and starts decoding.
partial void InitializeMediaExtractor(string mediaDataUrl, long startPosition, long length)
{
    if (mediaDataUrl == null)
    {
        throw new ArgumentNullException(nameof(mediaDataUrl));
    }

    // Drop any previously opened decoder/extractor before opening a new source.
    ReleaseMediaInternal();

    InputFile = new Java.IO.File(mediaDataUrl);
    if (!InputFile.CanRead())
    {
        throw new Exception(string.Format("Unable to read: {0} ", InputFile.AbsolutePath));
    }

    InputFileStream = new Java.IO.FileInputStream(InputFile.AbsolutePath);

    audioMediaExtractor = new MediaExtractor();
    audioMediaExtractor.SetDataSource(InputFileStream.FD, startPosition, length);

    trackIndexAudio = FindAudioTrack(audioMediaExtractor);
    if (trackIndexAudio < 0)
    {
        ReleaseMediaInternal();
        Logger.Error($"The input file '{mediaDataUrl}' does not contain any audio track.");
        return;
    }

    audioMediaExtractor.SelectTrack(trackIndexAudio);
    var audioFormat = audioMediaExtractor.GetTrackFormat(trackIndexAudio);

    audioMediaDecoder = MediaCodec.CreateDecoderByType(audioFormat.GetString(MediaFormat.KeyMime));
    audioMediaDecoder.Configure(audioFormat, null, null, 0);

    // Cache the audio settings.
    // Should we override the settings (channels, sampleRate, ...) from DynamicSoundSource?
    Channels = audioFormat.GetInteger(MediaFormat.KeyChannelCount);
    SampleRate = audioFormat.GetInteger(MediaFormat.KeySampleRate);
    MediaDuration = TimeSpanExtensions.FromMicroSeconds(audioFormat.GetLong(MediaFormat.KeyDuration));

    audioMediaDecoder.Start();

    extractionOutputDone = false;
    extractionInputDone = false;
}
// Returns the index of the first H.264 ("video/avc") track, or -1 when the
// source has none.
private int GetVideoTrackIndex(MediaExtractor extractor)
{
    for (int i = 0; i < extractor.TrackCount; i++)
    {
        string mime = extractor.GetTrackFormat(i).GetString(MediaFormat.KeyMime);
        if (mime != null && mime.Equals("video/avc"))
        {
            return i;
        }
    }
    return -1;
}
// Releases every media resource this extractor owns and resets it to the
// uninitialized state. Unregisters from the scheduler first so no further
// events arrive mid-teardown; stops the codec before releasing it and the
// extractor that feeds it. Safe to call repeatedly (null-conditional access).
private void Release()
{
    Scheduler.UnregisterExtractor(this); //to avoid receiving any more event from the scheduler

    MediaDecoder?.Stop();
    MediaDecoder?.Release();
    MediaDecoder = null;

    mediaExtractor?.Release();
    mediaExtractor = null;

    inputFile = null;
    MediaMetadata = null;
    MediaDuration = TimeSpan.Zero;

    inputFileDescriptor?.Close();
    inputFileDescriptor = null;

    isInitialized = false;
}
// Finds the first audio track, if any; returns its index, or -1 when the
// source has no audio. (Earlier comment said "video" — this scans "audio/*".)
internal static int FindAudioTrack(MediaExtractor extractor)
{
    const string prefix = "audio/";

    int numTracks = extractor.TrackCount;
    for (int i = 0; i < numTracks; i++)
    {
        MediaFormat format = extractor.GetTrackFormat(i);
        String mime = format.GetString(MediaFormat.KeyMime);
        if (!mime.StartsWith(prefix))
        {
            continue;
        }
        Logger.Verbose(string.Format("Extractor selected track {0} ({1}): {2}", i, mime, format));
        return i;
    }
    return -1;
}
/// <summary>
/// Returns the MediaFormat of the first audio track of the given source
/// (uri takes precedence over the file path), or null when there is none.
/// </summary>
public static MediaFormat GetAudioTrackFormat(string filepath, Android.Net.Uri inputUri = null)
{
    MediaExtractor extractor = new MediaExtractor();
    try
    {
        if (inputUri != null)
        {
            extractor.SetDataSource(Android.App.Application.Context, inputUri, null);
        }
        else if (filepath != null)
        {
            extractor.SetDataSource(filepath);
        }

        int trackCount = extractor.TrackCount;
        for (int i = 0; i < trackCount; i++)
        {
            MediaFormat format = extractor.GetTrackFormat(i);
            string mime = format.GetString(MediaFormat.KeyMime);
            if (mime.StartsWith("audio/"))
            {
                extractor.SelectTrack(i);
                return format;
            }
        }
        return null;
    }
    finally
    {
        // BUGFIX: the extractor was never released, leaking native resources
        // on every call. The returned MediaFormat is an independent object and
        // stays valid after the release. (The original also accumulated a
        // 'bufferSize' from KeyMaxInputSize that was never used — removed.)
        extractor.Release();
    }
}
// Refreshes metadata for each queued file; a failure on one file is reported
// and does not stop processing of the rest.
private async Task GetMediaInformation(IEnumerable<IMediaFile> mediaFiles)
{
    foreach (var mediaFile in mediaFiles)
    {
        try
        {
            var position = MediaQueue.IndexOf(mediaFile);
            var info = await MediaExtractor.ExtractMediaInfo(mediaFile);

            if (position >= 0)
            {
                MediaQueue[position] = info;
            }

            OnMediaFileChanged(CurrentPlaybackManager, new MediaFileChangedEventArgs(info));
        }
        catch (Exception e)
        {
            OnMediaFileFailed(this, new MediaFileFailedEventArgs(e, mediaFile));
        }
    }
}
// Finds the first track of the requested kind (audio or video) for which a
// supported decoder exists on this device; returns its index, or -1.
private static int FindTrack(MediaExtractor extractor, MediaType trackType)
{
    string prefix;
    switch (trackType)
    {
        case MediaType.Video:
            prefix = "video/";
            break;
        case MediaType.Audio:
            prefix = "audio/";
            break;
        default:
            return -1;
    }

    int numTracks = extractor.TrackCount;
    for (int i = 0; i < numTracks; i++)
    {
        MediaFormat format = extractor.GetTrackFormat(i);
        String mime = format.GetString(MediaFormat.KeyMime);
        if (!mime.StartsWith(prefix))
        {
            continue;
        }
        // Only accept the track when a matching decoder is available.
        if (ListSupportedMediaCodecs.FindDecoderForFormat(format) != null)
        {
            Logger.Verbose(string.Format("Extractor selected track {0} ({1}): {2}", i, mime, format));
            return i;
        }
    }
    return -1;
}
/// <summary>
/// Trims the audio of a media source between startMs and endMs and writes the
/// samples through the supplied muxer, offsetting presentation times by
/// ptOffset to keep audio and video in sync.
/// If both inputPath string and inputUri are not null, this
/// method will use the Uri. Else, set one or the other.
/// They cannot both be null.
/// </summary>
/// <param name="startMs">the start ms for trimming</param>
/// <param name="endMs">the final ms for trimming</param>
/// <param name="inputPath">optional input path string</param>
/// <param name="muxer">the muxer to use for writing bytes</param>
/// <param name="trackIndexOverride">the track index for muxer read/write to</param>
/// <param name="bufferInfo">an input bufferinfo to get properties from</param>
/// <param name="outputPath">the output path for method to check after finished encoding</param>
/// <param name="ptOffset">the presentation time offset for audio, used in syncing audio and video</param>
/// <param name="inputUri">optional inputUri to read from</param>
/// <returns>the output path when the trimmed file exists afterwards, otherwise null</returns>
public async Task<string> HybridMuxingTrimmer(int startMs, int endMs, string inputPath, MediaMuxer muxer, int trackIndexOverride = -1, BufferInfo bufferInfo = null, string outputPath = null, long ptOffset = 0, Android.Net.Uri inputUri = null)
{
    var tio = trackIndexOverride;
    await Task.Run(() =>
    {
        if (outputPath == null) { outputPath = FileToMp4.LatestOutputPath; }

        // Open the source from the Uri when given, otherwise from the path.
        MediaExtractor ext = new MediaExtractor();
        if (inputUri != null)
        {
            ext.SetDataSource(Android.App.Application.Context, inputUri, null);
        }
        else
        {
            ext.SetDataSource(inputPath);
        }

        int trackCount = ext.TrackCount;
        Dictionary<int, int> indexDict = new Dictionary<int, int>(trackCount);
        int bufferSize = -1;
        // Select only audio tracks here; video is handled by the GL encoder.
        for (int i = 0; i < trackCount; i++)
        {
            MediaFormat format = ext.GetTrackFormat(i);
            string mime = format.GetString(MediaFormat.KeyMime);
            bool selectCurrentTrack = false;
            if (mime.StartsWith("audio/")) { selectCurrentTrack = true; }
            else if (mime.StartsWith("video/")) { selectCurrentTrack = false; } /*rerouted to gl video encoder*/
            if (selectCurrentTrack)
            {
                ext.SelectTrack(i);
                if (tio != -1) { indexDict.Add(i, i); }
                // Grow the copy buffer to the largest declared sample size.
                if (format.ContainsKey(MediaFormat.KeyMaxInputSize))
                {
                    int newSize = format.GetInteger(MediaFormat.KeyMaxInputSize);
                    bufferSize = newSize > bufferSize ? newSize : bufferSize;
                }
            }
        }

        // Read the source rotation metadata. Currently unused: the muxer
        // cannot accept an orientation hint once it has been started.
        MediaMetadataRetriever retrieverSrc = new MediaMetadataRetriever();
        if (!System.String.IsNullOrWhiteSpace(inputPath))
        {
            retrieverSrc.SetDataSource(inputPath);
        }
        else
        {
            retrieverSrc.SetDataSource(Android.App.Application.Context, inputUri);
        }
        string degreesString = retrieverSrc.ExtractMetadata(MetadataKey.VideoRotation);
        if (degreesString != null) // unused ATM but will be useful for stabilized videoview in streaming
        {
            int degrees = int.Parse(degreesString);
            // NOTE(review): this block appears brace-mangled in the source;
            // reconstructed as an intentionally empty body with the
            // SetOrientationHint call commented out — verify intent.
            if (degrees >= 0)
            { /* muxer.SetOrientationHint(degrees); */ //muxer won't accept this param once started
            }
        }

        if (startMs > 0)
        {
            ext.SeekTo(startMs * 1000, MediaExtractorSeekTo.ClosestSync);
        }

        int offset = 0;
        if (bufferInfo == null) { bufferInfo = new MediaCodec.BufferInfo(); }
        ByteBuffer dstBuf = ByteBuffer.Allocate(bufferSize);
        // Trim end in microseconds, and the same point shifted by ptOffset.
        // NOTE(review): 'endMs * 1000' is 32-bit arithmetic — overflows for
        // media longer than ~35 minutes; confirm expected input sizes.
        long us = endMs * 1000;
        long uo = us + ptOffset;
        int cf = 0;
        try
        {
            FileToMp4.AudioEncodingInProgress = true;
            while (true)
            {
                bufferInfo.Offset = offset;
                bufferInfo.Size = ext.ReadSampleData(dstBuf, offset);
                if (bufferInfo.Size < 0)
                {
                    // End of stream.
                    bufferInfo.Size = 0;
                    break;
                }
                else
                {
                    cf++;
                    bufferInfo.PresentationTimeUs = ext.SampleTime + ptOffset;
                    if (ext.SampleTime >= us) { break; } //out of while
                    else
                    {
                        bufferInfo.Flags = MFlags2MCodecBuff(ext.SampleFlags);
                        if (tio == -1)
                        {
                            muxer.WriteSampleData(FileToMp4.LatestAudioTrackIndex, dstBuf, bufferInfo);
                        }
                        else
                        {
                            muxer.WriteSampleData(tio, dstBuf, bufferInfo);
                        }
                        if (cf >= 240) //only send the muxer eventargs once every x frames to reduce CPU load
                        {
                            Notify(ext.SampleTime, us);
                            cf = 0;
                        }
                    }
                    ext.Advance();
                }
            }
        }
        catch (Java.Lang.IllegalStateException e)
        {
            this.Progress.Invoke(new MuxerEventArgs(ext.SampleTime, us, null, true, true));
            Console.WriteLine("The source video file is malformed");
        }
        catch (Java.Lang.Exception ex)
        {
            this.Progress.Invoke(new MuxerEventArgs(ext.SampleTime, us, null, true, true));
            Console.WriteLine(ex.Message);
        }
        if (AppSettings.Logging.SendToConsole)
        {
            System.Console.WriteLine($"DrainEncoder audio finished @ {bufferInfo.PresentationTimeUs}");
        }
    });
    FileToMp4.AudioEncodingInProgress = false;
    try
    {
        // Only finalize the muxer when the video side is done with it too.
        if (!FileToMp4.VideoEncodingInProgress)
        {
            muxer.Stop();
            muxer.Release();
            muxer = null;
        }
    }
    catch (Java.Lang.Exception ex)
    {
        Log.Debug("MuxingEncoder", ex.Message);
    }
    if (outputPath != null)
    {
        var success = System.IO.File.Exists(outputPath);
        if (success)
        {
            this.Progress.Invoke(new MuxerEventArgs(endMs * 1000, endMs, outputPath, true));
            return (outputPath);
        }
    }
    return (null); //nothing to look for
}
/// <summary>
/// Decodes the audio track of the given file region to raw PCM.
/// Throws ContentLoadException when no audio track exists, when the stream
/// ends early, or when decoding exceeds the timeout budget.
/// </summary>
public static Result DecodeAudio(FileDescriptor descriptor, long offset, long length)
{
    using (var extractor = new MediaExtractor())
    {
        extractor.SetDataSource(descriptor, offset, length);

        MediaFormat format = null;
        string mime = null;
        for (int i = 0; i < extractor.TrackCount; i++)
        {
            format = extractor.GetTrackFormat(i);
            mime = format.GetString(MediaFormat.KeyMime);
            if (!mime.StartsWith("audio/"))
            {
                continue;
            }
            extractor.SelectTrack(i);
            // BUGFIX: stop at the first audio track. The original kept
            // iterating, so 'format'/'mime' ended up describing the file's
            // LAST track (possibly video) and the guard below could throw
            // even though an audio track had been selected.
            break;
        }

        if (format == null || !mime.StartsWith("audio/"))
        {
            throw new ContentLoadException("Could not find any audio track.");
        }

        int sampleRate = format.GetInteger(MediaFormat.KeySampleRate);
        long duration = format.GetLong(MediaFormat.KeyDuration);
        int channels = format.GetInteger(MediaFormat.KeyChannelCount);
        int samples = (int)(sampleRate * duration / 1000000d);
        // NOTE(review): buffer assumes 16-bit samples; confirm whether it
        // should also scale by 'channels' for multi-channel sources.
        var output = new byte[samples * 2];
        int timeoutsLeft = 1000;

        var decoder = MediaCodecPool.RentDecoder(mime);
        try
        {
            decoder.Configure(format, null, null, MediaCodecConfigFlags.None);
            decoder.Start();

            ByteBuffer[] inputBuffers = decoder.GetInputBuffers();
            ByteBuffer[] outputBuffers = decoder.GetOutputBuffers();

            var bufferInfo = new MediaCodec.BufferInfo();
            int totalOffset = 0;
            bool endOfStream = false;
            while (true)
            {
                // The endOfStream local saves a few calls into the decoder
                // once all input has been queued.
                if (!endOfStream)
                {
                    int inputBufIndex = decoder.DequeueInputBuffer(5000);
                    if (inputBufIndex >= 0)
                    {
                        int size = extractor.ReadSampleData(inputBuffers[inputBufIndex], 0);
                        if (size > 0)
                        {
                            decoder.QueueInputBuffer(
                                inputBufIndex, 0, size, extractor.SampleTime, MediaCodecBufferFlags.None);
                            extractor.Advance();
                        }
                        else
                        {
                            // No more samples: signal end of stream with an
                            // empty buffer.
                            // BUGFIX: the original could queue the EOS buffer
                            // on an index it had already submitted (double
                            // QueueInputBuffer on one index) when the last
                            // data read coincided with the end of the stream.
                            endOfStream = true;
                            decoder.QueueInputBuffer(
                                inputBufIndex, 0, 0, 0, MediaCodecBufferFlags.EndOfStream);
                        }
                    }
                }

                int decoderStatus = decoder.DequeueOutputBuffer(bufferInfo, 5000);
                if (decoderStatus >= 0)
                {
                    IntPtr bufferPtr = outputBuffers[decoderStatus].GetDirectBufferAddress();
                    IntPtr offsetPtr = bufferPtr + bufferInfo.Offset;
                    int size = bufferInfo.Size;
                    Marshal.Copy(offsetPtr, output, totalOffset, size);

                    decoder.ReleaseOutputBuffer(decoderStatus, render: false);
                    totalOffset += size;

                    // BUGFIX: EOS is a bit flag and may arrive combined with
                    // other flags; test the bit rather than exact equality.
                    if ((bufferInfo.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                    {
                        if (totalOffset != output.Length)
                        {
                            throw new ContentLoadException(
                                "Reached end of stream before reading expected amount of samples.");
                        }
                        break;
                    }
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
                {
                    outputBuffers = decoder.GetOutputBuffers();
                }
                else if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
                {
                    if (timeoutsLeft-- <= 0)
                    {
                        break;
                    }
                }
            }
        }
        finally
        {
            decoder.Stop();
            MediaCodecPool.ReturnDecoder(mime, decoder);
        }

        if (timeoutsLeft <= 0)
        {
            throw new ContentLoadException("Could not load sound effect in designated time frame.");
        }

        return new Result(output, sampleRate, channels, mime);
    }
}
/**
 * Work loop: drains the selected video track through the decoder, rendering
 * each frame to 'outputSurface' and running face detection on the rendered
 * frame bitmap. Timestamps of queued samples are recorded so detection
 * results can be matched back to frames.
 */
private void doExtract(MediaExtractor extractor, int trackIndex, MediaCodec decoder, CodecOutputSurface outputSurface)
{
    Stopwatch stopWatch = new Stopwatch();
    const int TIMEOUT_USEC = 10000;
    ByteBuffer [] decoderInputBuffers = decoder.GetInputBuffers();
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    int inputChunk = 0;
    int decodeCount = 0;
    // Presentation timestamp of each queued sample, in decode order.
    var frameTimestamps = new List <long>();

    bool outputDone = false;
    bool inputDone = false;

    // Speed vs accuracy tradeoffs:
    // https://stackoverflow.com/questions/34132444/google-mobile-vision-poor-facedetector-performance-without-camerasource
    // Reducing bitmap resolution helps the most, and that's ok because the
    // bitmaps are not used afterwards.
    var detector = new FaceDetector.Builder(Application.Context)
        .SetTrackingEnabled(true)       // tracking disabled was much slower (why?)
        .SetClassificationType(ClassificationType.All)
        .SetProminentFaceOnly(true)     // no measurable difference
        .SetMode(FaceDetectionMode.Fast) // tiny performance gain
        .Build();

    while (!outputDone)
    {
        stopWatch.Start();

        // Feed more data to the decoder.
        if (!inputDone)
        {
            int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
            if (inputBufIndex >= 0)
            {
                ByteBuffer inputBuf = decoderInputBuffers[inputBufIndex];
                // Read the sample data into the ByteBuffer. This neither
                // respects nor updates inputBuf's position, limit, etc.
                int chunkSize = extractor.ReadSampleData(inputBuf, 0);
                if (chunkSize < 0)
                {
                    // End of stream -- send empty frame with EOS flag set.
                    decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L, MediaCodec.BufferFlagEndOfStream);
                    inputDone = true;
                }
                else
                {
                    if (extractor.SampleTrackIndex != trackIndex)
                    {
                        // Sample came from an unexpected track; original code
                        // only logged a warning here.
                    }
                    // Remember the timestamp so face detection results can be
                    // matched to frames later.
                    frameTimestamps.Add(extractor.SampleTime);
                    decoder.QueueInputBuffer(inputBufIndex, 0, chunkSize, extractor.SampleTime, 0 /*flags*/);
                    inputChunk++;
                    extractor.Advance();
                }
            }
            else
            {
                // Input buffer not available; retry on the next pass.
            }
        }

        if (!outputDone)
        {
            int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // No output available yet.
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                // Not important for us, since we're rendering to a Surface.
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                // Output format changed; nothing to do when using a Surface.
            }
            else if (decoderStatus < 0)
            {
                // Unexpected result from decoder.DequeueOutputBuffer.
                throw new InvalidOperationException();
            }
            else
            {
                if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                {
                    outputDone = true;
                }
                bool doRender = (info.Size != 0);

                // As soon as we call ReleaseOutputBuffer, the buffer will be
                // forwarded to SurfaceTexture to convert to a texture. The API
                // doesn't guarantee that the texture will be available before
                // the call returns, so we would need to wait for the
                // onFrameAvailable callback to fire (the author could not get
                // that callback working; see notes below).
                decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                if (doRender)
                {
                    // outputSurface.awaitNewImage() was removed: the callback
                    // never fired and waiting per frame was far too slow.
                    outputSurface.mTextureRender.checkGlError("before updateTexImage");
                    outputSurface.mSurfaceTexture.UpdateTexImage();
                    outputSurface.drawImage(true);
                    // Running the detector from a background task was too
                    // slow, so it is invoked synchronously per frame.
                    CreateFaceframes(detector, outputSurface.GetFramebitmap(), frameTimestamps[decodeCount]);
                    decodeCount++;
                }
            }
        }
    }
    stopWatch.Stop();
    Log.Info("inner STOPWATCH!!!!:", string.Format("numberofframes = {0}, totaltime = {1}", decodeCount, stopWatch.ElapsedMilliseconds));
    detector.Release();
}
/// <summary>
/// Copies all audio/video samples between startMS and startMS+lengthMS from
/// inputPath into a new MP4 at outputPath without re-encoding.
/// Returns true on success, false when the source is unreadable or malformed.
/// </summary>
public Task<bool> TrimAsync(int startMS, int lengthMS, string inputPath, string outputPath)
{
    return Task.Run<bool>(() =>
    {
        try
        {
            bool didOperationSucceed = false;

            // Set up MediaExtractor to read from the source.
            MediaExtractor extractor = new MediaExtractor();
            try
            {
                extractor.SetDataSource(inputPath);
                int trackCount = extractor.TrackCount;

                // Set up MediaMuxer for the destination.
                MediaMuxer muxer = new MediaMuxer(outputPath, MuxerOutputType.Mpeg4);

                // Set up the tracks and retrieve the max buffer size for
                // selected tracks.
                Dictionary<int, int> indexDict = new Dictionary<int, int>(trackCount);
                int bufferSize = -1;
                for (int i = 0; i < trackCount; i++)
                {
                    MediaFormat format = extractor.GetTrackFormat(i);
                    string mime = format.GetString(MediaFormat.KeyMime);
                    bool selectCurrentTrack = mime.StartsWith("audio/") || mime.StartsWith("video/");
                    if (selectCurrentTrack)
                    {
                        extractor.SelectTrack(i);
                        int dstIndex = muxer.AddTrack(format);
                        indexDict.Add(i, dstIndex);
                        if (format.ContainsKey(MediaFormat.KeyMaxInputSize))
                        {
                            int newSize = format.GetInteger(MediaFormat.KeyMaxInputSize);
                            bufferSize = newSize > bufferSize ? newSize : bufferSize;
                        }
                    }
                }
                if (bufferSize < 0)
                {
                    // Fallback when no track declared a max input size.
                    bufferSize = 1337;
                }

                // Set up the orientation and starting time for extractor.
                MediaMetadataRetriever retrieverSrc = new MediaMetadataRetriever();
                retrieverSrc.SetDataSource(inputPath);
                string degreesString = retrieverSrc.ExtractMetadata(MetadataKey.VideoRotation);
                if (degreesString != null)
                {
                    int degrees = int.Parse(degreesString);
                    if (degrees >= 0)
                    {
                        muxer.SetOrientationHint(degrees);
                    }
                }

                if (startMS > 0)
                {
                    // BUGFIX: use long arithmetic (1000L) — 'startMS * 1000'
                    // overflows int for start times beyond ~35 minutes.
                    extractor.SeekTo(startMS * 1000L, MediaExtractorSeekTo.ClosestSync);
                }

                // Copy the samples from MediaExtractor to MediaMuxer, stopping
                // at end of source or past the end of the trim window.
                int offset = 0;
                int trackIndex = -1;
                ByteBuffer dstBuf = ByteBuffer.Allocate(bufferSize);
                MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
                try
                {
                    muxer.Start();
                    while (true)
                    {
                        bufferInfo.Offset = offset;
                        bufferInfo.Size = extractor.ReadSampleData(dstBuf, offset);
                        if (bufferInfo.Size < 0)
                        {
                            // End of source.
                            bufferInfo.Size = 0;
                            break;
                        }

                        bufferInfo.PresentationTimeUs = extractor.SampleTime;
                        // BUGFIX: long arithmetic here too (overflowed int for
                        // trim windows ending beyond ~35 minutes).
                        if (lengthMS > 0 && bufferInfo.PresentationTimeUs > ((startMS + lengthMS - 1) * 1000L))
                        {
                            Console.WriteLine("The current sample is over the trim end time.");
                            break;
                        }

                        bufferInfo.Flags = ConvertMediaExtractorSampleFlagsToMediaCodecBufferFlags(extractor.SampleFlags);
                        trackIndex = extractor.SampleTrackIndex;
                        muxer.WriteSampleData(indexDict[trackIndex], dstBuf, bufferInfo);
                        extractor.Advance();
                    }
                    muxer.Stop();
                    didOperationSucceed = true;
                }
                catch (IllegalStateException)
                {
                    // Swallow the exception due to malformed source.
                    Console.WriteLine("The source video file is malformed");
                }
                finally
                {
                    muxer.Release();
                }
                return didOperationSucceed;
            }
            finally
            {
                // BUGFIX: the extractor was never released — native resources
                // leaked on every call.
                extractor.Release();
            }
        }
        catch (System.Exception)
        {
            return false;
        }
    });
}
/**
 * Tests extraction from an MP4 to a series of PNG files.
 * <p>
 * We scale the video to 640x480 for the PNG just to demonstrate that we can
 * scale the video with the GPU. If the input video has a different aspect
 * ratio, we could preserve it by adjusting the GL viewport to get letterboxing
 * or pillarboxing, but generally if you're extracting frames you don't want
 * black bars.
 */
public void extractMpegFrames(int saveWidth, int saveHeight)
{
    MediaCodec videoDecoder = null;
    CodecOutputSurface renderSurface = null;
    MediaExtractor mediaExtractor = null;
    try
    {
        // MediaExtractor's own error messages aren't very useful, so verify
        // up front that the input file (must be an absolute path) is readable
        // and throw a clearer exception if it isn't.
        File sourceFile = new File(_filesdir, INPUT_FILE);
        if (!sourceFile.CanRead())
        {
            throw new FileNotFoundException("Unable to read " + sourceFile);
        }

        mediaExtractor = new MediaExtractor();
        mediaExtractor.SetDataSource(sourceFile.ToString());

        int videoTrack = selectTrack(mediaExtractor);
        if (videoTrack < 0)
        {
            throw new RuntimeException("No video track found in " + sourceFile);
        }
        mediaExtractor.SelectTrack(videoTrack);

        MediaFormat trackFormat = mediaExtractor.GetTrackFormat(videoTrack);
        if (VERBOSE)
        {
            Log.Info(TAG, "Video size is " + trackFormat.GetInteger(MediaFormat.KeyWidth) + "x" + trackFormat.GetInteger(MediaFormat.KeyHeight));
        }

        // Could use width/height from the MediaFormat to get full-size frames.
        renderSurface = new CodecOutputSurface(saveWidth, saveHeight);

        // Create a MediaCodec decoder and configure it with the format taken
        // from the extractor — that format carries the CSD-0/CSD-1
        // codec-specific data chunks the decoder needs.
        String trackMime = trackFormat.GetString(MediaFormat.KeyMime);
        videoDecoder = MediaCodec.CreateDecoderByType(trackMime);
        videoDecoder.Configure(trackFormat, renderSurface.getSurface(), null, 0);
        videoDecoder.Start();

        doExtract(mediaExtractor, videoTrack, videoDecoder, renderSurface);
    }
    finally
    {
        // Release everything we grabbed, whether or not extraction succeeded.
        if (renderSurface != null)
        {
            renderSurface.release();
            renderSurface = null;
        }
        if (videoDecoder != null)
        {
            videoDecoder.Stop();
            videoDecoder.Release();
            videoDecoder = null;
        }
        if (mediaExtractor != null)
        {
            mediaExtractor.Release();
            mediaExtractor = null;
        }
    }
}
/// <summary>
/// Remuxes the selected tracks of <paramref name="srcPath"/> into
/// <paramref name="dstPath"/>, keeping samples from
/// <paramref name="startMicroSeconds"/> up to <paramref name="endMicroSeconds"/>
/// (an end value &lt;= 0 copies through to the end of the source).
/// </summary>
/// <param name="useAudio">Include "audio/*" tracks.</param>
/// <param name="useVideo">Include "video/*" tracks.</param>
/// <returns>true on success; false if the source is malformed.</returns>
/// <exception cref="InvalidParameterException">start equals end.</exception>
private static bool genVideoUsingMuxer(String srcPath, String dstPath, long startMicroSeconds, long endMicroSeconds, bool useAudio, bool useVideo)
{
    if (startMicroSeconds == endMicroSeconds)
    {
        // Same exception type as before; the original message was profane.
        throw new InvalidParameterException("Invalid trim range: end time must be greater than start time.");
    }

    // Set up MediaExtractor to read from the source.
    MediaExtractor extractor = new MediaExtractor();
    try
    {
        extractor.SetDataSource(srcPath);
        int trackCount = extractor.TrackCount;

        // Set up MediaMuxer for the destination.
        var muxer = new MediaMuxer(dstPath, MediaMuxer.OutputFormat.MuxerOutputMpeg4);

        // Select the requested tracks, remember the extractor->muxer index
        // mapping, and retrieve the max declared buffer size.
        Dictionary<int, int> indexMap = new Dictionary<int, int>(trackCount);
        int bufferSize = -1;
        for (int i = 0; i < trackCount; i++)
        {
            MediaFormat format = extractor.GetTrackFormat(i);
            String mime = format.GetString(MediaFormat.KeyMime);
            bool selectCurrentTrack = false;
            if (mime.StartsWith("audio/") && useAudio)
            {
                selectCurrentTrack = true;
            }
            else if (mime.StartsWith("video/") && useVideo)
            {
                selectCurrentTrack = true;
            }
            if (selectCurrentTrack)
            {
                extractor.SelectTrack(i);
                int dstIndex = muxer.AddTrack(format);
                indexMap.Add(i, dstIndex);
                if (format.ContainsKey(MediaFormat.KeyMaxInputSize))
                {
                    int newSize = format.GetInteger(MediaFormat.KeyMaxInputSize);
                    bufferSize = newSize > bufferSize ? newSize : bufferSize;
                }
            }
        }
        if (bufferSize < 0)
        {
            bufferSize = DEFAULT_BUFFER_SIZE;
        }

        // Set up the orientation and starting time for extractor.
        MediaMetadataRetriever retrieverSrc = new MediaMetadataRetriever();
        try
        {
            retrieverSrc.SetDataSource(srcPath);
            String degreesString = retrieverSrc.ExtractMetadata(MediaMetadataRetriever.MetadataKeyVideoRotation);
            if (degreesString != null)
            {
                int degrees = Integer.ParseInt(degreesString);
                if (degrees >= 0)
                {
                    muxer.SetOrientationHint(degrees);
                }
            }
        }
        finally
        {
            // The original never released the retriever (native leak).
            retrieverSrc.Release();
        }

        if (startMicroSeconds > 0)
        {
            extractor.SeekTo(startMicroSeconds, MediaExtractor.SeekToClosestSync);
        }

        // Copy the samples from MediaExtractor to MediaMuxer. We will loop for
        // copying each sample and stop when we get to the end of the source
        // file or exceed the end time of the trimming.
        int offset = 0;
        int trackIndex = -1;
        ByteBuffer dstBuf = ByteBuffer.Allocate(bufferSize);
        MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
        try
        {
            muxer.Start();
            while (true)
            {
                bufferInfo.Offset = offset;
                bufferInfo.Size = extractor.ReadSampleData(dstBuf, offset);
                if (bufferInfo.Size < 0)
                {
                    Log.Info(LOGTAG, "Saw input EOS.");
                    bufferInfo.Size = 0;
                    break;
                }

                bufferInfo.PresentationTimeUs = extractor.SampleTime;
                if (endMicroSeconds > 0 && bufferInfo.PresentationTimeUs > endMicroSeconds)
                {
                    Log.Info(LOGTAG, "The current sample is over the trim end time.");
                    break;
                }

                // Extractor sample flags must be mapped onto codec buffer
                // flags before handing the sample to the muxer.
                bufferInfo.Flags = GetSyncsampleflags(extractor.SampleFlags);
                trackIndex = extractor.SampleTrackIndex;
                muxer.WriteSampleData(indexMap[trackIndex], dstBuf, bufferInfo);
                extractor.Advance();
            }
            muxer.Stop();
        }
        catch (IllegalStateException)
        {
            // Swallow the exception due to malformed source.
            Log.Info(LOGTAG, "The source video file is malformed");
            return false;
        }
        finally
        {
            muxer.Release();
        }
        return true;
    }
    finally
    {
        // The original leaked the extractor on every path.
        extractor.Release();
    }
}