         /**
          * Edits a video file, saving the contents to a new file. This involves decoding and
          * re-encoding, plus conversions between YUV and RGB, so the result may be lossy.
          * <p>
          * If we recognize the decoded format we could do the edit in managed code using the
          * ByteBuffer[] output, but it's not practical to support every OEM color format. By
          * using a SurfaceTexture for output and a Surface for input, we avoid issues with
          * obscure formats and can use a fragment shader to do the transformations.
          */
        private VideoChunks editVideoFile(VideoChunks inputData)
        {
            if (AppSettings.Logging.SendToConsole)
            {
                Log.Debug(TAG, "editVideoFile " + mWidth + "x" + mHeight);
            }
            VideoChunks   outputData    = new VideoChunks();
            MediaCodec    decoder       = null;
            MediaCodec    encoder       = null;
            InputSurface  inputSurface  = null;
            OutputSurface outputSurface = null;

            try {
                MediaFormat inputFormat = inputData.getMediaFormat();
                // Create an encoder format that matches the input format. (Might be able to just
                // re-use the format used to generate the video, since we want it to be the same.)
                MediaFormat outputFormat = MediaFormat.CreateVideoFormat(MIME_TYPE, mWidth, mHeight);
                outputFormat.SetInteger(MediaFormat.KeyColorFormat, (int)MediaCodecInfo.CodecCapabilities.COLORFormatSurface);
                outputFormat.SetInteger(MediaFormat.KeyBitRate, inputFormat.GetInteger(MediaFormat.KeyBitRate));
                outputFormat.SetInteger(MediaFormat.KeyFrameRate, inputFormat.GetInteger(MediaFormat.KeyFrameRate));
                outputFormat.SetInteger(MediaFormat.KeyIFrameInterval, inputFormat.GetInteger(MediaFormat.KeyIFrameInterval));
                outputData.setMediaFormat(outputFormat);
                encoder = MediaCodec.CreateEncoderByType(MIME_TYPE);
                encoder.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
                inputSurface = new InputSurface(encoder.CreateInputSurface());
                inputSurface.MakeCurrent();
                encoder.Start();
                // OutputSurface uses the EGL context created by InputSurface.
                decoder       = MediaCodec.CreateDecoderByType(MIME_TYPE);
                outputSurface = new OutputSurface();
                outputSurface.ChangeFragmentShader(FRAGMENT_SHADER);
                decoder.Configure(inputFormat, outputSurface.Surface, null, 0);
                decoder.Start();
                editVideoData(inputData, decoder, outputSurface, inputSurface, encoder, outputData);
            } finally {
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "shutting down encoder, decoder");
                }
                if (outputSurface != null)
                {
                    outputSurface.Release();
                }
                if (inputSurface != null)
                {
                    inputSurface.Release();
                }
                if (encoder != null)
                {
                    encoder.Stop();
                    encoder.Release();
                }
                if (decoder != null)
                {
                    decoder.Stop();
                    decoder.Release();
                }
            }
            return(outputData);
        }
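         // ChangeFragmentShader() above takes a GLSL fragment shader source string; the real
         // FRAGMENT_SHADER constant is defined elsewhere in this class and is not part of this
         // listing. The constant below is only a hedged sketch of what such a shader can look
         // like, assuming OutputSurface's vertex shader supplies vTextureCoord and binds the
         // decoder's SurfaceTexture to the external texture sTexture (assumed names).
         private const string FRAGMENT_SHADER_EXAMPLE =
             "#extension GL_OES_EGL_image_external : require\n" +
             "precision mediump float;\n" +
             "varying vec2 vTextureCoord;\n" +
             "uniform samplerExternalOES sTexture;\n" +
             "void main() {\n" +
             // Swizzle the color channels so edited frames are visibly different from the input.
             "    gl_FragColor = texture2D(sTexture, vTextureCoord).gbra;\n" +
             "}\n";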
Example #2
        // For audio: http://stackoverflow.com/questions/22673011/how-to-extract-pcm-samples-from-mediacodec-decoders-output

        private void EncodeCameraToMp4()
        {
            // arbitrary but popular values

            try {
                prepareMediaPlayer();
                prepareEncoder();
                _inputSurface.MakeCurrent();
                prepareSurfaceTexture();

                _mediaPlayer.Start();

                var st         = _outputSurface.SurfaceTexture;
                int frameCount = 0;

                var  curShad     = false;      // which of the two fragment shaders is currently applied
                bool isCompleted = false;
                _mediaPlayer.Completion += (object sender, System.EventArgs e) =>
                {
                    isCompleted = true;
                };
                while (!isCompleted)
                {
                    // Feed any pending encoder output into the muxer.

                    drainEncoder(false);

                    if ((frameCount % _fps) == 0)
                    {
                        curShad = !curShad;
                    }

                    // We flash it between rgb and bgr to quickly demonstrate shading is working
                    if (curShad)
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER1);
                    }
                    else
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER2);
                    }

                    frameCount++;

                    // Acquire a new frame of input, and render it to the Surface.  If we had a
                    // GLSurfaceView we could switch EGL contexts and call drawImage() a second
                    // time to render it on screen.  The texture can be shared between contexts by
                    // passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
                    // argument.
                    if (!_outputSurface.AwaitNewImage())
                    {
                        break;
                    }
                    _outputSurface.DrawImage();

                    // Set the presentation time stamp from the SurfaceTexture's time stamp.  This
                    // will be used by MediaMuxer to set the PTS in the video.

                    _inputSurface.SetPresentationTime(st.Timestamp);

                    // Submit it to the encoder.  The eglSwapBuffers call will block if the input
                    // is full, which would be bad if it stayed full until we dequeued an output
                    // buffer (which we can't do, since we're stuck here).  So long as we fully drain
                    // the encoder before supplying additional input, the system guarantees that we
                    // can supply another frame without blocking.
                    if (VERBOSE)
                    {
                        Log.Debug(TAG, "sending frame to encoder");
                    }
                    _inputSurface.SwapBuffers();
                }

                // send end-of-stream to encoder, and drain remaining output
                drainEncoder(true);
            } finally {
                // release everything we grabbed
                releaseMediaPlayer();
                releaseEncoder();
                releaseSurfaceTexture();
            }
        }
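         // drainEncoder() is called throughout the loop above but is not part of this listing.
         // The method below is a minimal sketch of the usual drain loop under assumed field names
         // (_encoder, _muxer, _bufferInfo, _trackIndex and _muxerStarted are not taken from this
         // file): pull every pending output buffer from the encoder and feed it to the muxer,
         // starting the muxer once the encoder reports its real output format.
         private void drainEncoderSketch(bool endOfStream)
         {
             const int TIMEOUT_USEC = 10000;
             if (endOfStream)
             {
                 // No more frames will arrive on the encoder's input Surface.
                 _encoder.SignalEndOfInputStream();
             }
             while (true)
             {
                 int index = _encoder.DequeueOutputBuffer(_bufferInfo, TIMEOUT_USEC);
                 if (index == (int)MediaCodecInfoState.TryAgainLater)
                 {
                     if (!endOfStream)
                     {
                         break;         // nothing pending; go feed more input
                     }
                     // at end of stream, keep polling until EndOfStream is flagged
                 }
                 else if (index == (int)MediaCodecInfoState.OutputFormatChanged)
                 {
                     // Happens once, before any encoded data; only now can the muxer be started.
                     _trackIndex   = _muxer.AddTrack(_encoder.OutputFormat);
                     _muxer.Start();
                     _muxerStarted = true;
                 }
                 else if (index >= 0)
                 {
                     var encodedData = _encoder.GetOutputBuffer(index);
                     if ((_bufferInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
                     {
                         // Codec config data already went to the muxer via AddTrack(); skip it.
                         _bufferInfo.Size = 0;
                     }
                     if (_bufferInfo.Size != 0 && _muxerStarted)
                     {
                         encodedData.Position(_bufferInfo.Offset);
                         encodedData.Limit(_bufferInfo.Offset + _bufferInfo.Size);
                         _muxer.WriteSampleData(_trackIndex, encodedData, _bufferInfo);
                     }
                     _encoder.ReleaseOutputBuffer(index, false);
                     if ((_bufferInfo.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                     {
                         break;         // encoder is done
                     }
                 }
             }
         }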
Example #3
        private string EncodeFileToMp4(string inputPath, string outputPath, bool encodeAudio = true, Android.Net.Uri inputUri = null)
        {
            LatestInputVideoLength = AudioEncoding.GetVideoLength(inputPath, inputUri);
            LatestAudioInputFormat = AudioEncoding.GetAudioTrackFormat(inputPath, inputUri);
            EstimateTotalSize(LatestInputVideoLength, _bitRate);
            try
            {
                prepareMediaPlayer(inputPath, inputUri);
                prepareEncoder(outputPath);
                _inputSurface.MakeCurrent();
                prepareWeakSurfaceTexture();
                _mediaPlayer.Start();
                _mediaPlayer.SetAudioStreamType(Android.Media.Stream.VoiceCall);
                _mediaPlayer.SetVolume(0, 0);
                _frameCount = 0;
            }
             catch (System.Exception ex)
             {
                 Log.Debug("VideoEncoder", ex.Message);
             }
            VideoEncodingInProgress = true;
            while (true)
            {
                 D(false);           // presumably drains pending encoder output (endOfStream: false)
                _frameCount++;

                /*
                 * Disable this to make it faster when not debugging
                 */
#if DEBUG
                if (_frameCount >= 120 && AppSettings.Logging.SendToConsole)
                {
                    System.Console.WriteLine($"FileToMp4 progress @ {_outputSurface.WeakSurfaceTexture.Timestamp} " +
                                             $" | encoded bits {_bitsEncodedSoFar} of estimated {_estimatedTotalSize}");
                }
#endif
                // Acquire a new frame of input, and render it to the Surface.  If we had a
                // GLSurfaceView we could switch EGL contexts and call drawImage() a second
                // time to render it on screen.  The texture can be shared between contexts by
                // passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
                // argument.
                if (!_outputSurface.AwaitNewImage(true))
                {
                    break;
                }
                _outputSurface.DrawImage();

                // Set the presentation time stamp from the WeakSurfaceTexture's time stamp.  This
                // will be used by MediaMuxer to set the PTS in the video.

                _inputSurface.SetPresentationTime(_outputSurface.WeakSurfaceTexture.Timestamp);

                //if (AppSettings.Logging.SendToConsole) Log.Debug("MediaLoop", "Set Time " + st.Timestamp);
                // Submit it to the encoder.  The eglSwapBuffers call will block if the input
                // is full, which would be bad if it stayed full until we dequeued an output
                // buffer (which we can't do, since we're stuck here).  So long as we fully drain
                // the encoder before supplying additional input, the system guarantees that we
                // can supply another frame without blocking.
                //if (AppSettings.Logging.SendToConsole) Log.Debug(TAG, "sending frame to encoder:");
                _inputSurface.SwapBuffers();
                if (_bitsEncodedSoFar >= _estimatedTotalSize)
                {
                    break;
                }
            }
             D(true);                // presumably the final drain (endOfStream: true)
            VideoEncodingInProgress = false;
#if DEBUG
            if (AppSettings.Logging.SendToConsole)
            {
                System.Console.WriteLine($"DrainEncoder started @ {_firstKnownBuffer} exited @ " +
                                         $"{_outputSurface.WeakSurfaceTexture.Timestamp}  " +
                                         $"| encoded bits {_bitsEncodedSoFar} of estimated {_estimatedTotalSize}");
            }
#endif
             try
             {
                 releaseMediaPlayer();
                 releaseEncoder();
                 releaseWeakSurfaceTexture();
             }
             catch { }
            _firstKnownBuffer   = 0;
            _estimatedTotalSize = 0;
            _frameCount         = 0;
            _bitsEncodedSoFar   = 0;
            _bfi = new BufferInfo();
            if (!AudioEncodingInProgress)
            {
                _muxer.Stop(); // if the audio encoding isn't still running then we'll stop everything and return
                _muxer.Release();
                _muxer = null;
                if (File.Exists(outputPath))
                {
                    this.Progress.Invoke(new EncoderMinArgs(EncodedBits(_bfi.Size), _estimatedTotalSize, true, false, outputPath));
                    return(outputPath);
                }
            }
            this.Progress.Invoke(new EncoderMinArgs(EncodedBits(_bfi.Size), _estimatedTotalSize, false, false, null));
            return(null); //file isn't finished processing yet
        }
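         // EstimateTotalSize() and EncodedBits() are used above but not shown in this listing.
         // The two helpers below only sketch the arithmetic they appear to perform (an
         // assumption, not the actual implementation): the encode loop stops once
         // _bitsEncodedSoFar reaches the estimate, so the estimate only needs to be a rough
         // upper bound on the encoded size.
         private static long EstimateTotalSizeSketch(long videoLengthMs, int bitRate)
         {
             // bitRate is in bits per second; the video length is assumed to be in milliseconds.
             // The real helper presumably stores this value in _estimatedTotalSize.
             return (long)(bitRate * (videoLengthMs / 1000.0));
         }

         private static long EncodedBitsSketch(long bitsSoFar, int bufferSizeBytes)
         {
             // Each drained encoder buffer adds its payload size, converted to bits; the real
             // helper presumably accumulates this into _bitsEncodedSoFar and returns the total.
             return bitsSoFar + bufferSizeBytes * 8L;
         }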
Example #4
        private void encodeCameraToMpeg()
        {
            // arbitrary but popular values
            int encWidth   = 640;
            int encHeight  = 480;
            int encBitRate = 6000000;                  // bits per second (6 Mbps)

            Log.Debug(TAG, MIME_TYPE + " output " + encWidth + "x" + encHeight + " @" + encBitRate);

            try {
                prepareCamera(encWidth, encHeight);
                prepareEncoder(encWidth, encHeight, encBitRate);
                _inputSurface.MakeCurrent();
                prepareSurfaceTexture();

                _camera.StartPreview();

                long startWhen  = JavaSystem.NanoTime();
                long desiredEnd = startWhen + DURATION_SEC * 1000000000L;
                var  st         = _outputSurface.SurfaceTexture;
                int  frameCount = 0;

                var curShad = false;       // which of the two fragment shaders is currently applied

                while (JavaSystem.NanoTime() < desiredEnd)
                {
                    // Feed any pending encoder output into the muxer.
                    drainEncoder(false);

                    if ((frameCount % 24) == 0)
                    {
                        curShad = !curShad;
                    }

                    if (curShad)
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER1);
                    }
                    else
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER2);
                    }

                    frameCount++;

                    // Acquire a new frame of input, and render it to the Surface.  If we had a
                    // GLSurfaceView we could switch EGL contexts and call drawImage() a second
                    // time to render it on screen.  The texture can be shared between contexts by
                    // passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
                    // argument.
                    _outputSurface.AwaitNewImage();
                    _outputSurface.DrawImage();

                    // Set the presentation time stamp from the SurfaceTexture's time stamp.  This
                    // will be used by MediaMuxer to set the PTS in the video.
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "present: " +
                                  ((st.Timestamp - startWhen) / 1000000.0) + "ms");
                    }
                    _inputSurface.SetPresentationTime(st.Timestamp);

                    // Submit it to the encoder.  The eglSwapBuffers call will block if the input
                    // is full, which would be bad if it stayed full until we dequeued an output
                    // buffer (which we can't do, since we're stuck here).  So long as we fully drain
                    // the encoder before supplying additional input, the system guarantees that we
                    // can supply another frame without blocking.
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "sending frame to encoder");
                    }
                    _inputSurface.SwapBuffers();
                }

                // send end-of-stream to encoder, and drain remaining output
                drainEncoder(true);
            } finally {
                // release everything we grabbed
                releaseCamera();
                releaseEncoder();
                releaseSurfaceTexture();
            }
        }
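         // InputSurface is not shown in this listing, but the comments above lean on two of its
         // members. The sketches below assume InputSurface wraps an EGL14 display/surface pair
         // created from the encoder's input Surface (_eglDisplay and _eglSurface are assumed
         // names, not taken from this file).
         public void SetPresentationTimeSketch(long nsecs)
         {
             // Tags the next eglSwapBuffers() with a presentation time in nanoseconds, so the
             // SurfaceTexture timestamp ends up as the PTS that MediaMuxer writes for the frame.
             Android.Opengl.EGLExt.EglPresentationTimeANDROID(_eglDisplay, _eglSurface, nsecs);
         }

         public bool SwapBuffersSketch()
         {
             // Publishes the frame to the encoder's input Surface. This can block while the
             // encoder's input queue is full, which is why the loops above drain the encoder
             // before submitting another frame.
             return Android.Opengl.EGL14.EglSwapBuffers(_eglDisplay, _eglSurface);
         }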
        /**
         * Generates a test video file, saving it as VideoChunks. We generate frames with GL to
         * avoid having to deal with multiple YUV formats.
         *
         * @return true on success, false on "soft" failure
         */
        private bool generateVideoFile(VideoChunks output)
        {
            if (AppSettings.Logging.SendToConsole)
            {
                Log.Debug(TAG, "generateVideoFile " + mWidth + "x" + mHeight);
            }
            MediaCodec   encoder      = null;
            InputSurface inputSurface = null;

            try {
                MediaCodecInfo codecInfo = selectCodec(MIME_TYPE);
                if (codecInfo == null)
                {
                    // Don't fail CTS if they don't have an AVC codec (not here, anyway).
                    Log.Error(TAG, "Unable to find an appropriate codec for " + MIME_TYPE);
                    return(false);
                }
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "found codec: " + codecInfo.Name);
                }
                // We avoid the device-specific limitations on width and height by using values that
                // are multiples of 16, which all tested devices seem to be able to handle.
                MediaFormat format = MediaFormat.CreateVideoFormat(MIME_TYPE, mWidth, mHeight);
                // Set some properties. Failing to specify some of these can cause the MediaCodec
                // configure() call to throw an unhelpful exception.
                format.SetInteger(MediaFormat.KeyColorFormat,
                                  (int)MediaCodecCapabilities.Formatsurface);
                format.SetInteger(MediaFormat.KeyBitRate, mBitRate);
                format.SetInteger(MediaFormat.KeyFrameRate, FRAME_RATE);
                format.SetInteger(MediaFormat.KeyIFrameInterval, IFRAME_INTERVAL);
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "format: " + format);
                }
                output.setMediaFormat(format);
                // Create a MediaCodec for the desired codec, then configure it as an encoder with
                // our desired properties.
                encoder = MediaCodec.CreateByCodecName(codecInfo.Name);
                encoder.Configure(format, null, null, MediaCodecConfigFlags.Encode);
                inputSurface = new InputSurface(encoder.CreateInputSurface());
                inputSurface.MakeCurrent();
                encoder.Start();
                generateVideoData(encoder, inputSurface, output);
            } finally {
                if (encoder != null)
                {
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "releasing encoder");
                    }
                    encoder.Stop();
                    encoder.Release();
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "released encoder");
                    }
                }
                if (inputSurface != null)
                {
                    inputSurface.Release();
                }
            }
            return(true);
        }
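         // selectCodec() above is not part of this listing. The helper below sketches the usual
         // lookup it performs, walking MediaCodecList for the first encoder that advertises the
         // requested MIME type (the same pattern as the Android CTS tests; treat the body as an
         // assumption about the missing helper rather than its actual implementation).
         private static MediaCodecInfo selectCodecSketch(string mimeType)
         {
             int numCodecs = MediaCodecList.CodecCount;
             for (int i = 0; i < numCodecs; i++)
             {
                 MediaCodecInfo codecInfo = MediaCodecList.GetCodecInfoAt(i);
                 if (!codecInfo.IsEncoder)
                 {
                     continue;                     // only encoders are interesting here
                 }
                 foreach (string type in codecInfo.GetSupportedTypes())
                 {
                     if (type.Equals(mimeType, System.StringComparison.OrdinalIgnoreCase))
                     {
                         return codecInfo;         // first matching encoder wins
                     }
                 }
             }
             return null;                          // caller treats this as a soft failure
         }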