Code Example #1
        /**
         * Edits a video file, saving the contents to a new file. This involves decoding and
         * re-encoding, not to mention conversions between YUV and RGB, and so may be lossy.
         * <p>
         * If we recognize the decoded format we can do this in managed code using the ByteBuffer[]
         * output, but it's not practical to support all OEM formats. By using a SurfaceTexture
         * for output and a Surface for input, we can avoid issues with obscure formats and can
         * use a fragment shader to do transformations.
         */
        private VideoChunks editVideoFile(VideoChunks inputData)
        {
            if (AppSettings.Logging.SendToConsole)
            {
                Log.Debug(TAG, "editVideoFile " + mWidth + "x" + mHeight);
            }
            VideoChunks   outputData    = new VideoChunks();
            MediaCodec    decoder       = null;
            MediaCodec    encoder       = null;
            InputSurface  inputSurface  = null;
            OutputSurface outputSurface = null;

            try {
                MediaFormat inputFormat = inputData.getMediaFormat();
                // Create an encoder format that matches the input format. (Might be able to just
                // re-use the format used to generate the video, since we want it to be the same.)
                MediaFormat outputFormat = MediaFormat.CreateVideoFormat(MIME_TYPE, mWidth, mHeight);
                outputFormat.SetInteger(MediaFormat.KeyColorFormat, (int)MediaCodecInfo.CodecCapabilities.COLORFormatSurface);
                outputFormat.SetInteger(MediaFormat.KeyBitRate, inputFormat.GetInteger(MediaFormat.KeyBitRate));
                outputFormat.SetInteger(MediaFormat.KeyFrameRate, inputFormat.GetInteger(MediaFormat.KeyFrameRate));
                outputFormat.SetInteger(MediaFormat.KeyIFrameInterval, inputFormat.GetInteger(MediaFormat.KeyIFrameInterval));
                outputData.setMediaFormat(outputFormat);
                encoder = MediaCodec.CreateEncoderByType(MIME_TYPE);
                encoder.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
                inputSurface = new InputSurface(encoder.CreateInputSurface());
                inputSurface.MakeCurrent();
                encoder.Start();
                // OutputSurface uses the EGL context created by InputSurface.
                decoder       = MediaCodec.CreateDecoderByType(MIME_TYPE);
                outputSurface = new OutputSurface();
                outputSurface.ChangeFragmentShader(FRAGMENT_SHADER);
                decoder.Configure(inputFormat, outputSurface.Surface, null, 0);
                decoder.Start();
                editVideoData(inputData, decoder, outputSurface, inputSurface, encoder, outputData);
            } finally {
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "shutting down encoder, decoder");
                }
                if (outputSurface != null)
                {
                    outputSurface.Release();
                }
                if (inputSurface != null)
                {
                    inputSurface.Release();
                }
                if (encoder != null)
                {
                    encoder.Stop();
                    encoder.Release();
                }
                if (decoder != null)
                {
                    decoder.Stop();
                    decoder.Release();
                }
            }
            return outputData;
        }
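The FRAGMENT_SHADER constant passed to ChangeFragmentShader() is not shown in this snippet. As a rough sketch of what it could contain (the exact source is an assumption, not taken from the original), a minimal external-texture shader that swizzles the color channels is enough to perform the kind of transformation the header comment describes:

        // Hypothetical shader source; any GLSL operating on a samplerExternalOES
        // texture (required for SurfaceTexture input) could be substituted here.
        private const string FRAGMENT_SHADER =
                "#extension GL_OES_EGL_image_external : require\n" +
                "precision mediump float;\n" +
                "varying vec2 vTextureCoord;\n" +
                "uniform samplerExternalOES sTexture;\n" +
                "void main() {\n" +
                "    // swap color channels so the edit is clearly visible\n" +
                "    gl_FragColor = texture2D(sTexture, vTextureCoord).gbra;\n" +
                "}\n";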
Code Example #2
        // For audio: http://stackoverflow.com/questions/22673011/how-to-extract-pcm-samples-from-mediacodec-decoders-output

        private void EncodeCameraToMp4()
        {
            try {
                prepareMediaPlayer();
                prepareEncoder();
                _inputSurface.MakeCurrent();
                prepareSurfaceTexture();

                _mediaPlayer.Start();

                var st         = _outputSurface.SurfaceTexture;
                int frameCount = 0;

                bool curShad     = false;
                bool isCompleted = false;
                _mediaPlayer.Completion += (object sender, System.EventArgs e) =>
                {
                    isCompleted = true;
                };
                while (!isCompleted)
                {
                    // Feed any pending encoder output into the muxer.

                    drainEncoder(false);

                    if ((frameCount % _fps) == 0)
                    {
                        curShad = !curShad;
                    }

                    // Flip between the RGB and BGR shaders to quickly demonstrate that the fragment shader is being applied
                    if (curShad)
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER1);
                    }
                    else
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER2);
                    }

                    frameCount++;

                    // Acquire a new frame of input, and render it to the Surface.  If we had a
                    // GLSurfaceView we could switch EGL contexts and call drawImage() a second
                    // time to render it on screen.  The texture can be shared between contexts by
                    // passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
                    // argument.
                    if (!_outputSurface.AwaitNewImage())
                    {
                        break;
                    }
                    _outputSurface.DrawImage();

                    // Set the presentation time stamp from the SurfaceTexture's time stamp.  This
                    // will be used by MediaMuxer to set the PTS in the video.

                    _inputSurface.SetPresentationTime(st.Timestamp);

                    // Submit it to the encoder.  The eglSwapBuffers call will block if the input
                    // is full, which would be bad if it stayed full until we dequeued an output
                    // buffer (which we can't do, since we're stuck here).  So long as we fully drain
                    // the encoder before supplying additional input, the system guarantees that we
                    // can supply another frame without blocking.
                    if (VERBOSE)
                    {
                        Log.Debug(TAG, "sending frame to encoder");
                    }
                    _inputSurface.SwapBuffers();
                }

                // send end-of-stream to encoder, and drain remaining output
                drainEncoder(true);
            } finally {
                // release everything we grabbed
                releaseMediaPlayer();
                releaseEncoder();
                releaseSurfaceTexture();
            }
        }
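drainEncoder() is called throughout the loop above but is not shown. Below is a minimal sketch of what it likely does, assuming the Xamarin.Android MediaCodec/MediaMuxer bindings and fields named _encoder, _muxer, _bufferInfo, _trackIndex and _muxerStarted (the field names are assumptions, not from the original):

        private void drainEncoder(bool endOfStream)
        {
            const int TIMEOUT_USEC = 10000;
            if (endOfStream)
            {
                // Tells the encoder that no more frames will arrive on its input Surface.
                _encoder.SignalEndOfInputStream();
            }
            while (true)
            {
                int status = _encoder.DequeueOutputBuffer(_bufferInfo, TIMEOUT_USEC);
                if (status == (int)MediaCodecInfoState.TryAgainLater)
                {
                    if (!endOfStream)
                    {
                        break;          // no output yet; go feed more input
                    }
                    // At EOS, keep looping until the EndOfStream flag shows up.
                }
                else if (status == (int)MediaCodecInfoState.OutputFormatChanged)
                {
                    // Happens exactly once, before any encoded data; now the muxer can start.
                    _trackIndex   = _muxer.AddTrack(_encoder.OutputFormat);
                    _muxer.Start();
                    _muxerStarted = true;
                }
                else if (status >= 0)
                {
                    var encodedData = _encoder.GetOutputBuffer(status);
                    if ((_bufferInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
                    {
                        // Codec config data was already handed to the muxer via the format.
                        _bufferInfo.Size = 0;
                    }
                    if (_bufferInfo.Size > 0 && _muxerStarted)
                    {
                        encodedData.Position(_bufferInfo.Offset);
                        encodedData.Limit(_bufferInfo.Offset + _bufferInfo.Size);
                        _muxer.WriteSampleData(_trackIndex, encodedData, _bufferInfo);
                    }
                    _encoder.ReleaseOutputBuffer(status, false);
                    if ((_bufferInfo.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                    {
                        break;          // all output drained
                    }
                }
            }
        }

The design point the loop comments stress is that drainEncoder(false) runs before each frame is submitted, so the encoder's output queue is always empty when SwapBuffers() is called and the call cannot block indefinitely on a full input.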
Code Example #3
        private void encodeCameraToMpeg()
        {
            // arbitrary but popular values
            int encWidth   = 640;
            int encHeight  = 480;
            int encBitRate = 6000000;                  // bps (6 Mbps)

            Log.Debug(TAG, MIME_TYPE + " output " + encWidth + "x" + encHeight + " @" + encBitRate);

            try {
                prepareCamera(encWidth, encHeight);
                prepareEncoder(encWidth, encHeight, encBitRate);
                _inputSurface.MakeCurrent();
                prepareSurfaceTexture();

                _camera.StartPreview();

                long startWhen  = JavaSystem.NanoTime();
                long desiredEnd = startWhen + DURATION_SEC * 1000000000L;
                var  st         = _outputSurface.SurfaceTexture;
                int  frameCount = 0;

                var curShad = false;

                while (JavaSystem.NanoTime() < desiredEnd)
                {
                    // Feed any pending encoder output into the muxer.
                    drainEncoder(false);

                    if ((frameCount % 24) == 0)
                    {
                        curShad = !curShad;
                    }

                    // As in the previous example, flip between the two shaders to make the edit visible
                    if (curShad)
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER1);
                    }
                    else
                    {
                        _outputSurface.ChangeFragmentShader(FRAGMENT_SHADER2);
                    }

                    frameCount++;

                    // Acquire a new frame of input, and render it to the Surface.  If we had a
                    // GLSurfaceView we could switch EGL contexts and call drawImage() a second
                    // time to render it on screen.  The texture can be shared between contexts by
                    // passing the GLSurfaceView's EGLContext as eglCreateContext()'s share_context
                    // argument.
                    _outputSurface.AwaitNewImage();
                    _outputSurface.DrawImage();

                    // Set the presentation time stamp from the SurfaceTexture's time stamp.  This
                    // will be used by MediaMuxer to set the PTS in the video.
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "present: " +
                                  ((st.Timestamp - startWhen) / 1000000.0) + "ms");
                    }
                    _inputSurface.SetPresentationTime(st.Timestamp);

                    // Submit it to the encoder.  The eglSwapBuffers call will block if the input
                    // is full, which would be bad if it stayed full until we dequeued an output
                    // buffer (which we can't do, since we're stuck here).  So long as we fully drain
                    // the encoder before supplying additional input, the system guarantees that we
                    // can supply another frame without blocking.
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "sending frame to encoder");
                    }
                    _inputSurface.SwapBuffers();
                }

                // send end-of-stream to encoder, and drain remaining output
                drainEncoder(true);
            } finally {
                // release everything we grabbed
                releaseCamera();
                releaseEncoder();
                releaseSurfaceTexture();
            }
        }
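prepareEncoder() is also not shown here. A plausible minimal version, mirroring the encoder setup from Code Example #1 and assuming the same field names as the drainEncoder() sketch above (FRAME_RATE, IFRAME_INTERVAL and _outputPath are assumed names that do not appear in the original):

        private void prepareEncoder(int width, int height, int bitRate)
        {
            _bufferInfo = new MediaCodec.BufferInfo();

            MediaFormat format = MediaFormat.CreateVideoFormat(MIME_TYPE, width, height);
            format.SetInteger(MediaFormat.KeyColorFormat, (int)MediaCodecInfo.CodecCapabilities.COLORFormatSurface);
            format.SetInteger(MediaFormat.KeyBitRate, bitRate);
            format.SetInteger(MediaFormat.KeyFrameRate, FRAME_RATE);           // e.g. 30
            format.SetInteger(MediaFormat.KeyIFrameInterval, IFRAME_INTERVAL); // e.g. 5 seconds

            _encoder = MediaCodec.CreateEncoderByType(MIME_TYPE);
            _encoder.Configure(format, null, null, MediaCodecConfigFlags.Encode);
            _inputSurface = new InputSurface(_encoder.CreateInputSurface());
            _encoder.Start();

            // The muxer gets its track in drainEncoder(), once the encoder has
            // reported its actual output format.
            _muxer        = new MediaMuxer(_outputPath, MuxerOutputType.Mpeg4);
            _trackIndex   = -1;
            _muxerStarted = false;
        }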