/**
 * Checks the video file to see if the contents match our expectations. We decode the
 * video to a Surface and check the pixels with GL.
 */
private void checkVideoFile(VideoChunks inputData)
{
    OutputSurface surface = null;
    MediaCodec decoder = null;

    mLargestColorDelta = -1;
    if (AppSettings.Logging.SendToConsole)
    {
        Log.Debug(TAG, "checkVideoFile");
    }
    try
    {
        surface = new OutputSurface(mWidth, mHeight);
        MediaFormat format = inputData.getMediaFormat();
        decoder = MediaCodec.CreateDecoderByType(MIME_TYPE);
        decoder.Configure(format, surface.Surface, null, 0);
        decoder.Start();
        int badFrames = checkVideoData(inputData, decoder, surface);
        if (badFrames != 0)
        {
            fail("Found " + badFrames + " bad frames");
        }
    }
    finally
    {
        if (surface != null)
        {
            surface.Release();
        }
        if (decoder != null)
        {
            decoder.Stop();
            decoder.Release();
        }
        Log.Info(TAG, "Largest color delta: " + mLargestColorDelta);
    }
}
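// fail(), assertEquals(), and assertTrue() are JUnit-style helpers carried over from
// the CTS original; they aren't defined in this section. A minimal hypothetical sketch,
// assuming the same Java.Lang.RuntimeException-based failure model that editVideoData()
// below already uses:
private static void fail(string message)
{
    throw new RuntimeException(message);
}

private static void assertEquals(string message, long expected, long actual)
{
    if (expected != actual)
    {
        fail(message + ": expected " + expected + ", got " + actual);
    }
}

private static void assertTrue(bool condition)
{
    if (!condition)
    {
        fail("assertTrue failed");
    }
}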
/**
 * Edits a video file, saving the contents to a new file. This involves decoding and
 * re-encoding, not to mention conversions between YUV and RGB, and so may be lossy.
 * <p>
 * If we recognize the decoded format we can do this in Java code using the ByteBuffer[]
 * output, but it's not practical to support all OEM formats. By using a SurfaceTexture
 * for output and a Surface for input, we can avoid issues with obscure formats and can
 * use a fragment shader to do transformations.
 */
private VideoChunks editVideoFile(VideoChunks inputData)
{
    if (AppSettings.Logging.SendToConsole)
    {
        Log.Debug(TAG, "editVideoFile " + mWidth + "x" + mHeight);
    }
    VideoChunks outputData = new VideoChunks();
    MediaCodec decoder = null;
    MediaCodec encoder = null;
    InputSurface inputSurface = null;
    OutputSurface outputSurface = null;
    try
    {
        MediaFormat inputFormat = inputData.getMediaFormat();

        // Create an encoder format that matches the input format. (Might be able to just
        // re-use the format used to generate the video, since we want it to be the same.)
        MediaFormat outputFormat = MediaFormat.CreateVideoFormat(MIME_TYPE, mWidth, mHeight);
        outputFormat.SetInteger(MediaFormat.KeyColorFormat,
            (int)MediaCodecCapabilities.Formatsurface);
        outputFormat.SetInteger(MediaFormat.KeyBitRate,
            inputFormat.GetInteger(MediaFormat.KeyBitRate));
        outputFormat.SetInteger(MediaFormat.KeyFrameRate,
            inputFormat.GetInteger(MediaFormat.KeyFrameRate));
        outputFormat.SetInteger(MediaFormat.KeyIFrameInterval,
            inputFormat.GetInteger(MediaFormat.KeyIFrameInterval));
        outputData.setMediaFormat(outputFormat);

        encoder = MediaCodec.CreateEncoderByType(MIME_TYPE);
        encoder.Configure(outputFormat, null, null, MediaCodecConfigFlags.Encode);
        inputSurface = new InputSurface(encoder.CreateInputSurface());
        inputSurface.MakeCurrent();
        encoder.Start();

        // OutputSurface uses the EGL context created by InputSurface.
        decoder = MediaCodec.CreateDecoderByType(MIME_TYPE);
        outputSurface = new OutputSurface();
        outputSurface.ChangeFragmentShader(FRAGMENT_SHADER);
        decoder.Configure(inputFormat, outputSurface.Surface, null, 0);
        decoder.Start();

        editVideoData(inputData, decoder, outputSurface, inputSurface, encoder, outputData);
    }
    finally
    {
        if (AppSettings.Logging.SendToConsole)
        {
            Log.Debug(TAG, "shutting down encoder, decoder");
        }
        if (outputSurface != null)
        {
            outputSurface.Release();
        }
        if (inputSurface != null)
        {
            inputSurface.Release();
        }
        if (encoder != null)
        {
            encoder.Stop();
            encoder.Release();
        }
        if (decoder != null)
        {
            decoder.Stop();
            decoder.Release();
        }
    }
    return outputData;
}
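// FRAGMENT_SHADER, applied via outputSurface.ChangeFragmentShader() above, is not
// defined in this section. In the CTS original the "edit" is a simple color-channel
// swap done with a swizzle. A sketch, assuming the vTextureCoord/sTexture names used
// by OutputSurface's default external-texture shader:
private const string FRAGMENT_SHADER =
    "#extension GL_OES_EGL_image_external : require\n" +
    "precision mediump float;\n" +
    "varying vec2 vTextureCoord;\n" +
    "uniform samplerExternalOES sTexture;\n" +
    "void main() {\n" +
    "  gl_FragColor = texture2D(sTexture, vTextureCoord).gbra;\n" +  // swap the channels
    "}\n";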
/**
 * Tests editing of a video file with GL.
 */
private void videoEditTest()
{
    VideoChunks sourceChunks = new VideoChunks();

    if (!generateVideoFile(sourceChunks))
    {
        // No AVC codec? Fail silently.
        return;
    }

    if (DEBUG_SAVE_FILE)
    {
        // Save a copy to a file. It's a raw H.264 elementary stream, not a .mp4, so
        // not all video players will know what to do with it.
        string dirName = _workingDirectory;
        string fileName = "vedit1_" + mWidth + "x" + mHeight + ".264";
        var fullPathFile = System.IO.Path.Combine(dirName, fileName);
        if (System.IO.File.Exists(fullPathFile))
        {
            System.IO.File.Delete(fullPathFile);
        }
        sourceChunks.saveToFile(fullPathFile);
    }

    VideoChunks destChunks = editVideoFile(sourceChunks);

    if (DEBUG_SAVE_FILE)
    {
        string dirName = _workingDirectory;
        string fileName = "vedit2_" + mWidth + "x" + mHeight + ".264";
        var fullPathFile = System.IO.Path.Combine(dirName, fileName);
        if (System.IO.File.Exists(fullPathFile))
        {
            System.IO.File.Delete(fullPathFile);
        }
        destChunks.saveToFile(fullPathFile);
    }

    checkVideoFile(destChunks);
}
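// VideoChunks is this test's in-memory "file": the encoded frames plus the per-chunk
// flags and timestamps that MediaExtractor would normally provide. It isn't defined in
// this section; the following is a hypothetical minimal sketch of just the members the
// code above relies on (assumes using System.Collections.Generic and Java.Nio):
private class VideoChunks
{
    private MediaFormat mMediaFormat;
    private readonly List<byte[]> mChunks = new List<byte[]>();
    private readonly List<int> mFlags = new List<int>();
    private readonly List<long> mTimes = new List<long>();

    public void setMediaFormat(MediaFormat format) { mMediaFormat = format; }
    public MediaFormat getMediaFormat() { return mMediaFormat; }
    public int NumChunks { get { return mChunks.Count; } }

    public void addChunk(ByteBuffer buf, int flags, long time)
    {
        // Copy the buffer contents out; the codec reclaims the buffer after release.
        byte[] data = new byte[buf.Remaining()];
        buf.Get(data);
        mChunks.Add(data);
        mFlags.Add(flags);
        mTimes.Add(time);
    }

    public void getChunkData(int chunk, ByteBuffer dest) { dest.Put(mChunks[chunk]); }
    public int getChunkFlags(int chunk) { return mFlags[chunk]; }
    public long getChunkTime(int chunk) { return mTimes[chunk]; }

    public void saveToFile(string fileName)
    {
        // Writes the raw elementary stream; see the caveat in videoEditTest().
        using (var stream = System.IO.File.OpenWrite(fileName))
        {
            foreach (byte[] chunk in mChunks)
            {
                stream.Write(chunk, 0, chunk.Length);
            }
        }
    }
}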
/**
 * Checks the video data.
 *
 * @return the number of bad frames
 */
private int checkVideoData(VideoChunks inputData, MediaCodec decoder, OutputSurface surface)
{
    const int TIMEOUT_USEC = 1000;
    ByteBuffer[] decoderInputBuffers = decoder.GetInputBuffers();
    ByteBuffer[] decoderOutputBuffers = decoder.GetOutputBuffers();
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    int inputChunk = 0;
    int checkIndex = 0;
    int badFrames = 0;
    bool outputDone = false;
    bool inputDone = false;

    while (!outputDone)
    {
        if (AppSettings.Logging.SendToConsole)
        {
            Log.Debug(TAG, "check loop");
        }

        // Feed more data to the decoder.
        if (!inputDone)
        {
            int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
            if (inputBufIndex >= 0)
            {
                if (inputChunk == inputData.NumChunks)
                {
                    // End of stream -- send empty frame with EOS flag set.
                    decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L,
                        MediaCodecBufferFlags.EndOfStream);
                    inputDone = true;
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "sent input EOS");
                    }
                }
                else
                {
                    // Copy a chunk of input to the decoder. The first chunk should have
                    // the BUFFER_FLAG_CODEC_CONFIG flag set.
                    ByteBuffer inputBuf = decoderInputBuffers[inputBufIndex];
                    inputBuf.Clear();
                    inputData.getChunkData(inputChunk, inputBuf);
                    int flags = inputData.getChunkFlags(inputChunk);
                    long time = inputData.getChunkTime(inputChunk);
                    decoder.QueueInputBuffer(inputBufIndex, 0, inputBuf.Position(), time,
                        (MediaCodecBufferFlags)flags);
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "submitted frame " + inputChunk + " to dec, size=" +
                            inputBuf.Position() + " flags=" + flags);
                    }
                    inputChunk++;
                }
            }
            else
            {
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "input buffer not available");
                }
            }
        }

        if (!outputDone)
        {
            int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // no output available yet
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "no output from decoder available");
                }
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                decoderOutputBuffers = decoder.GetOutputBuffers();
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "decoder output buffers changed");
                }
            }
            else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                MediaFormat newFormat = decoder.OutputFormat;
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "decoder output format changed: " + newFormat);
                }
            }
            else if (decoderStatus < 0)
            {
                fail("unexpected result from decoder.DequeueOutputBuffer: " + decoderStatus);
            }
            else // decoderStatus >= 0
            {
                ByteBuffer decodedData = decoderOutputBuffers[decoderStatus];
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "surface decoder given buffer " + decoderStatus +
                        " (size=" + info.Size + ")");
                }
                if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                {
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "output EOS");
                    }
                    outputDone = true;
                }

                bool doRender = (info.Size != 0);

                // As soon as we call ReleaseOutputBuffer, the buffer will be forwarded
                // to SurfaceTexture to convert to a texture. The API doesn't guarantee
                // that the texture will be available before the call returns, so we
                // need to wait for the onFrameAvailable callback to fire.
                decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                if (doRender)
                {
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "awaiting frame " + checkIndex);
                    }
                    assertEquals("Wrong time stamp", computePresentationTime(checkIndex),
                        info.PresentationTimeUs);
                    surface.AwaitNewImage();
                    surface.DrawImage();
                    if (!checkSurfaceFrame(checkIndex++))
                    {
                        badFrames++;
                    }
                }
            }
        }
    }
    return badFrames;
}
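// computePresentationTime() is used both when generating frames (scaled to nanoseconds
// for SetPresentationTime) and when verifying decoded timestamps above, so the two
// sides must agree. A minimal sketch returning microseconds at FRAME_RATE fps; the
// small constant offset is arbitrary and just keeps the first timestamp nonzero:
private static long computePresentationTime(int frameIndex)
{
    return 123 + frameIndex * 1000000L / FRAME_RATE;
}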
/**
 * Edits a stream of video data.
 */
private void editVideoData(VideoChunks inputData, MediaCodec decoder,
    OutputSurface outputSurface, InputSurface inputSurface, MediaCodec encoder,
    VideoChunks outputData)
{
    const int TIMEOUT_USEC = 10000;
    ByteBuffer[] decoderInputBuffers = decoder.GetInputBuffers();
    ByteBuffer[] encoderOutputBuffers = encoder.GetOutputBuffers();
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    int inputChunk = 0;
    int outputCount = 0;
    bool outputDone = false;
    bool inputDone = false;
    bool decoderDone = false;

    while (!outputDone)
    {
        if (AppSettings.Logging.SendToConsole)
        {
            Log.Debug(TAG, "edit loop");
        }

        // Feed more data to the decoder.
        if (!inputDone)
        {
            int inputBufIndex = decoder.DequeueInputBuffer(TIMEOUT_USEC);
            if (inputBufIndex >= 0)
            {
                if (inputChunk == inputData.NumChunks)
                {
                    // End of stream -- send empty frame with EOS flag set.
                    decoder.QueueInputBuffer(inputBufIndex, 0, 0, 0L,
                        MediaCodecBufferFlags.EndOfStream);
                    inputDone = true;
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "sent input EOS (with zero-length frame)");
                    }
                }
                else
                {
                    // Copy a chunk of input to the decoder. The first chunk should have
                    // the BUFFER_FLAG_CODEC_CONFIG flag set.
                    ByteBuffer inputBuf = decoderInputBuffers[inputBufIndex];
                    inputBuf.Clear();
                    inputData.getChunkData(inputChunk, inputBuf);
                    int flags = inputData.getChunkFlags(inputChunk);
                    long time = inputData.getChunkTime(inputChunk);
                    decoder.QueueInputBuffer(inputBufIndex, 0, inputBuf.Position(), time,
                        (MediaCodecBufferFlags)flags);
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "submitted frame " + inputChunk + " to dec, size=" +
                            inputBuf.Position() + " flags=" + flags);
                    }
                    inputChunk++;
                }
            }
            else
            {
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "input buffer not available");
                }
            }
        }

        // Assume output is available. Loop until both assumptions are false.
        bool decoderOutputAvailable = !decoderDone;
        bool encoderOutputAvailable = true;
        while (decoderOutputAvailable || encoderOutputAvailable)
        {
            // Start by draining any pending output from the encoder. It's important to
            // do this before we try to stuff any more data in.
            int encoderStatus = encoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (encoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // no output available yet
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "no output from encoder available");
                }
                encoderOutputAvailable = false;
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                encoderOutputBuffers = encoder.GetOutputBuffers();
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "encoder output buffers changed");
                }
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                MediaFormat newFormat = encoder.OutputFormat;
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "encoder output format changed: " + newFormat);
                }
            }
            else if (encoderStatus < 0)
            {
                fail("unexpected result from encoder.DequeueOutputBuffer: " + encoderStatus);
            }
            else // encoderStatus >= 0
            {
                ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
                if (encodedData == null)
                {
                    fail("encoderOutputBuffer " + encoderStatus + " was null");
                }

                // Write the data to the output "file".
                if (info.Size != 0)
                {
                    encodedData.Position(info.Offset);
                    encodedData.Limit(info.Offset + info.Size);
                    outputData.addChunk(encodedData, (int)info.Flags, info.PresentationTimeUs);
                    outputCount++;
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "encoder output " + info.Size + " bytes");
                    }
                }
                outputDone = (info.Flags & MediaCodecBufferFlags.EndOfStream) != 0;
                encoder.ReleaseOutputBuffer(encoderStatus, false);
            }
            if (encoderStatus != (int)MediaCodecInfoState.TryAgainLater)
            {
                // Continue attempts to drain output.
                continue;
            }

            // Encoder is drained, check to see if we've got a new frame of output from
            // the decoder. (The output is going to a Surface, rather than a ByteBuffer,
            // but we still get information through BufferInfo.)
            if (!decoderDone)
            {
                int decoderStatus = decoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
                if (decoderStatus == (int)MediaCodecInfoState.TryAgainLater)
                {
                    // no output available yet
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "no output from decoder available");
                    }
                    decoderOutputAvailable = false;
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
                {
                    //decoderOutputBuffers = decoder.GetOutputBuffers();
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "decoder output buffers changed (we don't care)");
                    }
                }
                else if (decoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
                {
                    // expected before first buffer of data
                    MediaFormat newFormat = decoder.OutputFormat;
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "decoder output format changed: " + newFormat);
                    }
                }
                else if (decoderStatus < 0)
                {
                    fail("unexpected result from decoder.DequeueOutputBuffer: " + decoderStatus);
                }
                else // decoderStatus >= 0
                {
                    if (AppSettings.Logging.SendToConsole)
                    {
                        Log.Debug(TAG, "surface decoder given buffer " + decoderStatus +
                            " (size=" + info.Size + ")");
                    }
                    // The ByteBuffers are null references, but we still get a nonzero
                    // size for the decoded data.
                    bool doRender = (info.Size != 0);

                    // As soon as we call ReleaseOutputBuffer, the buffer will be forwarded
                    // to SurfaceTexture to convert to a texture. The API doesn't
                    // guarantee that the texture will be available before the call
                    // returns, so we need to wait for the onFrameAvailable callback to
                    // fire. If we don't wait, we risk rendering from the previous frame.
                    decoder.ReleaseOutputBuffer(decoderStatus, doRender);
                    if (doRender)
                    {
                        // This waits for the image and renders it after it arrives.
                        if (AppSettings.Logging.SendToConsole)
                        {
                            Log.Debug(TAG, "awaiting frame");
                        }
                        outputSurface.AwaitNewImage();
                        outputSurface.DrawImage();

                        // Send it to the encoder.
                        inputSurface.SetPresentationTime(info.PresentationTimeUs * 1000);
                        if (AppSettings.Logging.SendToConsole)
                        {
                            Log.Debug(TAG, "swapBuffers");
                        }
                        inputSurface.SwapBuffers();
                    }
                    if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                    {
                        // forward decoder EOS to encoder
                        if (AppSettings.Logging.SendToConsole)
                        {
                            Log.Debug(TAG, "signaling input EOS");
                        }
                        if (WORK_AROUND_BUGS)
                        {
                            // Bail early, possibly dropping a frame.
                            return;
                        }
                        else
                        {
                            encoder.SignalEndOfInputStream();
                        }
                    }
                }
            }
        }
    }

    if (inputChunk != outputCount)
    {
        throw new RuntimeException("frame lost: " + inputChunk + " in, " +
            outputCount + " out");
    }
}
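// The class-level fields referenced throughout this section (TAG, mWidth, mHeight,
// mBitRate, mLargestColorDelta, _workingDirectory) and the constants below are defined
// elsewhere in the class. A hypothetical set of the constants, with values mirroring
// the CTS original where known:
private const string MIME_TYPE = "video/avc";  // H.264/AVC elementary stream
private const int FRAME_RATE = 15;             // frames per second
private const int IFRAME_INTERVAL = 10;        // seconds between I-frames
private const int NUM_FRAMES = 30;             // number of frames to generate
private const bool WORK_AROUND_BUGS = false;   // avoid fatal codec bugs on some devices
private const bool DEBUG_SAVE_FILE = false;    // save copies of the elementary streams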
/**
 * Generates video frames, feeds them into the encoder, and writes the output to the
 * VideoChunks instance.
 */
private void generateVideoData(MediaCodec encoder, InputSurface inputSurface,
    VideoChunks output)
{
    const int TIMEOUT_USEC = 10000; // same timeout as editVideoData
    ByteBuffer[] encoderOutputBuffers = encoder.GetOutputBuffers();
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    int generateIndex = 0;
    int outputCount = 0;

    // Loop until the output side is done.
    bool inputDone = false;
    bool outputDone = false;
    while (!outputDone)
    {
        if (AppSettings.Logging.SendToConsole)
        {
            Log.Debug(TAG, "gen loop");
        }

        // If we're not done submitting frames, generate a new one and submit it. The
        // eglSwapBuffers call will block if the input is full.
        if (!inputDone)
        {
            if (generateIndex == NUM_FRAMES)
            {
                // Send an empty frame with the end-of-stream flag set.
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "signaling input EOS");
                }
                if (WORK_AROUND_BUGS)
                {
                    // Might drop a frame, but at least we won't crash mediaserver.
                    try
                    {
                        Thread.Sleep(500);
                    }
                    catch (InterruptedException ie)
                    {
                    }
                    outputDone = true;
                }
                else
                {
                    encoder.SignalEndOfInputStream();
                }
                inputDone = true;
            }
            else
            {
                generateSurfaceFrame(generateIndex);
                inputSurface.SetPresentationTime(computePresentationTime(generateIndex) * 1000);
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "inputSurface swapBuffers");
                }
                inputSurface.SwapBuffers();
            }
            generateIndex++;
        }

        // Check for output from the encoder. If there's no output yet, we either need to
        // provide more input, or we need to wait for the encoder to work its magic. We
        // can't actually tell which is the case, so if we can't get an output buffer right
        // away we loop around and see if it wants more input.
        //
        // If we do find output, drain it all before supplying more input.
        while (true)
        {
            int encoderStatus = encoder.DequeueOutputBuffer(info, TIMEOUT_USEC);
            if (encoderStatus == (int)MediaCodecInfoState.TryAgainLater)
            {
                // no output available yet
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "no output from encoder available");
                }
                break; // out of while
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputBuffersChanged)
            {
                // not expected for an encoder
                encoderOutputBuffers = encoder.GetOutputBuffers();
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "encoder output buffers changed");
                }
            }
            else if (encoderStatus == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                // not expected for an encoder
                MediaFormat newFormat = encoder.OutputFormat;
                if (AppSettings.Logging.SendToConsole)
                {
                    Log.Debug(TAG, "encoder output format changed: " + newFormat);
                }
            }
            else if (encoderStatus < 0)
            {
                fail("unexpected result from encoder.DequeueOutputBuffer: " + encoderStatus);
            }
            else // encoderStatus >= 0
            {
                ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
                if (encodedData == null)
                {
                    fail("encoderOutputBuffer " + encoderStatus + " was null");
                }

                // Codec config flag must be set iff this is the first chunk of output. This
                // may not hold for all codecs, but it appears to be the case for video/avc.
                assertTrue((info.Flags & MediaCodecBufferFlags.CodecConfig) != 0 ||
                    outputCount != 0);

                if (info.Size != 0)
                {
                    // Adjust the ByteBuffer values to match BufferInfo.
                    encodedData.Position(info.Offset);
                    encodedData.Limit(info.Offset + info.Size);
                    output.addChunk(encodedData, (int)info.Flags, info.PresentationTimeUs);
                    outputCount++;
                }
                encoder.ReleaseOutputBuffer(encoderStatus, false);
                if ((info.Flags & MediaCodecBufferFlags.EndOfStream) != 0)
                {
                    outputDone = true;
                    break; // out of while
                }
            }
        }
    }

    assertEquals("Frame count", NUM_FRAMES + 1, outputCount);
}
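// generateSurfaceFrame() is not shown in this section. In the CTS original it draws a
// colored rectangle that moves across the frame using glScissor/glClear, so that
// checkSurfaceFrame() can later verify the pixels. A sketch, assuming
// using Android.Opengl; the TEST_* color constants are hypothetical here and would
// have to match whatever checkSurfaceFrame() tests against:
private const int TEST_R0 = 0, TEST_G0 = 136, TEST_B0 = 0;    // background color
private const int TEST_R1 = 236, TEST_G1 = 50, TEST_B1 = 186; // rectangle color

private void generateSurfaceFrame(int frameIndex)
{
    frameIndex %= 8;
    int startX, startY;
    if (frameIndex < 4)
    {
        // (0,0) is bottom-left in GL.
        startX = frameIndex * (mWidth / 4);
        startY = mHeight / 2;
    }
    else
    {
        startX = (7 - frameIndex) * (mWidth / 4);
        startY = 0;
    }

    // Clear to the background color, then draw the rectangle with a scissored clear.
    GLES20.GlClearColor(TEST_R0 / 255.0f, TEST_G0 / 255.0f, TEST_B0 / 255.0f, 1.0f);
    GLES20.GlClear(GLES20.GlColorBufferBit);
    GLES20.GlEnable(GLES20.GlScissorTest);
    GLES20.GlScissor(startX, startY, mWidth / 4, mHeight / 2);
    GLES20.GlClearColor(TEST_R1 / 255.0f, TEST_G1 / 255.0f, TEST_B1 / 255.0f, 1.0f);
    GLES20.GlClear(GLES20.GlColorBufferBit);
    GLES20.GlDisable(GLES20.GlScissorTest);
}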
/**
 * Generates a test video file, saving it as VideoChunks. We generate frames with GL to
 * avoid having to deal with multiple YUV formats.
 *
 * @return true on success, false on "soft" failure
 */
private bool generateVideoFile(VideoChunks output)
{
    if (AppSettings.Logging.SendToConsole)
    {
        Log.Debug(TAG, "generateVideoFile " + mWidth + "x" + mHeight);
    }
    MediaCodec encoder = null;
    InputSurface inputSurface = null;
    try
    {
        MediaCodecInfo codecInfo = selectCodec(MIME_TYPE);
        if (codecInfo == null)
        {
            // Don't fail CTS if they don't have an AVC codec (not here, anyway).
            Log.Error(TAG, "Unable to find an appropriate codec for " + MIME_TYPE);
            return false;
        }
        if (AppSettings.Logging.SendToConsole)
        {
            Log.Debug(TAG, "found codec: " + codecInfo.Name);
        }

        // We avoid the device-specific limitations on width and height by using values
        // that are multiples of 16, which all tested devices seem to be able to handle.
        MediaFormat format = MediaFormat.CreateVideoFormat(MIME_TYPE, mWidth, mHeight);

        // Set some properties. Failing to specify some of these can cause the MediaCodec
        // configure() call to throw an unhelpful exception.
        format.SetInteger(MediaFormat.KeyColorFormat, (int)MediaCodecCapabilities.Formatsurface);
        format.SetInteger(MediaFormat.KeyBitRate, mBitRate);
        format.SetInteger(MediaFormat.KeyFrameRate, FRAME_RATE);
        format.SetInteger(MediaFormat.KeyIFrameInterval, IFRAME_INTERVAL);
        if (AppSettings.Logging.SendToConsole)
        {
            Log.Debug(TAG, "format: " + format);
        }
        output.setMediaFormat(format);

        // Create a MediaCodec for the desired codec, then configure it as an encoder with
        // our desired properties.
        encoder = MediaCodec.CreateByCodecName(codecInfo.Name);
        encoder.Configure(format, null, null, MediaCodecConfigFlags.Encode);
        inputSurface = new InputSurface(encoder.CreateInputSurface());
        inputSurface.MakeCurrent();
        encoder.Start();

        generateVideoData(encoder, inputSurface, output);
    }
    finally
    {
        if (encoder != null)
        {
            if (AppSettings.Logging.SendToConsole)
            {
                Log.Debug(TAG, "releasing encoder");
            }
            encoder.Stop();
            encoder.Release();
            if (AppSettings.Logging.SendToConsole)
            {
                Log.Debug(TAG, "released encoder");
            }
        }
        if (inputSurface != null)
        {
            inputSurface.Release();
        }
    }
    return true;
}
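// selectCodec() is referenced above but not defined in this section. A minimal sketch
// using the index-based MediaCodecList API (deprecated on newer API levels but widely
// available) to find the first encoder that supports the requested MIME type:
private static MediaCodecInfo selectCodec(string mimeType)
{
    int numCodecs = MediaCodecList.CodecCount;
    for (int i = 0; i < numCodecs; i++)
    {
        MediaCodecInfo codecInfo = MediaCodecList.GetCodecInfoAt(i);
        if (!codecInfo.IsEncoder)
        {
            continue;
        }
        foreach (string type in codecInfo.GetSupportedTypes())
        {
            if (string.Equals(type, mimeType, StringComparison.OrdinalIgnoreCase))
            {
                return codecInfo;
            }
        }
    }
    return null; // no matching encoder; the caller treats this as a soft failure
}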