// Returns the index of the first unlocked surface in the pool,
// or MFX_ERR_NOT_FOUND (cast to int) if every surface is in use.
static int GetFreeSurfaceIndex(mfxFrameSurface1 *pSurfacesPool, int n)
{
    for (int i = 0; i < n; i++)
    {
        if (0 == pSurfacesPool[i].Data.Locked)
        {
            return i;
        }
    }
    return (int)mfxStatus.MFX_ERR_NOT_FOUND;
}
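// Usage sketch (illustrative only, not part of the original class): the return
// value doubles as an index or an error sentinel, so call sites must check it
// before indexing, as done throughout this file. 'pool' and 'n' stand in for
// one of the surface pools used below.
static mfxFrameSurface1 *GetFreeSurfaceOrThrow(mfxFrameSurface1 *pool, int n)
{
    int nIndex = GetFreeSurfaceIndex(pool, n);
    if (nIndex == (int)mfxStatus.MFX_ERR_NOT_FOUND)
        throw new QuickSyncException("cannot find free surface", mfxStatus.MFX_ERR_NOT_FOUND);
    return &pool[nIndex]; // safe: 0 <= nIndex < n
}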
/// <returns>
/// true: keep calling me
/// false: this phase is done
/// </returns>
bool Flush2(mfxFrameSurface1 **frame)
{
    *frame = (mfxFrameSurface1 *)0;
    // mfxSyncPoint syncpD;
    mfxFrameSurface1 *pmfxOutSurface = (mfxFrameSurface1 *)0;
    // bool UseVPP = false;
    // if (UseVPP) return false;
#if false
    //
    // Stage 3: Retrieve the buffered VPP frames
    //
    //while (MFX_ERR_NONE <= sts)
    {
        int nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut); // Find free frame surface
        QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex2, "cannot find free surface");

        // Process a frame asynchronously (returns immediately)
        sts = mfxVPP->RunFrameVPPAsync(NULL, pmfxSurfaces2[nIndex2], NULL, &syncpV);
        if (MFX_ERR_MORE_DATA == sts)
        {
            return sts; // continue;
        }
        MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_SURFACE);
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
        //MSDK_BREAK_ON_ERROR(sts);

        sts = session.SyncOperation(syncpV, 60000); // Synchronize. Wait until frame processing is ready
        MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

        ++nFrame;
        if (bEnableOutput)
        {
            //sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
            //MSDK_BREAK_ON_ERROR(sts);
            *frame = pmfxSurfaces2[nIndex2];
            return sts;
            //printf("Frame number: %d\r", nFrame);
        }
    }
    // MFX_ERR_MORE_DATA indicates that all buffers have been fetched; exit in case of other errors
    //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    //MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
#endif
    return false; // nothing is buffered when VPP is disabled; this phase is done
}
/// <summary>Get frames during the 2nd stage of flushing (managed wrapper over the native decoder).</summary>
/// <param name="frame">Receives a copy of the decoded surface, if one is available.</param>
/// <returns>
/// true: keep calling me
/// false: this phase is done
/// </returns>
public bool Flush2(out mfxFrameSurface1? frame)
{
    mfxFrameSurface1 *p = null;
    frame = null;

    var sts = NativeLLDecoderUnsafeNativeMethods.NativeDecoder_Flush2(h, &p);

    if (sts == mfxStatus.MFX_ERR_MORE_SURFACE) // decoder needs to be called again; it is consuming surfaces (SW mode)
    {
        return true;
    }
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(NativeLLDecoderUnsafeNativeMethods.NativeDecoder_Flush2));

    if (p != null)
    {
        frame = *p;
    }
    return true;
}
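// Consumption sketch (illustrative member, not part of the original class;
// 'onFrame' is an assumed sink): keep calling Flush2 until it reports that
// this phase is done.
void DrainManagedFlush2Example(Action<mfxFrameSurface1> onFrame)
{
    mfxFrameSurface1? frame;
    while (Flush2(out frame))     // true: keep calling
    {
        if (frame.HasValue)
            onFrame(frame.Value); // the wrapper already copied the struct out of native memory
    }
}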
/// <summary>
/// Place a buffered decoded frame in 'frame' if one is available (1st stage of flushing).
/// </summary>
/// <param name="frame">Where to put the frame.</param>
/// <returns>
/// true: keep calling me
/// false: this phase is done
/// </returns>
bool Flush1(mfxFrameSurface1 **frame)
{
    mfxStatus sts = 0;
    *frame = (mfxFrameSurface1 *)0;
    mfxSyncPoint syncpD, syncpV;
    mfxFrameSurface1 *pmfxOutSurface = (mfxFrameSurface1 *)0;
    int nIndex = 0;
    int nIndex2 = 0;

    //
    // Stage 2: Retrieve the buffered decoded frames
    //
    //while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_SURFACE == sts)
    {
        if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
        {
            Thread.Sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync
        }

        nIndex = GetFreeSurfaceIndex(pmfxSurfaces); // Find free frame surface
        QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex, "cannot find free surface");

        // Decode a frame asynchronously (returns immediately)
        fixed (mfxFrameSurface1 *p1 = &pmfxSurfaces[nIndex])
            sts = UnsafeNativeMethods.MFXVideoDECODE_DecodeFrameAsync(session, null, p1, &pmfxOutSurface, &syncpD);

        // Ignore warnings if output is available;
        // if no output and no action required, just repeat the DecodeFrameAsync call
        if (mfxStatus.MFX_ERR_NONE < sts && syncpD.sync_ptr != null)
        {
            sts = mfxStatus.MFX_ERR_NONE;
        }

        if (!enableVPP)
        {
            if (mfxStatus.MFX_ERR_NONE == sts)
            {
                sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpD, 60000); // Synchronize. Wait until decoded frame is ready
                *frame = pmfxOutSurface;
            }
        }
    }

    if (sts == mfxStatus.MFX_ERR_MORE_SURFACE) // decoder needs to be called again; it is consuming surfaces (SW mode)
    {
        return true;
    }
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    if (sts < 0)
    {
        throw new QuickSyncException("Flush1 fail", sts);
    }

    if (enableVPP && sts == mfxStatus.MFX_ERR_NONE)
    {
        // The free index must be found before pinning the surface below,
        // otherwise element 0 would be pinned regardless of which surface is free.
        nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2); // Find free frame surface
        QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex2, "cannot find free surface");
        fixed (mfxFrameSurface1 *p1 = &pmfxSurfaces2[nIndex2])
        {
            for (;;)
            {
                // Process a frame asynchronously (returns immediately)
                sts = UnsafeNativeMethods.MFXVideoVPP_RunFrameVPPAsync(session, pmfxOutSurface, p1, null, &syncpV);
                //if (sts == MFX_WRN_VIDEO_PARAM_CHANGED)
                //    ;
                if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr == null)
                {
                    // Repeat the call if warning and no output
                    if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
                    {
                        Thread.Sleep(1); // wait if device is busy
                    }
                }
                else if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr != null)
                {
                    sts = mfxStatus.MFX_ERR_NONE; // ignore warnings if output is available
                    break;
                }
                else
                {
                    break; // not a warning
                }

                // VPP needs more data; let the decoder decode another frame as input
                if (mfxStatus.MFX_ERR_MORE_DATA == sts)
                {
                    //continue;
                    return false;
                }
                else if (mfxStatus.MFX_ERR_MORE_SURFACE == sts)
                {
                    // Not relevant for the illustrated workload, therefore not handled.
                    // Relevant when VPP produces more frames at output than it consumes at input,
                    // e.g. framerate conversion 30 fps -> 60 fps.
                    //break;
                    return true;
                }
                else if (sts < 0)
                {
                    throw new QuickSyncException("RunFrameVPPAsync fail", sts);
                }
                // MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
                //MSDK_BREAK_ON_ERROR(sts);
            }

            if (mfxStatus.MFX_ERR_NONE == sts && syncpV.sync != IntPtr.Zero)
            {
                sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpV, 60000); // Synchronize. Wait until processed frame is ready
                *frame = p1;
            }
        }
    }
    return true;
    //}
    // MFX_ERR_MORE_DATA means the decoder is done with buffered frames; go to the VPP buffering loop. Exit in case of other errors.
    //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    //MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
}
/// <summary>
/// Place a decoded frame in 'frame' if one is available.
/// </summary>
/// <param name="frame">Where to put the frame.</param>
/// <returns>
/// true: keep calling me
/// false: this phase is done
/// </returns>
bool DecodeFrame(mfxFrameSurface1 **frame)
{
    mfxStatus sts = 0;
    *frame = (mfxFrameSurface1 *)0;
    mfxSyncPoint syncpD;
    mfxSyncPoint syncpV;
    mfxFrameSurface1 *pmfxOutSurface = (mfxFrameSurface1 *)0;
    int nIndex = 0;
    int nIndex2 = 0;

    //
    // Stage 1: Main decoding loop
    //
    if (mfxStatus.MFX_ERR_NONE <= sts || mfxStatus.MFX_ERR_MORE_DATA == sts || mfxStatus.MFX_ERR_MORE_SURFACE == sts)
    {
        if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
        {
            Thread.Sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync
        }

        //if (MFX_ERR_MORE_DATA == sts) {
        //    sts = ReadBitStreamData(&config.mfxBS, fSource); // Read more data into input bit stream
        //    MSDK_BREAK_ON_ERROR(sts);
        //}

    foo:
        if (mfxStatus.MFX_ERR_MORE_SURFACE == sts || mfxStatus.MFX_ERR_NONE == sts)
        {
            nIndex = GetFreeSurfaceIndex(pmfxSurfaces); // Find free frame surface
            QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex, "cannot find free surface");
        }

        // Decode a frame asynchronously (returns immediately).
        // - If the input bitstream contains multiple frames, DecodeFrameAsync will start
        //   decoding multiple frames and remove them from the bitstream.
        // It might have been better to use Marshal.XXX to pin this?
        fixed (mfxFrameSurface1 *p1 = &pmfxSurfaces[nIndex])
        fixed (mfxBitstream *p2 = &bitstream)
        {
            sts = UnsafeNativeMethods.MFXVideoDECODE_DecodeFrameAsync(session, p2, p1, &pmfxOutSurface, &syncpD);
            if (!enableVPP && mfxStatus.MFX_ERR_NONE == sts && syncpD.sync != IntPtr.Zero)
            {
                sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpD, 60000); // Synchronize. Wait until decoded frame is ready
                *frame = pmfxOutSurface;
            }
        }

        // Decode a frame asynchronously (returns immediately)
        //sts = mfxDEC->DecodeFrameAsync(&config.mfxBS, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncpD);
        //if (sts == MFX_WRN_VIDEO_PARAM_CHANGED)
        //    ;
        // I had a problem where I was getting a lot of these; I suspect that when you get this
        // return code and you sync anyway, it forces more of them.
        // Be sure to test this statement under VMware in software mode: it seems to uniquely
        // happen there that the decoder uses this code to ask for more internal surfaces.
        if (sts == mfxStatus.MFX_ERR_MORE_SURFACE)
        {
            goto foo;
        }

        // Ignore warnings if output is available;
        // if no output and no action required, just repeat the DecodeFrameAsync call
        if (mfxStatus.MFX_ERR_NONE < sts && syncpD.sync != IntPtr.Zero)
        {
            sts = mfxStatus.MFX_ERR_NONE;
        }
    }

    if (sts == mfxStatus.MFX_ERR_MORE_SURFACE) // decoder needs to be called again; it is consuming surfaces (SW mode)
    {
        return true;
    }
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    if (sts < 0)
    {
        throw new QuickSyncException("DecodeFrame fail", sts);
    }

    if (enableVPP && sts == mfxStatus.MFX_ERR_NONE)
    {
        // The free index must be found before pinning the surface below,
        // otherwise element 0 would be pinned regardless of which surface is free.
        nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2); // Find free frame surface
        QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex2, "cannot find free surface");
        fixed (mfxFrameSurface1 *p1 = &pmfxSurfaces2[nIndex2])
        {
        tryagain:
            // Process a frame asynchronously (returns immediately)
            sts = UnsafeNativeMethods.MFXVideoVPP_RunFrameVPPAsync(session, pmfxOutSurface, p1, null, &syncpV);
            //if (sts == MFX_WRN_VIDEO_PARAM_CHANGED)
            //    ;
            if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr == null)
            {
                // Repeat the call if warning and no output
                if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
                {
                    Thread.Sleep(1); // wait if device is busy
                    goto tryagain;
                }
            }
            else if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr != null)
            {
                sts = mfxStatus.MFX_ERR_NONE; // ignore warnings if output is available
            }
            else if (mfxStatus.MFX_ERR_MORE_DATA == sts) // VPP needs more data; let the decoder decode another frame as input
            {
                //continue;
                return false;
            }
            else if (mfxStatus.MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload, therefore not handled.
                // Relevant when VPP produces more frames at output than it consumes at input,
                // e.g. framerate conversion 30 fps -> 60 fps.
                //break;
                return true;
            }
            else if (sts < 0)
            {
                throw new QuickSyncException("RunFrameVPPAsync fail", sts);
            }
            // MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
            //MSDK_BREAK_ON_ERROR(sts);
            else if (mfxStatus.MFX_ERR_NONE == sts && syncpV.sync != IntPtr.Zero)
            {
                sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpV, 60000); // Synchronize. Wait until processed frame is ready
                *frame = p1;
                return true;
            }
        }
    }
    return true;
}
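// Drive-loop sketch (illustrative member, not part of the original class;
// 'HandleFrame' is an assumed sink). DecodeFrame, Flush1, and Flush2 form a
// three-phase pull protocol: each returns true while it may still yield frames.
// A 'false' from DecodeFrame means the bitstream needs more data; once the
// input is exhausted, fall through to the flush phases.
void DrainExample(Action<IntPtr> HandleFrame)
{
    mfxFrameSurface1 *frame;
    while (DecodeFrame(&frame))              // Phase 1: main decoding loop
        if (frame != null) HandleFrame((IntPtr)frame);
    while (Flush1(&frame))                   // Phase 2: frames buffered in the decoder
        if (frame != null) HandleFrame((IntPtr)frame);
    while (Flush2(&frame))                   // Phase 3: frames buffered in the VPP
        if (frame != null) HandleFrame((IntPtr)frame);
}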
/// <summary>Initializes a new instance of the <see cref="LowLevelTranscoderCSharp"/> class.</summary>
/// <param name="config">The configuration.</param>
/// <param name="impl">The implementation.</param>
/// <param name="forceSystemMemory">if set to <c>true</c>, force system memory.</param>
public LowLevelTranscoderCSharp(TranscoderConfiguration config, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO, bool forceSystemMemory = false)
{
    mfxStatus sts;

    mfxVideoParam mfxDecParams = config.decParams;
    mfxVideoParam mfxVPPParams = config.vppParams;
    mfxVideoParam mfxEncParams = config.encParams;

    session = new mfxSession();
    var ver = new mfxVersion() { Major = 1, Minor = 3 };
    fixed (mfxSession *s = &session)
        sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");

    //deviceSetup = new DeviceSetup(session, forceSystemMemory);

    // mfxVideoParam mfxDecParams = new mfxVideoParam();
    // mfxDecParams.mfx.CodecId = CodecId.MFX_CODEC_AVC;

    int bufsize = (int)1e6;
    mfxBS = (mfxBitstream *)MyAllocHGlobalAndZero(sizeof(mfxBitstream));
    mfxBS->Data = MyAllocHGlobalAndZero(bufsize);
    mfxBS->DataLength = 0;
    mfxBS->MaxLength = (uint)bufsize;
    mfxBS->DataOffset = 0;

    int outwidth = mfxDecParams.mfx.FrameInfo.CropW;
    int outheight = mfxDecParams.mfx.FrameInfo.CropH;

    // Query number of required surfaces for VPP
    //mfxFrameAllocRequest[] VPPRequest = new mfxFrameAllocRequest[2]; // [0] - in, [1] - out
    TwoMfxFrameAllocRequest VPPRequest;
    sts = UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf(session, &mfxVPPParams, (mfxFrameAllocRequest *)&VPPRequest);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "vpp.queryiosurf");

    // Query number of required surfaces for the decoder
    mfxFrameAllocRequest DecRequest;
    sts = UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf(session, &mfxDecParams, &DecRequest);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf));

    // Query number of required surfaces for the encoder
    mfxFrameAllocRequest EncRequest = new mfxFrameAllocRequest();
    sts = UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf(session, &mfxEncParams, &EncRequest);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf));

    // Determine the required number of surfaces for decoder output (VPP input)
    // and for VPP output (encoder input)
    nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest.In.NumFrameSuggested + mfxVPPParams.AsyncDepth;
    nSurfNumVPPEnc = EncRequest.NumFrameSuggested + VPPRequest.Out.NumFrameSuggested + mfxVPPParams.AsyncDepth;

    {
        Trace.Assert((mfxEncParams.IOPattern & IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY) != 0);
        Trace.Assert((mfxDecParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY) != 0);

        UInt16 width = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Width);
        UInt16 height = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Height);
        int bitsPerPixel = 12; // NV12 format is a 12 bits per pixel format
        int surfaceSize = width * height * bitsPerPixel / 8;

        var decVppSurfaceBuffers = Marshal.AllocHGlobal(surfaceSize * nSurfNumDecVPP);
        var vppEncSurfaceBuffers = Marshal.AllocHGlobal(surfaceSize * nSurfNumVPPEnc);

        pSurfaces = (mfxFrameSurface1 *)MyAllocHGlobalAndZero(sizeof(mfxFrameSurface1) * nSurfNumDecVPP);
        pSurfaces2 = (mfxFrameSurface1 *)MyAllocHGlobalAndZero(sizeof(mfxFrameSurface1) * nSurfNumVPPEnc);

        for (int i = 0; i < nSurfNumDecVPP; i++)
        {
            pSurfaces[i] = new mfxFrameSurface1();
            pSurfaces[i].Info = DecRequest.Info;
            pSurfaces[i].Data.Y_ptr = (byte *)decVppSurfaceBuffers + i * surfaceSize;
            pSurfaces[i].Data.U_ptr = pSurfaces[i].Data.Y_ptr + width * height;
            pSurfaces[i].Data.V_ptr = pSurfaces[i].Data.U_ptr + 1;
            pSurfaces[i].Data.Pitch = width;
        }
        for (int i = 0; i < nSurfNumVPPEnc; i++)
        {
            pSurfaces2[i] = new mfxFrameSurface1();
            pSurfaces2[i].Info = EncRequest.Info;
            pSurfaces2[i].Data.Y_ptr = (byte *)vppEncSurfaceBuffers + i * surfaceSize;
            pSurfaces2[i].Data.U_ptr = pSurfaces2[i].Data.Y_ptr + width * height;
            pSurfaces2[i].Data.V_ptr = pSurfaces2[i].Data.U_ptr + 1;
            pSurfaces2[i].Data.Pitch = width;
        }
    }

    sts = UnsafeNativeMethods.MFXVideoDECODE_Init(session, &mfxDecParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoDECODE_Init), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "decode.init");

    sts = UnsafeNativeMethods.MFXVideoENCODE_Init(session, &mfxEncParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Init), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "encode.init");

    sts = UnsafeNativeMethods.MFXVideoVPP_Init(session, &mfxVPPParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoVPP_Init), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "vpp.init");

    //mfxExtVPPDoNotUse zz;
    //zz.Header.BufferId = BufferId.MFX_EXTBUFF_VPP_DONOTUSE;
    //zz.Header.BufferSz = (uint)sizeof(mfxExtVPPDoUse);
    //mfxExtBuffer** pExtParamsVPPx = stackalloc mfxExtBuffer*[1];
    //pExtParamsVPPx[0] = (mfxExtBuffer*)&zz;
    //var t1 = stackalloc uint[100];
    //zz.AlgList = t1;
    //zz.NumAlg = 100;
    //mfxVideoParam par;
    //par.ExtParam = pExtParamsVPPx;
    //par.NumExtParam = 1;
    //sts = UnsafeNativeMethods.MFXVideoVPP_GetVideoParam(session, &par);
    //Trace.Assert(sts == mfxStatus.MFX_ERR_NONE);
    //Console.WriteLine(zz.NumAlg);
    //for (int i = 0; i < 10; i++)
    //{
    //    Console.WriteLine((BufferId)t1[i]);
    //}

    // Retrieve video parameters selected by the encoder.
    // - The BufferSizeInKB parameter is required to set the bitstream buffer size.
    mfxVideoParam par = new mfxVideoParam();
    sts = UnsafeNativeMethods.MFXVideoENCODE_GetVideoParam(session, &par);
    QuickSyncStatic.ThrowOnBadStatus(sts, "enc.getvideoparams");

    // Create task pool to improve asynchronous performance (greater GPU utilization).
    // Number of tasks that can be submitted before synchronizing is required:
    taskPoolSize = mfxEncParams.AsyncDepth;

    // Task* pTasks = stackalloc Task[taskPoolSize];
    pTasks = (Task *)MyAllocHGlobalAndZero(sizeof(Task) * taskPoolSize);
    // GCHandle gch3 = GCHandle.Alloc(pTasks, GCHandleType.Pinned);
    for (int i = 0; i < taskPoolSize; i++)
    {
        // Prepare Media SDK bitstream buffer
        pTasks[i].mfxBS.MaxLength = (uint)(par.mfx.BufferSizeInKB * 1000);
        pTasks[i].mfxBS.Data = MyAllocHGlobalAndZero((int)pTasks[i].mfxBS.MaxLength);
        Trace.Assert(pTasks[i].mfxBS.Data != IntPtr.Zero);
    }
}
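// Layout note (illustrative helper, not part of the original class): the
// bitsPerPixel = 12 figure above comes from the NV12 layout, a full-resolution
// luma plane followed by an interleaved, 2x2-subsampled chroma plane. This is
// also why U_ptr = Y_ptr + width*height and V_ptr = U_ptr + 1: each V sample
// sits one byte after its paired U sample in the interleaved plane.
static int Nv12SurfaceBytes(int width, int height)
{
    int lumaBytes = width * height;       // 8 bits per pixel
    int chromaBytes = width * height / 2; // U and V interleaved, quarter resolution each
    return lumaBytes + chromaBytes;       // = width * height * 12 / 8
}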
/// <summary>Get frames during 2nd stage of flushing</summary>
/// <param name="bitStreamChunk">A single frame</param>
/// <returns>true if you should continue to call this method, false if you must go to the next stage.</returns>
public bool Flush2(ref BitStreamChunk bitStreamChunk)
{
    mfxSyncPoint syncpV;
    mfxFrameSurface1 *pmfxOutSurface = (mfxFrameSurface1 *)0;
    int nIndex2 = 0;

    bitStreamChunk.bytesAvailable = 0;

    mfxStatus sts = mfxStatus.MFX_ERR_NONE;

    //
    // Stage 3: Retrieve buffered frames from VPP
    //
    if (mfxStatus.MFX_ERR_NONE <= sts || mfxStatus.MFX_ERR_MORE_DATA == sts || mfxStatus.MFX_ERR_MORE_SURFACE == sts)
    {
        int nTaskIdx = GetFreeTaskIndex(pTasks, taskPoolSize); // Find free task
        if ((int)mfxStatus.MFX_ERR_NOT_FOUND == nTaskIdx)
        {
            // No more free tasks; need to sync
            sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, pTasks[nFirstSyncTask].syncp, 60000);
            QuickSyncStatic.ThrowOnBadStatus(sts, "syncOper");

            if (bitStreamChunk.bitstream == null || bitStreamChunk.bitstream.Length < pTasks[nFirstSyncTask].mfxBS.DataLength)
            {
                bitStreamChunk.bitstream = new byte[pTasks[nFirstSyncTask].mfxBS.DataLength];
            }
            Trace.Assert(pTasks[nFirstSyncTask].mfxBS.DataOffset == 0);
            Marshal.Copy(pTasks[nFirstSyncTask].mfxBS.Data, bitStreamChunk.bitstream, 0, (int)pTasks[nFirstSyncTask].mfxBS.DataLength);
            bitStreamChunk.bytesAvailable = (int)pTasks[nFirstSyncTask].mfxBS.DataLength;
            // WriteBitStreamFrame(pTasks[nFirstSyncTask].mfxBS, outbs);
            //MSDK_BREAK_ON_ERROR(sts);

            pTasks[nFirstSyncTask].syncp.sync = IntPtr.Zero;
            pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
            pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
            nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;
            return true;
        }
        else
        {
            int compositeFrameIndex = 0;
            //morevpp:
            nIndex2 = GetFreeSurfaceIndex(pSurfaces2, nSurfNumVPPEnc); // Find free frame surface
            Trace.Assert(nIndex2 != (int)mfxStatus.MFX_ERR_NOT_FOUND);

            for (;;)
            {
                mfxFrameSurface1 *z = null; // null input drains the VPP's buffered frames
                // if (compositeFrameIndex == 1)
                //     z = overlay;
                Trace.Assert(compositeFrameIndex <= 1);

                // Process a frame asynchronously (returns immediately)
                sts = UnsafeNativeMethods.MFXVideoVPP_RunFrameVPPAsync(session, z, &pSurfaces2[nIndex2], (mfxExtVppAuxData *)0, &syncpV); // COMPOSITING

                if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync == IntPtr.Zero)
                {
                    // Repeat the call if warning and no output
                    if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
                    {
                        Thread.Sleep(1); // wait if device is busy
                    }
                }
                else if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync != IntPtr.Zero)
                {
                    sts = mfxStatus.MFX_ERR_NONE; // ignore warnings if output is available
                    break;
                }
                else
                {
                    if (sts != mfxStatus.MFX_ERR_MORE_DATA && sts != mfxStatus.MFX_ERR_MORE_SURFACE)
                    {
                        QuickSyncStatic.ThrowOnBadStatus(sts, "vppAsync");
                    }
                    break; // not a warning
                }
            }

            // VPP needs more data: there is nothing left to drain
            if (mfxStatus.MFX_ERR_MORE_DATA == sts)
            {
                return false;
                // compositeFrameIndex++;
                //goto morevpp;
            }
            else if (mfxStatus.MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload, therefore not handled.
                // Relevant when VPP produces more frames at output than it consumes at input,
                // e.g. framerate conversion 30 fps -> 60 fps.
                QuickSyncStatic.ThrowOnBadStatus(sts, "vpp");
            }
            else if (mfxStatus.MFX_ERR_NONE != sts)
            {
                QuickSyncStatic.ThrowOnBadStatus(sts, "vpp");
            }

            for (;;)
            {
                // Encode a frame asynchronously (returns immediately)
                //sts = mfxENC.EncodeFrameAsync(NULL, pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp);
                sts = UnsafeNativeMethods.MFXVideoENCODE_EncodeFrameAsync(session, (mfxEncodeCtrl *)0, &pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp);

                if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync == IntPtr.Zero)
                {
                    // Repeat the call if warning and no output
                    if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
                    {
                        Thread.Sleep(1); // wait if device is busy
                    }
                }
                else if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync != IntPtr.Zero)
                {
                    sts = mfxStatus.MFX_ERR_NONE; // ignore warnings if output is available
                    break;
                }
                else if (mfxStatus.MFX_ERR_NOT_ENOUGH_BUFFER == sts)
                {
                    // Allocate more bitstream buffer memory here if needed...
                    break;
                }
                else
                {
                    if (sts != mfxStatus.MFX_ERR_MORE_DATA && sts != mfxStatus.MFX_ERR_MORE_SURFACE)
                    {
                        QuickSyncStatic.ThrowOnBadStatus(sts, "encodeAsync");
                    }
                    break;
                }
            }
        }
    }

    // MFX_ERR_MORE_DATA means the VPP has been drained; exit in case of other errors
    //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "dec or enc or vpp");
    return true;
}
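// Drain sketch (hypothetical caller; 'transcoder' is a LowLevelTranscoderCSharp
// instance and 'outfs' an output Stream, mirroring the encoder example below):
// keep calling Flush2 until it signals that this stage is exhausted.
static void DrainTranscoderExample(LowLevelTranscoderCSharp transcoder, Stream outfs)
{
    var chunk = new BitStreamChunk();
    while (transcoder.Flush2(ref chunk))      // true: keep calling this stage
    {
        if (chunk.bytesAvailable > 0)
            outfs.Write(chunk.bitstream, 0, chunk.bytesAvailable);
    }
}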
public static extern mfxStatus NativeEncoder_EncodeFrame(IntPtr handle, mfxFrameSurface1 *frame);
unsafe static void Main(string[] args)
{
    ConfirmQuickSyncReadiness.HaltIfNotReady();

    Environment.CurrentDirectory = AppDomain.CurrentDomain.BaseDirectory;

    // Keep ascending directories until the 'Media' folder is found
    for (int i = 0; i < 10 && !Directory.Exists("Media"); i++)
    {
        Directory.SetCurrentDirectory("..");
    }
    Directory.SetCurrentDirectory("Media");

    CodecId codecId = CodecId.MFX_CODEC_JPEG;
    FourCC fourcc = FourCC.UYVY; // supported: RGB4, YUY2, NV12 [UYVY through tricks! see below]
    mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO;

    int width, height;
    string inFilename;
    //inFilename = "BigBuckBunny_320x180." + fourcc + ".yuv"; width = 320; height = 180;
    inFilename = "BigBuckBunny_1920x1080." + fourcc + ".yuv";
    width = 1920;
    height = 1080;

    string outFilename = Path.ChangeExtension(inFilename, "enc.jpeg");

    Console.WriteLine("Working directory: {0}", Environment.CurrentDirectory);
    Console.WriteLine("Input filename: {0}", inFilename);
    Console.WriteLine("Input width: {0}  Input height: {1}", width, height);

    if (!File.Exists(inFilename))
    {
        Console.WriteLine("Input file not found.");
        Console.WriteLine("Please let Decoder1 run to completion to create the input file.");
        Console.WriteLine("Press any key to exit.");
        Console.ReadKey();
        return;
    }

    Stream infs, outfs;
    BenchmarkTimer bt = null;

#if !ENABLE_BENCHMARK
    infs = File.Open(inFilename, FileMode.Open);
    outfs = File.Open(outFilename, FileMode.Create);
#else // delete this code for the most simple example
    // * Benchmark Mode *
    // This block does a couple of things:
    //   1. Causes the file to be pre-read into memory so we are not timing disk reads.
    //   2. Replaces the output stream with a NullStream so nothing gets written to disk.
    //   3. Starts the timer for benchmarking.
    long maximumMemoryToAllocate = (long)4L * 1024 * 1024 * 1024;
    Console.WriteLine("Pre-reading input");
    infs = new PreReadLargeMemoryStream(File.Open(inFilename, FileMode.Open), maximumMemoryToAllocate);
    Console.WriteLine("Input read");
    outfs = new NullStream();
    bt = new BenchmarkTimer();
    bt.Start();
    int minimumFrames = 4000;
#endif

    Console.WriteLine("Output filename: {0}", Path.GetFileName((outfs as FileStream)?.Name ?? "NO OUTPUT"));
    Console.WriteLine();

    // The encoder cannot encode UYVY, but if you are the only decoder of the JPEG
    // files, you can encode UYVY as YUY2 and everything is good.
    if (fourcc == FourCC.UYVY)
    {
        fourcc = FourCC.YUY2;
    }

    mfxVideoParam mfxEncParams = new mfxVideoParam();
    mfxEncParams.mfx.CodecId = codecId;
    mfxEncParams.mfx.TargetUsage = TargetUsage.MFX_TARGETUSAGE_BALANCED;
    //mfxEncParams.mfx.TargetKbps = 2000;
    //mfxEncParams.mfx.RateControlMethod = RateControlMethod.MFX_RATECONTROL_VBR;
    mfxEncParams.mfx.Quality = 90;
    mfxEncParams.mfx.Interleaved = 1;
    mfxEncParams.mfx.FrameInfo.FrameRateExtN = 30;
    mfxEncParams.mfx.FrameInfo.FrameRateExtD = 1;
    mfxEncParams.mfx.FrameInfo.FourCC = fourcc;

    switch (fourcc)
    {
        case FourCC.NV12:
        case FourCC.YV12:
            mfxEncParams.mfx.FrameInfo.ChromaFormat = ChromaFormat.MFX_CHROMAFORMAT_YUV420;
            break;
        case FourCC.YUY2:
            //mfxEncParams.mfx.FrameInfo.ChromaFormat = ChromaFormat.MFX_CHROMAFORMAT_YUV422V; // fatal on SKYLAKE!
            mfxEncParams.mfx.FrameInfo.ChromaFormat = ChromaFormat.MFX_CHROMAFORMAT_YUV422;
            break;
        case FourCC.RGB4:
            mfxEncParams.mfx.FrameInfo.ChromaFormat = ChromaFormat.MFX_CHROMAFORMAT_YUV444;
            break;
        default:
            Trace.Assert(false);
            break;
    }

    mfxEncParams.mfx.FrameInfo.PicStruct = PicStruct.MFX_PICSTRUCT_PROGRESSIVE;
    mfxEncParams.mfx.FrameInfo.CropX = 0;
    mfxEncParams.mfx.FrameInfo.CropY = 0;
    mfxEncParams.mfx.FrameInfo.CropW = (ushort)width;
    mfxEncParams.mfx.FrameInfo.CropH = (ushort)height;
    // Width must be a multiple of 16.
    // Height must be a multiple of 16 for frame pictures and a multiple of 32 for field pictures.
    mfxEncParams.mfx.FrameInfo.Width = QuickSyncStatic.ALIGN16(width);
    mfxEncParams.mfx.FrameInfo.Height = QuickSyncStatic.AlignHeightTo32or16(height, mfxEncParams.mfx.FrameInfo.PicStruct);
    mfxEncParams.IOPattern = IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY; // must be 'in system memory'
    mfxEncParams.AsyncDepth = 4; // Pipeline depth. Best at 4

    // Overrides the alignment above: round both dimensions up to 32.
    mfxEncParams.mfx.FrameInfo.Width = QuickSyncStatic.ALIGN32(width);
    mfxEncParams.mfx.FrameInfo.Height = QuickSyncStatic.ALIGN32(height);

    BitStreamChunk bsc = new BitStreamChunk(); // where we receive compressed frame data

    ILowLevelEncoder encoder = new LowLevelEncoder(mfxEncParams, impl);
    //ILowLevelEncoder encoder = new LowLevelEncoder(mfxEncParams, impl);

    string impltext = QuickSyncStatic.ImplementationString(encoder.session);
    Console.WriteLine("Implementation = {0}", impltext);

    // not needed for YUY2 encoding
    //var formatConverter = new NV12FromXXXXConverter(fileFourcc, width, height);

    int inputFrameLength = width * height * VideoUtility.GetBitsPerPixel(fourcc) / 8;
    byte[] uncompressed = new byte[inputFrameLength];

    int count = 0;

    // We do not call encoder.LockFrame() and encoder.UnlockFrame(),
    // as this example is for system memory.
    while (infs.Read(uncompressed, 0, inputFrameLength) == inputFrameLength)
    {
        int ix = encoder.GetFreeFrameIndex(); // this call relies on the Locked flags in the authoritative surface array

        //formatConverter.ConvertToNV12FrameSurface(ref encoder.Frames[ix], uncompressed, 0);
        mfxFrameSurface1 *f = (mfxFrameSurface1 *)encoder.Frames[ix];

        switch (fourcc)
        {
            case FourCC.NV12:
                Trace.Assert(f->Data.Pitch == width * 1);
                fixed (byte *aa = &uncompressed[0])
                    FastMemcpyMemmove.memcpy(f->Data.Y, (IntPtr)aa, height * width);
                fixed (byte *aa = &uncompressed[height * width])
                    FastMemcpyMemmove.memcpy(f->Data.UV, (IntPtr)aa, height / 2 * width);
                break;
            case FourCC.YUY2:
                Trace.Assert(f->Data.Pitch == width * 2);
                fixed (byte *aa = &uncompressed[0])
                    FastMemcpyMemmove.memcpy(f->Data.Y, (IntPtr)aa, height * width * 2);
                break;
            default:
                Trace.Assert(false);
                break;
        }

        encoder.EncodeFrame(ix, ref bsc);

        if (bsc.bytesAvailable > 0)
        {
            outfs.Write(bsc.bitstream, 0, bsc.bytesAvailable);
            if (++count % 100 == 0)
            {
                Console.Write("Frame {0}\r", count);
            }
        }

#if ENABLE_BENCHMARK // delete this code for the most simple example
        if (infs.Position + inputFrameLength - 1 >= infs.Length)
        {
            infs.Position = 0;
        }
        if (count >= minimumFrames)
        {
            break;
        }
#endif
    }

    while (encoder.Flush(ref bsc))
    {
        if (bsc.bytesAvailable > 0)
        {
            outfs.Write(bsc.bitstream, 0, bsc.bytesAvailable);
            if (++count % 100 == 0)
            {
                Console.Write("Frame {0}\r", count);
            }
        }
    }

    if (bt != null)
    {
        bt.StopAndReport(count, infs.Position, outfs.Position);
    }

    infs.Close();
    outfs.Close();
    encoder.Dispose();

    Console.WriteLine("Encoded {0} frames", count);

    // Make sure the program always waits for the user, except for an F5-Release run
    if (Debugger.IsAttached || Environment.GetEnvironmentVariable("VisualStudioVersion") == null)
    {
        Console.WriteLine("done - press a key to exit");
        Console.ReadKey();
    }
}
public static extern mfxStatus MFXVideoENCODE_EncodeFrameAsync(mfxSession session, mfxEncodeCtrl *ctrl, mfxFrameSurface1 *surface, mfxBitstream *bs, mfxSyncPoint *syncp);
public static extern mfxStatus MFXVideoVPP_RunFrameVPPAsyncEx(mfxSession session, mfxFrameSurface1 *insurf, mfxFrameSurface1 *work, mfxFrameSurface1 **outsurf, mfxSyncPoint *syncp);
public static extern mfxStatus MFXVideoVPP_RunFrameVPPAsync(mfxSession session, mfxFrameSurface1 *insurf, mfxFrameSurface1 *outsurf, mfxExtVppAuxData *aux, mfxSyncPoint *syncp);
public static extern mfxStatus MFXVideoDECODE_DecodeFrameAsync(mfxSession session, mfxBitstream *bs, mfxFrameSurface1 *surface_work, mfxFrameSurface1 **surface_out, mfxSyncPoint *syncp);
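// Calling-convention sketch (illustrative only, not part of the P/Invoke surface):
// every *Async entry point above returns immediately with a sync point; a positive
// status (warning) may be ignored when a sync point was produced; and
// MFXVideoCORE_SyncOperation waits for completion. 'session', 'bs', and 'workSurf'
// are assumed to be set up as in the decoder code above.
static mfxStatus DecodeOneFrameExample(mfxSession session, mfxBitstream *bs, mfxFrameSurface1 *workSurf)
{
    mfxSyncPoint syncp;
    mfxFrameSurface1 *outSurf;
    mfxStatus sts;
    do
    {
        sts = UnsafeNativeMethods.MFXVideoDECODE_DecodeFrameAsync(session, bs, workSurf, &outSurf, &syncp);
        if (sts == mfxStatus.MFX_WRN_DEVICE_BUSY)
            Thread.Sleep(1);                       // device busy: retry the same call
    } while (sts == mfxStatus.MFX_WRN_DEVICE_BUSY);

    if (sts > mfxStatus.MFX_ERR_NONE && syncp.sync != IntPtr.Zero)
        sts = mfxStatus.MFX_ERR_NONE;              // warning but output available: ignore

    if (sts == mfxStatus.MFX_ERR_NONE)
        sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncp, 60000); // wait for the frame
    return sts;
}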