/// <summary>
/// The first stage of flushing the internal frame buffers.
/// </summary>
/// <param name="frame">The surface into which a valid frame, if any, will be placed.</param>
/// <returns>
/// True indicates the decoder needs more data; this flush stage is complete.
/// False indicates the decoder should be called again, and frame may contain a valid frame.
/// </returns>
public bool Flush1(ref mfxFrameSurface1 frame)
{
    if (stage == Stage.MemoryAllocated)
    {
        stage++;
    }
    Trace.Assert(stage == Stage.DecodingDone);

    void* foo = (void*)0;
    frame.ptr = (void*)0;
    mfxStatus sts = UnsafeNativeMethods.CLowLevelDecoder_Flush1(handle, &foo);

    if (sts == mfxStatus.MFX_ERR_MORE_SURFACE) // decoder needs to be called again; it is consuming memory (SW mode)
    {
        return false;
    }
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        stage++;
        return true;
    }
    if (sts != 0)
    {
        throw new LimeVideoSDKLowLevelException("Flush1 fail", sts);
    }

    frame.ptr = foo;
    return false;
}
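A minimal drain-loop sketch for the method above, assuming a decoder object exposing Flush1; ProcessFrame is a hypothetical consumer:

var frame = new mfxFrameSurface1();
while (!decoder.Flush1(ref frame))   // false: call again, a frame may be ready
{
    if (frame.ptr != (void*)0)       // a buffered frame was emitted
        ProcessFrame(frame);         // hypothetical consumer
}
// true: MFX_ERR_MORE_DATA was returned; this flush stage is done.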
/// <summary>Converts an NV12 mfxFrameSurface1 to another FourCC, writing to an IntPtr destination.</summary>
/// <param name="frame">The input frame.</param>
/// <param name="outputFourCC">The output FourCC.</param>
/// <param name="outbuf">The destination buffer.</param>
/// <param name="width">The width.</param>
/// <param name="height">The height.</param>
/// <param name="outputPitch">The output pitch in bytes.</param>
/// <param name="outputlen">The destination buffer length in bytes.</param>
public unsafe void ConvertFromNV12FrameSurface1(mfxFrameSurface1 frame, FourCC outputFourCC, IntPtr outbuf,
                                                int width, int height, int outputPitch, int outputlen)
{
    Trace.Assert(height * outputPitch == outputlen);

    byte* nv12   = (byte*)frame.Data.Y;
    byte* nv12uv = (byte*)frame.Data.UV;
    int pitch = frame.Data.PitchHigh << 16 | frame.Data.PitchLow; // full 32-bit source pitch; the call below assumes pitch == width

    this.ConvertFromNV12(outputFourCC, nv12, nv12uv, (byte*)outbuf, width, height, outputlen, width, outputPitch);
}
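Since mfxFrameData stores the pitch split across two 16-bit fields, a small sketch of the packing arithmetic used above:

int pitch = frame.Data.PitchHigh << 16 | frame.Data.PitchLow; // full 32-bit pitch in bytes
frame.Data.PitchHigh = (ushort)(pitch >> 16);                 // splitting it back
frame.Data.PitchLow  = (ushort)(pitch & 0xFFFF);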
/// <summary>
/// If frame holds a valid frame, its raw NV12 data (Y plane followed by interleaved UV) is written to outbs.
/// </summary>
/// <param name="frame">The source frame surface.</param>
/// <param name="outbs">The destination stream.</param>
public void WriteFrameNV12(mfxFrameSurface1 frame, Stream outbs)
{
    // Y plane: CropH rows, one row of bytes copied per iteration.
    var y = frame.Data.Y;
    for (int i = 0; i < frame.Info.CropH; i++)
    {
        Marshal.Copy(y + i * frame.Data.Pitch, row, 0, row.Length);
        outbs.Write(row, 0, row.Length);
    }

    // Interleaved UV plane: half the height of the Y plane.
    IntPtr uv = frame.Data.UV;
    for (int i = 0; i < frame.Info.CropH / 2; i++)
    {
        Marshal.Copy(uv + i * frame.Data.Pitch, row, 0, row.Length);
        outbs.Write(row, 0, row.Length);
    }
}
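For reference, NV12 stores CropH rows of luma followed by CropH/2 rows of interleaved chroma, so each call writes CropW * CropH * 3 / 2 bytes (assuming row.Length == CropW). A usage sketch, with writer standing in for whatever object hosts WriteFrameNV12:

int frameBytes = frame.Info.CropW * frame.Info.CropH * 3 / 2;
using (var outbs = File.Open("out.nv12", FileMode.Create))
{
    writer.WriteFrameNV12(frame, outbs); // appends frameBytes bytes per frame
}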
/// <summary>
/// Converts a packed source frame (in the FourCC given at construction) to NV12, writing into the given frame surface.
/// </summary>
/// <param name="nv12surf">The destination NV12 surface.</param>
/// <param name="srcframe">The packed source frame bytes.</param>
/// <param name="offset">The offset into srcframe at which the frame starts.</param>
public unsafe void ConvertToNV12FrameSurface(ref mfxFrameSurface1 nv12surf, byte[] srcframe, int offset)
{
    int compactFrameSize = w * h * VideoUtility.GetBitsPerPixel(infourcc) / 8;
    //Trace.Assert(srcframe.Length == compactFrameSize); // expected to be checked by the caller

    fixed (byte* frameptr = &srcframe[offset])
    {
        byte* nv12   = (byte*)nv12surf.Data.Y;
        byte* nv12uv = (byte*)nv12surf.Data.UV;
        llconverter.ConvertToNV12FrameSurface(infourcc, nv12surf.Data.Pitch, w, h, frameptr, nv12, nv12uv);
    }
}
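A usage sketch, assuming w, h, and infourcc were fixed at construction and that the destination surface comes from an encoder's surface pool; converter and encoder are hypothetical names:

byte[] src = new byte[w * h * VideoUtility.GetBitsPerPixel(infourcc) / 8];
// ... fill src with one packed frame ...
converter.ConvertToNV12FrameSurface(ref encoder.Frames[0], src, 0);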
public bool DecodeFrame(ref mfxFrameSurface1 frame)
{
    Trace.Assert(stage == Stage.MemoryAllocated);

    frame.ptr = (void*)0;
    mfxFrameSurface1* foo = null;
    mfxSyncPoint syncp = new mfxSyncPoint();

    // NOTE: the work-surface, output-surface and sync-point arguments were lost in
    // the original source; the call below follows the documented
    // MFXVideoDECODE_DecodeFrameAsync signature, with GetFreeSurface() as a
    // hypothetical helper returning an unlocked surface from the decoder's pool.
    mfxStatus sts = UnsafeNativeMethods.MFXVideoDECODE_DecodeFrameAsync(session, &mfxBS, GetFreeSurface(), &foo, &syncp);

    if (sts == mfxStatus.MFX_ERR_MORE_SURFACE) // decoder needs to be called again; it is consuming memory (SW mode)
    {
        return false;
    }
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return true;
    }
    if (sts < 0)
    {
        throw new LimeVideoSDKLowLevelException("DecodeFrame fail", sts);
    }

    if (sts == mfxStatus.MFX_WRN_VIDEO_PARAM_CHANGED)
    {
        frame.WarningVideoParamChanged = true;
    }
    else
    {
        frame.WarningVideoParamChanged = false;
        if (sts > 0)
        {
            Console.WriteLine("warn: " + sts.ToString());
        }
    }

    frame.ptr = foo;
    return false;
}
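A typical pull loop over DecodeFrame under the same return convention as Flush1 (true: feed more bitstream; false: frame.ptr may hold an output surface); RefillBitstream and ProcessFrame are hypothetical helpers:

var frame = new mfxFrameSurface1();
for (;;)
{
    if (decoder.DecodeFrame(ref frame))
    {
        if (!RefillBitstream())   // hypothetical: top up mfxBS from the input
            break;                // end of input: move on to the flush stages
        continue;
    }
    if (frame.ptr != (void*)0)
        ProcessFrame(frame);      // hypothetical consumer
}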
private static void RenderFrameX(StreamDecoder d, mfxFrameSurface1 surf)
{
    var m_pDXGIBackBuffer = swapChain.GetBackBuffer<Texture2D>(0);

    if (useSystemMemoryNotVideoMemory)
    {
        // System-memory path: copy the RGB4 plane straight into the back buffer.
        Trace.Assert(surf.Data.B != IntPtr.Zero);
        ResourceRegion? rr = null;
        device.ImmediateContext.UpdateSubresource(m_pDXGIBackBuffer, 0, rr, surf.Data.B, surf.Data.Pitch, 0);
    }
    else
    {
        // Video-memory path: wrap the DX11 texture handle and copy on the GPU.
        Trace.Assert(surf.Data.MemId != IntPtr.Zero);
        IntPtr dx11frameHandle = d.lowLevelDecoder.videoAccelerationSupport.FrameGetHandle(surf.Data.MemId);
        var texture2d = new Texture2D(dx11frameHandle);
        device.ImmediateContext.CopySubresourceRegion(texture2d, 0, null, m_pDXGIBackBuffer, 0);
    }

    swapChain.Present(2, PresentFlags.None);
}
/// <summary>Initializes a new instance of the <see cref="LowLevelEncoderCSharp"/> class.</summary>
/// <param name="mfxEncParams">The encoder parameters.</param>
/// <param name="impl">The implementation.</param>
public LowLevelEncoderCSharp(mfxVideoParam mfxEncParams, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO)
{
    mfxStatus sts;

    session = new mfxSession();
    var ver = new mfxVersion() { Major = 1, Minor = 3 };
    fixed (mfxSession* s = &session)
        sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");

    sts = UnsafeNativeMethods.MFXVideoENCODE_Query(session, &mfxEncParams, &mfxEncParams);
    if (sts > 0)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Query), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "encodequery");

    mfxFrameAllocRequest EncRequest;
    sts = UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf(session, &mfxEncParams, &EncRequest);
    QuickSyncStatic.ThrowOnBadStatus(sts, "queryiosurf");

    EncRequest.NumFrameSuggested = (ushort)(EncRequest.NumFrameSuggested + mfxEncParams.AsyncDepth);

    // Only required on Windows DirectX11, to ensure that surfaces can be written to by the application.
    EncRequest.Type |= (FrameMemoryType)0x2000; // WILL_WRITE

    UInt16 numSurfaces = EncRequest.NumFrameSuggested;

    // - Width and height of buffer must be aligned, a multiple of 32
    // - Frame surface array keeps pointers to all surface planes and general frame info
    UInt16 width  = (UInt16)QuickSyncStatic.ALIGN32(mfxEncParams.mfx.FrameInfo.Width);
    UInt16 height = (UInt16)QuickSyncStatic.ALIGN32(mfxEncParams.mfx.FrameInfo.Height);
    int bitsPerPixel = VideoUtility.GetBitsPerPixel(mfxEncParams.mfx.FrameInfo.FourCC);
    int surfaceSize  = width * height * bitsPerPixel / 8;

    IntPtr surfaceBuffers = Marshal.AllocHGlobal(surfaceSize * numSurfaces);

    // Allocate surface headers (mfxFrameSurface1) for the encoder.
    Frames = new mfxFrameSurface1[numSurfaces];
    for (int i = 0; i < numSurfaces; i++)
    {
        Frames[i] = new mfxFrameSurface1();
        Frames[i].Info = mfxEncParams.mfx.FrameInfo;
        switch (mfxEncParams.mfx.FrameInfo.FourCC)
        {
            case FourCC.NV12:
                Frames[i].Data.Y_ptr = (byte*)surfaceBuffers + i * surfaceSize;
                Frames[i].Data.U_ptr = Frames[i].Data.Y_ptr + width * height;
                Frames[i].Data.V_ptr = Frames[i].Data.U_ptr + 1;
                Frames[i].Data.Pitch = width;
                break;
            case FourCC.YUY2:
                Frames[i].Data.Y_ptr = (byte*)surfaceBuffers + i * surfaceSize;
                Frames[i].Data.U_ptr = Frames[i].Data.Y_ptr + 1;
                Frames[i].Data.V_ptr = Frames[i].Data.U_ptr + 3;
                Frames[i].Data.Pitch = (ushort)(width * 2);
                break;
            default:
                // see sysmem_allocator.cpp for more help
                throw new NotImplementedException();
        }
    }

    frameIntPtrs = new IntPtr[Frames.Length];
    for (int i = 0; i < Frames.Length; i++)
    {
        fixed (mfxFrameSurface1* a = &Frames[i])
            frameIntPtrs[i] = (IntPtr)a;
    }

    sts = UnsafeNativeMethods.MFXVideoENCODE_Init(session, &mfxEncParams);
    if (sts > 0)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Init), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "encodeinit");

    var par = new mfxVideoParam();
    sts = UnsafeNativeMethods.MFXVideoENCODE_GetVideoParam(session, &par);
    QuickSyncStatic.ThrowOnBadStatus(sts, "encodegetvideoparam");

    // From mediasdkjpeg-man.pdf:
    //   BufferSizeInKB = 4 + (Width * Height * BytesPerPx + 1023) / 1024;
    // where Width and Height are the width and height of the picture in pixels, and
    // BytesPerPx is the number of bytes per pixel: 1 for a monochrome picture,
    // 1.5 for NV12 and YV12, 2 for YUY2, and 3 for RGB32 (the alpha channel is not encoded).
    if (par.mfx.BufferSizeInKB == 0 && mfxEncParams.mfx.CodecId == CodecId.MFX_CODEC_JPEG)
    {
        par.mfx.BufferSizeInKB = (ushort)((4 + (mfxEncParams.mfx.FrameInfo.CropW * mfxEncParams.mfx.FrameInfo.CropH * 3 + 1023)) / 1000);
    }

    // Create a task pool to improve asynchronous performance (greater GPU utilization).
    int taskPoolSize = mfxEncParams.AsyncDepth; // number of tasks that can be submitted before synchronizing is required
    pTasks = new Task[taskPoolSize];
    for (int i = 0; i < taskPoolSize; i++)
    {
        // Prepare the Media SDK bitstream buffer for this task.
        pTasks[i].mfxBS.MaxLength = (uint)(par.mfx.BufferSizeInKB * 1000);
        pTasks[i].mfxBS.Data = Marshal.AllocHGlobal((int)pTasks[i].mfxBS.MaxLength);
        Trace.Assert(pTasks[i].mfxBS.Data != IntPtr.Zero);
    }

    pinningHandles.Add(GCHandle.Alloc(pTasks, GCHandleType.Pinned));
    pinningHandles.Add(GCHandle.Alloc(Frames, GCHandleType.Pinned));
}
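A construction sketch for the encoder above, assuming 1280x720 NV12 system-memory input; the parameter fields follow the mfxVideoParam usage in the constructor:

var p = new mfxVideoParam();
p.mfx.CodecId = CodecId.MFX_CODEC_AVC;
p.mfx.FrameInfo.FourCC = FourCC.NV12;
p.mfx.FrameInfo.Width  = 1280;  // ALIGN32 is applied internally
p.mfx.FrameInfo.Height = 720;
p.AsyncDepth = 4;
p.IOPattern  = IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY;
var encoder = new LowLevelEncoderCSharp(p);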
/// <summary>Unlocks a previously locked frame. Not implemented in this class.</summary>
public void UnlockFrame(ref mfxFrameSurface1 frame, ref mfxFrameData frameData)
{
    throw new NotImplementedException();
}
/// <summary>
/// Constructor.
/// </summary>
public LowLevelDecoderCSharp(mfxVideoParam mfxDecParamsX, mfxVideoParam? VPPParamsX = null, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO)
{
    mfxStatus sts;
    bool enableVPP = VPPParamsX != null;

    if (VPPParamsX == null)
    {
        // Create default VPP parameters: system memory in and out, pass-through frame info.
        var foo = new mfxVideoParam();
        foo.AsyncDepth = 1;
        foo.IOPattern = IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY | IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY;
        foo.vpp.In  = mfxDecParamsX.mfx.FrameInfo;
        foo.vpp.Out = mfxDecParamsX.mfx.FrameInfo;
        VPPParamsX = foo;
    }
    mfxVideoParam VPPParams = VPPParamsX.Value;
    mfxVideoParam mfxDecParams = mfxDecParamsX;

    // NOTE: every blittable array handed to native code is pinned at its allocation site below,
    // so nothing can move during interop.
    this.videoParam = mfxDecParams;
    this.enableVPP = enableVPP;

    session = new mfxSession();
    var ver = new mfxVersion() { Major = 1, Minor = 3 };
    fixed (mfxSession* s = &session)
        sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");

    bool decVideoMemOut = (mfxDecParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_VIDEO_MEMORY) != 0;
    bool vppVideoMemIn  = (VPPParams.IOPattern & IOPattern.MFX_IOPATTERN_IN_VIDEO_MEMORY) != 0;
    bool vppVideoMemOut = (VPPParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_VIDEO_MEMORY) != 0;

    Trace.Assert(!enableVPP || decVideoMemOut == vppVideoMemIn,
        "When the VPP is enabled, the memory type from DEC into VPP must be of the same type");

    if (vppVideoMemIn || vppVideoMemOut)
    {
        // To use video memory, a way to allocate Direct3D or VAAPI frames is required.
        videoAccelerationSupport = new VideoAccelerationSupport(session);
    }

    fixed (mfxFrameAllocRequest* p = &DecRequest)
        sts = UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf(session, &mfxDecParams, p);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "DECODE_QueryIOSurf");

    if (enableVPP)
    {
        fixed (mfxFrameAllocRequest* p = &VPPRequest[0])
            sts = UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf(session, &VPPParams, p);
        if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
        {
            sts = 0;
        }
        QuickSyncStatic.ThrowOnBadStatus(sts, "VPP_QueryIOSurf");

        VPPRequest[1].Type |= FrameMemoryType.WILL_READ;
    }

    int nSurfNumVPPOut = 0;
    var numSurfaces = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested + VPPParams.AsyncDepth;
    if (enableVPP)
    {
        nSurfNumVPPOut = VPPRequest[1].NumFrameSuggested + VPPParams.AsyncDepth;
    }

    bitstreamBuffer = Marshal.AllocHGlobal(defaultBitstreamBufferSize);
    bitstream.Data = bitstreamBuffer;
    bitstream.DataLength = 0;
    bitstream.MaxLength = (uint)defaultBitstreamBufferSize;
    bitstream.DataOffset = 0;

    // Allocate decoder frames via DirectX when decoding to video memory.
    mfxFrameAllocResponse DecResponse = new mfxFrameAllocResponse();
    if (decVideoMemOut)
    {
        DecRequest.NumFrameMin = DecRequest.NumFrameSuggested = (ushort)numSurfaces;
        fixed (mfxFrameAllocRequest* p = &DecRequest)
            videoAccelerationSupport.AllocFrames(p, &DecResponse);
    }

    // Allocate VPP output frames via DirectX when outputting to video memory.
    mfxFrameAllocResponse EncResponse = new mfxFrameAllocResponse();
    if (vppVideoMemOut)
    {
        VPPRequest[1].NumFrameMin = VPPRequest[1].NumFrameSuggested = (ushort)nSurfNumVPPOut;
        fixed (mfxFrameAllocRequest* p = &VPPRequest[1])
            videoAccelerationSupport.AllocFrames(p, &EncResponse);
    }

    // Allocate surfaces for the decoder:
    // - Width and height of buffer must be aligned, a multiple of 32
    // - Frame surface array keeps pointers to all surface planes and general frame info
    UInt16 width  = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Width);
    UInt16 height = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Height);
    int bitsPerPixel = VideoUtility.GetBitsPerPixel(mfxDecParams.mfx.FrameInfo.FourCC);
    int surfaceSize  = width * height * bitsPerPixel / 8;

    if (!decVideoMemOut)
    {
        surfaceBuffers = Marshal.AllocHGlobal(surfaceSize * numSurfaces);
    }

    // Allocate surface headers (mfxFrameSurface1) for the decoder.
    pmfxSurfaces = new mfxFrameSurface1[numSurfaces];
    pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces, GCHandleType.Pinned));
    for (int i = 0; i < numSurfaces; i++)
    {
        pmfxSurfaces[i] = new mfxFrameSurface1();
        pmfxSurfaces[i].Info = mfxDecParams.mfx.FrameInfo;

        if (!decVideoMemOut)
        {
            switch (mfxDecParams.mfx.FrameInfo.FourCC)
            {
                case FourCC.NV12:
                    pmfxSurfaces[i].Data.Y_ptr = (byte*)surfaceBuffers + i * surfaceSize;
                    pmfxSurfaces[i].Data.U_ptr = pmfxSurfaces[i].Data.Y_ptr + width * height;
                    pmfxSurfaces[i].Data.V_ptr = pmfxSurfaces[i].Data.U_ptr + 1;
                    pmfxSurfaces[i].Data.Pitch = width;
                    break;
                case FourCC.YUY2:
                    pmfxSurfaces[i].Data.Y_ptr = (byte*)surfaceBuffers + i * surfaceSize;
                    pmfxSurfaces[i].Data.U_ptr = pmfxSurfaces[i].Data.Y_ptr + 1;
                    pmfxSurfaces[i].Data.V_ptr = pmfxSurfaces[i].Data.U_ptr + 3;
                    pmfxSurfaces[i].Data.Pitch = (ushort)(width * 2);
                    break;
                default:
                    // see sysmem_allocator.cpp for more help
                    throw new NotImplementedException();
            }
        }
        else
        {
            pmfxSurfaces[i].Data.MemId = DecResponse.mids_ptr[i]; // MID (memory id) represents one D3D NV12 surface
        }
    }

    if (enableVPP)
    {
        UInt16 width2  = (UInt16)QuickSyncStatic.ALIGN32(VPPRequest[1].Info.CropW);
        UInt16 height2 = (UInt16)QuickSyncStatic.ALIGN32(VPPRequest[1].Info.CropH);
        int bitsPerPixel2 = VideoUtility.GetBitsPerPixel(VPPParams.vpp.Out.FourCC); // NV12 is a 12-bits-per-pixel format
        int surfaceSize2  = width2 * height2 * bitsPerPixel2 / 8;
        int pitch2 = width2 * bitsPerPixel2 / 8;

        if (!vppVideoMemOut)
        {
            surfaceBuffers2 = Marshal.AllocHGlobal(surfaceSize2 * nSurfNumVPPOut);
        }

        pmfxSurfaces2 = new mfxFrameSurface1[nSurfNumVPPOut];
        pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces2, GCHandleType.Pinned));
        for (int i = 0; i < nSurfNumVPPOut; i++)
        {
            pmfxSurfaces2[i] = new mfxFrameSurface1();
            pmfxSurfaces2[i].Info = VPPParams.vpp.Out;

            if (!vppVideoMemOut)
            {
                pmfxSurfaces2[i].Data.Pitch = (ushort)pitch2;
                switch (VPPParams.vpp.Out.FourCC)
                {
                    case FourCC.NV12:
                        pmfxSurfaces2[i].Data.Y_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2;
                        pmfxSurfaces2[i].Data.U_ptr = pmfxSurfaces2[i].Data.Y_ptr + width2 * height2;
                        pmfxSurfaces2[i].Data.V_ptr = pmfxSurfaces2[i].Data.U_ptr + 1;
                        break;
                    case FourCC.RGB4:
                        // Interleaved BGRA: B, G, R pointers offset within the first pixel.
                        pmfxSurfaces2[i].Data.B_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2;
                        pmfxSurfaces2[i].Data.G_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2 + 1;
                        pmfxSurfaces2[i].Data.R_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2 + 2;
                        break;
                    default:
                        break;
                }
            }
            else
            {
                pmfxSurfaces2[i].Data.MemId = EncResponse.mids_ptr[i]; // MID (memory id) represents one D3D NV12 surface
            }
        }
    }

    sts = UnsafeNativeMethods.MFXVideoDECODE_Init(session, &mfxDecParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXVideoDECODE_Init");

    if (enableVPP)
    {
        sts = UnsafeNativeMethods.MFXVideoVPP_Init(session, &VPPParams);
        if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
        {
            sts = 0;
        }
        QuickSyncStatic.ThrowOnBadStatus(sts, "MFXVideoVPP_Init");
    }
}
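A construction sketch for the decoder above: decode-only (no VPP) with system-memory output; in practice FrameInfo is filled by MFXVideoDECODE_DecodeHeader from the actual bitstream first:

var decParams = new mfxVideoParam();
decParams.mfx.CodecId = CodecId.MFX_CODEC_AVC;
decParams.IOPattern = IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
decParams.AsyncDepth = 1;
// ... MFXVideoDECODE_DecodeHeader fills decParams.mfx.FrameInfo here ...
var decoder = new LowLevelDecoderCSharp(decParams);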
/// <summary>Initializes a new instance of the <see cref="LowLevelTranscoderCSharp"/> class.</summary>
/// <param name="config">The configuration.</param>
/// <param name="impl">The implementation.</param>
/// <param name="forceSystemMemory">if set to <c>true</c>, forces system memory.</param>
public LowLevelTranscoderCSharp(TranscoderConfiguration config, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO, bool forceSystemMemory = false)
{
    mfxStatus sts;

    mfxVideoParam mfxDecParams = config.decParams;
    mfxVideoParam mfxVPPParams = config.vppParams;
    mfxVideoParam mfxEncParams = config.encParams;

    session = new mfxSession();
    var ver = new mfxVersion() { Major = 1, Minor = 3 };
    fixed (mfxSession* s = &session)
        sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");

    int bufsize = (int)1e6;
    mfxBS = (mfxBitstream*)MyAllocHGlobalAndZero(sizeof(mfxBitstream));
    mfxBS->Data = MyAllocHGlobalAndZero(bufsize);
    mfxBS->DataLength = 0;
    mfxBS->MaxLength = (uint)bufsize;
    mfxBS->DataOffset = 0;

    int outwidth  = mfxDecParams.mfx.FrameInfo.CropW;
    int outheight = mfxDecParams.mfx.FrameInfo.CropH;

    // Query number of required surfaces for VPP ([0] - in, [1] - out).
    TwoMfxFrameAllocRequest VPPRequest;
    sts = UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf(session, &mfxVPPParams, (mfxFrameAllocRequest*)&VPPRequest);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "vpp.queryiosurf");

    // Query number of required surfaces for the decoder.
    mfxFrameAllocRequest DecRequest;
    sts = UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf(session, &mfxDecParams, &DecRequest);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf));

    // Query number of required surfaces for the encoder.
    mfxFrameAllocRequest EncRequest = new mfxFrameAllocRequest();
    sts = UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf(session, &mfxEncParams, &EncRequest);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf));

    // Determine the required number of surfaces for decoder output (VPP input)
    // and for VPP output (encoder input).
    nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest.In.NumFrameSuggested + mfxVPPParams.AsyncDepth;
    nSurfNumVPPEnc = EncRequest.NumFrameSuggested + VPPRequest.Out.NumFrameSuggested + mfxVPPParams.AsyncDepth;

    {
        Trace.Assert((mfxEncParams.IOPattern & IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY) != 0);
        Trace.Assert((mfxDecParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY) != 0);

        UInt16 width  = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Width);
        UInt16 height = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Height);
        int bitsPerPixel = 12; // NV12
        int surfaceSize = width * height * bitsPerPixel / 8;

        var decVppSurfaceBuffers = Marshal.AllocHGlobal(surfaceSize * nSurfNumDecVPP);
        var vppEncSurfaceBuffers = Marshal.AllocHGlobal(surfaceSize * nSurfNumVPPEnc);

        pSurfaces  = (mfxFrameSurface1*)MyAllocHGlobalAndZero(sizeof(mfxFrameSurface1) * nSurfNumDecVPP);
        pSurfaces2 = (mfxFrameSurface1*)MyAllocHGlobalAndZero(sizeof(mfxFrameSurface1) * nSurfNumVPPEnc);

        for (int i = 0; i < nSurfNumDecVPP; i++)
        {
            pSurfaces[i] = new mfxFrameSurface1();
            pSurfaces[i].Info = DecRequest.Info;
            pSurfaces[i].Data.Y_ptr = (byte*)decVppSurfaceBuffers + i * surfaceSize;
            pSurfaces[i].Data.U_ptr = pSurfaces[i].Data.Y_ptr + width * height;
            pSurfaces[i].Data.V_ptr = pSurfaces[i].Data.U_ptr + 1;
            pSurfaces[i].Data.Pitch = width;
        }
        for (int i = 0; i < nSurfNumVPPEnc; i++)
        {
            pSurfaces2[i] = new mfxFrameSurface1();
            pSurfaces2[i].Info = EncRequest.Info;
            pSurfaces2[i].Data.Y_ptr = (byte*)vppEncSurfaceBuffers + i * surfaceSize;
            pSurfaces2[i].Data.U_ptr = pSurfaces2[i].Data.Y_ptr + width * height;
            pSurfaces2[i].Data.V_ptr = pSurfaces2[i].Data.U_ptr + 1;
            pSurfaces2[i].Data.Pitch = width;
        }
    }

    sts = UnsafeNativeMethods.MFXVideoDECODE_Init(session, &mfxDecParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoDECODE_Init), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "decode.init");

    sts = UnsafeNativeMethods.MFXVideoENCODE_Init(session, &mfxEncParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Init), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "encode.init");

    sts = UnsafeNativeMethods.MFXVideoVPP_Init(session, &mfxVPPParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
    {
        warnings.Add(nameof(UnsafeNativeMethods.MFXVideoVPP_Init), sts);
        sts = 0;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "vpp.init");

    // Retrieve video parameters selected by the encoder;
    // BufferSizeInKB is required to size the bitstream buffers.
    var par = new mfxVideoParam();
    sts = UnsafeNativeMethods.MFXVideoENCODE_GetVideoParam(session, &par);
    QuickSyncStatic.ThrowOnBadStatus(sts, "enc.getvideoparams");

    // Create a task pool to improve asynchronous performance (greater GPU utilization).
    taskPoolSize = mfxEncParams.AsyncDepth; // number of tasks that can be submitted before synchronizing is required
    pTasks = (Task*)MyAllocHGlobalAndZero(sizeof(Task) * taskPoolSize);
    for (int i = 0; i < taskPoolSize; i++)
    {
        // Prepare the Media SDK bitstream buffer for this task.
        pTasks[i].mfxBS.MaxLength = (uint)(par.mfx.BufferSizeInKB * 1000);
        pTasks[i].mfxBS.Data = MyAllocHGlobalAndZero((int)pTasks[i].mfxBS.MaxLength);
        Trace.Assert(pTasks[i].mfxBS.Data != IntPtr.Zero);
    }
}
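A worked instance of the sizing formula quoted in the encoder constructor, BufferSizeInKB = 4 + (Width * Height * BytesPerPx + 1023) / 1024, evaluated for a 1920x1080 NV12 (1.5 bytes per pixel) picture:

int w = 1920, h = 1080;
double bytesPerPx = 1.5;                                // NV12
int kb = 4 + (int)((w * h * bytesPerPx + 1023) / 1024); // = 3042 KB
uint taskBufferBytes = (uint)(kb * 1000);               // per-task allocation, as above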
unsafe static public Bitmap GetBitmap(this ILowLevelDecoder decoder, mfxFrameSurface1 surface)
{
    Trace.Assert(surface.Info.FourCC == FourCC.RGB4, "For vidmem, AsBitmap only works on RGB4 currently");

    Bitmap bmp = new Bitmap(surface.Info.CropW, surface.Info.CropH, PixelFormat.Format32bppArgb);
    Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
    BitmapData bmpData = bmp.LockBits(rect, ImageLockMode.ReadWrite, bmp.PixelFormat);

    // Address of the first destination line.
    IntPtr ptr = bmpData.Scan0;

    // Lock the surface so its plane pointers (Data) become valid, then read them back.
    decoder.LockFrame((IntPtr)(&surface));
    mfxFrameData fd = surface.Data;
    int pitch = fd.PitchHigh << 16 | fd.PitchLow;

    if (pitch == bmpData.Stride)
    {
        // Same pitch: one contiguous copy.
        FastMemcpyMemmove.memcpy(ptr, fd.B, pitch * surface.Info.CropH);
    }
    else
    {
        // Different pitches: copy row by row.
        int minpitch = Math.Min(pitch, bmpData.Stride);
        for (int i = 0; i < surface.Info.CropH; i++)
        {
            FastMemcpyMemmove.memcpy(ptr + bmpData.Stride * i, fd.B + pitch * i, minpitch);
        }
    }

    decoder.UnlockFrame((IntPtr)(&surface));
    bmp.UnlockBits(bmpData);
    return bmp;
}
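A usage sketch: saving one decoded RGB4 surface to disk (the decoder's VPP output must have been configured as FourCC.RGB4, per the assert above):

using (Bitmap bmp = decoder.GetBitmap(surface))
{
    bmp.Save("frame.png", System.Drawing.Imaging.ImageFormat.Png);
}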