/// <summary>
/// Dispose pattern implementation.
/// </summary>
/// <param name="disposing">true when called from Dispose(); false when called from a finalizer.</param>
protected virtual void Dispose(bool disposing)
{
    if (!disposed)
    {
        if (disposing)
        {
            // Free other state (managed objects).
        }

        if (plugin_uid != null && plugin_uid.Length == 16)
        {
            mfxStatus sts = mfxStatus.MFX_ERR_NONE;
            fixed (byte* uid = plugin_uid)
                sts = UnsafeNativeMethods.MFXVideoUSER_UnLoad(session, uid);
            QuickSyncStatic.ThrowOnBadStatus(sts, "MFXVideoUSER_UnLoad");
        }

        foreach (var item in pinningHandles)
        {
            item.Free();
        }

        // Set large fields to null.
        disposed = true;
    }
}
public unsafe void AllocFrames(mfxFrameAllocRequest* req, mfxFrameAllocResponse* resp)
{
    // if ( req->Type)
    var sts = VideoAccelerationSupportPInvoke.VideoAccelerationSupport_Alloc(acceleratorHandle, req, resp);
    QuickSyncStatic.ThrowOnBadStatus(sts, "VideoAccelerationSupport_Alloc");
}
void GetBitstreamIfAny(ref BitStreamChunk bsc)
{
    mfxStatus sts = 0;
    bsc.bytesAvailable = 0;

    Trace.Assert(pTasks[nFirstSyncTask].syncp.sync_ptr != null);

    // No more free tasks, need to sync
    sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, pTasks[nFirstSyncTask].syncp, 60000);
    QuickSyncStatic.ThrowOnBadStatus(sts, "syncoper");

    // sts = WriteBitStreamFrame(&pTasks[nFirstSyncTask].mfxBS, fSink);
    // MSDK_BREAK_ON_ERROR(g);

    int n = (int)pTasks[nFirstSyncTask].mfxBS.DataLength;
    if (bsc.bitstream == null || bsc.bitstream.Length < n)
    {
        bsc.bitstream = new byte[pTasks[nFirstSyncTask].mfxBS.MaxLength];
    }
    Trace.Assert(pTasks[nFirstSyncTask].mfxBS.DataOffset == 0);
    Marshal.Copy(pTasks[nFirstSyncTask].mfxBS.Data, bsc.bitstream, 0, n);
    bsc.bytesAvailable = n;

    pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
    pTasks[nFirstSyncTask].syncp.sync_ptr = null;
    nFirstSyncTask = (nFirstSyncTask + 1) % pTasks.Length;
}
/// <summary>Retrieve one queued bitstream chunk, if any, during flushing.</summary>
/// <param name="bsc">Receives the bitstream data of one frame, if available.</param>
/// <returns>true: a chunk was produced, call me again; false: nothing left to flush.</returns>
public bool Flush4(ref BitStreamChunk bsc)
{
    mfxStatus sts;
    bsc.bytesAvailable = 0;

    while (pTasks[nFirstSyncTask].syncp.sync != IntPtr.Zero)
    {
        sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, pTasks[nFirstSyncTask].syncp, 60000);
        QuickSyncStatic.ThrowOnBadStatus(sts, "syncOper");

        if (bsc.bitstream == null || bsc.bitstream.Length < pTasks[nFirstSyncTask].mfxBS.DataLength)
        {
            bsc.bitstream = new byte[pTasks[nFirstSyncTask].mfxBS.DataLength];
        }
        Trace.Assert(pTasks[nFirstSyncTask].mfxBS.DataOffset == 0);
        Marshal.Copy(pTasks[nFirstSyncTask].mfxBS.Data, bsc.bitstream, 0, (int)pTasks[nFirstSyncTask].mfxBS.DataLength);
        bsc.bytesAvailable = (int)pTasks[nFirstSyncTask].mfxBS.DataLength;

        // WriteBitStreamFrame(pTasks[nFirstSyncTask].mfxBS, outbs);
        //MSDK_BREAK_ON_ERROR(sts);

        pTasks[nFirstSyncTask].syncp.sync = IntPtr.Zero;
        pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
        pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
        nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;
        return true;
    }
    return false;
}
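// Illustrative drain loop for the method above (hypothetical caller; `encoder` and
// `sink` are assumed names, not part of this class). Flush4 returns true each time
// it hands back a chunk, so the loop simply runs until it reports false.
//
//   var chunk = new BitStreamChunk();
//   while (encoder.Flush4(ref chunk))
//   {
//       if (chunk.bytesAvailable > 0)
//           sink.Write(chunk.bitstream, 0, chunk.bytesAvailable);
//   }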
/// <summary>Builds the transcoder configuration from a seekable stream.</summary>
/// <param name="inStream">The input stream; its position is restored after the header is parsed.</param>
/// <param name="inputCodecId">The input codec identifier.</param>
/// <param name="outputCodecId">The output codec identifier.</param>
/// <param name="implementation">The Media SDK implementation to use.</param>
/// <param name="useOpaqueSurfaces">if set to <c>true</c>, use opaque surfaces.</param>
/// <returns>A populated <see cref="TranscoderConfiguration"/>.</returns>
public static TranscoderConfiguration BuildTranscoderConfigurationFromStream(Stream inStream, CodecId inputCodecId, CodecId outputCodecId, mfxIMPL implementation = mfxIMPL.MFX_IMPL_AUTO, bool useOpaqueSurfaces = true)
{
    TranscoderConfiguration config = new TranscoderConfiguration();

    long oldposition = inStream.Position;
    config.decParams = QuickSyncStatic.DecodeHeader(inStream, inputCodecId, implementation);
    inStream.Position = oldposition;

    //config.decParams.mfx.CodecId was set in the call above
    //config.encParams.mfx.CodecId will get set below

    int width = config.decParams.mfx.FrameInfo.CropW;
    int height = config.decParams.mfx.FrameInfo.CropH;

    config.vppParams = TranscoderSetupVPPParameters(width, height);
    config.encParams = TranscoderSetupEncoderParameters(width, height, outputCodecId);

    config.decParams.IOPattern = IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    config.vppParams.IOPattern = IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY | IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
    config.encParams.IOPattern = IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY;

    // Configure Media SDK to keep more operations in flight
    // - AsyncDepth represents the number of tasks that can be submitted before synchronizing is required
    ushort asyncdepth = 4;
    config.decParams.AsyncDepth = asyncdepth;
    config.encParams.AsyncDepth = asyncdepth;
    config.vppParams.AsyncDepth = asyncdepth;

    return config;
}
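// Usage sketch for the builder above (hypothetical caller; the file name and codec
// pair are assumptions). The stream must be seekable, since the header probe rewinds it.
//
//   using (var fs = File.OpenRead("input.h264"))
//   {
//       TranscoderConfiguration cfg = BuildTranscoderConfigurationFromStream(
//           fs, CodecId.MFX_CODEC_AVC, CodecId.MFX_CODEC_HEVC);
//       // cfg.decParams / cfg.vppParams / cfg.encParams now share AsyncDepth = 4
//       // and a system-memory IOPattern chain, ready to hand to the transcoder.
//   }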
/// <summary>
/// Rounds height up to a value divisible by 16 when picstruct is
/// MFX_PICSTRUCT_PROGRESSIVE, otherwise (interlaced content) to a value divisible by 32.
/// </summary>
/// <param name="height">Height in pixels.</param>
/// <param name="picstruct">Used to decide between 16 and 32 for rounding up.</param>
/// <returns>The aligned height.</returns>
public static ushort AlignHeightTo32or16(int height, PicStruct picstruct)
{
    ushort v = (PicStruct.MFX_PICSTRUCT_PROGRESSIVE == picstruct)
        ? (ushort)QuickSyncStatic.ALIGN16(height)
        : (ushort)QuickSyncStatic.ALIGN32(height);
    return v;
}
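// Worked examples (values follow directly from the ALIGN16/ALIGN32 choice above):
//   AlignHeightTo32or16(1080, PicStruct.MFX_PICSTRUCT_PROGRESSIVE) -> 1088   (ALIGN16)
//   AlignHeightTo32or16(720,  PicStruct.MFX_PICSTRUCT_PROGRESSIVE) -> 720    (already a multiple of 16)
//   AlignHeightTo32or16(720,  someInterlacedPicStruct)             -> 736    (ALIGN32)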
public unsafe IntPtr FrameGetHandle(IntPtr mid)
{
    IntPtr handle;
    mfxStatus sts;
    sts = VideoAccelerationSupportPInvoke.VideoAccelerationSupport_GetFrameHDL(acceleratorHandle, mid, &handle);
    QuickSyncStatic.ThrowOnBadStatus(sts, "VideoAccelerationSupport_GetFrameHDL");
    return handle;
}
public unsafe IntPtr DeviceGetHandle(mfxHandleType type)
{
    mfxStatus sts;
    IntPtr handle;
    sts = VideoAccelerationSupportPInvoke.VideoAccelerationSupport_DeviceGetHandle(acceleratorHandle, type, &handle);
    QuickSyncStatic.ThrowOnBadStatus(sts, "VideoAccelerationSupport_DeviceGetHandle");
    return handle;
}
/// <summary>Get a human readable implementation description.</summary>
/// <param name="session">The session.</param>
/// <returns>A string describing the implementation in use.</returns>
public static string ImplementationString(mfxSession session)
{
    mfxIMPL impl;
    var sts = UnsafeNativeMethods.MFXQueryIMPL(session, &impl);
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXQueryIMPL));
    return ImplementationString(impl);
}
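// Usage sketch (hypothetical caller, assuming an initialized mfxSession):
//
//   mfxSession session = ...;   // created via MFXInit elsewhere
//   Console.WriteLine(ImplementationString(session));
//   // prints a hardware/software plus D3D9/D3D11/VAAPI description; the exact
//   // text depends on the mfxIMPL-formatting overload this forwards to.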
public int GetFreeFrameIndex()
{
    int i = NativeLLEncoderUnsafeNativeMethods.NativeEncoder_GetFreeFrameIndex(h);
    if (i < 0)
    {
        QuickSyncStatic.ThrowOnBadStatus((mfxStatus)i, nameof(NativeLLEncoderUnsafeNativeMethods.NativeEncoder_GetFreeFrameIndex));
    }
    return i;
}
public unsafe void EncodeFrame(int frameIndex, ref BitStreamChunk bitstreamChunk)
{
    bitstreamChunk.bytesAvailable = 0;
    var a = frameIntPtrs[frameIndex];
    var sts = NativeLLEncoderUnsafeNativeMethods.NativeEncoder_EncodeFrame(h, (mfxFrameSurface1*)a);
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(NativeLLEncoderUnsafeNativeMethods.NativeEncoder_EncodeFrame));
    CopyOutBitstream(ref bitstreamChunk);
}
bool Flush1(ref BitStreamChunk bsc)
{
    bsc.bytesAvailable = 0;
    if (GetBitstreamIfFull(ref bsc))
    {
        return true;
    }

    mfxStatus sts = 0;
    int nTaskIdx = GetFreeTaskIndex(pTasks);   // Find free task
    Trace.Assert((int)mfxStatus.MFX_ERR_NOT_FOUND != nTaskIdx);

    for (;;)
    {
        // Encode a frame asynchronously (returns immediately)
        fixed (mfxBitstream* b = &pTasks[nTaskIdx].mfxBS)
        fixed (mfxSyncPoint* c = &pTasks[nTaskIdx].syncp)
            sts = UnsafeNativeMethods.MFXVideoENCODE_EncodeFrameAsync(session, null, null, b, c);

        if (mfxStatus.MFX_ERR_NONE < sts && !(pTasks[nTaskIdx].syncp.sync_ptr != null))
        {
            // Repeat the call if warning and no output
            if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
            {
                Thread.Sleep(1);   // Wait if device is busy, then repeat the same call
            }
        }
        else if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync_ptr != null)
        {
            sts = mfxStatus.MFX_ERR_NONE;   // Ignore warnings if output is available
            break;
        }
        else
        {
            break;
        }
    }

    // MFX_ERR_MORE_DATA means the input has ended; nothing more to buffer, exit in case of other errors
    //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;   // no more to flush here
    }
    sts = mfxStatus.MFX_ERR_NONE;
    //MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    QuickSyncStatic.ThrowOnBadStatus(sts, "flush1.encodeFrameAsync");
    return true;   // yes, call me again, more to flush
}
public unsafe void UnlockFrame(IntPtr memid, mfxFrameData* ptr = null)
{
    if (ptr == null)
    {
        ptr = &(((mfxFrameSurface1*)memid)->Data);
    }
    mfxStatus sts;
    // fixed (mfxFrameData* p = &ptr)
    sts = VideoAccelerationSupportPInvoke.VideoAccelerationSupport_UnlockFrame(acceleratorHandle, memid, ptr);
    QuickSyncStatic.ThrowOnBadStatus(sts, "VideoAccelerationSupport_UnlockFrame");
}
bool Flush2(ref BitStreamChunk bitstreamChunk)
{
    bitstreamChunk.bytesAvailable = 0;
    var sts = NativeLLEncoderUnsafeNativeMethods.NativeEncoder_Flush2(h);
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(NativeLLEncoderUnsafeNativeMethods.NativeEncoder_Flush2));
    CopyOutBitstream(ref bitstreamChunk);
    return true;
}
/// <returns>
/// true: keep calling me
/// false: this phase is done
/// </returns>
bool Flush2(mfxFrameSurface1** frame)
{
    *frame = (mfxFrameSurface1*)0;
    // mfxSyncPoint syncpD;
    mfxFrameSurface1* pmfxOutSurface = (mfxFrameSurface1*)0;
    // bool UseVPP = false;
    // if (UseVPP)
    return false;
#if false
    //
    // Stage 3: Retrieve the buffered VPP frames
    //
    //while (MFX_ERR_NONE <= sts) {
    int nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2, nSurfNumVPPOut);   // Find free frame surface
    QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex2, "cannot find free surface");

    // Process a frame asynchronously (returns immediately)
    sts = mfxVPP->RunFrameVPPAsync(NULL, pmfxSurfaces2[nIndex2], NULL, &syncpV);
    if (MFX_ERR_MORE_DATA == sts)
    {
        return (sts);   // continue;
    }
    MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_SURFACE);
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    //MSDK_BREAK_ON_ERROR(sts);

    sts = session.SyncOperation(syncpV, 60000);   // Synchronize. Wait until frame processing is ready
    MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);

    ++nFrame;
    if (bEnableOutput)
    {
        //sts = WriteRawFrame(pmfxSurfaces2[nIndex2], fSink);
        //MSDK_BREAK_ON_ERROR(sts);
        *frame = pmfxSurfaces2[nIndex2];
        return (sts);
        //printf("Frame number: %d\r", nFrame);
    }
    //}

    // MFX_ERR_MORE_DATA indicates that all buffers have been fetched, exit in case of other errors
    //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    //MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
#endif
}
/// <summary>Reads the file header information.</summary>
/// <param name="codecId">The codec identifier.</param>
/// <param name="impl">The implementation.</param>
/// <param name="infs">The input stream; its position is restored afterwards.</param>
/// <param name="outIOPattern">The output IO pattern to stamp on the returned parameters.</param>
/// <returns>Video parameters describing the stream.</returns>
public static unsafe mfxVideoParam ReadFileHeaderInfo(CodecId codecId, mfxIMPL impl, Stream infs, IOPattern outIOPattern)
{
    long oldposition = infs.Position;
    var buf = new byte[65536];   // available after init
    int n = infs.Read(buf, 0, buf.Length);
    if (n < buf.Length)
    {
        Array.Resize(ref buf, n);
    }
    infs.Position = oldposition;

    var decoderParameters = QuickSyncStatic.DecodeHeader(buf, codecId, impl);
    decoderParameters.IOPattern = outIOPattern;
    return decoderParameters;
}
//static public IEnumerable<byte[]> DecodeStream(Stream s, FourCC fourcc = FourCC.NV12, AccelerationLevel acceleration = AccelerationLevel.BestAvailableAccelerationUseGPUorCPU)
//{
//    return null;
//}

/// <summary>
/// Construct the decoder.
/// </summary>
/// <param name="stream">Stream to be read from</param>
/// <param name="codecId">What format the bitstream is in: AVC, HEVC, MJPEG, ...</param>
/// <param name="impl">Implementation to use</param>
/// <param name="outIOPattern">Memory type for decoding</param>
public StreamDecoder(Stream stream, CodecId codecId, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO, IOPattern outIOPattern = IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY)
{
    long oldposition = stream.Position;
    var buf = new byte[65536];   // available after init
    int n = stream.Read(buf, 0, buf.Length);
    if (n < buf.Length)
    {
        Array.Resize(ref buf, n);
    }
    stream.Position = oldposition;

    this.decoderParameters = QuickSyncStatic.DecodeHeader(buf, codecId, impl);
    this.decoderParameters.IOPattern = outIOPattern;

    lowLevelDecoder = new LowLevelDecoder(decoderParameters, null, impl);
    Init(stream);
}
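// Construction sketch (hypothetical caller): the constructor peeks up to 64 KiB to
// parse the header and then rewinds, so the stream must be seekable.
//
//   using (var fs = File.OpenRead("input.h264"))
//   {
//       var decoder = new StreamDecoder(fs, CodecId.MFX_CODEC_AVC);
//       // decoder is now initialized with the stream's width/height/etc.,
//       // decoding into system memory by default.
//   }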
/// <summary>Attempts to decode a byte array using codecId as the format indicator.
/// If the array is decodable, stream parameters such as width, height, etc. will be returned.</summary>
/// <param name="bitstream">The bitstream.</param>
/// <param name="codecId">The codec identifier.</param>
/// <param name="impl">The implementation.</param>
/// <returns>A video parameter structure describing the bitstream.</returns>
public static mfxVideoParam DecodeHeader(byte[] bitstream, CodecId codecId, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO)
{
    mfxVideoParam mfxDecParam = new mfxVideoParam();
    mfxStatus sts;

    var v = new mfxVersion();
    v.Major = 1;
    v.Minor = 0;

    var session = new mfxSession();
    sts = UnsafeNativeMethods.MFXInit(impl, &v, &session);
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");

    try
    {
        var bs = new mfxBitstream();
        mfxDecParam.mfx.CodecId = codecId;
        mfxDecParam.IOPattern = IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
        fixed (byte* pp = &bitstream[0])
        {
            // bs.Data_ptr = p;
            bs.Data = (IntPtr)pp;
            bs.DataLength = (uint)bitstream.Length;
            bs.MaxLength = (uint)bitstream.Length;
            bs.DataOffset = 0;
            sts = UnsafeNativeMethods.MFXVideoDECODE_DecodeHeader(session, &bs, &mfxDecParam);
            QuickSyncStatic.ThrowOnBadStatus(sts, "decodeheader");
        }
    }
    finally
    {
        UnsafeNativeMethods.MFXClose(session);
    }

    // We do not want this to be the source of IOPattern;
    // it must be set in another place so it doesn't default to sysmem.
    mfxDecParam.IOPattern = (IOPattern)0;
    return mfxDecParam;
}
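// Probe sketch for DecodeHeader (hypothetical caller): feed it a prefix of the raw
// bitstream; a 64 KiB chunk is what the callers above use.
//
//   mfxVideoParam p = QuickSyncStatic.DecodeHeader(prefixBytes, CodecId.MFX_CODEC_AVC);
//   Console.WriteLine($"{p.mfx.FrameInfo.CropW}x{p.mfx.FrameInfo.CropH}");
//   // Note: DecodeHeader deliberately clears IOPattern; set it before using the
//   // parameters to initialize a decoder.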
unsafe public LowLevelEncoderNative(mfxVideoParam mfxEncParams, mfxIMPL impl)
{
    mfxStatus sts;

    this._session = new mfxSession();
    var ver = new mfxVersion() { Major = 1, Minor = 3 };
    fixed (mfxSession* s = &_session)
        sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXInit));

    h = NativeLLEncoderUnsafeNativeMethods.NativeEncoder_New();
    Trace.Assert(h != IntPtr.Zero);

    shared = (EncoderShared*)h;
    shared->session = _session;
    shared->mfxEncParams = mfxEncParams;
    Trace.Assert(shared->safety == sizeof(EncoderShared));

    sts = NativeLLEncoderUnsafeNativeMethods.NativeEncoder_Init(h);
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(NativeLLEncoderUnsafeNativeMethods.NativeEncoder_Init));

    frameIntPtrs = new IntPtr[shared->nEncSurfNum];
    for (int i = 0; i < frameIntPtrs.Length; i++)
    {
        frameIntPtrs[i] = (IntPtr)shared->pmfxSurfaces[i];
    }

    GetAndPrintWarnings();
}
public bool Flush2(out mfxFrameSurface1? frame)
{
    mfxFrameSurface1* p = null;
    frame = null;
    var sts = NativeLLDecoderUnsafeNativeMethods.NativeDecoder_Flush2(h, &p);
    if (sts == mfxStatus.MFX_ERR_MORE_SURFACE)   // decoder needs to be called again, it is eating memory. SW mode
    {
        return true;
    }
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(NativeLLDecoderUnsafeNativeMethods.NativeDecoder_Flush2));
    if (p != null)
    {
        frame = *p;
    }
    return true;
}
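// Drain sketch for the native decoder's Flush2 (hypothetical caller): true with a
// null frame means MFX_ERR_MORE_SURFACE ("call me again"); copy each surface out
// before the next call, since the decoder may reuse it.
//
//   mfxFrameSurface1? frame;
//   while (decoder.Flush2(out frame))
//   {
//       if (frame.HasValue)
//           ConsumeFrame(frame.Value);   // ConsumeFrame is a hypothetical sink
//   }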
/// <summary>Initializes a new instance of the <see cref="LowLevelEncoderCSharp"/> class.</summary> /// <param name="mfxEncParams">The encoder parameters.</param> /// <param name="impl">The implementation.</param> public LowLevelEncoderCSharp(mfxVideoParam mfxEncParams, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO) { mfxStatus sts; session = new mfxSession(); var ver = new mfxVersion() { Major = 1, Minor = 3 }; fixed(mfxSession *s = &session) sts = UnsafeNativeMethods.MFXInit(impl, &ver, s); QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit"); //deviceSetup = new DeviceSetup(session, false); sts = UnsafeNativeMethods.MFXVideoENCODE_Query(session, &mfxEncParams, &mfxEncParams); if (sts > 0) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Query), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, "encodequery"); mfxFrameAllocRequest EncRequest; sts = UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf(session, &mfxEncParams, &EncRequest); QuickSyncStatic.ThrowOnBadStatus(sts, "queryiosurf"); EncRequest.NumFrameSuggested = (ushort)(EncRequest.NumFrameSuggested + mfxEncParams.AsyncDepth); EncRequest.Type |= (FrameMemoryType)0x2000; // WILL_WRITE; // This line is only required for Windows DirectX11 to ensure that surfaces can be written to by the application UInt16 numSurfaces = EncRequest.NumFrameSuggested; // - Width and height of buffer must be aligned, a multiple of 32 // - Frame surface array keeps pointers all surface planes and general frame info UInt16 width = (UInt16)QuickSyncStatic.ALIGN32(mfxEncParams.mfx.FrameInfo.Width); UInt16 height = (UInt16)QuickSyncStatic.ALIGN32(mfxEncParams.mfx.FrameInfo.Height); int bitsPerPixel = VideoUtility.GetBitsPerPixel(mfxEncParams.mfx.FrameInfo.FourCC); int surfaceSize = width * height * bitsPerPixel / 8; //byte[] surftaceBuffers = new byte[surfaceSize * numSurfaces]; //XXX IntPtr surfaceBuffers = Marshal.AllocHGlobal(surfaceSize * numSurfaces); byte * surfaceBuffersPtr = (byte *)surfaceBuffers; // // Allocate surface headers (mfxFrameSurface1) for decoder Frames = new mfxFrameSurface1[numSurfaces]; //MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC); for (int i = 0; i < numSurfaces; i++) { Frames[i] = new mfxFrameSurface1(); Frames[i].Info = mfxEncParams.mfx.FrameInfo; switch (mfxEncParams.mfx.FrameInfo.FourCC) { case FourCC.NV12: Frames[i].Data.Y_ptr = (byte *)surfaceBuffers + i * surfaceSize; Frames[i].Data.U_ptr = Frames[i].Data.Y_ptr + width * height; Frames[i].Data.V_ptr = Frames[i].Data.U_ptr + 1; Frames[i].Data.Pitch = width; break; case FourCC.YUY2: Frames[i].Data.Y_ptr = (byte *)surfaceBuffers + i * surfaceSize; Frames[i].Data.U_ptr = Frames[i].Data.Y_ptr + 1; Frames[i].Data.V_ptr = Frames[i].Data.U_ptr + 3; Frames[i].Data.Pitch = (ushort)(width * 2); break; default: //find sysmem_allocator.cpp for more help throw new NotImplementedException(); } } frameIntPtrs = new IntPtr[Frames.Length]; for (int i = 0; i < Frames.Length; i++) { fixed(mfxFrameSurface1 *a = &Frames[i]) frameIntPtrs[i] = (IntPtr)a; } sts = UnsafeNativeMethods.MFXVideoENCODE_Init(session, &mfxEncParams); if (sts > 0) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Init), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, "encodeinit"); mfxVideoParam par; UnsafeNativeMethods.MFXVideoENCODE_GetVideoParam(session, &par); QuickSyncStatic.ThrowOnBadStatus(sts, "encodegetvideoparam"); // from mediasdkjpeg-man.pdf // BufferSizeInKB = 4 + (Width * Height * BytesPerPx + 1023) / 1024; //where Width and Height are weight and height of the picture in pixel, BytesPerPx is 
number of //byte for one pixel.It equals to 1 for monochrome picture, 1.5 for NV12 and YV12 color formats, // 2 for YUY2 color format, and 3 for RGB32 color format(alpha channel is not encoded). if (par.mfx.BufferSizeInKB == 0 && mfxEncParams.mfx.CodecId == CodecId.MFX_CODEC_JPEG) { par.mfx.BufferSizeInKB = (ushort)((4 + (mfxEncParams.mfx.FrameInfo.CropW * mfxEncParams.mfx.FrameInfo.CropH * 3 + 1023)) / 1000); } //printf("bufsize %d\n", par.mfx.BufferSizeInKB); // Create task pool to improve asynchronous performance (greater GPU utilization) int taskPoolSize = mfxEncParams.AsyncDepth; // number of tasks that can be submitted, before synchronizing is required //Task* pTasks = stackalloc Task[taskPoolSize]; // GCHandle gch3 = GCHandle.Alloc(pTasks, GCHandleType.Pinned); pTasks = new Task[taskPoolSize]; for (int i = 0; i < taskPoolSize; i++) { // Prepare Media SDK bit stream buffer pTasks[i].mfxBS.MaxLength = (uint)(par.mfx.BufferSizeInKB * 1000); pTasks[i].mfxBS.Data = Marshal.AllocHGlobal((int)pTasks[i].mfxBS.MaxLength); Trace.Assert(pTasks[i].mfxBS.Data != IntPtr.Zero); } pinningHandles.Add(GCHandle.Alloc(pTasks, GCHandleType.Pinned)); pinningHandles.Add(GCHandle.Alloc(Frames, GCHandleType.Pinned)); }
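// The NV12 plane arithmetic used above, written out standalone for reference
// (illustrative helper, not part of the original class): NV12 is 12 bits per pixel,
// a full-resolution Y plane followed by a half-height interleaved UV plane, which is
// why U_ptr sits at Y_ptr + width * height and V_ptr one byte after U_ptr.
static (int ySize, int uvSize, int total) Nv12PlaneSizes(int width, int height)
{
    int ySize = width * height;        // 8-bit luma, one byte per pixel
    int uvSize = width * height / 2;   // interleaved U/V at half vertical resolution
    return (ySize, uvSize, ySize + uvSize);   // total == width * height * 12 / 8
}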
/// <summary>Encodes a frame.</summary>
/// <param name="frameIndex">Index of the frame to encode.</param>
/// <param name="bitStreamChunk">Output frame's bitstream data, if available</param>
public void EncodeFrame(int frameIndex, ref BitStreamChunk bitStreamChunk)
{
    mfxStatus sts = 0;
    bitStreamChunk.bytesAvailable = 0;
    GetBitstreamIfFull(ref bitStreamChunk);

    // int nEncSurfIdx = 0;
    int nTaskIdx = GetFreeTaskIndex(pTasks);   // Find free task
    Trace.Assert((int)mfxStatus.MFX_ERR_NOT_FOUND != nTaskIdx);

    //
    // Stage 1: Main encoding loop
    //
    // In the prototype this is where a raw frame was read and copied into a free
    // surface; in this class the caller passes an already-prepared frame index.
    //nEncSurfIdx = GetFreeSurfaceIndex(pmfxSurfaces);   // Find free frame surface
    //Trace.Assert(nEncSurfIdx != (int)mfxStatus.MFX_ERR_NOT_FOUND);
    // Surface locking is required when reading/writing D3D surfaces:
    //sts = mfxAllocator.Lock(mfxAllocator.pthis, pmfxSurfaces[nEncSurfIdx]->Data.MemId, &(pmfxSurfaces[nEncSurfIdx]->Data));
    //sts = LoadRawFrame(pmfxSurfaces[nEncSurfIdx], fSource);
    //int stride = pmfxSurfaces[nEncSurfIdx].Data.Pitch;
    //for (int i = 0; i < h; i++)
    //    Marshal.Copy(yuv, nsource * pfs + i * w, pmfxSurfaces[nEncSurfIdx].Data.Y + stride * i, w);
    //for (int i = 0; i < h / 2; i++)
    //    Marshal.Copy(yuv, nsource * pfs + i * w + h * w, pmfxSurfaces[nEncSurfIdx].Data.UV + stride * i, w);
    //sts = mfxAllocator.Unlock(mfxAllocator.pthis, pmfxSurfaces[nEncSurfIdx]->Data.MemId, &(pmfxSurfaces[nEncSurfIdx]->Data));

    for (;;)
    {
        // Encode a frame asynchronously (returns immediately)
        fixed (mfxFrameSurface1* a = &Frames[frameIndex])
        fixed (mfxBitstream* b = &pTasks[nTaskIdx].mfxBS)
        fixed (mfxSyncPoint* c = &pTasks[nTaskIdx].syncp)
            sts = UnsafeNativeMethods.MFXVideoENCODE_EncodeFrameAsync(session, null, a, b, c);

        if (mfxStatus.MFX_ERR_NONE < sts && !(pTasks[nTaskIdx].syncp.sync_ptr != null))
        {
            // Repeat the call if warning and no output
            if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
            {
                Thread.Sleep(1);   // Wait if device is busy, then repeat the same call
            }
        }
        else if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync_ptr != null)
        {
            sts = mfxStatus.MFX_ERR_NONE;   // Ignore warnings if output is available
            break;
        }
        else if (mfxStatus.MFX_ERR_NOT_ENOUGH_BUFFER == sts)
        {
            Trace.Assert(false);   // Allocate more bitstream buffer memory here if needed...
            break;
        }
        else
        {
            break;
        }
    }

    // MFX_ERR_MORE_DATA means the encoder buffered the input and has no output yet; exit in case of other errors
    //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        sts = mfxStatus.MFX_ERR_NONE;
    }
    //MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
    QuickSyncStatic.ThrowOnBadStatus(sts, "encodeFrameAsync");
}
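// Encode-loop sketch for this class (hypothetical caller; FillFrame, rawFrames, and
// sink are placeholders for however raw pixels arrive and wherever the bitstream goes):
//
//   var chunk = new BitStreamChunk();
//   foreach (var raw in rawFrames)
//   {
//       int idx = /* index of a free entry in Frames, e.g. via a free-surface scan */;
//       FillFrame(encoder, idx, raw);          // copy Y and UV planes, honoring Data.Pitch
//       encoder.EncodeFrame(idx, ref chunk);
//       if (chunk.bytesAvailable > 0)
//           sink.Write(chunk.bitstream, 0, chunk.bytesAvailable);
//   }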
public void UnlockFrame(IntPtr frame)
{
    var sts = NativeLLEncoderUnsafeNativeMethods.NativeEncoder_UnlockFrame(h, frame);
    QuickSyncStatic.ThrowOnBadStatus(sts, nameof(NativeLLEncoderUnsafeNativeMethods.NativeEncoder_UnlockFrame));
}
/// <summary>Get frames during the 2nd stage of flushing: drain the VPP and feed its output to the encoder.</summary>
/// <param name="bitStreamChunk">A single frame</param>
/// <returns>true if you should continue to call this method, false if you must go to the next stage.</returns>
public bool Flush2(ref BitStreamChunk bitStreamChunk)
{
    mfxSyncPoint syncpV;
    mfxFrameSurface1* pmfxOutSurface = (mfxFrameSurface1*)0;
    int nIndex2 = 0;

    bitStreamChunk.bytesAvailable = 0;
    mfxStatus sts = mfxStatus.MFX_ERR_NONE;

    //
    // Stage 3: Retrieve buffered frames from VPP
    //
    if (mfxStatus.MFX_ERR_NONE <= sts || mfxStatus.MFX_ERR_MORE_DATA == sts || mfxStatus.MFX_ERR_MORE_SURFACE == sts)
    {
        int nTaskIdx = GetFreeTaskIndex(pTasks, taskPoolSize);   // Find free task
        if ((int)mfxStatus.MFX_ERR_NOT_FOUND == nTaskIdx)
        {
            // No more free tasks, need to sync
            sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, pTasks[nFirstSyncTask].syncp, 60000);
            QuickSyncStatic.ThrowOnBadStatus(sts, "syncOper");

            if (bitStreamChunk.bitstream == null || bitStreamChunk.bitstream.Length < pTasks[nFirstSyncTask].mfxBS.DataLength)
            {
                bitStreamChunk.bitstream = new byte[pTasks[nFirstSyncTask].mfxBS.DataLength];
            }
            Trace.Assert(pTasks[nFirstSyncTask].mfxBS.DataOffset == 0);
            Marshal.Copy(pTasks[nFirstSyncTask].mfxBS.Data, bitStreamChunk.bitstream, 0, (int)pTasks[nFirstSyncTask].mfxBS.DataLength);
            bitStreamChunk.bytesAvailable = (int)pTasks[nFirstSyncTask].mfxBS.DataLength;

            // WriteBitStreamFrame(pTasks[nFirstSyncTask].mfxBS, outbs);
            //MSDK_BREAK_ON_ERROR(sts);

            pTasks[nFirstSyncTask].syncp.sync = IntPtr.Zero;
            pTasks[nFirstSyncTask].mfxBS.DataLength = 0;
            pTasks[nFirstSyncTask].mfxBS.DataOffset = 0;
            nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize;
            return true;
        }
        else
        {
            int compositeFrameIndex = 0;
            //morevpp:
            nIndex2 = GetFreeSurfaceIndex(pSurfaces2, nSurfNumVPPEnc);   // Find free frame surface
            Trace.Assert(nIndex2 != (int)mfxStatus.MFX_ERR_NOT_FOUND);

            for (;;)
            {
                var z = pmfxOutSurface;
                z = null;
                // if (compositeFrameIndex == 1)
                //     z = overlay;
                Trace.Assert(compositeFrameIndex <= 1);

                // Process a frame asynchronously (returns immediately)
                sts = UnsafeNativeMethods.MFXVideoVPP_RunFrameVPPAsync(session, z, &pSurfaces2[nIndex2], (mfxExtVppAuxData*)0, &syncpV);   // COMPOSITING

                if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync == IntPtr.Zero)
                {
                    // repeat the call if warning and no output
                    if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
                    {
                        Thread.Sleep(1);   // wait if device is busy
                    }
                }
                else if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync != IntPtr.Zero)
                {
                    sts = mfxStatus.MFX_ERR_NONE;   // ignore warnings if output is available
                    break;
                }
                else
                {
                    if (sts != mfxStatus.MFX_ERR_MORE_DATA && sts != mfxStatus.MFX_ERR_MORE_SURFACE)
                    {
                        QuickSyncStatic.ThrowOnBadStatus(sts, "vppAsync");
                    }
                    break;   // not a warning
                }
            }

            // VPP needs more data: nothing left buffered, this stage is done
            if (mfxStatus.MFX_ERR_MORE_DATA == sts)
            {
                return false;
                // compositeFrameIndex++;
                //goto morevpp;
            }
            else if (mfxStatus.MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload, therefore not handled.
                // Relevant when VPP produces more frames at output than it consumes at input,
                // e.g. framerate conversion 30 fps -> 60 fps.
                QuickSyncStatic.ThrowOnBadStatus(sts, "vpp");
            }
            else if (mfxStatus.MFX_ERR_NONE != sts)
            {
                QuickSyncStatic.ThrowOnBadStatus(sts, "vpp");
            }

            for (;;)
            {
                // Encode a frame asynchronously (returns immediately)
                //sts = mfxENC.EncodeFrameAsync(NULL, pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp);
                sts = UnsafeNativeMethods.MFXVideoENCODE_EncodeFrameAsync(session, (mfxEncodeCtrl*)0, &pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp);

                if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync == IntPtr.Zero)
                {
                    // repeat the call if warning and no output
                    if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
                    {
                        Thread.Sleep(1);   // wait if device is busy
                    }
                }
                else if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync != IntPtr.Zero)
                {
                    sts = mfxStatus.MFX_ERR_NONE;   // ignore warnings if output is available
                    break;
                }
                else if (mfxStatus.MFX_ERR_NOT_ENOUGH_BUFFER == sts)
                {
                    // Allocate more bitstream buffer memory here if needed...
                    break;
                }
                else
                {
                    if (sts != mfxStatus.MFX_ERR_MORE_DATA && sts != mfxStatus.MFX_ERR_MORE_SURFACE)
                    {
                        QuickSyncStatic.ThrowOnBadStatus(sts, "encodeAsync");
                    }
                    break;
                }
            }
        }
    }

    // MFX_ERR_MORE_DATA means the buffers have drained; go to the next stage, exit in case of other errors
    //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA);
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    QuickSyncStatic.ThrowOnBadStatus(sts, "dec or enc or vpp");
    return true;
}
public unsafe VideoAccelerationSupport(mfxSession session, bool forceSystemMemory = false)
{
    mfxStatus sts;
    mfxVersion versionMinimum = new mfxVersion() { Major = 1, Minor = 3 };

    acceleratorHandle = VideoAccelerationSupportPInvoke.VideoAccelerationSupport_New();
    Trace.Assert(acceleratorHandle != IntPtr.Zero);

    if (sizeof(IntPtr) != 8)
    {
        throw new Exception("only x64 supported at this time");
    }

    mfxIMPL ii;
    sts = UnsafeNativeMethods.MFXQueryIMPL(session, &ii);
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXQueryIMPL");

    // if (Environment.OSVersion.Platform == PlatformID.Win32NT)
    mfxIMPL viaMask = (mfxIMPL.MFX_IMPL_VIA_D3D9 | mfxIMPL.MFX_IMPL_VIA_D3D11 | mfxIMPL.MFX_IMPL_VIA_VAAPI);
    if ((ii & viaMask) == mfxIMPL.MFX_IMPL_VIA_D3D11)
    {
        isDirectX11 = true;
        memType = FrameMemType.D3D11_MEMORY;
    }
    else if ((ii & viaMask) == mfxIMPL.MFX_IMPL_VIA_D3D9)
    {
        memType = FrameMemType.D3D9_MEMORY;
    }
    else if ((ii & viaMask) == mfxIMPL.MFX_IMPL_VIA_VAAPI)
    {
        memType = FrameMemType.VAAPI_MEMORY;
    }

    //if (Environment.OSVersion.Platform == PlatformID.Win32NT)
    //{
    //    if (Environment.OSVersion.Version.Major >= 6 && Environment.OSVersion.Version.Minor >= 2)
    //        memType = MemType.D3D11_MEMORY;
    //    else
    //        memType = MemType.D3D9_MEMORY;
    //}
    //else
    //{
    //    memType = MemType.VAAPI_MEMORY;
    //}

    if (forceSystemMemory)
    {
        memType = FrameMemType.SYSTEM_MEMORY;
    }

    sts = VideoAccelerationSupportPInvoke.VideoAccelerationSupport_Init(acceleratorHandle, session, false, memType);
    QuickSyncStatic.ThrowOnBadStatus(sts, "VideoAccelerationSupport_Init");
}
/// <summary> /// /// </summary> /// <param name="frame"></param> /// <returns> /// true:keep calling me /// false:this phase done /// </returns> bool Flush1(mfxFrameSurface1 **frame) { mfxStatus sts = 0; * frame = (mfxFrameSurface1 *)0; mfxSyncPoint syncpD, syncpV; mfxFrameSurface1 *pmfxOutSurface = (mfxFrameSurface1 *)0; int nIndex = 0; int nIndex2 = 0; // // Stage 2: Retrieve the buffered decoded frames // //while (MFX_ERR_NONE <= sts || MFX_ERR_MORE_SURFACE == sts) { if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts) { Thread.Sleep(1); // Wait if device is busy, then repeat the same call to DecodeFrameAsync } nIndex = GetFreeSurfaceIndex(pmfxSurfaces); // Find free frame surface QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex, "cannot find free surface"); // Decode a frame asychronously (returns immediately) fixed(mfxFrameSurface1 *p1 = &pmfxSurfaces[nIndex]) sts = UnsafeNativeMethods.MFXVideoDECODE_DecodeFrameAsync(session, null, p1, &pmfxOutSurface, &syncpD); // Ignore warnings if output is available, // if no output and no action required just repeat the DecodeFrameAsync call if (mfxStatus.MFX_ERR_NONE < sts && syncpD.sync_ptr != null) { sts = mfxStatus.MFX_ERR_NONE; } if (!enableVPP) { if (mfxStatus.MFX_ERR_NONE == sts) { sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpD, 60000); // Synchronize. Wait until decoded frame is ready *frame = pmfxOutSurface; } } if (sts == mfxStatus.MFX_ERR_MORE_SURFACE) // decoder needs to be called again, it is eating memory.SWmode { return(true); } if (sts == mfxStatus.MFX_ERR_MORE_DATA) { return(false); } if (sts < 0) { throw new QuickSyncException("Flush1 fail", sts); } if (enableVPP && sts == mfxStatus.MFX_ERR_NONE) { fixed(mfxFrameSurface1 *p1 = &pmfxSurfaces2[nIndex2]) { nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2); // Find free frame surface QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex2, "cannot find free surface"); for (;;) { // Process a frame asychronously (returns immediately) sts = UnsafeNativeMethods.MFXVideoVPP_RunFrameVPPAsync(session, pmfxOutSurface, p1, null, &syncpV); //if (sts == MFX_WRN_VIDEO_PARAM_CHANGED) // ; if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr == null) { // repeat the call if warning and no output if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts) { Thread.Sleep(1); // wait if device is busy } } else if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr != null) { sts = mfxStatus.MFX_ERR_NONE; // ignore warnings if output is available break; } else { break; // not a warning } // VPP needs more data, let decoder decode another frame as input if (mfxStatus.MFX_ERR_MORE_DATA == sts) { //continue; return(false); } else if (mfxStatus.MFX_ERR_MORE_SURFACE == sts) { // Not relevant for the illustrated workload! Therefore not handled. // Relevant for cases when VPP produces more frames at output than consumes at input. E.g. framerate conversion 30 fps -> 60 fps //break; return(true); } else if (sts < 0) { throw new QuickSyncException("RunFrameVPPAsync fail", sts); } // MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts); //MSDK_BREAK_ON_ERROR(sts); } if (mfxStatus.MFX_ERR_NONE == sts && syncpV.sync != IntPtr.Zero) { sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpV, 60000); // Synchronize. Wait until decoded frame is ready *frame = p1; } } } return(true); //} // MFX_ERR_MORE_DATA means that decoder is done with buffered frames, need to go to VPP buffering loop, exit in case of other errors //MSDK_IGNORE_MFX_STS(sts, MFX_ERR_MORE_DATA); //MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts); }
/// <summary>
/// Constructor
/// </summary>
public LowLevelDecoderCSharp(mfxVideoParam mfxDecParamsX, mfxVideoParam? VPPParamsX = null, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO)
{
    mfxStatus sts;
    bool enableVPP = VPPParamsX != null;

    if (VPPParamsX == null)
    {
        // Create default VPP parameters
        var foo = new mfxVideoParam();
        foo.AsyncDepth = 1;
        foo.IOPattern = IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY | IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY;
        foo.vpp.In = mfxDecParamsX.mfx.FrameInfo;
        foo.vpp.Out = mfxDecParamsX.mfx.FrameInfo;
        VPPParamsX = foo;
    }

    mfxVideoParam VPPParams = VPPParamsX != null ? VPPParamsX.Value : new mfxVideoParam();
    mfxVideoParam mfxDecParams = mfxDecParamsX;

    // NOTE: if we are worried about interop issues with structures moving during GC,
    // just pin every single blittable structure here.
    pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces, GCHandleType.Pinned));
    pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces2, GCHandleType.Pinned));
    //pinningHandles.Add(GCHandle.Alloc(struct1, GCHandleType.Pinned));

    this.videoParam = mfxDecParams;
    this.enableVPP = enableVPP;

    session = new mfxSession();
    var ver = new mfxVersion() { Major = 1, Minor = 3 };
    fixed (mfxSession* s = &session)
        sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");

    bool decVideoMemOut = (mfxDecParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_VIDEO_MEMORY) != 0;
    bool vppVideoMemIn = (VPPParams.IOPattern & IOPattern.MFX_IOPATTERN_IN_VIDEO_MEMORY) != 0;
    bool vppVideoMemOut = (VPPParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_VIDEO_MEMORY) != 0;

    Trace.Assert(!enableVPP || decVideoMemOut == vppVideoMemIn,
        "When the VPP is enabled, the memory type from DEC into VPP must be of same type");

    if (vppVideoMemIn || vppVideoMemOut)
    {
        // To use video memory, we need a way to allocate Direct3D or VAAPI frames
        videoAccelerationSupport = new VideoAccelerationSupport(session);
    }

    fixed (mfxFrameAllocRequest* p = &DecRequest)
        sts = UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf(session, &mfxDecParams, p);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { sts = 0; }
    QuickSyncStatic.ThrowOnBadStatus(sts, "DECODE_QueryIOSurf");

    if (enableVPP)
    {
        fixed (mfxFrameAllocRequest* p = &VPPRequest[0])
            sts = UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf(session, &VPPParams, p);
        if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { sts = 0; }
        QuickSyncStatic.ThrowOnBadStatus(sts, "VPP_QueryIOSurf");

        VPPRequest[1].Type |= FrameMemoryType.WILL_READ;
    }

    //mfxU16 nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested;
    //mfxU16 nSurfNumVPPOut = VPPRequest[1].NumFrameSuggested;
    int nSurfNumVPPOut = 0;
    var numSurfaces = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested + VPPParams.AsyncDepth;
    if (enableVPP)
    {
        nSurfNumVPPOut = 0 + VPPRequest[1].NumFrameSuggested + VPPParams.AsyncDepth;
    }

    bitstreamBuffer = Marshal.AllocHGlobal(defaultBitstreamBufferSize);
    bitstream.Data = bitstreamBuffer;
    bitstream.DataLength = 0;
    bitstream.MaxLength = (uint)defaultBitstreamBufferSize;
    bitstream.DataOffset = 0;

    //allocate decoder frames via DirectX
    mfxFrameAllocResponse DecResponse = new mfxFrameAllocResponse();
    if (decVideoMemOut)
    {
        DecRequest.NumFrameMin = DecRequest.NumFrameSuggested = (ushort)numSurfaces;
        fixed (mfxFrameAllocRequest* p = &DecRequest)
            videoAccelerationSupport.AllocFrames(p, &DecResponse);
    }

    //allocate vpp frames via DirectX
    mfxFrameAllocResponse EncResponse = new mfxFrameAllocResponse();
    if (vppVideoMemOut)
    {
        VPPRequest[1].NumFrameMin = VPPRequest[1].NumFrameSuggested = (ushort)nSurfNumVPPOut;
        fixed (mfxFrameAllocRequest* p = &VPPRequest[1])
            videoAccelerationSupport.AllocFrames(p, &EncResponse);
    }

    // Allocate surfaces for decoder
    // - Width and height of buffer must be aligned, a multiple of 32
    // - Frame surface array keeps pointers to all surface planes and general frame info
    UInt16 width = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Width);
    UInt16 height = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Height);
    int bitsPerPixel = VideoUtility.GetBitsPerPixel(mfxDecParams.mfx.FrameInfo.FourCC);
    int surfaceSize = width * height * bitsPerPixel / 8;
    //byte[] surfaceBuffers = new byte[surfaceSize * numSurfaces];   //XXX
    if (!decVideoMemOut)
    {
        surfaceBuffers = Marshal.AllocHGlobal(surfaceSize * numSurfaces);
    }

    // Allocate surface headers (mfxFrameSurface1) for decoder
    pmfxSurfaces = new mfxFrameSurface1[numSurfaces];
    pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces, GCHandleType.Pinned));
    //MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);
    for (int i = 0; i < numSurfaces; i++)
    {
        pmfxSurfaces[i] = new mfxFrameSurface1();
        pmfxSurfaces[i].Info = mfxDecParams.mfx.FrameInfo;

        if (!decVideoMemOut)
        {
            switch (mfxDecParams.mfx.FrameInfo.FourCC)
            {
                case FourCC.NV12:
                    pmfxSurfaces[i].Data.Y_ptr = (byte*)surfaceBuffers + i * surfaceSize;
                    pmfxSurfaces[i].Data.U_ptr = pmfxSurfaces[i].Data.Y_ptr + width * height;
                    pmfxSurfaces[i].Data.V_ptr = pmfxSurfaces[i].Data.U_ptr + 1;
                    pmfxSurfaces[i].Data.Pitch = width;
                    break;
                case FourCC.YUY2:
                    pmfxSurfaces[i].Data.Y_ptr = (byte*)surfaceBuffers + i * surfaceSize;
                    pmfxSurfaces[i].Data.U_ptr = pmfxSurfaces[i].Data.Y_ptr + 1;
                    pmfxSurfaces[i].Data.V_ptr = pmfxSurfaces[i].Data.Y_ptr + 3;   // YUY2 packing is Y0 U0 Y1 V0
                    pmfxSurfaces[i].Data.Pitch = (ushort)(width * 2);
                    break;
                default:   // see sysmem_allocator.cpp for more help
                    throw new NotImplementedException();
            }
        }
        else
        {
            pmfxSurfaces[i].Data.MemId = DecResponse.mids_ptr[i];   // MID (memory id) represents one D3D NV12 surface
        }
    }

    if (enableVPP)
    {
        UInt16 width2 = (UInt16)QuickSyncStatic.ALIGN32(VPPRequest[1].Info.CropW);
        UInt16 height2 = (UInt16)QuickSyncStatic.ALIGN32(VPPRequest[1].Info.CropH);
        int bitsPerPixel2 = VideoUtility.GetBitsPerPixel(VPPParams.vpp.Out.FourCC);   // NV12 format is a 12 bits per pixel format
        int surfaceSize2 = width2 * height2 * bitsPerPixel2 / 8;
        int pitch2 = width2 * bitsPerPixel2 / 8;

        if (!vppVideoMemOut)
        {
            surfaceBuffers2 = Marshal.AllocHGlobal(surfaceSize2 * nSurfNumVPPOut);
        }

        pmfxSurfaces2 = new mfxFrameSurface1[nSurfNumVPPOut];
        pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces2, GCHandleType.Pinned));
        //MSDK_CHECK_POINTER(pmfxSurfaces, MFX_ERR_MEMORY_ALLOC);
        for (int i = 0; i < nSurfNumVPPOut; i++)
        {
            pmfxSurfaces2[i] = new mfxFrameSurface1();
            pmfxSurfaces2[i].Info = VPPParams.vpp.Out;

            if (!vppVideoMemOut)
            {
                pmfxSurfaces2[i].Data.Pitch = (ushort)pitch2;
                switch (VPPParams.vpp.Out.FourCC)
                {
                    case FourCC.NV12:
                        pmfxSurfaces2[i].Data.Y_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2;
                        pmfxSurfaces2[i].Data.U_ptr = pmfxSurfaces2[i].Data.Y_ptr + width2 * height2;   // use the VPP output dimensions, not the decoder's
                        pmfxSurfaces2[i].Data.V_ptr = pmfxSurfaces2[i].Data.U_ptr + 1;
                        break;
                    case FourCC.RGB4:
                        pmfxSurfaces2[i].Data.B_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2;
                        pmfxSurfaces2[i].Data.G_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2 + 1;
                        pmfxSurfaces2[i].Data.R_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2 + 2;
                        // pmfxSurfaces2[i].Data.A_ptr = (byte*)surfaceBuffers2 + i * surfaceSize2 + 3;
                        break;
                    default:
                        break;
                }
            }
            else
            {
                pmfxSurfaces2[i].Data.MemId = EncResponse.mids_ptr[i];   // MID (memory id) represents one D3D NV12 surface
            }
        }
    }

    sts = UnsafeNativeMethods.MFXVideoDECODE_Init(session, &mfxDecParams);
    if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { sts = 0; }
    QuickSyncStatic.ThrowOnBadStatus(sts, "MFXVideoDECODE_Init");

    if (enableVPP)
    {
        sts = UnsafeNativeMethods.MFXVideoVPP_Init(session, &VPPParams);
        if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { sts = 0; }
        QuickSyncStatic.ThrowOnBadStatus(sts, "MFXVideoVPP_Init");
    }
}
/// <summary>
/// Place a decoded frame in 'frame' if one is available.
/// </summary>
/// <param name="frame">Where to put the frame.</param>
/// <returns>
/// true: keep calling me
/// false: this phase is done
/// </returns>
bool DecodeFrame(mfxFrameSurface1** frame)
{
    mfxStatus sts = 0;
    *frame = (mfxFrameSurface1*)0;
    mfxSyncPoint syncpD;
    mfxSyncPoint syncpV;
    mfxFrameSurface1* pmfxOutSurface = (mfxFrameSurface1*)0;
    int nIndex = 0;
    int nIndex2 = 0;

    //
    // Stage 1: Main decoding loop
    //
    if (mfxStatus.MFX_ERR_NONE <= sts || mfxStatus.MFX_ERR_MORE_DATA == sts || mfxStatus.MFX_ERR_MORE_SURFACE == sts)
    {
        if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
        {
            Thread.Sleep(1);   // Wait if device is busy, then repeat the same call to DecodeFrameAsync
        }

        //if (MFX_ERR_MORE_DATA == sts) {
        //    sts = ReadBitStreamData(&config.mfxBS, fSource);   // Read more data into input bit stream
        //    MSDK_BREAK_ON_ERROR(sts);
        //}

        foo:
        if (mfxStatus.MFX_ERR_MORE_SURFACE == sts || mfxStatus.MFX_ERR_NONE == sts)
        {
            nIndex = GetFreeSurfaceIndex(pmfxSurfaces);   // Find free frame surface
            QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex, "cannot find free surface");
        }

        // Decode a frame asynchronously (returns immediately)
        // - If the input bitstream contains multiple frames, DecodeFrameAsync starts decoding them and removes them from the bitstream
        // (it might have been better to use Marshal.XXX to pin this?)
        fixed (mfxFrameSurface1* p1 = &pmfxSurfaces[nIndex])
        fixed (mfxBitstream* p2 = &bitstream)
        {
            sts = UnsafeNativeMethods.MFXVideoDECODE_DecodeFrameAsync(session, p2, p1, &pmfxOutSurface, &syncpD);
            if (!enableVPP && mfxStatus.MFX_ERR_NONE == sts && syncpD.sync != IntPtr.Zero)
            {
                sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpD, 60000);   // Synchronize. Wait until decoded frame is ready
                *frame = pmfxOutSurface;
            }
        }

        //sts = mfxDEC->DecodeFrameAsync(&config.mfxBS, pmfxSurfaces[nIndex], &pmfxOutSurface, &syncpD);
        //if (sts == MFX_WRN_VIDEO_PARAM_CHANGED)
        //    ;
        // I had a problem where I was getting a lot of these; I suspect that when you get
        // this return code and sync anyway, it forces more of them. Be sure to test this
        // statement under VMware in software mode; it seems to uniquely happen there,
        // where it is used to ask for more internal surfaces.
        if (sts == mfxStatus.MFX_ERR_MORE_SURFACE)
        {
            goto foo;
        }

        // Ignore warnings if output is available;
        // if no output and no action required, just repeat the DecodeFrameAsync call
        if (mfxStatus.MFX_ERR_NONE < sts && syncpD.sync != IntPtr.Zero)
        {
            sts = mfxStatus.MFX_ERR_NONE;
        }
    }

    if (sts == mfxStatus.MFX_ERR_MORE_SURFACE)   // decoder needs to be called again, it is eating memory. SW mode
    {
        return true;
    }
    if (sts == mfxStatus.MFX_ERR_MORE_DATA)
    {
        return false;
    }
    if (sts < 0)
    {
        throw new QuickSyncException("DecodeFrame fail", sts);
    }

    if (enableVPP && sts == mfxStatus.MFX_ERR_NONE)
    {
        // Find the free output surface before pinning it
        // (&pmfxSurfaces2[nIndex2] is evaluated when the fixed statement starts).
        nIndex2 = GetFreeSurfaceIndex(pmfxSurfaces2);
        QuickSyncStatic.ThrowOnBadStatus((mfxStatus)nIndex2, "cannot find free surface");

        fixed (mfxFrameSurface1* p1 = &pmfxSurfaces2[nIndex2])
        {
            tryagain:
            // Process a frame asynchronously (returns immediately)
            sts = UnsafeNativeMethods.MFXVideoVPP_RunFrameVPPAsync(session, pmfxOutSurface, p1, null, &syncpV);
            //if (sts == MFX_WRN_VIDEO_PARAM_CHANGED)
            //    ;
            if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr == null)
            {
                // repeat the call if warning and no output
                if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts)
                {
                    Thread.Sleep(1);   // wait if device is busy
                    goto tryagain;
                }
            }
            else if (mfxStatus.MFX_ERR_NONE < sts && syncpV.sync_ptr != null)
            {
                sts = mfxStatus.MFX_ERR_NONE;   // ignore warnings if output is available
            }
            else if (mfxStatus.MFX_ERR_MORE_DATA == sts)   // VPP needs more data, let decoder decode another frame as input
            {
                //continue;
                return false;
            }
            else if (mfxStatus.MFX_ERR_MORE_SURFACE == sts)
            {
                // Not relevant for the illustrated workload, therefore not handled.
                // Relevant when VPP produces more frames at output than it consumes at input,
                // e.g. framerate conversion 30 fps -> 60 fps.
                //break;
                return true;
            }
            else if (sts < 0)
            {
                throw new QuickSyncException("RunFrameVPPAsync fail", sts);
            }
            // MSDK_CHECK_RESULT(sts, MFX_ERR_NONE, sts);
            //MSDK_BREAK_ON_ERROR(sts);

            if (mfxStatus.MFX_ERR_NONE == sts && syncpV.sync != IntPtr.Zero)
            {
                sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, syncpV, 60000);   // Synchronize. Wait until processed frame is ready
                *frame = p1;
                return true;
            }
        }
    }
    return true;
}
/// <summary>Get frames during 2nd stage of flushing</summary> /// <param name="bitStreamChunk">A single frame</param> /// <returns>true if you should continue to call this method, false if you must go to the next phase.</returns> public bool Flush3(ref BitStreamChunk bitStreamChunk) { bitStreamChunk.bytesAvailable = 0; //////// mfxStatus sts = mfxStatus.MFX_ERR_NONE; // // Stage 4: Retrieve the buffered encoded frames // if (mfxStatus.MFX_ERR_NONE <= sts) { int nTaskIdx = GetFreeTaskIndex(pTasks, taskPoolSize); // Find free task if ((int)mfxStatus.MFX_ERR_NOT_FOUND == nTaskIdx) { // No more free tasks, need to sync sts = UnsafeNativeMethods.MFXVideoCORE_SyncOperation(session, pTasks[nFirstSyncTask].syncp, 60000); QuickSyncStatic.ThrowOnBadStatus(sts, "syncOper"); if (bitStreamChunk.bitstream == null || bitStreamChunk.bitstream.Length < pTasks[nFirstSyncTask].mfxBS.DataLength) { bitStreamChunk.bitstream = new byte[pTasks[nFirstSyncTask].mfxBS.DataLength]; } Trace.Assert(pTasks[nFirstSyncTask].mfxBS.DataOffset == 0); Marshal.Copy(pTasks[nFirstSyncTask].mfxBS.Data, bitStreamChunk.bitstream, 0, (int)pTasks[nFirstSyncTask].mfxBS.DataLength); bitStreamChunk.bytesAvailable = (int)pTasks[nFirstSyncTask].mfxBS.DataLength; // WriteBitStreamFrame(pTasks[nFirstSyncTask].mfxBS, outbs); //MSDK_BREAK_ON_ERROR(sts); pTasks[nFirstSyncTask].syncp.sync = IntPtr.Zero; pTasks[nFirstSyncTask].mfxBS.DataLength = 0; pTasks[nFirstSyncTask].mfxBS.DataOffset = 0; nFirstSyncTask = (nFirstSyncTask + 1) % taskPoolSize; return(true); } else { for (;;) { // Encode a frame asychronously (returns immediately) //sts = mfxENC.EncodeFrameAsync(NULL, pSurfaces2[nIndex2], &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp); sts = UnsafeNativeMethods.MFXVideoENCODE_EncodeFrameAsync(session, (mfxEncodeCtrl *)0, null, &pTasks[nTaskIdx].mfxBS, &pTasks[nTaskIdx].syncp); if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync == IntPtr.Zero) { // repeat the call if warning and no output if (mfxStatus.MFX_WRN_DEVICE_BUSY == sts) { Thread.Sleep(1); // wait if device is busy } } else if (mfxStatus.MFX_ERR_NONE < sts && pTasks[nTaskIdx].syncp.sync != IntPtr.Zero) { sts = mfxStatus.MFX_ERR_NONE; // ignore warnings if output is available break; } else if (mfxStatus.MFX_ERR_NOT_ENOUGH_BUFFER == sts) { // Allocate more bitstream buffer memory here if needed... break; } else { if (sts != mfxStatus.MFX_ERR_MORE_DATA && sts != mfxStatus.MFX_ERR_MORE_SURFACE) { QuickSyncStatic.ThrowOnBadStatus(sts, "encodeAsync"); } break; } } } } if (mfxStatus.MFX_ERR_MORE_DATA == sts) { return(false); } QuickSyncStatic.ThrowOnBadStatus(sts, "enc error"); return(true); }
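// End-of-stream sketch (hypothetical caller): the flush phases of this transcoder
// run in order, each called until it returns false. Flush2 drains the VPP into the
// encoder; Flush3 drains the encoder's own buffers and task pool.
//
//   var chunk = new BitStreamChunk();
//   while (transcoder.Flush2(ref chunk))
//       if (chunk.bytesAvailable > 0) sink.Write(chunk.bitstream, 0, chunk.bytesAvailable);
//   while (transcoder.Flush3(ref chunk))
//       if (chunk.bytesAvailable > 0) sink.Write(chunk.bitstream, 0, chunk.bytesAvailable);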
/// <summary>Initializes a new instance of the <see cref="LowLevelTranscoderCSharp"/> class.</summary> /// <param name="config">The configuration.</param> /// <param name="impl">The implementation.</param> /// <param name="forceSystemMemory">if set to <c>true</c> [force system memory].</param> public LowLevelTranscoderCSharp(TranscoderConfiguration config, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO, bool forceSystemMemory = false) { mfxStatus sts; mfxVideoParam mfxDecParams = config.decParams; mfxVideoParam mfxVPPParams = config.vppParams; mfxVideoParam mfxEncParams = config.encParams; session = new mfxSession(); var ver = new mfxVersion() { Major = 1, Minor = 3 }; fixed(mfxSession *s = &session) sts = UnsafeNativeMethods.MFXInit(impl, &ver, s); QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit"); //deviceSetup = new DeviceSetup(session, forceSystemMemory); // mfxVideoParam mfxDecParams = new mfxVideoParam(); // mfxDecParams.mfx.CodecId = CodecId.MFX_CODEC_AVC; int bufsize = (int)1e6; mfxBS = (mfxBitstream *)MyAllocHGlobalAndZero(sizeof(mfxBitstream)); mfxBS->Data = MyAllocHGlobalAndZero(bufsize); mfxBS->DataLength = (uint)0; mfxBS->MaxLength = (uint)bufsize; mfxBS->DataOffset = 0; int outwidth = mfxDecParams.mfx.FrameInfo.CropW; int outheight = mfxDecParams.mfx.FrameInfo.CropH; // Query number of required surfaces for VPP //mfxFrameAllocRequest[] VPPRequest = new mfxFrameAllocRequest[2]; // [0] - in, [1] - out TwoMfxFrameAllocRequest VPPRequest; sts = UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf(session, &mfxVPPParams, (mfxFrameAllocRequest *)&VPPRequest); if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, "vpp.queryiosurf"); // Query number required surfaces for dec mfxFrameAllocRequest DecRequest; sts = UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf(session, &mfxDecParams, &DecRequest); if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf)); // Query number of required surfaces for enc mfxFrameAllocRequest EncRequest = new mfxFrameAllocRequest(); sts = UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf(session, &mfxEncParams, &EncRequest); if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, nameof(UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf)); // Determine the required number of surfaces for decoder output (VPP input) and for VPP output (encoder input) nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest.In.NumFrameSuggested + mfxVPPParams.AsyncDepth; nSurfNumVPPEnc = EncRequest.NumFrameSuggested + VPPRequest.Out.NumFrameSuggested + mfxVPPParams.AsyncDepth; { Trace.Assert((mfxEncParams.IOPattern & IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY) != 0); Trace.Assert((mfxDecParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY) != 0); UInt16 width = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Width); UInt16 height = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Height); int bitsPerPixel = 12; int surfaceSize = width * height * bitsPerPixel / 8; var decVppSurfaceBuffers = Marshal.AllocHGlobal(surfaceSize * nSurfNumDecVPP); var vppEncSurfaceBuffers = Marshal.AllocHGlobal(surfaceSize * nSurfNumVPPEnc); pSurfaces = (mfxFrameSurface1 
*)MyAllocHGlobalAndZero(sizeof(mfxFrameSurface1) * nSurfNumDecVPP); pSurfaces2 = (mfxFrameSurface1 *)MyAllocHGlobalAndZero(sizeof(mfxFrameSurface1) * nSurfNumVPPEnc); for (int i = 0; i < nSurfNumDecVPP; i++) { pSurfaces[i] = new mfxFrameSurface1(); pSurfaces[i].Info = DecRequest.Info; pSurfaces[i].Data.Y_ptr = (byte *)decVppSurfaceBuffers + i * surfaceSize; pSurfaces[i].Data.U_ptr = pSurfaces[i].Data.Y_ptr + width * height; pSurfaces[i].Data.V_ptr = pSurfaces[i].Data.U_ptr + 1; pSurfaces[i].Data.Pitch = width; } for (int i = 0; i < nSurfNumVPPEnc; i++) { pSurfaces2[i] = new mfxFrameSurface1(); pSurfaces2[i].Info = EncRequest.Info; pSurfaces2[i].Data.Y_ptr = (byte *)vppEncSurfaceBuffers + i * surfaceSize; pSurfaces2[i].Data.U_ptr = pSurfaces2[i].Data.Y_ptr + width * height; pSurfaces2[i].Data.V_ptr = pSurfaces2[i].Data.U_ptr + 1; pSurfaces2[i].Data.Pitch = width; } } sts = UnsafeNativeMethods.MFXVideoDECODE_Init(session, &mfxDecParams); if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoDECODE_Init), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, "decode.init"); sts = UnsafeNativeMethods.MFXVideoENCODE_Init(session, &mfxEncParams); if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Init), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, "encode.init"); sts = UnsafeNativeMethods.MFXVideoVPP_Init(session, &mfxVPPParams); if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION) { warnings.Add(nameof(UnsafeNativeMethods.MFXVideoVPP_Init), sts); sts = 0; } QuickSyncStatic.ThrowOnBadStatus(sts, "vpp.init"); //mfxExtVPPDoNotUse zz; //zz.Header.BufferId = BufferId.MFX_EXTBUFF_VPP_DONOTUSE; //zz.Header.BufferSz = (uint)sizeof(mfxExtVPPDoUse); //mfxExtBuffer** pExtParamsVPPx = stackalloc mfxExtBuffer*[1]; //pExtParamsVPPx[0] = (mfxExtBuffer*)&zz; //var t1 = stackalloc uint[100]; //zz.AlgList = t1; //zz.NumAlg = 100; //mfxVideoParam par; //par.ExtParam = pExtParamsVPPx; //par.NumExtParam = 1; //sts = UnsafeNativeMethods.MFXVideoVPP_GetVideoParam(session, &par); //Trace.Assert(sts == mfxStatus.MFX_ERR_NONE); //Console.WriteLine(zz.NumAlg); //for (int i = 0; i < 10; i++) //{ // Console.WriteLine((BufferId)t1[i]); //} mfxVideoParam par; // Retrieve video parameters selected by encoder. // - BufferSizeInKB parameter is required to set bit stream buffer size par = new mfxVideoParam(); sts = UnsafeNativeMethods.MFXVideoENCODE_GetVideoParam(session, &par); QuickSyncStatic.ThrowOnBadStatus(sts, "enc.getvideoparams"); // Create task pool to improve asynchronous performance (greater GPU utilization) taskPoolSize = mfxEncParams.AsyncDepth; // number of tasks that can be submitted, before synchronizing is required // Task* pTasks = stackalloc Task[taskPoolSize]; pTasks = (Task *)MyAllocHGlobalAndZero(sizeof(Task) * taskPoolSize); // GCHandle gch3 = GCHandle.Alloc(pTasks, GCHandleType.Pinned); for (int i = 0; i < taskPoolSize; i++) { // Prepare Media SDK bit stream buffer pTasks[i].mfxBS.MaxLength = (uint)(par.mfx.BufferSizeInKB * 1000); pTasks[i].mfxBS.Data = MyAllocHGlobalAndZero((int)pTasks[i].mfxBS.MaxLength); Trace.Assert(pTasks[i].mfxBS.Data != IntPtr.Zero); } // GCHandle gch3 = GCHandle.Alloc(pTasks, GCHandleType.Pinned); }