示例#1
0
        /// <summary>
        /// Return the PSNR (peak signal-to-noise ratio), per plane, for two NV12 raw frames of equal size.
        /// </summary>
        /// <param name="nv12a">Frame A (NV12 layout: full-size Y plane followed by an interleaved half-resolution UV plane)</param>
        /// <param name="nv12b">Frame B (same layout and dimensions as frame A)</param>
        /// <param name="width">Frame width in pixels (must be even for NV12)</param>
        /// <param name="height">Frame height in pixels (must be even for NV12)</param>
        /// <returns>Three PSNR values in dB: [0]=Y, [1]=U, [2]=V. A plane with zero error yields PositiveInfinity.</returns>
        public static double[] PeakSignalNoiseRatioForNV12(byte[] nv12a, byte[] nv12b, int width, int height)
        {
            int len = height * width * VideoUtility.GetBitsPerPixel(FourCC.NV12) / 8;

            Trace.Assert(nv12a.Length == len);
            Trace.Assert(nv12b.Length == len);

            double sseY = 0;   // sum of squared errors, luma
            double sseU = 0;   // sum of squared errors, chroma U (Cb)
            double sseV = 0;   // sum of squared errors, chroma V (Cr)

            // Y plane: width*height bytes at the start of the buffer.
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    int d = nv12a[x + y * width] - nv12b[x + y * width];
                    sseY += d * d;
                }
            }

            // UV plane: starts at offset width*height; each row holds width bytes of
            // interleaved U,V pairs, i.e. (width/2) x (height/2) samples per component.
            for (int y = 0; y < height / 2; y++)
            {
                for (int x = 0; x < width / 2; x++)
                {
                    int d = nv12a[x * 2 + 0 + y * width + height * width] - nv12b[x * 2 + 0 + y * width + height * width];
                    sseU += d * d;
                    d     = nv12a[x * 2 + 1 + y * width + height * width] - nv12b[x * 2 + 1 + y * width + height * width];
                    sseV += d * d;
                }
            }

            // PSNR = 20*log10(MAX) - 10*log10(MSE), with MAX = 255 for 8-bit samples.
            // BUGFIX: the chroma MSE must be averaged over the chroma sample count
            // ((width/2)*(height/2)); the old code divided by the luma sample count
            // (width*height), overstating the U/V PSNR by ~6.02 dB.
            int lumaSamples   = width * height;
            int chromaSamples = (width / 2) * (height / 2);

            var psnr = new double[3];

            psnr[0] = 20 * Math.Log10(255) - 10 * Math.Log10(sseY / lumaSamples);
            psnr[1] = 20 * Math.Log10(255) - 10 * Math.Log10(sseU / chromaSamples);
            psnr[2] = 20 * Math.Log10(255) - 10 * Math.Log10(sseV / chromaSamples);
            return psnr;
        }
        /// <summary>Initializes a new instance of the <see cref="LowLevelEncoderCSharp"/> class.</summary>
        /// <param name="mfxEncParams">The encoder parameters.</param>
        /// <param name="impl">The implementation.</param>
        public LowLevelEncoderCSharp(mfxVideoParam mfxEncParams, mfxIMPL impl = mfxIMPL.MFX_IMPL_AUTO)
        {
            mfxStatus sts;

            session = new mfxSession();
            var ver = new mfxVersion()
            {
                Major = 1, Minor = 3
            };

            fixed(mfxSession *s = &session)
            sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);

            QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");
            //deviceSetup = new DeviceSetup(session, false);


            // Query lets the SDK validate/adjust the proposed parameters. A positive
            // status is a warning (parameters were adjusted): record it and continue.
            sts = UnsafeNativeMethods.MFXVideoENCODE_Query(session, &mfxEncParams, &mfxEncParams);
            if (sts > 0)
            {
                warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Query), sts);
                sts = 0;
            }
            QuickSyncStatic.ThrowOnBadStatus(sts, "encodequery");


            mfxFrameAllocRequest EncRequest;

            sts = UnsafeNativeMethods.MFXVideoENCODE_QueryIOSurf(session, &mfxEncParams, &EncRequest);
            QuickSyncStatic.ThrowOnBadStatus(sts, "queryiosurf");

            // Extra surfaces so AsyncDepth frames can be in flight simultaneously.
            EncRequest.NumFrameSuggested = (ushort)(EncRequest.NumFrameSuggested + mfxEncParams.AsyncDepth);

            EncRequest.Type |= (FrameMemoryType)0x2000; // WILL_WRITE; // This line is only required for Windows DirectX11 to ensure that surfaces can be written to by the application

            UInt16 numSurfaces = EncRequest.NumFrameSuggested;

            // - Width and height of buffer must be aligned, a multiple of 32
            // - Frame surface array keeps pointers all surface planes and general frame info

            UInt16 width        = (UInt16)QuickSyncStatic.ALIGN32(mfxEncParams.mfx.FrameInfo.Width);
            UInt16 height       = (UInt16)QuickSyncStatic.ALIGN32(mfxEncParams.mfx.FrameInfo.Height);
            int    bitsPerPixel = VideoUtility.GetBitsPerPixel(mfxEncParams.mfx.FrameInfo.FourCC);
            int    surfaceSize  = width * height * bitsPerPixel / 8;
            // One unmanaged allocation backs all surfaces; each surface below points at its slice.
            IntPtr surfaceBuffers    = Marshal.AllocHGlobal(surfaceSize * numSurfaces);
            byte * surfaceBuffersPtr = (byte *)surfaceBuffers;


            // Allocate surface headers (mfxFrameSurface1) for the encoder.
            Frames = new mfxFrameSurface1[numSurfaces];
            for (int i = 0; i < numSurfaces; i++)
            {
                Frames[i]      = new mfxFrameSurface1();
                Frames[i].Info = mfxEncParams.mfx.FrameInfo;

                switch (mfxEncParams.mfx.FrameInfo.FourCC)
                {
                case FourCC.NV12:
                    // NV12: full-size Y plane, then interleaved UV plane (U first).
                    Frames[i].Data.Y_ptr = (byte *)surfaceBuffers + i * surfaceSize;
                    Frames[i].Data.U_ptr = Frames[i].Data.Y_ptr + width * height;
                    Frames[i].Data.V_ptr = Frames[i].Data.U_ptr + 1;
                    Frames[i].Data.Pitch = width;
                    break;

                case FourCC.YUY2:
                    // YUY2: packed Y0 U0 Y1 V0, 2 bytes per pixel.
                    Frames[i].Data.Y_ptr = (byte *)surfaceBuffers + i * surfaceSize;
                    Frames[i].Data.U_ptr = Frames[i].Data.Y_ptr + 1;
                    Frames[i].Data.V_ptr = Frames[i].Data.U_ptr + 3;
                    Frames[i].Data.Pitch = (ushort)(width * 2);
                    break;

                default:      //find sysmem_allocator.cpp for more help
                    throw new NotImplementedException();
                }
            }

            // Cache a stable native pointer for each surface header
            // (the Frames array is pinned at the end of this constructor).
            frameIntPtrs = new IntPtr[Frames.Length];
            for (int i = 0; i < Frames.Length; i++)
            {
                fixed(mfxFrameSurface1 *a = &Frames[i])
                frameIntPtrs[i] = (IntPtr)a;
            }


            sts = UnsafeNativeMethods.MFXVideoENCODE_Init(session, &mfxEncParams);
            if (sts > 0)
            {
                warnings.Add(nameof(UnsafeNativeMethods.MFXVideoENCODE_Init), sts);
                sts = 0;
            }
            QuickSyncStatic.ThrowOnBadStatus(sts, "encodeinit");

            mfxVideoParam par;

            // BUGFIX: capture the return status — previously it was discarded and the
            // stale status from MFXVideoENCODE_Init was re-checked on the next line.
            sts = UnsafeNativeMethods.MFXVideoENCODE_GetVideoParam(session, &par);
            QuickSyncStatic.ThrowOnBadStatus(sts, "encodegetvideoparam");


            // from mediasdkjpeg-man.pdf
            // BufferSizeInKB = 4 + (Width * Height * BytesPerPx + 1023) / 1024;
            //where Width and Height are weight and height of the picture in pixel, BytesPerPx is number of
            //byte for one pixel.It equals to 1 for monochrome picture, 1.5 for NV12 and YV12 color formats,
            //	2 for YUY2 color format, and 3 for RGB32 color format(alpha channel is not encoded).

            if (par.mfx.BufferSizeInKB == 0 && mfxEncParams.mfx.CodecId == CodecId.MFX_CODEC_JPEG)
            {
                // BUGFIX: apply the documented formula above. The old expression included
                // the fixed +4 KB headroom inside the division and divided by 1000, which
                // produced a too-small buffer for small frames. BytesPerPx = 3 (RGB32) is
                // kept as a conservative upper bound for all supported formats.
                par.mfx.BufferSizeInKB = (ushort)(4 + (mfxEncParams.mfx.FrameInfo.CropW * mfxEncParams.mfx.FrameInfo.CropH * 3 + 1023) / 1024);
            }
            //printf("bufsize %d\n", par.mfx.BufferSizeInKB);



            // Create task pool to improve asynchronous performance (greater GPU utilization)
            int taskPoolSize = mfxEncParams.AsyncDepth;  // number of tasks that can be submitted, before synchronizing is required

            pTasks = new Task[taskPoolSize];

            for (int i = 0; i < taskPoolSize; i++)
            {
                // Prepare Media SDK bit stream buffer
                // (the SDK samples interpret BufferSizeInKB in units of 1000 bytes).
                pTasks[i].mfxBS.MaxLength = (uint)(par.mfx.BufferSizeInKB * 1000);
                pTasks[i].mfxBS.Data      = Marshal.AllocHGlobal((int)pTasks[i].mfxBS.MaxLength);
                Trace.Assert(pTasks[i].mfxBS.Data != IntPtr.Zero);
            }

            // Pin managed arrays whose addresses were handed to native code above.
            pinningHandles.Add(GCHandle.Alloc(pTasks, GCHandleType.Pinned));
            pinningHandles.Add(GCHandle.Alloc(Frames, GCHandleType.Pinned));
        }
        /// <summary>
        /// Constructor. Initializes an MFX session, queries surface requirements for the
        /// decoder (and optionally the VPP post-processing stage), and allocates either
        /// system-memory or video-memory surfaces as dictated by the IOPattern flags.
        /// </summary>
        /// <param name="mfxDecParamsX">Decoder parameters.</param>
        /// <param name="VPPParamsX">Optional VPP parameters; when null the VPP stage is disabled.</param>
        /// <param name="impl">The Media SDK implementation to initialize.</param>
        public LowLevelDecoderCSharp(mfxVideoParam mfxDecParamsX,
                                     mfxVideoParam?VPPParamsX = null,
                                     mfxIMPL impl             = mfxIMPL.MFX_IMPL_AUTO)
        {
            mfxStatus sts;
            bool      enableVPP = VPPParamsX != null;


            if (VPPParamsX == null)
            {
                // Create a default VPPParamsX (pass-through, system memory) so the
                // request/surface arithmetic below always has a value to work with.
                var foo = new mfxVideoParam();
                foo.AsyncDepth = 1;
                foo.IOPattern  = IOPattern.MFX_IOPATTERN_OUT_SYSTEM_MEMORY | IOPattern.MFX_IOPATTERN_IN_SYSTEM_MEMORY;
                foo.vpp.In     = mfxDecParamsX.mfx.FrameInfo;
                foo.vpp.Out    = mfxDecParamsX.mfx.FrameInfo;
                VPPParamsX     = foo;
            }


            mfxVideoParam VPPParams    = VPPParamsX != null ? VPPParamsX.Value : new mfxVideoParam();
            mfxVideoParam mfxDecParams = mfxDecParamsX;

            // NOTE
            // IF I am worried about interop issues with stuff moving due to GC,
            // just pin ever single blitable here
            // (NOTE(review): pmfxSurfaces/pmfxSurfaces2 are still null here and are
            // pinned again after allocation below — these two handles pin nothing.)
            pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces, GCHandleType.Pinned));
            pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces2, GCHandleType.Pinned));


            this.videoParam = mfxDecParams;
            this.enableVPP  = enableVPP;



            session = new mfxSession();
            var ver = new mfxVersion()
            {
                Major = 1, Minor = 3
            };

            fixed(mfxSession *s = &session)
            sts = UnsafeNativeMethods.MFXInit(impl, &ver, s);

            QuickSyncStatic.ThrowOnBadStatus(sts, "MFXInit");



            bool decVideoMemOut = (mfxDecParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_VIDEO_MEMORY) != 0;
            bool vppVideoMemIn  = (VPPParams.IOPattern & IOPattern.MFX_IOPATTERN_IN_VIDEO_MEMORY) != 0;
            bool vppVideoMemOut = (VPPParams.IOPattern & IOPattern.MFX_IOPATTERN_OUT_VIDEO_MEMORY) != 0;

            Trace.Assert(!enableVPP || decVideoMemOut == vppVideoMemIn, "When the VPP is enabled, the memory type from DEC into VPP must be of same type");



            // BUGFIX: also create the acceleration support when only the decoder uses
            // video memory (decVideoMemOut); previously AllocFrames below dereferenced
            // a null videoAccelerationSupport in that configuration.
            if (decVideoMemOut || vppVideoMemIn || vppVideoMemOut)
            {
                //if you want to use video memory, you need to have a way to allocate the Direct3D or Vaapi frames
                videoAccelerationSupport = new VideoAccelerationSupport(session);
            }

            fixed(mfxFrameAllocRequest *p = &DecRequest)
            sts = UnsafeNativeMethods.MFXVideoDECODE_QueryIOSurf(session, &mfxDecParams, p);

            if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
            {
                sts = 0;
            }
            QuickSyncStatic.ThrowOnBadStatus(sts, "DECODE_QueryIOSurf");


            if (enableVPP)
            {
                fixed(mfxFrameAllocRequest *p = &VPPRequest[0])
                sts = UnsafeNativeMethods.MFXVideoVPP_QueryIOSurf(session, &VPPParams, p);

                if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
                {
                    sts = 0;
                }
                QuickSyncStatic.ThrowOnBadStatus(sts, "VPP_QueryIOSurf");

                // The application reads the VPP output surfaces directly.
                VPPRequest[1].Type |= FrameMemoryType.WILL_READ;
            }



            //mfxU16 nSurfNumDecVPP = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested;
            //mfxU16 nSurfNumVPPOut = VPPRequest[1].NumFrameSuggested;

            int nSurfNumVPPOut = 0;

            // Shared pool feeding the decoder output into the VPP input.
            var numSurfaces = DecRequest.NumFrameSuggested + VPPRequest[0].NumFrameSuggested + VPPParams.AsyncDepth;

            if (enableVPP)
            {
                nSurfNumVPPOut = 0 + VPPRequest[1].NumFrameSuggested + VPPParams.AsyncDepth;
            }


            // Bitstream buffer the caller feeds compressed data into.
            bitstreamBuffer      = Marshal.AllocHGlobal(defaultBitstreamBufferSize);
            bitstream.Data       = bitstreamBuffer;
            bitstream.DataLength = 0;
            bitstream.MaxLength  = (uint)defaultBitstreamBufferSize;
            bitstream.DataOffset = 0;



            //allocate decoder frames via directx
            mfxFrameAllocResponse DecResponse = new mfxFrameAllocResponse();

            if (decVideoMemOut)
            {
                DecRequest.NumFrameMin = DecRequest.NumFrameSuggested = (ushort)numSurfaces;

                fixed(mfxFrameAllocRequest *p = &DecRequest)
                videoAccelerationSupport.AllocFrames(p, &DecResponse);
            }


            //allocate vpp frames via directx
            mfxFrameAllocResponse EncResponse = new mfxFrameAllocResponse();

            if (vppVideoMemOut)
            {
                VPPRequest[1].NumFrameMin = VPPRequest[1].NumFrameSuggested = (ushort)nSurfNumVPPOut;

                fixed(mfxFrameAllocRequest *p = &VPPRequest[1])
                videoAccelerationSupport.AllocFrames(p, &EncResponse);
            }



            // Allocate surfaces for decoder
            // - Width and height of buffer must be aligned, a multiple of 32
            // - Frame surface array keeps pointers all surface planes and general frame info
            UInt16 width        = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Width);
            UInt16 height       = (UInt16)QuickSyncStatic.ALIGN32(DecRequest.Info.Height);
            int    bitsPerPixel = VideoUtility.GetBitsPerPixel(mfxDecParams.mfx.FrameInfo.FourCC);
            int    surfaceSize  = width * height * bitsPerPixel / 8;

            // System-memory path: one unmanaged allocation backs all decoder surfaces.
            if (!decVideoMemOut)
            {
                surfaceBuffers = Marshal.AllocHGlobal(surfaceSize * numSurfaces);
            }



            // Allocate surface headers (mfxFrameSurface1) for decoder
            pmfxSurfaces = new mfxFrameSurface1[numSurfaces];
            pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces, GCHandleType.Pinned));

            for (int i = 0; i < numSurfaces; i++)
            {
                pmfxSurfaces[i]      = new mfxFrameSurface1();
                pmfxSurfaces[i].Info = mfxDecParams.mfx.FrameInfo;
                if (!decVideoMemOut)
                {
                    switch (mfxDecParams.mfx.FrameInfo.FourCC)
                    {
                    case FourCC.NV12:
                        // NV12: full-size Y plane, then interleaved UV plane (U first).
                        pmfxSurfaces[i].Data.Y_ptr = (byte *)surfaceBuffers + i * surfaceSize;
                        pmfxSurfaces[i].Data.U_ptr = pmfxSurfaces[i].Data.Y_ptr + width * height;
                        pmfxSurfaces[i].Data.V_ptr = pmfxSurfaces[i].Data.U_ptr + 1;
                        pmfxSurfaces[i].Data.Pitch = width;
                        break;

                    case FourCC.YUY2:
                        // YUY2: packed Y0 U0 Y1 V0, 2 bytes per pixel.
                        pmfxSurfaces[i].Data.Y_ptr = (byte *)surfaceBuffers + i * surfaceSize;
                        pmfxSurfaces[i].Data.U_ptr = pmfxSurfaces[i].Data.Y_ptr + 1;
                        pmfxSurfaces[i].Data.V_ptr = pmfxSurfaces[i].Data.U_ptr + 3;
                        pmfxSurfaces[i].Data.Pitch = (ushort)(width * 2);
                        break;

                    default:      //find sysmem_allocator.cpp for more help
                        throw new NotImplementedException();
                    }
                }
                else
                {
                    pmfxSurfaces[i].Data.MemId = DecResponse.mids_ptr[i];   // MID (memory id) represent one D3D NV12 surface
                }
            }



            if (enableVPP)
            {
                UInt16 width2        = (UInt16)QuickSyncStatic.ALIGN32(VPPRequest[1].Info.CropW);
                UInt16 height2       = (UInt16)QuickSyncStatic.ALIGN32(VPPRequest[1].Info.CropH);
                int    bitsPerPixel2 = VideoUtility.GetBitsPerPixel(VPPParams.vpp.Out.FourCC);     // NV12 format is a 12 bits per pixel format
                int    surfaceSize2  = width2 * height2 * bitsPerPixel2 / 8;
                int    pitch2        = width2 * bitsPerPixel2 / 8;

                if (!vppVideoMemOut)
                {
                    surfaceBuffers2 = Marshal.AllocHGlobal(surfaceSize2 * nSurfNumVPPOut);
                }

                // Allocate surface headers (mfxFrameSurface1) for the VPP output.
                pmfxSurfaces2 = new mfxFrameSurface1[nSurfNumVPPOut];
                pinningHandles.Add(GCHandle.Alloc(pmfxSurfaces2, GCHandleType.Pinned));
                for (int i = 0; i < nSurfNumVPPOut; i++)
                {
                    pmfxSurfaces2[i]      = new mfxFrameSurface1();
                    pmfxSurfaces2[i].Info = VPPParams.vpp.Out;

                    if (!vppVideoMemOut)
                    {
                        pmfxSurfaces2[i].Data.Pitch = (ushort)pitch2;
                        switch (VPPParams.vpp.Out.FourCC)
                        {
                        case FourCC.NV12:
                            pmfxSurfaces2[i].Data.Y_ptr = (byte *)surfaceBuffers2 + i * surfaceSize2;
                            // BUGFIX: the UV plane starts after the VPP-output luma plane
                            // (width2 * height2); the old code used the decoder's
                            // width/height, which is wrong whenever the VPP resizes.
                            pmfxSurfaces2[i].Data.U_ptr = pmfxSurfaces2[i].Data.Y_ptr + width2 * height2;
                            pmfxSurfaces2[i].Data.V_ptr = pmfxSurfaces2[i].Data.U_ptr + 1;
                            break;

                        case FourCC.RGB4:
                            // RGB4 (RGB32): interleaved B,G,R,A bytes; per-channel pointers
                            // index into the same packed pixel.
                            pmfxSurfaces2[i].Data.B_ptr = (byte *)surfaceBuffers2 + i * surfaceSize2;
                            pmfxSurfaces2[i].Data.G_ptr = (byte *)surfaceBuffers2 + i * surfaceSize2 + 1;
                            pmfxSurfaces2[i].Data.R_ptr = (byte *)surfaceBuffers2 + i * surfaceSize2 + 2;
                            break;

                        default:
                            break;
                        }
                    }
                    else
                    {
                        pmfxSurfaces2[i].Data.MemId = EncResponse.mids_ptr[i];   // MID (memory id) represent one D3D NV12 surface
                    }
                }
            }



            sts = UnsafeNativeMethods.MFXVideoDECODE_Init(session, &mfxDecParams);
            if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
            {
                sts = 0;
            }
            QuickSyncStatic.ThrowOnBadStatus(sts, "MFXVideoDECODE_Init");


            if (enableVPP)
            {
                sts = UnsafeNativeMethods.MFXVideoVPP_Init(session, &VPPParams);
                if (sts == mfxStatus.MFX_WRN_PARTIAL_ACCELERATION)
                {
                    sts = 0;
                }
                QuickSyncStatic.ThrowOnBadStatus(sts, "MFXVideoVPP_Init");
            }
        }