/// <summary>
/// Detects facial landmarks from an image using the trained Facemark model.
/// (Note: this performs detection/fitting, not training.)
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="faces">Regions of interest of the detected faces. Each face is stored in a cv::Rect container.</param>
/// <param name="landmarks">The detected landmark points for each face.</param>
/// <returns>true if landmark fitting succeeded; otherwise false.</returns>
/// <exception cref="ArgumentNullException">Thrown when any argument is null.</exception>
public virtual bool Fit(
    InputArray image,
    InputArray faces,
    InputOutputArray landmarks)
{
    ThrowIfDisposed();
    if (image == null)
    {
        throw new ArgumentNullException(nameof(image));
    }
    if (faces == null)
    {
        throw new ArgumentNullException(nameof(faces));
    }
    if (landmarks == null)
    {
        throw new ArgumentNullException(nameof(landmarks));
    }
    image.ThrowIfDisposed();
    faces.ThrowIfDisposed();
    landmarks.ThrowIfNotReady();

    var ret = NativeMethods.face_Facemark_fit(ptr, image.CvPtr, faces.CvPtr, landmarks.CvPtr);

    // Keep every wrapper reachable until the native call returns, so their
    // finalizers cannot release the underlying cv::Mat handles mid-call.
    // (The original omitted GC.KeepAlive(faces) — a premature-finalization risk.)
    GC.KeepAlive(this);
    GC.KeepAlive(image);
    GC.KeepAlive(faces);
    landmarks.Fix();
    return ret != 0;
}
/// <summary>
/// Performs image denoising using the Block-Matching and 3D-filtering (BM3D) algorithm
/// (http://www.cs.tut.fi/~foi/GCF-BM3D/BM3D_TIP_2007.pdf) with several computational
/// optimizations. The noise is expected to be gaussian white noise.
/// </summary>
/// <param name="src">Input 8-bit or 16-bit 1-channel image.</param>
/// <param name="dstStep1">Output image of the first BM3D step, same size and type as src.</param>
/// <param name="dstStep2">Output image of the second BM3D step, same size and type as src.</param>
/// <param name="h">Filter strength. A large h removes noise thoroughly but also smooths out
/// image detail; a smaller h preserves detail at the cost of residual noise.</param>
/// <param name="templateWindowSize">Size in pixels of the template patch used for block-matching. Should be a power of 2.</param>
/// <param name="searchWindowSize">Size in pixels of the block-matching search window.
/// Affects performance linearly: a greater search window means greater denoising time. Must be larger than templateWindowSize.</param>
/// <param name="blockMatchingStep1">Block-matching threshold for the first step (hard thresholding):
/// the maximum euclidean distance at which two blocks are still considered similar.</param>
/// <param name="blockMatchingStep2">Block-matching threshold for the second step (Wiener filtering):
/// the maximum euclidean distance at which two blocks are still considered similar.</param>
/// <param name="groupSize">Maximum size of the 3D group used for collaborative filtering.</param>
/// <param name="slidingStep">Sliding step used to advance to the next reference block.</param>
/// <param name="beta">Kaiser window parameter controlling sidelobe attenuation of the window
/// transform; the window reduces border effects. Set beta to zero to disable the window.</param>
/// <param name="normType">Norm used for inter-block distance. L2 is slower than L1 but more accurate.</param>
/// <param name="step">Which BM3D step(s) to execute. Only BM3D_STEP1 and BM3D_STEPALL are allowed;
/// BM3D_STEP2 is rejected because it needs a basic estimate to already exist.</param>
/// <param name="transformType">Orthogonal transform used in the collaborative filtering step.
/// Currently only the Haar transform is supported.</param>
/// <exception cref="ArgumentNullException">Thrown when src, dstStep1 or dstStep2 is null.</exception>
public static void Bm3dDenoising(
    InputArray src,
    InputOutputArray dstStep1,
    OutputArray dstStep2,
    float h = 1,
    int templateWindowSize = 4,
    int searchWindowSize = 16,
    int blockMatchingStep1 = 2500,
    int blockMatchingStep2 = 400,
    int groupSize = 8,
    int slidingStep = 1,
    float beta = 2.0f,
    NormTypes normType = NormTypes.L2,
    Bm3dSteps step = Bm3dSteps.STEPALL,
    TransformTypes transformType = TransformTypes.HAAR)
{
    // Guard clauses: reject null wrappers before touching native state.
    if (src == null)
    {
        throw new ArgumentNullException(nameof(src));
    }
    if (dstStep1 == null)
    {
        throw new ArgumentNullException(nameof(dstStep1));
    }
    if (dstStep2 == null)
    {
        throw new ArgumentNullException(nameof(dstStep2));
    }

    src.ThrowIfDisposed();
    dstStep1.ThrowIfNotReady();
    dstStep2.ThrowIfNotReady();

    var status = NativeMethods.xphoto_bm3dDenoising1(
        src.CvPtr, dstStep1.CvPtr, dstStep2.CvPtr,
        h, templateWindowSize, searchWindowSize,
        blockMatchingStep1, blockMatchingStep2,
        groupSize, slidingStep, beta,
        (int)normType, (int)step, (int)transformType);
    NativeMethods.HandleException(status);

    // Keep the input alive for the duration of the native call, then
    // propagate the native results back into the output wrappers.
    GC.KeepAlive(src);
    dstStep1.Fix();
    dstStep2.Fix();
}
/// <summary>
/// Updates the motion history image using the current silhouette.
/// </summary>
/// <param name="silhouette">Silhouette mask with non-zero pixels where motion occurs.</param>
/// <param name="mhi">Motion history image updated by the function (single-channel, 32-bit floating-point).</param>
/// <param name="timestamp">Current time, in milliseconds or other units.</param>
/// <param name="duration">Maximal duration of the motion track, in the same units as timestamp.</param>
/// <exception cref="ArgumentNullException">Thrown when silhouette or mhi is null.</exception>
public static void UpdateMotionHistory(
    InputArray silhouette, InputOutputArray mhi,
    double timestamp, double duration)
{
    if (silhouette == null)
    {
        throw new ArgumentNullException(nameof(silhouette));
    }
    if (mhi == null)
    {
        throw new ArgumentNullException(nameof(mhi));
    }

    silhouette.ThrowIfDisposed();
    mhi.ThrowIfNotReady();

    // Delegate the actual update to the native optflow implementation.
    NativeMethods.optflow_motempl_updateMotionHistory(
        silhouette.CvPtr, mhi.CvPtr, timestamp, duration);

    // Propagate the result into the output wrapper and keep the input
    // wrapper reachable until the native call has completed.
    mhi.Fix();
    GC.KeepAlive(silhouette);
}