Example #1
        /// <summary>
        /// Add salt-and-pepper noise to the given image.
        /// </summary>
        /// <param name="source"></param>
        /// <returns></returns>
        private IplImage Noise(IplImage source)
        {
            IplImage         result     = source.Clone();
            Mat              noise      = new Mat(source.Height, source.Width, MatType.CV_32F);
            InputOutputArray noiseArray = (InputOutputArray)noise;

            Cv2.Randu(noiseArray, (Scalar)0, (Scalar)255);
            noise = noiseArray.GetMat();

            int bound = 5;

            int   upperBound = 255 - bound;
            int   lowerBound = 0 + bound;
            float noiseValue;

            for (int y = 0; y < source.Height; y++)
            {
                for (int x = 0; x < source.Width; x++)
                {
                    noiseValue = noise.At<float>(y, x);

                    if (noiseValue >= upperBound)
                    {
                        result[y, x] = new CvScalar(255);
                    }
                    else if (noiseValue <= lowerBound)
                    {
                        result[y, x] = new CvScalar(0);
                    }
                }
            }

            return(result);
        }
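
A quick usage sketch (hedged: source is assumed to be an IplImage loaded elsewhere). With bound = 5 the uniform noise makes roughly 2 * 5 / 255 ≈ 4% of the pixels pure black or white:

        IplImage noisy = Noise(source);   // salt-and-pepper at ~4% density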
        /// <summary>
        /// Finds the red pixels in the image.
        /// </summary>
        /// <param name="image">The image to process</param>
        /// <param name="mask">The pixel mask</param>
        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
            {
                useUMat = ia.IsUMat;
            }

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    //Mask on hue from 20 to 160
                    using (ScalarArray lower = new ScalarArray(20))
                        using (ScalarArray upper = new ScalarArray(160))
                        {
                            CvInvoke.InRange(mask, lower, upper, mask);
                        }

                    CvInvoke.BitwiseNot(mask, mask);

                    //mask for saturation of at least 15 (mainly filters out white pixels)
                    CvInvoke.Threshold(s, s, 15, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }
        }
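
A minimal usage sketch for the mask (hedged: the file name is hypothetical; CvInvoke.Imread and CvInvoke.CountNonZero are standard Emgu CV calls):

        Mat image = CvInvoke.Imread("stop_sign.jpg", ImreadModes.Color);
        Mat redMask = new Mat();
        GetRedPixelMask(image, redMask);
        int redPixels = CvInvoke.CountNonZero(redMask);   // rough measure of how red the image is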
        public static Bitmap BackProject(Bitmap bmp, int[] HueRange, int[] SaturationRange)
        {
            Emgu.CV.Image<Bgr, Byte> Mask = new Image<Bgr, Byte>(bmp);                                      //Convert the Bitmap into an Emgu image
            Mat  Copy = new Mat();                                                                          //Result Mat type
            bool useUMat;                                                                                   //bool for Mat Check

            using (InputOutputArray ia = Copy.GetInputOutputArray())                                        //Determine Mask type
                useUMat = ia.IsUMat;                                                                        //Use UMat copies if the mask is UMat-backed
            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())                         //Mat Image Copies (Hue)
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())                       //Mat Image Copies (Saturation)
                {
                    CvInvoke.CvtColor(Mask, hsv, ColorConversion.Bgr2Hsv);                                  //Convert Image to Hsv
                    CvInvoke.ExtractChannel(hsv, Copy, 0);                                                  //Extract Hue channel from Hsv
                    CvInvoke.ExtractChannel(hsv, s, 1);                                                     //Extract Saturation channel from Hsv
                                                                                                            //the mask keeps hue outside [HueRange[0], HueRange[1]] (inverted below)
                    using (ScalarArray lower = new ScalarArray(HueRange[0]))                                //hue min
                        using (ScalarArray upper = new ScalarArray(HueRange[1]))                            //hue max
                            CvInvoke.InRange(Copy, lower, upper, Copy);                                     //Check Ranges
                    CvInvoke.BitwiseNot(Copy, Copy);                                                        //Invert: keep only pixels whose hue falls outside the range
                                                                                                            //s is the mask for saturation of at least SaturationRange[0], mainly used to filter out white pixels
                    CvInvoke.Threshold(s, s, SaturationRange[0], SaturationRange[1], ThresholdType.Binary); //saturation check
                    CvInvoke.BitwiseAnd(Copy, s, Copy, null);                                               //If saturation and hue match requirements, place in mask
                }
            return(Copy.Bitmap);
        }
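
A hedged usage example, reusing the red-hue bounds that appear elsewhere in this file (inputBitmap is assumed to exist):

            Bitmap redOnly = BackProject(inputBitmap, new[] { 20, 160 }, new[] { 10, 255 });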
Example #4
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observedImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 /// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public static void DrawMatches(
     IInputArray modelImage,
     VectorOfKeyPoint modelKeypoints,
     IInputArray observedImage,
     VectorOfKeyPoint observedKeyPoints,
     VectorOfVectorOfDMatch matches,
     IInputOutputArray result,
     MCvScalar matchColor,
     MCvScalar singlePointColor,
     VectorOfVectorOfByte mask = null,
     KeypointDrawType flags    = KeypointDrawType.Default)
 {
     using (InputArray iaModelImage = modelImage.GetInputArray())
         using (InputArray iaObservedImage = observedImage.GetInputArray())
             using (InputOutputArray ioaResult = result.GetInputOutputArray())
                 Features2DInvoke.drawMatchedFeatures2(
                     iaObservedImage,
                     observedKeyPoints,
                     iaModelImage,
                     modelKeypoints,
                     matches,
                     ioaResult,
                     ref matchColor,
                     ref singlePointColor,
                     mask,
                     flags);
 }
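
A sketch of a complete match-and-draw pipeline feeding this method, assuming Emgu CV's ORB and BFMatcher (file names are hypothetical):

 using (Mat model = CvInvoke.Imread("model.png", ImreadModes.Grayscale))
 using (Mat scene = CvInvoke.Imread("scene.png", ImreadModes.Grayscale))
 using (ORB orb = new ORB(500))
 using (VectorOfKeyPoint modelKp = new VectorOfKeyPoint())
 using (VectorOfKeyPoint sceneKp = new VectorOfKeyPoint())
 using (Mat modelDesc = new Mat())
 using (Mat sceneDesc = new Mat())
 using (BFMatcher matcher = new BFMatcher(DistanceType.Hamming))
 using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
 using (Mat result = new Mat())
 {
     orb.DetectAndCompute(model, null, modelKp, modelDesc, false);
     orb.DetectAndCompute(scene, null, sceneKp, sceneDesc, false);
     matcher.Add(modelDesc);                    // train on the model descriptors
     matcher.KnnMatch(sceneDesc, matches, 2);   // k = 2 for a later ratio test
     DrawMatches(model, modelKp, scene, sceneKp, matches, result,
                 new MCvScalar(0, 255, 0), new MCvScalar(255, 0, 0));
 }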
Example #5
 /// <summary>
 /// Performs image denoising using the Block-Matching and 3D-filtering algorithm with several computational optimizations. Noise is expected to be Gaussian white noise.
 /// </summary>
 /// <param name="src">Input 8-bit or 16-bit 1-channel image.</param>
 /// <param name="dstStep1">Output image of the first step of BM3D with the same size and type as src.</param>
 /// <param name="dstStep2">Output image of the second step of BM3D with the same size and type as src.</param>
 /// <param name="h">Parameter regulating filter strength. Big h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise.</param>
 /// <param name="templateWindowSize">Size in pixels of the template patch that is used for block-matching. Should be power of 2.</param>
 /// <param name="searchWindowSize">Size in pixels of the window that is used to perform block-matching. Affect performance linearly: greater searchWindowsSize - greater denoising time. Must be larger than templateWindowSize.</param>
 /// <param name="blockMatchingStep1">Block matching threshold for the first step of BM3D (hard thresholding), i.e. maximum distance for which two blocks are considered similar. Value expressed in euclidean distance.</param>
 /// <param name="blockMatchingStep2">Block matching threshold for the second step of BM3D (Wiener filtering), i.e. maximum distance for which two blocks are considered similar. Value expressed in euclidean distance.</param>
 /// <param name="groupSize">Maximum size of the 3D group for collaborative filtering.</param>
 /// <param name="slidingStep">Sliding step to process every next reference block.</param>
 /// <param name="beta">Kaiser window parameter that affects the sidelobe attenuation of the transform of the window. Kaiser window is used in order to reduce border effects. To prevent usage of the window, set beta to zero.</param>
 /// <param name="normType">Norm used to calculate distance between blocks. L2 is slower than L1 but yields more accurate results.</param>
 /// <param name="step">Step of BM3D to be executed. Possible variants are: step 1, step 2, both steps.</param>
 /// <param name="transformType">	Type of the orthogonal transform used in collaborative filtering step. Currently only Haar transform is supported.</param>
 /// <remarks> <c href="http://www.cs.tut.fi/~foi/GCF-BM3D/BM3D_TIP_2007.pdf"/>   </remarks>
 public static void Bm3dDenoising(
     IInputArray src,
     IInputOutputArray dstStep1,
     IOutputArray dstStep2,
     float h = 1,
     int templateWindowSize       = 4,
     int searchWindowSize         = 16,
     int blockMatchingStep1       = 2500,
     int blockMatchingStep2       = 400,
     int groupSize                = 8,
     int slidingStep              = 1,
     float beta                   = 2.0f,
     NormType normType            = NormType.L2,
     Bm3dSteps step               = Bm3dSteps.All,
     TransformTypes transformType = TransformTypes.Haar)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (InputOutputArray ioaDstStep1 = dstStep1.GetInputOutputArray())
             using (OutputArray oaStep2 = dstStep2.GetOutputArray())
             {
                 cveBm3dDenoising1(iaSrc, ioaDstStep1, oaStep2,
                                   h, templateWindowSize, searchWindowSize, blockMatchingStep1, blockMatchingStep2,
                                   groupSize, slidingStep, beta, normType, step, transformType);
             }
 }
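
A hedged call sketch (the input must be 8-bit or 16-bit single-channel; h = 10 is an assumed starting strength, not a value from this file):

 using (Mat src = CvInvoke.Imread("noisy.png", ImreadModes.Grayscale))
 using (Mat step1 = new Mat())
 using (Mat step2 = new Mat())
 {
     Bm3dDenoising(src, step1, step2, h: 10);   // step2 holds the final (Wiener-filtered) estimate
     CvInvoke.Imwrite("denoised.png", step2);
 }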
Example #6
        private static void GetLabColorPixelMask(IInputArray image, IInputOutputArray mask, int lightLower, int lightUpper, int aLower, int aUpper, int bLower, int bUpper)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage lab = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage l = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    using (IImage a = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                        using (IImage b = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                        {
                            CvInvoke.CvtColor(image, lab, ColorConversion.Bgr2Lab);
                            CvInvoke.ExtractChannel(lab, mask, 0);
                            CvInvoke.ExtractChannel(lab, a, 1);
                            CvInvoke.ExtractChannel(lab, b, 2);

                            //threshold on lightness
                            //CvInvoke.Threshold(lab, l, lightLower, lightUpper, ThresholdType.Binary);
                            //CvInvoke.BitwiseAnd(mask, s, mask, null);

                            using (ScalarArray lower = new ScalarArray(lightLower))
                                using (ScalarArray upper = new ScalarArray(lightUpper))
                                    CvInvoke.InRange(mask, lower, upper, mask);

                            //threshold on A colorspace and merge L and A into Mask
                            CvInvoke.Threshold(a, a, aLower, aUpper, ThresholdType.Binary);
                            CvInvoke.BitwiseAnd(mask, a, mask, null);

                            //threshold on B colorspace and merge B into previous Mask
                            CvInvoke.Threshold(b, b, bLower, bUpper, ThresholdType.Binary);
                            CvInvoke.BitwiseAnd(mask, b, mask, null);
                        }
        }
Example #7
        /// <summary>
        /// Compute a color pixel mask for the given image.
        /// A pixel is kept when: hueLower &lt;= hue &lt;= hueUpper AND saturation &gt; satLower AND luminosity &gt; lumLower
        /// </summary>
        /// <param name="image">The color image to compute the mask from</param>
        /// <param name="mask">The resulting pixel mask</param>
        private static void GetColorPixelMask(IInputArray image, IInputOutputArray mask, int hueLower, int hueUpper, int satLower, int satUpper, int lumLower, int lumUpper)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    using (IImage lum = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    {
                        CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                        CvInvoke.ExtractChannel(hsv, mask, 0);
                        CvInvoke.ExtractChannel(hsv, s, 1);
                        CvInvoke.ExtractChannel(hsv, lum, 2);

                        //the mask for hue within [hueLower, hueUpper]
                        using (ScalarArray lower = new ScalarArray(hueLower))
                            using (ScalarArray upper = new ScalarArray(hueUpper))
                                CvInvoke.InRange(mask, lower, upper, mask);
                        //CvInvoke.BitwiseNot(mask, mask); //invert results to "round the corner" of the hue scale

                        //s is the mask for saturation of at least satLower, mainly used to filter out white pixels
                        CvInvoke.Threshold(s, s, satLower, satUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, s, mask, null);

                        // mask luminosity
                        CvInvoke.Threshold(lum, lum, lumLower, lumUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, lum, mask, null);
                    }
        }
Example #8
        public Mat GetHandContours(Mat image)
        {
            var copy = new Mat();

            CvInvoke.GaussianBlur(image, copy, new Size(5, 5), 1.5, 1.5);
            var  mask = new Mat();
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(copy, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    using (ScalarArray lower = new ScalarArray(9))
                        using (ScalarArray upper = new ScalarArray(14))
                            CvInvoke.InRange(mask, lower, upper, mask);
                    //CvInvoke.BitwiseNot(mask, mask);

                    //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
                    CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }

            //Use Dilate followed by Erode to eliminate small gaps in some contour.
            CvInvoke.Dilate(mask, mask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
            CvInvoke.Erode(mask, mask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);

            return(mask);
        }
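
The returned mask feeds naturally into contour extraction; a hedged sketch, called from inside the same class (frame is a hypothetical BGR Mat):

            Mat handMask = GetHandContours(frame);
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(handMask, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
                for (int i = 0; i < contours.Size; i++)
                    CvInvoke.DrawContours(frame, contours, i, new MCvScalar(0, 255, 0), 2);
            }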
Example #9
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="stdDeviationsIntrinsics">Output vector of standard deviations estimated for intrinsic parameters. Order of deviations values: (fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,τx,τy) If one of parameters is not estimated, it's deviation is equals to zero.</param>
 /// <param name="stdDeviationsExtrinsics">Output vector of standard deviations estimated for extrinsic parameters. Order of deviations values: (R1,T1,…,RM,TM) where M is number of pattern views, Ri,Ti are concatenated 1x3 vectors.</param>
 /// <param name="perViewErrors">Output vector of average re-projection errors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     IOutputArray stdDeviationsIntrinsics,
     IOutputArray stdDeviationsExtrinsics,
     IOutputArray perViewErrors,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
         using (InputArray iaCharucoIds = charucoIds.GetInputArray())
             using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
                 using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
                     using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
                         using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
                             using (OutputArray oaStdDeviationsIntrinsics = stdDeviationsIntrinsics == null ? OutputArray.GetEmpty() : stdDeviationsIntrinsics.GetOutputArray())
                                 using (OutputArray oaStdDeviationsExtrinsics = stdDeviationsExtrinsics == null ? OutputArray.GetEmpty() : stdDeviationsExtrinsics.GetOutputArray())
                                     using (OutputArray oaPerViewErrors = perViewErrors == null ? OutputArray.GetEmpty() : perViewErrors.GetOutputArray())
                                     {
                                         return(cveArucoCalibrateCameraCharuco(
                                                    iaCharucoCorners, iaCharucoIds, board.BoardPtr, ref imageSize,
                                                    ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs,
                                                    oaStdDeviationsIntrinsics, oaStdDeviationsExtrinsics, oaPerViewErrors,
                                                    flags, ref criteria));
                                     }
 }
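
A hedged calibration sketch; allCorners, allIds, board and imageSize are assumed to have been accumulated over several detected frames (e.g. as IInputArrayOfArrays-compatible vectors):

 using (Mat cameraMatrix = new Mat())
 using (Mat distCoeffs = new Mat())
 {
     double reprojError = CalibrateCameraCharuco(
         allCorners, allIds, board, imageSize,
         cameraMatrix, distCoeffs,
         null, null, null, null, null,   // skip the optional outputs
         CalibType.Default, new MCvTermCriteria(30, 1e-4));
 }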
Example #10
 /// <summary>
 /// Detects facial landmarks in the given image.
 /// </summary>
 /// <param name="image">Input image.</param>
 /// <param name="faces">Regions of interest of the detected faces. Each face is stored in a cv::Rect container.</param>
 /// <param name="landmarks">The detected landmark points for each face.</param>
 /// <returns>True if the landmarks were detected successfully.</returns>
 public virtual bool Fit(
     InputArray image,
     InputArray faces,
     InputOutputArray landmarks)
 {
     return(Fit(image, faces, landmarks, IntPtr.Zero));
 }
Example #11
        /// <summary>
        /// Detects facial landmarks in the given image.
        /// </summary>
        /// <param name="image">Input image.</param>
        /// <param name="faces">Regions of interest of the detected faces. Each face is stored in a cv::Rect container.</param>
        /// <param name="landmarks">The detected landmark points for each face.</param>
        /// <returns>True if the landmarks were detected successfully.</returns>
        public virtual bool Fit(
            InputArray image,
            InputArray faces,
            InputOutputArray landmarks)
        {
            ThrowIfDisposed();
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (faces == null)
            {
                throw new ArgumentNullException(nameof(faces));
            }
            if (landmarks == null)
            {
                throw new ArgumentNullException(nameof(landmarks));
            }
            image.ThrowIfDisposed();
            faces.ThrowIfDisposed();
            landmarks.ThrowIfNotReady();

            var ret = NativeMethods.face_Facemark_fit(ptr, image.CvPtr, faces.CvPtr, landmarks.CvPtr);

            GC.KeepAlive(this);
            GC.KeepAlive(image);
            landmarks.Fix();

            return(ret != 0);
        }
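
A hedged end-to-end sketch for this OpenCvSharp wrapper (model and cascade file names are hypothetical; LoadModel and the implicit Mat conversions are assumed to be available as in stock OpenCvSharp):

        using (var facemark = FacemarkLBF.Create())
        using (var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
        using (var gray = new Mat("face.jpg", ImreadModes.Grayscale))
        using (var landmarks = new Mat())
        {
            facemark.LoadModel("lbfmodel.yaml");
            Rect[] faces = cascade.DetectMultiScale(gray);
            using (var facesArray = InputArray.Create(faces))
            {
                bool found = facemark.Fit(gray, facesArray, landmarks);
            }
        }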
        /// <summary>
        /// Performs image denoising using the Block-Matching and 3D-filtering algorithm 
        /// (http://www.cs.tut.fi/~foi/GCF-BM3D/BM3D_TIP_2007.pdf) with several computational 
        /// optimizations. Noise is expected to be Gaussian white noise.
        /// </summary>
        /// <param name="src">Input 8-bit or 16-bit 1-channel image.</param>
        /// <param name="dstStep1">Output image of the first step of BM3D with the same size and type as src.</param>
        /// <param name="dstStep2">Output image of the second step of BM3D with the same size and type as src.</param>
        /// <param name="h">Parameter regulating filter strength. Big h value perfectly removes noise but also 
        /// removes image details, smaller h value preserves details but also preserves some noise.</param>
        /// <param name="templateWindowSize">Size in pixels of the template patch that is used for block-matching. Should be power of 2.</param>
        /// <param name="searchWindowSize">Size in pixels of the window that is used to perform block-matching.
        ///  Affect performance linearly: greater searchWindowsSize - greater denoising time. Must be larger than templateWindowSize.</param>
        /// <param name="blockMatchingStep1">Block matching threshold for the first step of BM3D (hard thresholding),
        /// i.e.maximum distance for which two blocks are considered similar.Value expressed in euclidean distance.</param>
        /// <param name="blockMatchingStep2">Block matching threshold for the second step of BM3D (Wiener filtering),
        /// i.e.maximum distance for which two blocks are considered similar. Value expressed in euclidean distance.</param>
        /// <param name="groupSize">Maximum size of the 3D group for collaborative filtering.</param>
        /// <param name="slidingStep">Sliding step to process every next reference block.</param>
        /// <param name="beta">Kaiser window parameter that affects the sidelobe attenuation of the transform of the 
        /// window.Kaiser window is used in order to reduce border effects.To prevent usage of the window, set beta to zero.</param>
        /// <param name="normType">Norm used to calculate distance between blocks. L2 is slower than L1 but yields more accurate results.</param>
        /// <param name="step">Step of BM3D to be executed. Allowed are only BM3D_STEP1 and BM3D_STEPALL. 
        /// BM3D_STEP2 is not allowed as it requires basic estimate to be present.</param>
        /// <param name="transformType">Type of the orthogonal transform used in collaborative filtering step. 
        /// Currently only Haar transform is supported.</param>
        public static void Bm3dDenoising(
            InputArray src,
            InputOutputArray dstStep1,
            OutputArray dstStep2,
            float h = 1,
            int templateWindowSize = 4,
            int searchWindowSize = 16,
            int blockMatchingStep1 = 2500,
            int blockMatchingStep2 = 400,
            int groupSize = 8,
            int slidingStep = 1,
            float beta = 2.0f,
            NormTypes normType = NormTypes.L2,
            Bm3dSteps step = Bm3dSteps.STEPALL,
            TransformTypes transformType = TransformTypes.HAAR)
        {
            if (src == null)
                throw new ArgumentNullException(nameof(src));
            if (dstStep1 == null)
                throw new ArgumentNullException(nameof(dstStep1));
            if (dstStep2 == null)
                throw new ArgumentNullException(nameof(dstStep2));

            src.ThrowIfDisposed();
            dstStep1.ThrowIfNotReady();
            dstStep2.ThrowIfNotReady();

            NativeMethods.xphoto_bm3dDenoising1(src.CvPtr, dstStep1.CvPtr, dstStep2.CvPtr, h, templateWindowSize,
                searchWindowSize, blockMatchingStep1, blockMatchingStep2, groupSize, slidingStep, beta, (int) normType,
                (int) step, (int) transformType);

            GC.KeepAlive(src);
            dstStep1.Fix();
            dstStep2.Fix();
        }
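
The same call in OpenCvSharp form, hedged as above (h = 10 is an assumed strength; the file names are hypothetical):

            using (var src = new Mat("noisy.png", ImreadModes.Grayscale))
            using (var step1 = new Mat())
            using (var step2 = new Mat())
            {
                Bm3dDenoising(src, step1, step2, h: 10);
                Cv2.ImWrite("denoised.png", step2);
            }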
Example #13
 /// <summary>
 /// Calculates a dense optical flow.
 /// </summary>
 /// <param name="denseFlow">The dense optical flow object</param>
 /// <param name="i0">first input image.</param>
 /// <param name="i1">second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">computed flow image that has the same size as I0 and type CV_32FC2.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
 {
     using (InputArray iaI0 = i0.GetInputArray())
         using (InputArray iaI1 = i1.GetInputArray())
             using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
                 cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, iaI0, iaI1, ioaFlow, (stream == null) ?  IntPtr.Zero : stream.Ptr);
 }
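
A hedged sketch with Emgu's CUDA Farneback implementation (prevGray and nextGray are assumed 8-bit grayscale Mats; requires a CUDA-enabled build):

 using (CudaFarnebackOpticalFlow farneback = new CudaFarnebackOpticalFlow())
 using (GpuMat g0 = new GpuMat())
 using (GpuMat g1 = new GpuMat())
 using (GpuMat flow = new GpuMat())
 {
     g0.Upload(prevGray);
     g1.Upload(nextGray);
     farneback.Calc(g0, g1, flow);   // the extension method above; flow is CV_32FC2
 }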
Example #14
 public static void Bm3dDenoising(
     IInputArray src,
     IInputOutputArray dstStep1,
     IOutputArray dstStep2,
     float h,
     int templateWindowSize,
     int searchWindowSize,
     int blockMatchingStep1,
     int blockMatchingStep2,
     int groupSize,
     int slidingStep,
     float beta,
     int normType,
     int step,
     int transformType)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (InputOutputArray ioaDstStep1 = dstStep1.GetInputOutputArray())
             using (OutputArray oaStep2 = dstStep2.GetOutputArray())
             {
                 cveBm3dDenoising1(iaSrc, ioaDstStep1, oaStep2,
                                   h, templateWindowSize, searchWindowSize, blockMatchingStep1, blockMatchingStep2,
                                   groupSize, slidingStep, beta, normType, step, transformType);
             }
 }
Example #15
 /// <summary>
 ///  Pose estimation for a ChArUco board given some of its corners
 /// </summary>
 /// <param name="charucoCorners">vector of detected charuco corners</param>
 /// <param name="charucoIds">list of identifiers for each corner in charucoCorners</param>
 /// <param name="board">layout of ChArUco board.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients, 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board</param>
 /// <param name="tvec">Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.</param>
 /// <param name="useExtrinsicGuess">defines whether initial guess for rvec and  tvec will be used or not.</param>
 /// <returns>If pose estimation is valid, returns true, else returns false.</returns>
 public static bool EstimatePoseCharucoBoard(
     IInputArray charucoCorners,
     IInputArray charucoIds,
     CharucoBoard board,
     IInputArray cameraMatrix,
     IInputArray distCoeffs,
     IInputOutputArray rvec,
     IInputOutputArray tvec,
     bool useExtrinsicGuess = false)
 {
     using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
         using (InputArray iaCharucoIds = charucoIds.GetInputArray())
             using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
                 using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
                     using (InputOutputArray ioaRvec = rvec.GetInputOutputArray())
                         using (InputOutputArray ioaTvec = tvec.GetInputOutputArray())
                         {
                             return(cveArucoEstimatePoseCharucoBoard(
                                        iaCharucoCorners,
                                        iaCharucoIds,
                                        board,
                                        iaCameraMatrix,
                                        iaDistCoeffs,
                                        ioaRvec,
                                        ioaTvec,
                                        useExtrinsicGuess));
                         }
 }
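
Pose estimation pairs naturally with axis drawing (see DrawAxis further below); a hedged sketch in which the detection outputs and calibration data are assumed to exist:

 using (Mat rvec = new Mat())
 using (Mat tvec = new Mat())
 {
     bool valid = EstimatePoseCharucoBoard(
         charucoCorners, charucoIds, board,
         cameraMatrix, distCoeffs, rvec, tvec);
     if (valid)
         DrawAxis(image, cameraMatrix, distCoeffs, rvec, tvec, 0.1f);   // 0.1 in tvec units
 }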
Example #16
 /// <summary>
 /// Draws the checker to the given image.
 /// </summary>
 /// <param name="img">image in color space BGR</param>
 public void Draw(IInputOutputArray img)
 {
     using (InputOutputArray ioaImg = img.GetInputOutputArray())
     {
         MccInvoke.cveCCheckerDrawDraw(_ptr, ioaImg);
     }
 }
Example #17
        /// <summary>
        /// Compute the red pixel mask for the given image.
        /// A red pixel is a pixel where: hue &lt; 20 OR hue &gt; 160, AND saturation &gt; 10
        /// </summary>
        /// <param name="image">The color image to find red mask from</param>
        /// <param name="mask">The red pixel mask</param>
        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    //the mask for hue less than 20 or larger than 160
                    using (ScalarArray lower = new ScalarArray(20))
                        using (ScalarArray upper = new ScalarArray(160))
                            CvInvoke.InRange(mask, lower, upper, mask);
                    CvInvoke.BitwiseNot(mask, mask);

                    //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
                    CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }
        }
Example #18
 /// <summary>
 /// Blends and returns the final pano.
 /// </summary>
 /// <param name="dst">Final pano</param>
 /// <param name="dstMask">Final pano mask</param>
 public void Blend(IInputOutputArray dst, IInputOutputArray dstMask)
 {
     using (InputOutputArray ioaDst = dst.GetInputOutputArray())
         using (InputOutputArray ioaDstMask = dstMask.GetInputOutputArray())
         {
             StitchingInvoke.cveBlenderBlend(_blenderPtr, ioaDst, ioaDstMask);
         }
 }
Example #19
 /// <summary>
 /// Converts the hardware-generated flow vectors to floating point representation
 /// </summary>
 /// <param name="flow">Buffer of type CV_16FC2 containing flow vectors generated by Calc().</param>
 /// <param name="floatFlow">Buffer of type CV_32FC2, containing flow vectors in floating point representation, each flow vector for 1 pixel per gridSize, in the pitch-linear layout.</param>
 public void ConvertToFloat(IInputArray flow, IInputOutputArray floatFlow)
 {
     using (InputArray iaFlow = flow.GetInputArray())
         using (InputOutputArray ioaFloatFlow = floatFlow.GetInputOutputArray())
         {
             CudaInvoke.cudaNvidiaOpticalFlow_2_0_ConvertToFloat(_ptr, iaFlow, ioaFloatFlow);
         }
 }
Example #20
 /// <summary>
 /// Utility to draw the detected facial landmark points.
 /// </summary>
 /// <param name="image">The input image to be processed.</param>
 /// <param name="points">Contains the data of points which will be drawn.</param>
 /// <param name="color">The color of points in BGR format </param>
 public static void DrawFacemarks(IInputOutputArray image, IInputArray points, MCvScalar color)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaPoints = points.GetInputArray())
         {
             cveDrawFacemarks(ioaImage, iaPoints, ref color);
         }
 }
Example #21
 /// <summary>
 /// Generates the all-black and all-white images needed for shadow mask computation. Shadow regions are the areas of the two camera views that are not lit by the projector's light and therefore contain no coded information; to identify them, the 3DUNDERWORLD algorithm computes a shadow mask for each camera view, starting from a white and a black image captured by that camera. This method generates these two additional images to project.
 /// </summary>
 /// <param name="blackImage">The generated all-black CV_8U image, at projector's resolution.</param>
 /// <param name="whiteImage">The generated all-white CV_8U image, at projector's resolution.</param>
 public void GetImagesForShadowMasks(IInputOutputArray blackImage, IInputOutputArray whiteImage)
 {
     using (InputOutputArray ioaBlackImage = blackImage.GetInputOutputArray())
         using (InputOutputArray ioaWhiteImage = whiteImage.GetInputOutputArray())
         {
             StructuredLightInvoke.cveGrayCodePatternGetImagesForShadowMasks(_ptr, ioaBlackImage, ioaWhiteImage);
         }
 }
Example #22
        /*
         * /// <summary>
         * /// Default face detector This function is mainly utilized by the implementation of a Facemark Algorithm.
         * /// </summary>
         * /// <param name="facemark">The facemark object</param>
         * /// <param name="image">The input image to be processed.</param>
         * /// <param name="faces">Output of the function which represent region of interest of the detected faces. Each face is stored in cv::Rect container.</param>
         * /// <returns>True if success</returns>
         * public static bool GetFaces(this IFacemark facemark, IInputArray image, IOutputArray faces)
         * {
         *  using (InputArray iaImage = image.GetInputArray())
         *  using (OutputArray oaFaces = faces.GetOutputArray())
         *  {
         *      return cveFacemarkGetFaces(facemark.FacemarkPtr, iaImage, oaFaces);
         *  }
         * }
         * [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
         * [return: MarshalAs(CvInvoke.BoolMarshalType)]
         * internal extern static bool cveFacemarkGetFaces(IntPtr facemark, IntPtr image, IntPtr faces);
         */

        /// <summary>
        /// Detects facial landmarks in the given image.
        /// </summary>
        /// <param name="facemark">The facemark object</param>
        /// <param name="image">Input image.</param>
        /// <param name="faces">Regions of interest of the detected faces. Each face is stored in a cv::Rect container.</param>
        /// <param name="landmarks">The detected landmark points for each face.</param>
        /// <returns>True if successful</returns>
        public static bool Fit(this IFacemark facemark, IInputArray image, IInputArray faces, IInputOutputArray landmarks)
        {
            using (InputArray iaImage = image.GetInputArray())
                using (InputArray iaFaces = faces.GetInputArray())
                    using (InputOutputArray ioaLandmarks = landmarks.GetInputOutputArray())
                    {
                        return(cveFacemarkFit(facemark.FacemarkPtr, iaImage, iaFaces, ioaLandmarks));
                    }
        }
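
A hedged pipeline sketch with Emgu's FacemarkLBF (model and cascade paths are hypothetical; DrawFacemarks is the utility shown earlier in this file, its class name FaceInvoke assumed):

            FacemarkLBFParams fParams = new FacemarkLBFParams();
            using (FacemarkLBF facemark = new FacemarkLBF(fParams))
            using (CascadeClassifier cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
            using (Mat image = CvInvoke.Imread("face.jpg", ImreadModes.Color))
            using (VectorOfVectorOfPointF landmarks = new VectorOfVectorOfPointF())
            {
                facemark.LoadModel("lbfmodel.yaml");
                Rectangle[] faces = cascade.DetectMultiScale(image);
                using (VectorOfRect vrFaces = new VectorOfRect(faces))
                {
                    if (facemark.Fit(image, vrFaces, landmarks) && landmarks.Size > 0)
                        FaceInvoke.DrawFacemarks(image, landmarks[0], new MCvScalar(0, 255, 0));
                }
            }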
Example #23
        /// <summary>
        /// Exchanges the color channels of an image in-place.
        /// </summary>
        /// <param name="image">Source image. Supports only CV_8UC4 type.</param>
        /// <param name="dstOrder">Integer array describing how channel values are permutated. The n-th entry of the
        /// array contains the number of the channel that is stored in the n-th channel of the output image.
        /// E.g.Given an RGBA image, aDstOrder = [3, 2, 1, 0] converts this to ABGR channel order.</param>
        /// <param name="stream">Stream for the asynchronous version.</param>
        /// <returns></returns>
        public static void swapChannels(InputOutputArray image, IEnumerable<int> dstOrder, Stream stream = null)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }

            int[] dstOrderArray = EnumerableEx.ToArray(dstOrder);

            NativeMethods.cuda_imgproc_swapChannels(image.CvPtr, dstOrderArray, stream?.CvPtr ?? Stream.Null.CvPtr);
        }
Example #24
 /// <summary>
 /// Draw detected markers in image.
 /// </summary>
 /// <param name="image">Input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="corners">Positions of marker corners on input image. (e.g std::vector&lt;std::vector&lt;cv::Point2f&gt; &gt; ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="ids">Vector of identifiers for markers in markersCorners . Optional, if not provided, ids are not painted.</param>
 /// <param name="borderColor">Color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one to improve visualization.</param>
 public static void DrawDetectedMarkers(
     IInputOutputArray image, IInputArray corners, IInputArray ids,
     MCvScalar borderColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaCorners = corners.GetInputArray())
             using (InputArray iaIds = ids.GetInputArray())
             {
                 cveArucoDrawDetectedMarkers(ioaImage, iaCorners, iaIds, ref borderColor);
             }
 }
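
A hedged detect-then-draw sketch with Emgu's aruco wrapper (the dictionary choice is arbitrary; image is assumed to be a BGR Mat):

 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_100))
 using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
 using (VectorOfInt ids = new VectorOfInt())
 {
     DetectorParameters p = DetectorParameters.GetDefault();
     ArucoInvoke.DetectMarkers(image, dict, corners, ids, p);
     if (ids.Size > 0)
         DrawDetectedMarkers(image, corners, ids, new MCvScalar(0, 255, 0));
 }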
Example #25
 /// <summary>
 /// Debug draw search lines onto an image
 /// </summary>
 /// <param name="img">The output image</param>
 /// <param name="locations">The source locations of a line bundle</param>
 /// <param name="color">The line color</param>
 public static void DrawSearchLines(
     IInputOutputArray img,
     IInputArray locations,
     MCvScalar color)
 {
     using (InputOutputArray ioaImg = img.GetInputOutputArray())
         using (InputArray iaLocations = locations.GetInputArray())
         {
             cveDrawSearchLines(ioaImg, iaLocations, ref color);
         }
 }
Example #26
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="sparseFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
 /// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
 {
     using (InputArray iaPrevImg = prevImg.GetInputArray())
         using (InputArray iaNextImg = nextImg.GetInputArray())
             using (InputArray iaPrevPts = prevPts.GetInputArray())
                 using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
                     using (OutputArray oaStatus = (status == null ? OutputArray.GetEmpty() : status.GetOutputArray()))
                         using (OutputArray oaErr = (err == null ? OutputArray.GetEmpty() : err.GetOutputArray()))
                             cudaSparseOpticalFlowCalc(sparseFlow.SparseOpticalFlowPtr, iaPrevImg, iaNextImg, iaPrevPts, ioaNextPts,
                                                       oaStatus, oaErr, (stream == null) ? IntPtr.Zero : stream.Ptr);
 }
Example #27
 /// <summary>
 /// Debug draw markers of matched correspondences onto a lineBundle
 /// </summary>
 /// <param name="bundle">the lineBundle</param>
 /// <param name="cols">column coordinates in the line bundle</param>
 /// <param name="colors">colors for the markers. Defaults to white.</param>
 public static void DrawCorrespondencies(
     IInputOutputArray bundle,
     IInputArray cols,
     IInputArray colors = null)
 {
     using (InputOutputArray ioaBundle = bundle.GetInputOutputArray())
         using (InputArray iaCols = cols.GetInputArray())
             using (InputArray iaColors = colors == null ? InputArray.GetEmpty() : colors.GetInputArray())
             {
                 cveDrawCorrespondencies(ioaBundle, iaCols, iaColors);
             }
 }
Example #28
 /// <summary>
 /// Given the pose estimation of a marker or board, this function draws the axis of the world coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">rotation vector of the coordinate system that will be drawn.</param>
 /// <param name="tvec">translation vector of the coordinate system that will be drawn.</param>
 /// <param name="length">length of the painted axis in the same unit than tvec (usually in meters)</param>
 public static void DrawAxis(
     IInputOutputArray image, IInputArray cameraMatrix, IInputArray distCoeffs,
     IInputArray rvec, IInputArray tvec, float length)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
             using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
                 using (InputArray iaRvec = rvec.GetInputArray())
                     using (InputArray iaTvec = tvec.GetInputArray())
                     {
                         cveArucoDrawAxis(ioaImage, iaCameraMatrix, iaDistCoeffs, iaRvec, iaTvec, length);
                     }
 }
Example #29
 /// <summary>
 /// Draw a set of detected ChArUco Diamond markers
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="diamondCorners">positions of diamond corners in the same format returned by detectCharucoDiamond(). (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="diamondIds">vector of identifiers for diamonds in diamondCorners, in the same format returned by detectCharucoDiamond() (e.g. VectorOfMat ). Optional, if not provided, ids are not painted. </param>
 /// <param name="borderColor">color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one.</param>
 public static void DrawDetectedDiamonds(
     IInputOutputArray image,
     IInputArrayOfArrays diamondCorners,
     IInputArray diamondIds,
     MCvScalar borderColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaDiamondCorners = diamondCorners.GetInputArray())
             using (InputArray iaDiamondIds = diamondIds == null ? InputArray.GetEmpty() : diamondIds.GetInputArray())
             {
                 cveArucoDrawDetectedDiamonds(ioaImage, iaDiamondCorners, iaDiamondIds, ref borderColor);
             }
 }
Example #30
 /// <summary>
 /// Draws a set of Charuco corners
 /// </summary>
 /// <param name="image">image input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="charucoCorners">vector of detected charuco corners</param>
 /// <param name="charucoIds">list of identifiers for each corner in charucoCorners</param>
 /// <param name="cornerColor">color of the square surrounding each corner</param>
 public static void DrawDetectedCornersCharuco(
     IInputOutputArray image,
     IInputArray charucoCorners,
     IInputArray charucoIds,
     MCvScalar cornerColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
             using (InputArray iaCharucoIds = charucoIds == null ? InputArray.GetEmpty() : charucoIds.GetInputArray())
             {
                 cveArucoDrawDetectedCornersCharuco(ioaImage, iaCharucoCorners, iaCharucoIds, ref cornerColor);
             }
 }