Пример #1
0
 /// <summary>
 ///  Pose estimation for a ChArUco board given some of their corners
 /// </summary>
 /// <param name="charucoCorners">vector of detected charuco corners</param>
 /// <param name="charucoIds">list of identifiers for each corner in charucoCorners</param>
 /// <param name="board">layout of ChArUco board.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients, 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board</param>
 /// <param name="tvec">Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.</param>
 /// <param name="useExtrinsicGuess">defines whether initial guess for rvec and tvec will be used or not.</param>
 /// <returns>If pose estimation is valid, returns true, else returns false.</returns>
 public static bool EstimatePoseCharucoBoard(
     IInputArray charucoCorners,
     IInputArray charucoIds,
     CharucoBoard board,
     IInputArray cameraMatrix,
     IInputArray distCoeffs,
     IInputOutputArray rvec,
     IInputOutputArray tvec,
     bool useExtrinsicGuess = false)
 {
     using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
     using (InputArray iaCharucoIds = charucoIds.GetInputArray())
     using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
     using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
     using (InputOutputArray ioaRvec = rvec.GetInputOutputArray())
     using (InputOutputArray ioaTvec = tvec.GetInputOutputArray())
     {
         // Pass the native board pointer to the interop layer, consistent with
         // CalibrateCameraCharuco which passes board.BoardPtr to its native call.
         return cveArucoEstimatePoseCharucoBoard(
             iaCharucoCorners,
             iaCharucoIds,
             board.BoardPtr,
             iaCameraMatrix,
             iaDistCoeffs,
             ioaRvec,
             ioaTvec,
             useExtrinsicGuess);
     }
 }
Пример #2
0
        /// <summary>
        /// Compute a pixel mask by thresholding the image in the CIE L*a*b* color space.
        /// The L channel is range-filtered, then the result is AND-ed with binary
        /// thresholds of the a* and b* channels.
        /// </summary>
        /// <param name="image">The BGR color image to compute the mask from</param>
        /// <param name="mask">The resulting pixel mask</param>
        /// <param name="lightLower">Lower bound (inclusive) for the L (lightness) channel</param>
        /// <param name="lightUpper">Upper bound (inclusive) for the L (lightness) channel</param>
        /// <param name="aLower">Binary threshold for the a* channel; pixels above it are kept</param>
        /// <param name="aUpper">Value written for a* pixels that pass the threshold</param>
        /// <param name="bLower">Binary threshold for the b* channel; pixels above it are kept</param>
        /// <param name="bUpper">Value written for b* pixels that pass the threshold</param>
        private static void GetLabColorPixelMask(IInputArray image, IInputOutputArray mask, int lightLower, int lightUpper, int aLower, int aUpper, int bLower, int bUpper)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            // Match the working buffer type (UMat vs Mat) to the destination mask.
            using (IImage lab = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage a = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    using (IImage b = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    {
                        CvInvoke.CvtColor(image, lab, ColorConversion.Bgr2Lab);
                        // The L channel is extracted straight into the mask and filtered in place.
                        CvInvoke.ExtractChannel(lab, mask, 0);
                        CvInvoke.ExtractChannel(lab, a, 1);
                        CvInvoke.ExtractChannel(lab, b, 2);

                        // Keep pixels whose lightness lies within [lightLower, lightUpper].
                        using (ScalarArray lower = new ScalarArray(lightLower))
                            using (ScalarArray upper = new ScalarArray(lightUpper))
                                CvInvoke.InRange(mask, lower, upper, mask);

                        // Threshold on the a* channel and merge into the mask.
                        CvInvoke.Threshold(a, a, aLower, aUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, a, mask, null);

                        // Threshold on the b* channel and merge into the mask.
                        CvInvoke.Threshold(b, b, bLower, bUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, b, mask, null);
                    }
        }
Пример #3
0
        /// <summary>
        /// Compute the red pixel mask for the given image.
        /// A red pixel is a pixel where: (hue &lt; 20 OR hue &gt; 160) AND saturation &gt; 10
        /// (the hue range [20, 160] is selected and then inverted, wrapping around the hue circle).
        /// </summary>
        /// <param name="image">The color image to find red mask from</param>
        /// <param name="mask">The red pixel mask</param>
        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    // Select hue in [20, 160], then invert so the mask covers hue < 20 or hue > 160.
                    using (ScalarArray lower = new ScalarArray(20))
                        using (ScalarArray upper = new ScalarArray(160))
                            CvInvoke.InRange(mask, lower, upper, mask);
                    CvInvoke.BitwiseNot(mask, mask);

                    //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
                    CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }
        }
Пример #4
0
 /// <summary>
 /// Calculates an optical flow.
 /// </summary>
 /// <param name="opticalFlow">The dense optical flow object</param>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">Computed flow image that has the same size as <paramref name="i0"/> and type CV_32FC2.</param>
 public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
     using (InputArray firstImage = i0.GetInputArray())
     using (InputArray secondImage = i1.GetInputArray())
     using (InputOutputArray resultFlow = flow.GetInputOutputArray())
     {
         CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, firstImage, secondImage, resultFlow);
     }
 }
Пример #5
0
 /// <summary>
 /// Performs image denoising using the Block-Matching and 3D-filtering algorithm with several computational optimizations. Noise expected to be a gaussian white noise.
 /// </summary>
 /// <param name="src">Input 8-bit or 16-bit 1-channel image.</param>
 /// <param name="dstStep1">Output image of the first step of BM3D with the same size and type as src.</param>
 /// <param name="dstStep2">Output image of the second step of BM3D with the same size and type as src.</param>
 /// <param name="h">Parameter regulating filter strength. Big h value perfectly removes noise but also removes image details, smaller h value preserves details but also preserves some noise.</param>
 /// <param name="templateWindowSize">Size in pixels of the template patch that is used for block-matching. Should be power of 2.</param>
 /// <param name="searchWindowSize">Size in pixels of the window that is used to perform block-matching. Affect performance linearly: greater searchWindowsSize - greater denoising time. Must be larger than templateWindowSize.</param>
 /// <param name="blockMatchingStep1">Block matching threshold for the first step of BM3D (hard thresholding), i.e. maximum distance for which two blocks are considered similar. Value expressed in euclidean distance.</param>
 /// <param name="blockMatchingStep2">Block matching threshold for the second step of BM3D (Wiener filtering), i.e. maximum distance for which two blocks are considered similar. Value expressed in euclidean distance.</param>
 /// <param name="groupSize">Maximum size of the 3D group for collaborative filtering.</param>
 /// <param name="slidingStep">Sliding step to process every next reference block.</param>
 /// <param name="beta">Kaiser window parameter that affects the sidelobe attenuation of the transform of the window. Kaiser window is used in order to reduce border effects. To prevent usage of the window, set beta to zero.</param>
 /// <param name="normType">Norm used to calculate distance between blocks. L2 is slower than L1 but yields more accurate results.</param>
 /// <param name="step">Step of BM3D to be executed. Possible variants are: step 1, step 2, both steps.</param>
 /// <param name="transformType">Type of the orthogonal transform used in collaborative filtering step. Currently only Haar transform is supported.</param>
 /// <remarks><see href="http://www.cs.tut.fi/~foi/GCF-BM3D/BM3D_TIP_2007.pdf"/></remarks>
 public static void Bm3dDenoising(
     IInputArray src,
     IInputOutputArray dstStep1,
     IOutputArray dstStep2,
     float h = 1,
     int templateWindowSize = 4,
     int searchWindowSize = 16,
     int blockMatchingStep1 = 2500,
     int blockMatchingStep2 = 400,
     int groupSize = 8,
     int slidingStep = 1,
     float beta = 2.0f,
     NormType normType = NormType.L2,
     Bm3dSteps step = Bm3dSteps.All,
     TransformTypes transformType = TransformTypes.Haar)
 {
     using (InputArray iaSrc = src.GetInputArray())
     using (InputOutputArray ioaDstStep1 = dstStep1.GetInputOutputArray())
     using (OutputArray oaDstStep2 = dstStep2.GetOutputArray())
     {
         cveBm3dDenoising1(
             iaSrc, ioaDstStep1, oaDstStep2,
             h, templateWindowSize, searchWindowSize,
             blockMatchingStep1, blockMatchingStep2,
             groupSize, slidingStep, beta,
             normType, step, transformType);
     }
 }
        /// <summary>
        /// Finds the red pixels in an image.
        /// A pixel is considered red when its hue is below 20 or above 160
        /// (the hue range [20, 160] is selected and then inverted) and its saturation is at least 15.
        /// </summary>
        /// <param name="image">The image to process</param>
        /// <param name="mask">The resulting pixel mask</param>
        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
            {
                useUMat = ia.IsUMat;
            }

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    // Select hue within [20, 160]; the result is inverted below.
                    using (ScalarArray lower = new ScalarArray(20))
                        using (ScalarArray upper = new ScalarArray(160))
                        {
                            CvInvoke.InRange(mask, lower, upper, mask);
                        }

                    CvInvoke.BitwiseNot(mask, mask);

                    // Mask for saturation of at least 15 (filters out white/gray pixels).
                    CvInvoke.Threshold(s, s, 15, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }
        }
Пример #7
0
 /// <summary>
 /// Calculates an optical flow.
 /// </summary>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">Computed flow image that has the same size as <paramref name="i0"/> and type CV_32FC2.</param>
 public void Calc(IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
     using (InputArray firstFrame = i0.GetInputArray())
     using (InputArray secondFrame = i1.GetInputArray())
     using (InputOutputArray computedFlow = flow.GetInputOutputArray())
     {
         cveDenseOpticalFlowCalc(_ptr, firstFrame, secondFrame, computedFlow);
     }
 }
Пример #8
0
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="stdDeviationsIntrinsics">Output vector of standard deviations estimated for intrinsic parameters. Order of deviations values: (fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,τx,τy) If one of parameters is not estimated, it's deviation is equals to zero.</param>
 /// <param name="stdDeviationsExtrinsics">Output vector of standard deviations estimated for extrinsic parameters. Order of deviations values: (R1,T1,…,RM,TM) where M is number of pattern views, Ri,Ti are concatenated 1x3 vectors.</param>
 /// <param name="perViewErrors">Output vector of average re-projection errors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     IOutputArray stdDeviationsIntrinsics,
     IOutputArray stdDeviationsExtrinsics,
     IOutputArray perViewErrors,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     // Optional outputs may be null; an empty OutputArray tells the native side to skip them.
     using (InputArray iaCorners = charucoCorners.GetInputArray())
     using (InputArray iaIds = charucoIds.GetInputArray())
     using (InputOutputArray ioaCamera = cameraMatrix.GetInputOutputArray())
     using (InputOutputArray ioaDist = distCoeffs.GetInputOutputArray())
     using (OutputArray oaRvecs = (rvecs != null) ? rvecs.GetOutputArray() : OutputArray.GetEmpty())
     using (OutputArray oaTvecs = (tvecs != null) ? tvecs.GetOutputArray() : OutputArray.GetEmpty())
     using (OutputArray oaStdIntrinsics = (stdDeviationsIntrinsics != null) ? stdDeviationsIntrinsics.GetOutputArray() : OutputArray.GetEmpty())
     using (OutputArray oaStdExtrinsics = (stdDeviationsExtrinsics != null) ? stdDeviationsExtrinsics.GetOutputArray() : OutputArray.GetEmpty())
     using (OutputArray oaErrors = (perViewErrors != null) ? perViewErrors.GetOutputArray() : OutputArray.GetEmpty())
     {
         return cveArucoCalibrateCameraCharuco(
             iaCorners, iaIds, board.BoardPtr, ref imageSize,
             ioaCamera, ioaDist, oaRvecs, oaTvecs,
             oaStdIntrinsics, oaStdExtrinsics, oaErrors,
             flags, ref criteria);
     }
 }
Пример #9
0
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observedImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 public static void DrawMatches(
     IInputArray modelImage,
     VectorOfKeyPoint modelKeypoints,
     IInputArray observedImage,
     VectorOfKeyPoint observedKeyPoints,
     VectorOfVectorOfDMatch matches,
     IInputOutputArray result,
     MCvScalar matchColor,
     MCvScalar singlePointColor,
     VectorOfVectorOfByte mask = null,
     KeypointDrawType flags = KeypointDrawType.Default)
 {
     // Note the native call takes the observed image/keypoints first.
     using (InputArray iaModel = modelImage.GetInputArray())
     using (InputArray iaObserved = observedImage.GetInputArray())
     using (InputOutputArray ioaResult = result.GetInputOutputArray())
     {
         Features2DInvoke.drawMatchedFeatures2(
             iaObserved,
             observedKeyPoints,
             iaModel,
             modelKeypoints,
             matches,
             ioaResult,
             ref matchColor,
             ref singlePointColor,
             mask,
             flags);
     }
 }
Пример #10
0
 /// <summary>
 /// Performs fisheye stereo calibration.
 /// </summary>
 /// <param name="objectPoints">Vector of vectors of the calibration pattern points.</param>
 /// <param name="imagePoints1">Vector of vectors of the projections of the calibration pattern points, observed by the first camera.</param>
 /// <param name="imagePoints2">Vector of vectors of the projections of the calibration pattern points, observed by the second camera.</param>
 /// <param name="K1">Input/output first camera matrix. If FixIntrinsic is specified, some or all of the matrix components must be initialized.</param>
 /// <param name="D1">Input/output vector of distortion coefficients (k1,k2,k3,k4) of 4 elements.</param>
 /// <param name="K2">Input/output second camera matrix. The parameter is similar to <paramref name="K1"/></param>
 /// <param name="D2">Input/output lens distortion coefficients for the second camera. The parameter is similar to <paramref name="D1"/></param>
 /// <param name="imageSize">Size of the image used only to initialize intrinsic camera matrix.</param>
 /// <param name="R">Output rotation matrix between the 1st and the 2nd camera coordinate systems.</param>
 /// <param name="T">Output translation vector between the coordinate systems of the cameras.</param>
 /// <param name="flags">Fish eye calibration flags</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The value reported by the native fisheye stereo calibration (the final re-projection error).</returns>
 public static double StereoCalibrate(IInputArray objectPoints, IInputArray imagePoints1,
                                      IInputArray imagePoints2, IInputOutputArray K1, IInputOutputArray D1, IInputOutputArray K2,
                                      IInputOutputArray D2, Size imageSize, IOutputArray R, IOutputArray T, CalibrationFlag flags, MCvTermCriteria criteria)
 {
     using (InputArray iaObject = objectPoints.GetInputArray())
     using (InputArray iaImage1 = imagePoints1.GetInputArray())
     using (InputArray iaImage2 = imagePoints2.GetInputArray())
     using (InputOutputArray ioaK1 = K1.GetInputOutputArray())
     using (InputOutputArray ioaD1 = D1.GetInputOutputArray())
     using (InputOutputArray ioaK2 = K2.GetInputOutputArray())
     using (InputOutputArray ioaD2 = D2.GetInputOutputArray())
     using (OutputArray oaRotation = R.GetOutputArray())
     using (OutputArray oaTranslation = T.GetOutputArray())
     {
         return CvInvoke.cveFisheyeStereoCalibrate(
             iaObject,
             iaImage1,
             iaImage2,
             ioaK1,
             ioaD1,
             ioaK2,
             ioaD2,
             ref imageSize,
             oaRotation,
             oaTranslation,
             (int)flags,
             ref criteria);
     }
 }
Пример #11
0
 /// <summary>
 /// Calculates a dense optical flow on CUDA.
 /// </summary>
 /// <param name="denseFlow">The dense optical flow object</param>
 /// <param name="i0">first input image.</param>
 /// <param name="i1">second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">computed flow image that has the same size as I0 and type CV_32FC2.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
 {
     // A null stream means "run synchronously"; the native side gets a zero handle.
     IntPtr streamPtr = (stream != null) ? stream.Ptr : IntPtr.Zero;
     using (InputArray firstImage = i0.GetInputArray())
     using (InputArray secondImage = i1.GetInputArray())
     using (InputOutputArray resultFlow = flow.GetInputOutputArray())
     {
         cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, firstImage, secondImage, resultFlow, streamPtr);
     }
 }
Пример #12
0
        /// <summary>
        /// Compute a pixel mask for the given image by range-filtering hue and
        /// binary-thresholding saturation and luminosity in the HSV color space.
        /// Unlike GetRedPixelMask, the hue range is NOT inverted, so the mask
        /// selects hueLower &lt;= hue &lt;= hueUpper.
        /// </summary>
        /// <param name="image">The BGR color image to compute the mask from</param>
        /// <param name="mask">The resulting pixel mask</param>
        /// <param name="hueLower">Lower bound (inclusive) for the hue channel</param>
        /// <param name="hueUpper">Upper bound (inclusive) for the hue channel</param>
        /// <param name="satLower">Binary threshold for the saturation channel; pixels above it are kept</param>
        /// <param name="satUpper">Value written for saturation pixels that pass the threshold</param>
        /// <param name="lumLower">Binary threshold for the luminosity (value) channel; pixels above it are kept</param>
        /// <param name="lumUpper">Value written for luminosity pixels that pass the threshold</param>
        private static void GetColorPixelMask(IInputArray image, IInputOutputArray mask, int hueLower, int hueUpper, int satLower, int satUpper, int lumLower, int lumUpper)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    using (IImage lum = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    {
                        CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                        CvInvoke.ExtractChannel(hsv, mask, 0);
                        CvInvoke.ExtractChannel(hsv, s, 1);
                        CvInvoke.ExtractChannel(hsv, lum, 2);

                        // Keep pixels whose hue lies within [hueLower, hueUpper].
                        using (ScalarArray lower = new ScalarArray(hueLower))
                            using (ScalarArray upper = new ScalarArray(hueUpper))
                                CvInvoke.InRange(mask, lower, upper, mask);
                        //CvInvoke.BitwiseNot(mask, mask); //invert results to "round the corner" of the hue scale

                        // Threshold on saturation and merge into the mask (filters out white pixels).
                        CvInvoke.Threshold(s, s, satLower, satUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, s, mask, null);

                        // Threshold on luminosity and merge into the mask.
                        CvInvoke.Threshold(lum, lum, lumLower, lumUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, lum, mask, null);
                    }
        }
Пример #13
0
 /// <summary>
 /// Calculates an optical flow.
 /// </summary>
 /// <param name="opticalFlow">The dense optical flow object</param>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">Computed flow image that has the same size as <paramref name="i0"/> and type CV_32FC2.</param>
 public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
    using (InputArray previousFrame = i0.GetInputArray())
    {
       using (InputArray nextFrame = i1.GetInputArray())
       {
          using (InputOutputArray flowOutput = flow.GetInputOutputArray())
          {
             CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, previousFrame, nextFrame, flowOutput);
          }
       }
    }
 }
Пример #14
0
      /// <summary>
      /// Compute the red pixel mask for the given image.
      /// A red pixel is a pixel where: (hue &lt; 20 OR hue &gt; 160) AND saturation &gt; 10
      /// (the hue range [20, 160] is selected and then inverted, wrapping around the hue circle).
      /// </summary>
      /// <param name="image">The color image to find red mask from</param>
      /// <param name="mask">The red pixel mask</param>
      private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
      {
         bool useUMat;
         using (InputOutputArray ia = mask.GetInputOutputArray())
            useUMat = ia.IsUMat;

         using (IImage hsv = useUMat ? (IImage)new UMat() : (IImage)new Mat())
         using (IImage s = useUMat ? (IImage)new UMat() : (IImage)new Mat())
         {
            CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
            CvInvoke.ExtractChannel(hsv, mask, 0);
            CvInvoke.ExtractChannel(hsv, s, 1);

            // Select hue in [20, 160], then invert so the mask covers hue < 20 or hue > 160.
            using (ScalarArray lower = new ScalarArray(20))
            using (ScalarArray upper = new ScalarArray(160))
               CvInvoke.InRange(mask, lower, upper, mask);
            CvInvoke.BitwiseNot(mask, mask);

            //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
            CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
            CvInvoke.BitwiseAnd(mask, s, mask, null);

         }
      }
Пример #15
0
 /// <summary>
 /// Performs BM3D (Block-Matching and 3D-filtering) image denoising by
 /// forwarding all parameters to the native implementation.
 /// </summary>
 /// <param name="src">Input 1-channel image.</param>
 /// <param name="dstStep1">Output image of the first step of BM3D with the same size and type as src.</param>
 /// <param name="dstStep2">Output image of the second step of BM3D with the same size and type as src.</param>
 /// <param name="h">Filter strength; larger values remove more noise but also more detail.</param>
 /// <param name="templateWindowSize">Size in pixels of the template patch used for block-matching.</param>
 /// <param name="searchWindowSize">Size in pixels of the window used to perform block-matching.</param>
 /// <param name="blockMatchingStep1">Block matching threshold for the first step of BM3D (hard thresholding).</param>
 /// <param name="blockMatchingStep2">Block matching threshold for the second step of BM3D (Wiener filtering).</param>
 /// <param name="groupSize">Maximum size of the 3D group for collaborative filtering.</param>
 /// <param name="slidingStep">Sliding step to process every next reference block.</param>
 /// <param name="beta">Kaiser window parameter; set to zero to disable the window.</param>
 /// <param name="normType">Norm used to calculate distance between blocks, as a raw integer.</param>
 /// <param name="step">Step of BM3D to be executed, as a raw integer.</param>
 /// <param name="transformType">Type of the orthogonal transform, as a raw integer.</param>
 public static void Bm3dDenoising(
     IInputArray src,
     IInputOutputArray dstStep1,
     IOutputArray dstStep2,
     float h,
     int templateWindowSize,
     int searchWindowSize,
     int blockMatchingStep1,
     int blockMatchingStep2,
     int groupSize,
     int slidingStep,
     float beta,
     int normType,
     int step,
     int transformType)
 {
     using (InputArray source = src.GetInputArray())
     using (InputOutputArray firstStepOutput = dstStep1.GetInputOutputArray())
     using (OutputArray secondStepOutput = dstStep2.GetOutputArray())
     {
         cveBm3dDenoising1(
             source, firstStepOutput, secondStepOutput,
             h, templateWindowSize, searchWindowSize,
             blockMatchingStep1, blockMatchingStep2,
             groupSize, slidingStep, beta,
             normType, step, transformType);
     }
 }
Пример #16
0
 /// <summary>
 /// Calculates a dense optical flow on CUDA.
 /// </summary>
 /// <param name="denseFlow">The dense optical flow object</param>
 /// <param name="i0">First input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as <paramref name="i0"/>.</param>
 /// <param name="flow">Computed flow image.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaDenseOpticalFlow denseFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow, Stream stream = null)
 {
    // A null stream maps to a zero native handle (synchronous execution).
    IntPtr streamHandle = (stream != null) ? stream.Ptr : IntPtr.Zero;
    using (InputArray firstImage = i0.GetInputArray())
    using (InputArray secondImage = i1.GetInputArray())
    using (InputOutputArray flowOutput = flow.GetInputOutputArray())
    {
       cudaDenseOpticalFlowCalc(denseFlow.DenseOpticalFlowPtr, firstImage, secondImage, flowOutput, streamHandle);
    }
 }
Пример #17
0
 /// <summary>
 /// Draws the checker onto the given image.
 /// </summary>
 /// <param name="img">image in color space BGR</param>
 public void Draw(IInputOutputArray img)
 {
     using (InputOutputArray target = img.GetInputOutputArray())
         MccInvoke.cveCCheckerDrawDraw(_ptr, target);
 }
Пример #18
0
 /// <summary>
 /// Blends and returns the final pano.
 /// </summary>
 /// <param name="dst">Final pano</param>
 /// <param name="dstMask">Final pano mask</param>
 public void Blend(IInputOutputArray dst, IInputOutputArray dstMask)
 {
     using (InputOutputArray pano = dst.GetInputOutputArray())
     using (InputOutputArray panoMask = dstMask.GetInputOutputArray())
     {
         StitchingInvoke.cveBlenderBlend(_blenderPtr, pano, panoMask);
     }
 }
Пример #19
0
 /// <summary>
 /// Generates the all-black and all-white images needed for shadowMasks computation.
 /// To identify shadow regions — the regions of two images where the pixels are not lit
 /// by the projector's light and thus carry no coded information — the 3DUNDERWORLD
 /// algorithm computes a shadow mask for the two camera views, starting from a white
 /// and a black image captured by each camera. This method generates these two
 /// additional images to project.
 /// </summary>
 /// <param name="blackImage">The generated all-black CV_8U image, at projector's resolution.</param>
 /// <param name="whiteImage">The generated all-white CV_8U image, at projector's resolution.</param>
 public void GetImagesForShadowMasks(IInputOutputArray blackImage, IInputOutputArray whiteImage)
 {
     using (InputOutputArray allBlack = blackImage.GetInputOutputArray())
     using (InputOutputArray allWhite = whiteImage.GetInputOutputArray())
     {
         StructuredLightInvoke.cveGrayCodePatternGetImagesForShadowMasks(_ptr, allBlack, allWhite);
     }
 }
Пример #20
0
 /// <summary>
 /// Converts the hardware-generated flow vectors to floating point representation.
 /// </summary>
 /// <param name="flow">Buffer of type CV_16FC2 containing flow vectors generated by Calc().</param>
 /// <param name="floatFlow">Buffer of type CV_32FC2, containing flow vectors in floating point representation, each flow vector for 1 pixel per gridSize, in the pitch-linear layout.</param>
 public void ConvertToFloat(IInputArray flow, IInputOutputArray floatFlow)
 {
     using (InputArray hardwareFlow = flow.GetInputArray())
     using (InputOutputArray convertedFlow = floatFlow.GetInputOutputArray())
     {
         CudaInvoke.cudaNvidiaOpticalFlow_2_0_ConvertToFloat(_ptr, hardwareFlow, convertedFlow);
     }
 }
Пример #21
0
 /// <summary>
 /// Utility to draw the detected facial landmark points.
 /// </summary>
 /// <param name="image">The input image to be processed.</param>
 /// <param name="points">Contains the data of points which will be drawn.</param>
 /// <param name="color">The color of points in BGR format</param>
 public static void DrawFacemarks(IInputOutputArray image, IInputArray points, MCvScalar color)
 {
     using (InputOutputArray canvas = image.GetInputOutputArray())
     using (InputArray landmarkPoints = points.GetInputArray())
     {
         cveDrawFacemarks(canvas, landmarkPoints, ref color);
     }
 }
Пример #22
0
        /*
         * /// <summary>
         * /// Default face detector This function is mainly utilized by the implementation of a Facemark Algorithm.
         * /// </summary>
         * /// <param name="facemark">The facemark object</param>
         * /// <param name="image">The input image to be processed.</param>
         * /// <param name="faces">Output of the function which represent region of interest of the detected faces. Each face is stored in cv::Rect container.</param>
         * /// <returns>True if success</returns>
         * public static bool GetFaces(this IFacemark facemark, IInputArray image, IOutputArray faces)
         * {
         *  using (InputArray iaImage = image.GetInputArray())
         *  using (OutputArray oaFaces = faces.GetOutputArray())
         *  {
         *      return cveFacemarkGetFaces(facemark.FacemarkPtr, iaImage, oaFaces);
         *  }
         * }
         * [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
         * [return: MarshalAs(CvInvoke.BoolMarshalType)]
         * internal extern static bool cveFacemarkGetFaces(IntPtr facemark, IntPtr image, IntPtr faces);
         */

        /// <summary>
        /// Runs the facemark fitting (landmark detection) on the given faces.
        /// </summary>
        /// <param name="facemark">The facemark object.</param>
        /// <param name="image">Input image.</param>
        /// <param name="faces">Region of interest of the detected faces. Each face is stored in a cv::Rect container.</param>
        /// <param name="landmarks">The detected landmark points for each face.</param>
        /// <returns>True if successful.</returns>
        public static bool Fit(this IFacemark facemark, IInputArray image, IInputArray faces, IInputOutputArray landmarks)
        {
            using (InputArray nativeImage = image.GetInputArray())
            using (InputArray nativeFaces = faces.GetInputArray())
            using (InputOutputArray nativeLandmarks = landmarks.GetInputOutputArray())
            {
                return cveFacemarkFit(facemark.FacemarkPtr, nativeImage, nativeFaces, nativeLandmarks);
            }
        }
Пример #23
0
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="sparseFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
 /// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
 {
     using (InputArray iaPrev = prevImg.GetInputArray())
     using (InputArray iaNext = nextImg.GetInputArray())
     using (InputArray iaPrevPts = prevPts.GetInputArray())
     using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
     using (OutputArray oaStatus = status != null ? status.GetOutputArray() : OutputArray.GetEmpty())
     using (OutputArray oaErr = err != null ? err.GetOutputArray() : OutputArray.GetEmpty())
     {
         // Null stream means the native call runs synchronously.
         cudaSparseOpticalFlowCalc(
             sparseFlow.SparseOpticalFlowPtr,
             iaPrev, iaNext, iaPrevPts, ioaNextPts,
             oaStatus, oaErr,
             stream != null ? stream.Ptr : IntPtr.Zero);
     }
 }
Пример #24
0
 /// <summary>
 /// Draw detected markers in image.
 /// </summary>
 /// <param name="image">Input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="corners">Positions of marker corners on input image. (e.g std::vector&lt;std::vector&lt;cv::Point2f&gt; &gt; ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="ids">Vector of identifiers for markers in markersCorners. Optional, if not provided, ids are not painted.</param>
 /// <param name="borderColor">Color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one to improve visualization.</param>
 public static void DrawDetectedMarkers(
     IInputOutputArray image, IInputArray corners, IInputArray ids,
     MCvScalar borderColor)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     using (InputArray nativeCorners = corners.GetInputArray())
     using (InputArray nativeIds = ids.GetInputArray())
     {
         cveArucoDrawDetectedMarkers(nativeImage, nativeCorners, nativeIds, ref borderColor);
     }
 }
Пример #25
0
 /// <summary>
 /// Debug draw search lines onto an image.
 /// </summary>
 /// <param name="img">The output image</param>
 /// <param name="locations">The source locations of a line bundle</param>
 /// <param name="color">The line color</param>
 public static void DrawSearchLines(
     IInputOutputArray img,
     IInputArray locations,
     MCvScalar color)
 {
     using (InputOutputArray nativeImg = img.GetInputOutputArray())
     using (InputArray nativeLocations = locations.GetInputArray())
     {
         cveDrawSearchLines(nativeImg, nativeLocations, ref color);
     }
 }
Пример #26
0
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="sparseFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
 /// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
 {
     using (InputArray iaPrev = prevImg.GetInputArray())
     using (InputArray iaNext = nextImg.GetInputArray())
     using (InputArray iaPrevPts = prevPts.GetInputArray())
     using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
     using (OutputArray oaStatus = status != null ? status.GetOutputArray() : OutputArray.GetEmpty())
     using (OutputArray oaErr = err != null ? err.GetOutputArray() : OutputArray.GetEmpty())
     {
         // Null stream means the native call runs synchronously.
         cudaSparseOpticalFlowCalc(
             sparseFlow.SparseOpticalFlowPtr,
             iaPrev, iaNext, iaPrevPts, ioaNextPts,
             oaStatus, oaErr,
             stream != null ? stream.Ptr : IntPtr.Zero);
     }
 }
Пример #27
0
 /// <summary>
 /// Draw the keypoints found on the image.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="keypoints">The keypoints to be drawn</param>
 /// <param name="outImage">The image with the keypoints drawn</param>
 /// <param name="color">The color used to draw the keypoints</param>
 /// <param name="type">The drawing type</param>
 public static void DrawKeypoints(
    IInputArray image,
    VectorOfKeyPoint keypoints,
    IInputOutputArray outImage,
    Bgr color,
    Features2DToolbox.KeypointDrawType type)
 {
     MCvScalar scalarColor = color.MCvScalar;
     using (InputArray nativeImage = image.GetInputArray())
     using (InputOutputArray nativeOutImage = outImage.GetInputOutputArray())
     {
         CvInvoke.drawKeypoints(nativeImage, keypoints, nativeOutImage, ref scalarColor, type);
     }
 }
Пример #28
0
 /// <summary>
 /// Debug draw markers of matched correspondences onto a lineBundle.
 /// </summary>
 /// <param name="bundle">the lineBundle</param>
 /// <param name="cols">column coordinates in the line bundle</param>
 /// <param name="colors">colors for the markers. Defaults to white.</param>
 public static void DrawCorrespondencies(
     IInputOutputArray bundle,
     IInputArray cols,
     IInputArray colors = null)
 {
     using (InputOutputArray nativeBundle = bundle.GetInputOutputArray())
     using (InputArray nativeCols = cols.GetInputArray())
     // A null colors argument is forwarded to the native layer as an empty array.
     using (InputArray nativeColors = colors != null ? colors.GetInputArray() : InputArray.GetEmpty())
     {
         cveDrawCorrespondencies(nativeBundle, nativeCols, nativeColors);
     }
 }
Пример #29
0
 /// <summary>
 /// Draws a circle on the given image.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="center">Center of the circle.</param>
 /// <param name="radius">Radius of the circle.</param>
 /// <param name="color">Circle color.</param>
 /// <param name="thickness">Thickness of the circle outline; negative values are presumably a filled circle — follows cv::circle semantics.</param>
 /// <param name="lineType">Type of the circle boundary.</param>
 /// <param name="shift">Number of fractional bits in the center coordinates and radius value.</param>
 public static void DrawCircle(IInputOutputArray image,
                               Point center,
                               int radius,
                               MCvScalar color,
                               int thickness     = 1,
                               LineType lineType = LineType.EightConnected,
                               int shift         = 0)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     {
         cveCircle(nativeImage, ref center, radius, ref color, thickness, lineType, shift);
     }
 }
Пример #30
0
 /// <summary>
 /// Draws a line segment between two points on the given image.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="start">First endpoint of the line segment.</param>
 /// <param name="end">Second endpoint of the line segment.</param>
 /// <param name="color">Line color.</param>
 /// <param name="thickness">Line thickness.</param>
 /// <param name="lineType">Type of the line.</param>
 /// <param name="shift">Number of fractional bits in the point coordinates.</param>
 public static void DrawLine(IInputOutputArray image,
                             Point start,
                             Point end,
                             MCvScalar color,
                             int thickness     = 1,
                             LineType lineType = LineType.EightConnected,
                             int shift         = 0)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     {
         cveLine(nativeImage, ref start, ref end, ref color, thickness, lineType, shift);
     }
 }
Пример #31
0
 /// <summary>
 /// Draws a circle on the given image.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="center">Center of the circle.</param>
 /// <param name="radius">Radius of the circle.</param>
 /// <param name="color">Circle color.</param>
 /// <param name="thickness">Thickness of the circle outline; negative values are presumably a filled circle — follows cv::circle semantics.</param>
 /// <param name="lineType">Type of the circle boundary.</param>
 /// <param name="shift">Number of fractional bits in the center coordinates and radius value.</param>
 public static void DrawCircle(IInputOutputArray image,
     Point center,
     int radius,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     {
         cveCircle(nativeImage, ref center, radius, ref color, thickness, lineType, shift);
     }
 }
Пример #32
0
 /// <summary>
 /// Given the pose estimation of a marker or board, this function draws the axis of the world coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">rotation vector of the coordinate system that will be drawn.</param>
 /// <param name="tvec">translation vector of the coordinate system that will be drawn.</param>
 /// <param name="length">length of the painted axis in the same unit than tvec (usually in meters)</param>
 public static void DrawAxis(
     IInputOutputArray image, IInputArray cameraMatrix, IInputArray distCoeffs,
     IInputArray rvec, IInputArray tvec, float length)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     using (InputArray nativeCameraMatrix = cameraMatrix.GetInputArray())
     using (InputArray nativeDistCoeffs = distCoeffs.GetInputArray())
     using (InputArray nativeRvec = rvec.GetInputArray())
     using (InputArray nativeTvec = tvec.GetInputArray())
     {
         cveArucoDrawAxis(nativeImage, nativeCameraMatrix, nativeDistCoeffs, nativeRvec, nativeTvec, length);
     }
 }
Пример #33
0
        /// <summary>
        /// Draw the keypoints found on the image.
        /// </summary>
        /// <param name="image">The image</param>
        /// <param name="keypoints">The keypoints to be drawn</param>
        /// <param name="outImage">The image with the keypoints drawn</param>
        /// <param name="color">The color used to draw the keypoints</param>
        /// <param name="type">The drawing type</param>
        public static void DrawKeypoints(
            IInputArray image,
            VectorOfKeyPoint keypoints,
            IInputOutputArray outImage,
            Bgr color,
            Features2DToolbox.KeypointDrawType type = KeypointDrawType.Default)
        {
            MCvScalar scalarColor = color.MCvScalar;

            using (InputArray nativeImage = image.GetInputArray())
            using (InputOutputArray nativeOutImage = outImage.GetInputOutputArray())
            {
                Features2DInvoke.drawKeypoints(nativeImage, keypoints, nativeOutImage, ref scalarColor, type);
            }
        }
Пример #34
0
        /// <summary>
        /// Swap channels.
        /// </summary>
        /// <param name="src">The image where the channels will be swapped</param>
        /// <param name="dstOrder">
        /// Integer array describing how channel values are permutated. The n-th entry
        /// of the array contains the number of the channel that is stored in the n-th channel of
        /// the output image. E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR
        /// channel order.
        /// </param>
        /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
        /// <exception cref="ArgumentException">Thrown when <paramref name="dstOrder"/> is null or has fewer than 4 elements.</exception>
        public static void SwapChannels(IInputOutputArray src, int[] dstOrder, Stream stream)
        {
            if (dstOrder == null || dstOrder.Length < 4)
            {
                throw new ArgumentException("dstOrder must be an int array of size 4");
            }

            // Pin the managed array so the native code can read it. The handle must be
            // released even if GetInputOutputArray or the native call throws, otherwise
            // the pinned object leaks — hence the try/finally.
            GCHandle handle = GCHandle.Alloc(dstOrder, GCHandleType.Pinned);
            try
            {
                using (InputOutputArray ioaSrc = src.GetInputOutputArray())
                    cudaSwapChannels(ioaSrc, handle.AddrOfPinnedObject(), stream);
            }
            finally
            {
                handle.Free();
            }
        }
Пример #35
0
 /// <summary>
 /// Draws a set of Charuco corners.
 /// </summary>
 /// <param name="image">image input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="charucoCorners">vector of detected charuco corners</param>
 /// <param name="charucoIds">list of identifiers for each corner in charucoCorners</param>
 /// <param name="cornerColor">color of the square surrounding each corner</param>
 public static void DrawDetectedCornersCharuco(
     IInputOutputArray image,
     IInputArray charucoCorners,
     IInputArray charucoIds,
     MCvScalar cornerColor)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     using (InputArray nativeCorners = charucoCorners.GetInputArray())
     // Null ids are forwarded as an empty array so the native side skips id painting.
     using (InputArray nativeIds = charucoIds != null ? charucoIds.GetInputArray() : InputArray.GetEmpty())
     {
         cveArucoDrawDetectedCornersCharuco(nativeImage, nativeCorners, nativeIds, ref cornerColor);
     }
 }
Пример #36
0
 /// <summary>
 /// Draw a set of detected ChArUco Diamond markers.
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="diamondCorners">positions of diamond corners in the same format returned by detectCharucoDiamond(). (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="diamondIds">vector of identifiers for diamonds in diamondCorners, in the same format returned by detectCharucoDiamond() (e.g. VectorOfMat ). Optional, if not provided, ids are not painted. </param>
 /// <param name="borderColor">color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one.</param>
 public static void DrawDetectedDiamonds(
     IInputOutputArray image,
     IInputArrayOfArrays diamondCorners,
     IInputArray diamondIds,
     MCvScalar borderColor)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     using (InputArray nativeCorners = diamondCorners.GetInputArray())
     // Null ids are forwarded as an empty array so the native side skips id painting.
     using (InputArray nativeIds = diamondIds != null ? diamondIds.GetInputArray() : InputArray.GetEmpty())
     {
         cveArucoDrawDetectedDiamonds(nativeImage, nativeCorners, nativeIds, ref borderColor);
     }
 }
Пример #37
0
 /// <summary>
 /// The grab cut algorithm for segmentation.
 /// </summary>
 /// <param name="img">The 8-bit 3-channel image to be segmented</param>
 /// <param name="mask">Input/output 8-bit single-channel mask. The mask is initialized by the function
 /// when mode is set to GC_INIT_WITH_RECT. Its elements may have one of following values:
 /// 0 (GC_BGD) defines an obvious background pixels.
 /// 1 (GC_FGD) defines an obvious foreground (object) pixel.
 /// 2 (GC_PR_BGR) defines a possible background pixel.
 /// 3 (GC_PR_FGD) defines a possible foreground pixel.
 /// May be null, in which case an empty array is passed to the native layer.
 ///</param>
 /// <param name="rect">The rectangle to initialize the segmentation</param>
 /// <param name="bgdModel">
 /// Temporary array for the background model. Do not modify it while you are
 /// processing the same image.
 /// </param>
 /// <param name="fgdModel">
 /// Temporary arrays for the foreground model. Do not modify it while you are
 /// processing the same image.
 /// </param>
 /// <param name="iterCount">The number of iterations</param>
 /// <param name="type">The initialization type</param>
 public static void GrabCut(
    IInputArray img,
    IInputOutputArray mask,
    Rectangle rect,
    IInputOutputArray bgdModel,
    IInputOutputArray fgdModel,
    int iterCount,
    CvEnum.GrabcutInitType type)
 {
     using (InputArray nativeImg = img.GetInputArray())
     using (InputOutputArray nativeMask = mask != null ? mask.GetInputOutputArray() : InputOutputArray.GetEmpty())
     using (InputOutputArray nativeBgdModel = bgdModel.GetInputOutputArray())
     using (InputOutputArray nativeFgdModel = fgdModel.GetInputOutputArray())
     {
         cveGrabCut(nativeImg, nativeMask, ref rect, nativeBgdModel, nativeFgdModel, iterCount, type);
     }
 }
Пример #38
0
 /// <summary>
 /// Draws an elliptic arc on the given image.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="center">Center of the ellipse.</param>
 /// <param name="axes">Half of the size of the ellipse main axes.</param>
 /// <param name="angle">Ellipse rotation angle in degrees.</param>
 /// <param name="startAngle">Starting angle of the elliptic arc in degrees.</param>
 /// <param name="endAngle">Ending angle of the elliptic arc in degrees.</param>
 /// <param name="color">Ellipse color.</param>
 /// <param name="thickness">Thickness of the ellipse arc outline; negative values presumably draw a filled ellipse — follows cv::ellipse semantics.</param>
 /// <param name="lineType">Type of the ellipse boundary.</param>
 /// <param name="shift">Number of fractional bits in the coordinates of the center and values of axes.</param>
 public static void DrawEllipse(IInputOutputArray image,
     Point center,
     Size axes,
     double angle,
     double startAngle,
     double endAngle,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     int shift = 0)
 {
     using (InputOutputArray nativeImage = image.GetInputOutputArray())
     {
         cveEllipse(nativeImage, ref center, ref axes, angle, startAngle, endAngle, ref color, thickness, lineType, shift);
     }
 }
Пример #39
0
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observerdImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 /// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 public static void DrawMatches(
    IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
    IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
    VectorOfVectorOfDMatch matches,
    IInputOutputArray result,
    MCvScalar matchColor, MCvScalar singlePointColor,
    IInputArray mask = null,
    KeypointDrawType flags = KeypointDrawType.Default)
 {
     using (InputArray nativeModelImage = modelImage.GetInputArray())
     using (InputArray nativeObservedImage = observerdImage.GetInputArray())
     using (InputOutputArray nativeResult = result.GetInputOutputArray())
     using (InputArray nativeMask = mask != null ? mask.GetInputArray() : InputArray.GetEmpty())
     {
         // NOTE: the native API takes the observed image/keypoints first, then the model.
         CvInvoke.drawMatchedFeatures(
             nativeObservedImage, observedKeyPoints,
             nativeModelImage, modelKeypoints,
             matches, nativeResult,
             ref matchColor, ref singlePointColor,
             nativeMask, flags);
     }
 }
Пример #40
0
 /// <summary>
 /// Draws contour outlines or filled interiors on the given image.
 /// </summary>
 /// <param name="image">The image to draw on.</param>
 /// <param name="contours">All the input contours.</param>
 /// <param name="contourIdx">Index of the contour to draw. If it is negative, all the contours are drawn.</param>
 /// <param name="color">Color of the contours.</param>
 /// <param name="thickness">Thickness of the contour lines; negative values presumably draw filled interiors — follows cv::drawContours semantics.</param>
 /// <param name="lineType">Type of the contour line segments.</param>
 /// <param name="hierarchy">Optional information about hierarchy. Use null when no hierarchy is available.</param>
 /// <param name="maxLevel">Maximal level for drawn contours; only meaningful when <paramref name="hierarchy"/> is supplied.</param>
 /// <param name="offset">Optional contour shift by which every contour point is moved.</param>
 public static void DrawContours(IInputOutputArray image,
     IInputArray contours,
     int contourIdx,
     MCvScalar color,
     int thickness = 1,
     LineType lineType = LineType.EightConnected,
     IInputArray hierarchy = null,
     int maxLevel = int.MaxValue,
     Point offset = default(Point))
 {
     using (InputOutputArray imageArray = image.GetInputOutputArray())
     using (InputArray contoursArray = contours.GetInputArray())
     // A null hierarchy is forwarded as an empty InputArray — the same idiom the
     // sibling wrappers in this file use for optional array parameters.
     using (InputArray hierarchyArray = (hierarchy != null) ? hierarchy.GetInputArray() : InputArray.GetEmpty())
     {
         cveDrawContours(imageArray, contoursArray, contourIdx, ref color, thickness, lineType, hierarchyArray, maxLevel, ref offset);
     }
 }
Пример #41
0
 /// <summary>
 /// Draws a rectangle specified by a CvRect structure.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="rect">The rectangle to be drawn</param>
 /// <param name="color">Line color </param>
 /// <param name="thickness">Thickness of lines that make up the rectangle. Negative values make the function to draw a filled rectangle.</param>
 /// <param name="lineType">Type of the line</param>
 /// <param name="shift">Number of fractional bits in the point coordinates</param>
 public static void Rectangle(IInputOutputArray img, Rectangle rect, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
     using (InputOutputArray nativeImg = img.GetInputOutputArray())
     {
         cveRectangle(nativeImg, ref rect, ref color, thickness, lineType, shift);
     }
 }
Пример #42
0
 /// <summary>
 /// Implements one of the variants of watershed, non-parametric marker-based segmentation algorithm, described in [Meyer92] Before passing the image to the function, user has to outline roughly the desired regions in the image markers with positive (&gt;0) indices, i.e. every region is represented as one or more connected components with the pixel values 1, 2, 3 etc. Those components will be "seeds" of the future image regions. All the other pixels in markers, which relation to the outlined regions is not known and should be defined by the algorithm, should be set to 0's. On the output of the function, each pixel in markers is set to one of values of the "seed" components, or to -1 at boundaries between the regions.
 /// </summary>
 /// <remarks>Note, that it is not necessary that every two neighbor connected components are separated by a watershed boundary (-1's pixels), for example, in case when such tangent components exist in the initial marker image. </remarks>
 /// <param name="image">The input 8-bit 3-channel image</param>
 /// <param name="markers">The input/output Int32 depth single-channel image (map) of markers. </param>
 public static void Watershed(IInputArray image, IInputOutputArray markers)
 {
     using (InputArray nativeImage = image.GetInputArray())
     using (InputOutputArray nativeMarkers = markers.GetInputOutputArray())
     {
         cveWatershed(nativeImage, nativeMarkers);
     }
 }
Пример #43
0
 /// <summary>
 /// Draws a arrow segment pointing from the first point to the second one.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="pt1">The point the arrow starts from.</param>
 /// <param name="pt2">The point the arrow points to.</param>
 /// <param name="color">Line color.</param>
 /// <param name="thickness">Line thickness.</param>
 /// <param name="lineType">Type of the line.</param>
 /// <param name="shift">Number of fractional bits in the point coordinates.</param>
 /// <param name="tipLength">The length of the arrow tip in relation to the arrow length</param>
 public static void ArrowedLine(IInputOutputArray img, Point pt1, Point pt2, MCvScalar color, int thickness = 1,
    CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0, double tipLength = 0.1)
 {
     using (InputOutputArray nativeImg = img.GetInputOutputArray())
         cveArrowedLine(nativeImg, ref pt1, ref pt2, ref color, thickness, lineType, shift, tipLength);
 }
Пример #44
0
 /// <summary>
 /// Draws a single or multiple polygonal curves.
 /// </summary>
 /// <param name="img">Image</param>
 /// <param name="pts">Array of pointers to polylines</param>
 /// <param name="isClosed">
 /// Indicates whether the polylines must be drawn closed. 
 /// If !=0, the function draws the line from the last vertex of every contour to the first vertex.
 /// </param>
 /// <param name="color">Polyline color</param>
 /// <param name="thickness">Thickness of the polyline edges</param>
 /// <param name="lineType">Type of the line segments, see cvLine description</param>
 /// <param name="shift">Number of fractional bits in the vertex coordinates</param>
 public static void Polylines(IInputOutputArray img, IInputArray pts, bool isClosed, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
     using (InputOutputArray nativeImg = img.GetInputOutputArray())
     using (InputArray nativePts = pts.GetInputArray())
     {
         cvePolylines(nativeImg, nativePts, isClosed, ref color, thickness, lineType, shift);
     }
 }
Пример #45
0
 /// <summary>
 /// Fills a connected component with given color.
 /// </summary>
 /// <param name="src">Input 1- or 3-channel, 8-bit or floating-point image. It is modified by the function unless CV_FLOODFILL_MASK_ONLY flag is set.</param>
 /// <param name="mask">Operation mask,
 /// should be singe-channel 8-bit image, 2 pixels wider and 2 pixels taller than image.
 /// If not IntPtr.Zero, the function uses and updates the mask, so user takes responsibility of initializing mask content.
 /// Floodfilling can't go across non-zero pixels in the mask, for example, an edge detector output can be used as a mask to stop filling at edges.
 /// Or it is possible to use the same mask in multiple calls to the function to make sure the filled area do not overlap.
 /// Note: because mask is larger than the filled image, pixel in mask that corresponds to (x,y) pixel in image will have coordinates (x+1,y+1).
 /// May be null, in which case an empty array is forwarded to the native layer.</param>
 /// <param name="seedPoint">The starting point.</param>
 /// <param name="newVal">New value of repainted domain pixels.</param>
 /// <param name="rect">Output parameter set by the function to the minimum bounding rectangle of the repainted domain.</param>
 /// <param name="loDiff">Maximal lower brightness/color difference
 /// between the currently observed pixel and one of its neighbor belong to the component
 /// or seed pixel to add the pixel to component.
 /// In case of 8-bit color images it is packed value.</param>
 /// <param name="upDiff">Maximal upper brightness/color difference
 /// between the currently observed pixel and one of its neighbor belong to the component
 /// or seed pixel to add the pixel to component.
 /// In case of 8-bit color images it is packed value.</param>
 /// <param name="connectivity">Flood fill connectivity</param>
 /// <param name="flags">The operation flags.
 /// Lower bits contain connectivity value, 4 (by default) or 8, used within the function.
 /// Connectivity determines which neighbors of a pixel are considered.
 /// Upper bits can be 0 or combination of the following flags:
 /// CV_FLOODFILL_FIXED_RANGE - if set the difference between the current pixel and seed pixel is considered,
 /// otherwise difference between neighbor pixels is considered (the range is floating).
 /// CV_FLOODFILL_MASK_ONLY - if set, the function does not fill the image (new_val is ignored),
 /// but the fills mask (that must be non-NULL in this case). </param>
 /// <returns>The value returned by the native cvFloodFill call — per OpenCV documentation this is the number of repainted pixels.</returns>
 public static int FloodFill(
    IInputOutputArray src,
    IInputOutputArray mask,
    Point seedPoint,
    MCvScalar newVal,
    out Rectangle rect,
    MCvScalar loDiff,
    MCvScalar upDiff,
    CvEnum.Connectivity connectivity = CvEnum.Connectivity.FourConnected,
    CvEnum.FloodFillType flags = CvEnum.FloodFillType.Default)
 {
     rect = new Rectangle();
     using (InputOutputArray nativeSrc = src.GetInputOutputArray())
     using (InputOutputArray nativeMask = mask != null ? mask.GetInputOutputArray() : InputOutputArray.GetEmpty())
     {
         // Connectivity occupies the low bits of the combined flags value.
         int combinedFlags = (int)connectivity | (int)flags;
         return cveFloodFill(
             nativeSrc,
             nativeMask,
             ref seedPoint, ref newVal,
             ref rect,
             ref loDiff, ref upDiff, combinedFlags);
     }
 }
Пример #46
0
 /// <summary>
 /// Draws the line segment between pt1 and pt2 points in the image. The line is clipped by the image or ROI rectangle. For non-antialiased lines with integer coordinates the 8-connected or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased lines are drawn using Gaussian filtering.
 /// </summary>
 /// <param name="img">The image</param>
 /// <param name="pt1">First point of the line segment</param>
 /// <param name="pt2">Second point of the line segment</param>
 /// <param name="color">Line color</param>
 /// <param name="thickness">Line thickness. </param>
 /// <param name="lineType">Type of the line:
 /// 8 (or 0) - 8-connected line.
 /// 4 - 4-connected line.
 /// CV_AA - antialiased line. 
 /// </param>
 /// <param name="shift">Number of fractional bits in the point coordinates</param>
 public static void Line(IInputOutputArray img, Point pt1, Point pt2, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
 {
     using (InputOutputArray nativeImg = img.GetInputOutputArray())
     {
         cveLine(nativeImg, ref pt1, ref pt2, ref color, thickness, lineType, shift);
     }
 }
Пример #47
0
 /// <summary>
 /// Calculates weighted sum of input <paramref name="src"/> and the accumulator acc so that acc becomes a running average of frame sequence:
 /// acc(x,y)=(1-<paramref name="alpha"/>) * acc(x,y) + <paramref name="alpha"/> * image(x,y) if mask(x,y)!=0
 /// where <paramref name="alpha"/> regulates update speed (how fast accumulator forgets about previous frames). 
 /// </summary>
 /// <param name="src">Input image, 1- or 3-channel, 8-bit or 32-bit floating point (each channel of multi-channel image is processed independently). </param>
 /// <param name="dst">Accumulator of the same number of channels as input image, 32-bit or 64-bit floating-point. </param>
 /// <param name="alpha">Weight of input image</param>
 /// <param name="mask">Optional operation mask</param>
 public static void AccumulateWeighted(IInputArray src, IInputOutputArray dst, double alpha, IInputArray mask = null)
 {
     using (InputArray nativeSrc = src.GetInputArray())
     using (InputOutputArray nativeDst = dst.GetInputOutputArray())
     // A null mask means every pixel participates; pass an empty array natively.
     using (InputArray nativeMask = mask != null ? mask.GetInputArray() : InputArray.GetEmpty())
     {
         cveAccumulateWeighted(nativeSrc, nativeDst, alpha, nativeMask);
     }
 }
Пример #48
0
 /// <summary>
 /// Refines corner locations to sub-pixel accuracy by iterating to find radial saddle points.
 /// </summary>
 /// <param name="image">Input image.</param>
 /// <param name="corners">Initial coordinates of the input corners; refined coordinates on output.</param>
 /// <param name="win">Half size of the search window; e.g. win=(5,5) yields an 11 x 11 search window.</param>
 /// <param name="zeroZone">Half size of the dead region in the middle of the search zone that is excluded from the summation (helps avoid singularities of the autocorrelation matrix). Use (-1,-1) for no dead region.</param>
 /// <param name="criteria">Termination criteria for the iterative refinement: maximum iteration count, required accuracy, or both.</param>
 public static void CornerSubPix(
    IInputArray image,
    IInputOutputArray corners,
    Size win,
    Size zeroZone,
    MCvTermCriteria criteria)
 {
    // Unwrap the managed arrays into native handles, then delegate to the native refinement routine.
    using (InputArray imgArr = image.GetInputArray())
    {
       using (InputOutputArray cornerArr = corners.GetInputOutputArray())
       {
          cveCornerSubPix(imgArr, cornerArr, ref win, ref zeroZone, ref criteria);
       }
    }
 }
Пример #49
0
 /// <summary>
 /// Adds the square of the input <paramref name="src"/> (or its selected region) to the accumulator <paramref name="dst"/>.
 /// </summary>
 /// <param name="src">Input image, 1- or 3-channel, 8-bit or 32-bit floating point (each channel of a multi-channel image is processed independently).</param>
 /// <param name="dst">Accumulator with the same number of channels as the input image, 32-bit or 64-bit floating-point.</param>
 /// <param name="mask">Optional operation mask; pass null to update every pixel.</param>
 public static void AccumulateSquare(IInputArray src, IInputOutputArray dst, IInputArray mask = null)
 {
    using (InputArray srcArr = src.GetInputArray())
    using (InputOutputArray accArr = dst.GetInputOutputArray())
    using (InputArray maskArr = mask?.GetInputArray() ?? InputArray.GetEmpty())
    {
       cveAccumulateSquare(srcArr, accArr, maskArr);
    }
 }
Пример #50
0
 /// <summary>
 /// Computes dense optical flow using Gunnar Farneback's algorithm.
 /// </summary>
 /// <param name="prev0">The first 8-bit single-channel input image.</param>
 /// <param name="next0">The second input image of the same size and type as <paramref name="prev0"/>.</param>
 /// <param name="flow">The computed flow image; same size as the input images, type CV 32FC2.</param>
 /// <param name="pyrScale">Image scale (&lt;1) used to build the pyramid for each image; 0.5 gives the classical pyramid where each layer is half the size of the previous one.</param>
 /// <param name="levels">Number of pyramid layers including the initial image; 1 means no extra layers are created.</param>
 /// <param name="winSize">Averaging window size; larger values increase robustness to noise and help detect fast motion, at the cost of a more blurred motion field.</param>
 /// <param name="iterations">Number of iterations performed at each pyramid level.</param>
 /// <param name="polyN">Size of the pixel neighborhood used for the polynomial expansion; typically 5 or 7. Larger values give smoother, more robust but more blurred results.</param>
 /// <param name="polySigma">Standard deviation of the Gaussian used to smooth the derivatives for the polynomial expansion; for polyN=5 use about 1.1, for polyN=7 about 1.5.</param>
 /// <param name="flags">The operation flags.</param>
 public static void CalcOpticalFlowFarneback(
    IInputArray prev0,
    IInputArray next0,
    IInputOutputArray flow,
    double pyrScale,
    int levels,
    int winSize,
    int iterations,
    int polyN,
    double polySigma,
    CvEnum.OpticalflowFarnebackFlag flags)
 {
    // Materialize native handles for both frames and the flow field before calling into OpenCV.
    using (InputArray firstFrame = prev0.GetInputArray())
    {
       using (InputArray secondFrame = next0.GetInputArray())
       {
          using (InputOutputArray flowField = flow.GetInputOutputArray())
          {
             cveCalcOpticalFlowFarneback(firstFrame, secondFrame, flowField, pyrScale, levels, winSize, iterations, polyN, polySigma, flags);
          }
       }
    }
 }
Пример #51
0
      /*
      /// <summary>
      /// Copies scalar value to every selected element of the destination array:
      /// arr(I)=value if mask(I)!=0
      /// If array arr is of IplImage type, then is ROI used, but COI must not be set
      /// </summary>
      /// <param name="arr">The destination array</param>
      /// <param name="value">Fill value</param>
      /// <param name="mask">Operation mask, 8-bit single channel array; specifies elements of destination array to be changed</param>
#if ANDROID
      public static void cvSet(IntPtr arr, MCvScalar value, IntPtr mask)
      {
         cvSet(arr, value.V0, value.V1, value.V2, value.V3, mask);
      }

      [DllImport(OpencvCoreLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      private static extern void cvSet(IntPtr arr, double v0, double v1, double v2, double v3, IntPtr mask);
#else
      [DllImport(OpencvCoreLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern void cvSet(IntPtr arr, MCvScalar value, IntPtr mask);
#endif

      /// <summary>
      /// Clears the array. In case of dense arrays (CvMat, CvMatND or IplImage) cvZero(array) is equivalent to cvSet(array,cvScalarAll(0),0), in case of sparse arrays all the elements are removed
      /// </summary>
      /// <param name="arr">array to be cleared</param>
      [DllImport(OpencvCoreLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
      public static extern void cvSetZero(IntPtr arr);

      /// <summary>
      /// Clears the array. In case of dense arrays (CvMat, CvMatND or IplImage) cvZero(array) is equivalent to cvSet(array,cvScalarAll(0),0), in case of sparse arrays all the elements are removed
      /// </summary>
      /// <param name="arr">array to be cleared</param>
      public static void cvZero(IntPtr arr)
      {
         cvSetZero(arr);
      }*/

      /// <summary>
      /// Initializes a scaled identity matrix:
      /// arr(i,j) = value if i == j, 0 otherwise.
      /// </summary>
      /// <param name="mat">The matrix to initialize (not necessarily square).</param>
      /// <param name="value">The value to assign to the diagonal elements.</param>
      public static void SetIdentity(IInputOutputArray mat, MCvScalar value)
      {
         using (InputOutputArray matArr = mat.GetInputOutputArray())
         {
            cveSetIdentity(matArr, ref value);
         }
      }
Пример #52
0
 /// <summary>
 /// Shuffles the matrix in-place by swapping randomly chosen pairs of elements on each iteration
 /// (each element may contain several components for multi-channel arrays).
 /// </summary>
 /// <param name="mat">The input/output matrix; shuffled in-place.</param>
 /// <param name="iterFactor">Relative intensity of the shuffling: the number of swapped pairs is round(iterFactor*rows(mat)*cols(mat)). 0 means no shuffling; 1 swaps rows(mat)*cols(mat) random pairs.</param>
 /// <param name="rng">Random number generator state (MCvRNG); use 0 if not sure.</param>
 public static void RandShuffle(IInputOutputArray mat, double iterFactor, UInt64 rng)
 {
    using (InputOutputArray matArr = mat.GetInputOutputArray())
    {
       cveRandShuffle(matArr, iterFactor, rng);
    }
 }
Пример #53
0
 /// <summary>
 /// Inserts a single channel into the destination image.
 /// </summary>
 /// <param name="src">The source channel.</param>
 /// <param name="dst">The destination image the channel will be inserted into.</param>
 /// <param name="coi">0-based index of the channel to be inserted.</param>
 public static void InsertChannel(IInputArray src, IInputOutputArray dst, int coi)
 {
    using (InputArray channelArr = src.GetInputArray())
    {
       using (InputOutputArray destArr = dst.GetInputOutputArray())
       {
          cveInsertChannel(channelArr, destArr, coi);
       }
    }
 }
Пример #54
0
      /// <summary>
      /// Swap channels.
      /// </summary>
      /// <param name="src">The image where the channels will be swapped</param>
      /// <param name="dstOrder">
      /// Integer array describing how channel values are permutated. The n-th entry
      /// of the array contains the number of the channel that is stored in the n-th channel of
      /// the output image. E.g. Given an RGBA image, aDstOrder = [3,2,1,0] converts this to ABGR
      /// channel order.
      /// </param>
      /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
      /// <exception cref="ArgumentException">Thrown when <paramref name="dstOrder"/> is null or has fewer than 4 elements.</exception>
      public static void SwapChannels(IInputOutputArray src, int[] dstOrder, Stream stream)
      {
         if (dstOrder == null || dstOrder.Length < 4)
            throw new ArgumentException("dstOrder must be an int array of size 4");
         // Pin the permutation array so the native code can read it via a raw pointer.
         GCHandle handle = GCHandle.Alloc(dstOrder, GCHandleType.Pinned);
         try
         {
            using (InputOutputArray ioaSrc = src.GetInputOutputArray())
               cudaSwapChannels(ioaSrc, handle.AddrOfPinnedObject(), stream);
         }
         finally
         {
            // Free the pinned handle even if the native call throws; otherwise the array stays pinned forever.
            handle.Free();
         }
      }
Пример #55
0
 /// <summary>
 /// Performs Principal Component Analysis of the supplied dataset.
 /// </summary>
 /// <param name="data">Input samples stored as the matrix rows or as the matrix columns.</param>
 /// <param name="mean">Optional mean value; if the matrix is empty, the mean is computed from the data.</param>
 /// <param name="eigenvectors">The eigenvectors.</param>
 /// <param name="retainedVariance">Percentage of variance that PCA should retain. PCA decides how many components to retain, but always keeps at least 2.</param>
 public static void PCACompute(IInputArray data, IInputOutputArray mean, IOutputArray eigenvectors, double retainedVariance)
 {
    using (InputArray samples = data.GetInputArray())
    {
       using (InputOutputArray meanArr = mean.GetInputOutputArray())
       {
          using (OutputArray eigenArr = eigenvectors.GetOutputArray())
          {
             cvePCACompute2(samples, meanArr, eigenArr, retainedVariance);
          }
       }
    }
 }
Пример #56
0
 /// <summary>
 /// Retrieves contours from a binary image. Contours can be used for shape analysis and object
 /// recognition, or for connected component detection.
 /// Note: the function modifies the source image content.
 /// </summary>
 /// <param name="image">The source 8-bit single channel image. Non-zero pixels are treated as 1s, zero pixels remain 0s - i.e. the image is treated as binary. To obtain such a binary image from grayscale, use threshold, adaptive threshold or Canny. The function modifies the source image content.</param>
 /// <param name="contours">Detected contours. Each contour is stored as a vector of points.</param>
 /// <param name="hierarchy">Optional output vector containing information about the image topology; pass null if not needed.</param>
 /// <param name="mode">Retrieval mode.</param>
 /// <param name="method">Approximation method (for all the modes, except CV_RETR_RUNS, which uses built-in approximation).</param>
 /// <param name="offset">Offset by which every contour point is shifted. Useful when contours are extracted from an image ROI but should be analyzed in the whole-image context.</param>
 public static void FindContours(
    IInputOutputArray image, IOutputArray contours, IOutputArray hierarchy,
    CvEnum.RetrType mode,
    CvEnum.ChainApproxMethod method,
    Point offset = new Point())
 {
    using (InputOutputArray imgArr = image.GetInputOutputArray())
    using (OutputArray contourArr = contours.GetOutputArray())
    using (OutputArray hierarchyArr = hierarchy?.GetOutputArray() ?? OutputArray.GetEmpty())
    {
       cveFindContours(imgArr, contourArr, hierarchyArr, mode, method, ref offset);
    }
 }
Пример #57
0
 /// <summary>
 /// Adds the product of two images (or their selected regions) to the accumulator <paramref name="dst"/>.
 /// </summary>
 /// <param name="src1">First input image, 1- or 3-channel, 8-bit or 32-bit floating point (each channel of a multi-channel image is processed independently).</param>
 /// <param name="src2">Second input image, the same format as the first one.</param>
 /// <param name="dst">Accumulator with the same number of channels as the input images, 32-bit or 64-bit floating-point.</param>
 /// <param name="mask">Optional operation mask; pass null to update every pixel.</param>
 public static void AccumulateProduct(IInputArray src1, IInputArray src2, IInputOutputArray dst, IInputArray mask = null)
 {
    using (InputArray firstArr = src1.GetInputArray())
    using (InputArray secondArr = src2.GetInputArray())
    using (InputOutputArray accArr = dst.GetInputOutputArray())
    using (InputArray maskArr = mask?.GetInputArray() ?? InputArray.GetEmpty())
    {
       cveAccumulateProduct(firstArr, secondArr, accArr, maskArr);
    }
 }
Пример #58
0
 /// <summary>
 /// Implements the sparse iterative version of Lucas-Kanade optical flow in pyramids ([Bouguet00]).
 /// Calculates the coordinates of the feature points on the current video frame given their
 /// coordinates on the previous frame, with sub-pixel accuracy.
 /// </summary>
 /// <remarks>Both parameters prev_pyr and curr_pyr comply with the following rules: if the image pointer is 0, the function allocates the buffer internally, calculates the pyramid, and releases the buffer after processing. Otherwise, the function calculates the pyramid and stores it in the buffer unless the flag CV_LKFLOW_PYR_A[B]_READY is set. The image should be large enough to fit the Gaussian pyramid data. After the function call both pyramids are calculated and the readiness flag for the corresponding image can be set in the next call (i.e., typically, for all the image pairs except the very first one CV_LKFLOW_PYR_A_READY is set).</remarks>
 /// <param name="prevImg">First frame, at time t.</param>
 /// <param name="nextImg">Second frame, at time t + dt.</param>
 /// <param name="prevPts">Array of points for which the flow needs to be found.</param>
 /// <param name="nextPts">Array of 2D points containing the calculated new positions of the input features.</param>
 /// <param name="status">Output array; each element is set to 1 if the flow for the corresponding feature has been found, 0 otherwise.</param>
 /// <param name="err">Output array of difference measures between patches around the original and moved points. Optional parameter; can be NULL.</param>
 /// <param name="winSize">Size of the search window of each pyramid level.</param>
 /// <param name="maxLevel">Maximal pyramid level number. If 0, pyramids are not used (single level); if 1, two levels are used, etc.</param>
 /// <param name="criteria">Specifies when the iterative search for each point on each pyramid level should stop.</param>
 /// <param name="flags">Miscellaneous flags.</param>
 /// <param name="minEigThreshold">The algorithm calculates the minimum eigenvalue of a 2x2 normal matrix of the optical flow equations (the spatial gradient matrix in [Bouguet00]), divided by the number of pixels in a window; features whose value is below this threshold are filtered out and their flow is not processed, removing bad points and providing a performance boost.</param>
 public static void CalcOpticalFlowPyrLK(
    IInputArray prevImg,
    IInputArray nextImg,
    IInputArray prevPts,
    IInputOutputArray nextPts,
    IOutputArray status,
    IOutputArray err,
    Size winSize,
    int maxLevel,
    MCvTermCriteria criteria,
    CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
    double minEigThreshold = 1.0e-4)
 {
    // Unwrap every managed array into its native handle, then hand everything to the native LK implementation.
    using (InputArray firstFrame = prevImg.GetInputArray())
    using (InputArray secondFrame = nextImg.GetInputArray())
    using (InputArray inputPoints = prevPts.GetInputArray())
    using (InputOutputArray trackedPoints = nextPts.GetInputOutputArray())
    using (OutputArray statusArr = status.GetOutputArray())
    using (OutputArray errArr = err.GetOutputArray())
    {
       cveCalcOpticalFlowPyrLK(firstFrame, secondFrame, inputPoints, trackedPoints, statusArr, errArr, ref winSize, maxLevel, ref criteria, flags, minEigThreshold);
    }
 }
Пример #59
0
 /// <summary>
 /// Updates the motion history image as follows:
 /// mhi(x,y) = timestamp  if silhouette(x,y) != 0
 ///            0          if silhouette(x,y) = 0 and mhi(x,y) &lt; timestamp-duration
 ///            mhi(x,y)   otherwise
 /// That is, MHI pixels where motion occurs are set to the current timestamp, while pixels where
 /// motion happened long ago are cleared.
 /// </summary>
 /// <param name="silhouette">Silhouette mask with non-zero pixels where the motion occurs.</param>
 /// <param name="mhi">Motion history image updated by the function (single-channel, 32-bit floating-point).</param>
 /// <param name="timestamp">Current time in milliseconds or other units.</param>
 /// <param name="duration">Maximal duration of a motion track, in the same units as <paramref name="timestamp"/>.</param>
 public static void UpdateMotionHistory(IInputArray silhouette, IInputOutputArray mhi, double timestamp, double duration)
 {
    using (InputArray maskArr = silhouette.GetInputArray())
    {
       using (InputOutputArray historyArr = mhi.GetInputOutputArray())
       {
          cveUpdateMotionHistory(maskArr, historyArr, timestamp, duration);
       }
    }
 }
Пример #60
-38
 /// <summary>
 /// Generalized form of Split/Merge and some forms of CvtColor. Can be used to change the order
 /// of the planes, add/remove an alpha channel, extract or insert a single plane or multiple planes, etc.
 /// </summary>
 /// <param name="src">The array of input arrays.</param>
 /// <param name="dst">The array of output arrays.</param>
 /// <param name="fromTo">The array of index pairs describing the copied planes: fromTo[k*2] is the 0-based index of the input plane and fromTo[k*2+1] is the index of the output plane, using continuous numbering over all input and all output arrays. When fromTo[k*2] is negative, the corresponding output plane is filled with 0's.</param>
 /// <remarks>Unlike many other new-style C++ functions in OpenCV, mixChannels requires the output arrays to be pre-allocated before calling the function.</remarks>
 /// <exception cref="ArgumentNullException">Thrown when <paramref name="fromTo"/> is null.</exception>
 public static void MixChannels(
    IInputArrayOfArrays src,
    IInputOutputArray dst,
    int[] fromTo)
 {
    if (fromTo == null)
       throw new ArgumentNullException(nameof(fromTo));
    // Pin the index-pair array so the native code can read it via a raw pointer.
    GCHandle handle = GCHandle.Alloc(fromTo, GCHandleType.Pinned);
    try
    {
       using (InputArray iaSrc = src.GetInputArray())
       using (InputOutputArray ioaDst = dst.GetInputOutputArray())
          // fromTo holds pairs, so the pair count is Length / 2 (written as a right shift).
          cveMixChannels(iaSrc, ioaDst, handle.AddrOfPinnedObject(), fromTo.Length >> 1);
    }
    finally
    {
       // Free the pinned handle even if the native call throws; otherwise the array stays pinned forever.
       handle.Free();
    }
 }