Example 1
 /// <summary>
 /// Calculates an optical flow.
 /// </summary>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as prev.</param>
 /// <param name="flow">Computed flow image that has the same size as prev and type CV_32FC2 </param>
 public void Calc(IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
     using (InputArray iaI0 = i0.GetInputArray())
         using (InputArray iaI1 = i1.GetInputArray())
             using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
                 cveDenseOpticalFlowCalc(_ptr, iaI0, iaI1, ioaFlow);
 }
Example 2
 /// <summary>
 /// Calculates an optical flow.
 /// </summary>
 /// <param name="i0">First 8-bit single-channel input image.</param>
 /// <param name="i1">Second input image of the same size and the same type as prev.</param>
 /// <param name="flow">Computed flow image that has the same size as prev and type CV_32FC2 </param>
 /// <param name="opticalFlow">The dense optical flow object</param>
 public static void Calc(this IDenseOpticalFlow opticalFlow, IInputArray i0, IInputArray i1, IInputOutputArray flow)
 {
     using (InputArray iaI0 = i0.GetInputArray())
         using (InputArray iaI1 = i1.GetInputArray())
             using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
                 CvInvoke.cveDenseOpticalFlowCalc(opticalFlow.DenseOpticalFlowPtr, iaI0, iaI1, ioaFlow);
 }
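A minimal usage sketch of this extension method (it exercises the same call as the instance method in Example 1), assuming Emgu's FarnebackOpticalFlow class as the IDenseOpticalFlow implementation and that its constructor defaults are usable; the frame paths are hypothetical:

 using Emgu.CV;
 using Emgu.CV.CvEnum;

 using (Mat prev = CvInvoke.Imread("frame1.png", ImreadModes.Grayscale))
 using (Mat next = CvInvoke.Imread("frame2.png", ImreadModes.Grayscale))
 using (Mat flow = new Mat())
 using (FarnebackOpticalFlow dof = new FarnebackOpticalFlow())
 {
     // Fills 'flow' with a CV_32FC2 per-pixel displacement field.
     dof.Calc(prev, next, flow);
 }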
Example 3
 /// <summary>
 /// Performs stereo calibration.
 /// </summary>
 /// <param name="objectPoints">Vector of vectors of the calibration pattern points.</param>
 /// <param name="imagePoints1">Vector of vectors of the projections of the calibration pattern points, observed by the first camera.</param>
 /// <param name="imagePoints2">Vector of vectors of the projections of the calibration pattern points, observed by the second camera.</param>
 /// <param name="K1">Input/output first camera matrix.If FixIntrinsic is specified, some or all of the matrix components must be initialized.</param>
 /// <param name="D1">Input/output vector of distortion coefficients (k1,k2,k3,k4) of 4 elements.</param>
 /// <param name="K2">Input/output second camera matrix. The parameter is similar to <paramref name="K1"/> </param>
 /// <param name="D2">Input/output lens distortion coefficients for the second camera. The parameter is similar to <paramref name="D1"/></param>
 /// <param name="imageSize">Size of the image used only to initialize intrinsic camera matrix.</param>
 /// <param name="R">Output rotation matrix between the 1st and the 2nd camera coordinate systems.</param>
 /// <param name="T">Output translation vector between the coordinate systems of the cameras.</param>
 /// <param name="flags">Fish eye calibration flags</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 public static double StereoCalibrate(IInputArray objectPoints, IInputArray imagePoints1,
                                      IInputArray imagePoints2, IInputOutputArray K1, IInputOutputArray D1, IInputOutputArray K2,
                                      IInputOutputArray D2, Size imageSize, IOutputArray R, IOutputArray T, CalibrationFlag flags, MCvTermCriteria criteria)
 {
     using (InputArray iaObjectPoints = objectPoints.GetInputArray())
         using (InputArray iaImagePoints1 = imagePoints1.GetInputArray())
             using (InputArray iaImagePoints2 = imagePoints2.GetInputArray())
                 using (InputOutputArray ioaK1 = K1.GetInputOutputArray())
                     using (InputOutputArray ioaD1 = D1.GetInputOutputArray())
                         using (InputOutputArray ioaK2 = K2.GetInputOutputArray())
                             using (InputOutputArray ioaD2 = D2.GetInputOutputArray())
                                 using (OutputArray oaR = R.GetOutputArray())
                                     using (OutputArray oaT = T.GetOutputArray())
                                     {
                                          return CvInvoke.cveFisheyeStereoCalibrate(
                                                    iaObjectPoints,
                                                    iaImagePoints1,
                                                    iaImagePoints2,
                                                    ioaK1,
                                                    ioaD1,
                                                    ioaK2,
                                                    ioaD2,
                                                    ref imageSize,
                                                    oaR,
                                                    oaT,
                                                    (int)flags,
                                                     ref criteria);
                                     }
 }
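A hedged usage sketch, assuming this method and its CalibrationFlag enum live on Emgu's Fisheye class; the point vectors must already hold detected calibration-pattern corners (one entry per view), and the image size and criteria values are illustrative:

 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.Structure;
 using Emgu.CV.Util;

 var objPts = new VectorOfVectorOfPoint3D32F();  // fill: pattern points per view
 var imgPts1 = new VectorOfVectorOfPointF();     // fill: corners seen by camera 1
 var imgPts2 = new VectorOfVectorOfPointF();     // fill: corners seen by camera 2
 Mat K1 = new Mat(), D1 = new Mat(), K2 = new Mat(), D2 = new Mat();
 Mat R = new Mat(), T = new Mat();

 double rms = Fisheye.StereoCalibrate(
     objPts, imgPts1, imgPts2, K1, D1, K2, D2,
     new Size(1280, 720),  // used only to initialize the intrinsics
     R, T,
     Fisheye.CalibrationFlag.RecomputeExtrinsic,
     new MCvTermCriteria(100, 1e-6));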
Example 4
 /// <summary>
 /// Fills arrays with random numbers.
 /// </summary>
 /// <param name="mat">2D or N-dimensional matrix; currently matrices with more than 4 channels are not supported by the methods, use Mat::reshape as a possible workaround.</param>
 /// <param name="distType">distribution type</param>
 /// <param name="a">First distribution parameter; in case of the uniform distribution, this is an inclusive lower boundary, in case of the normal distribution, this is a mean value.</param>
 /// <param name="b">Second distribution parameter; in case of the uniform distribution, this is a non-inclusive upper boundary, in case of the normal distribution, this is a standard deviation (diagonal of the standard deviation matrix or the full standard deviation matrix).</param>
 /// <param name="saturateRange">Pre-saturation flag; for uniform distribution only; if true, the method will first convert a and b to the acceptable value range (according to the mat datatype) and then will generate uniformly distributed random numbers within the range [saturate(a), saturate(b)), if saturateRange=false, the method will generate uniformly distributed random numbers in the original range [a, b) and then will saturate them</param>
 public void Fill(
     IInputOutputArray mat,
     DistType distType,
     IInputArray a,
     IInputArray b,
     bool saturateRange = false)
 {
     using (InputArray iaA = a.GetInputArray())
         using (InputArray iaB = b.GetInputArray())
             using (InputOutputArray ioaMat = mat.GetInputOutputArray())
             {
                 CvInvoke.cveRngFill(_ptr, ioaMat, distType, iaA, iaB, saturateRange);
             }
 }
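A sketch of a call to this instance method, under the assumption that it belongs to Emgu's RNG wrapper class and that DistType is its nested enum with a Uniform member; ScalarArray adapts plain scalars to IInputArray:

 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;

 using (Mat m = new Mat(4, 4, DepthType.Cv8U, 1))
 using (RNG rng = new RNG())
 using (ScalarArray low = new ScalarArray(new MCvScalar(0)))
 using (ScalarArray high = new ScalarArray(new MCvScalar(256)))
 {
     // Uniform integers in [0, 256) for every element of m.
     rng.Fill(m, RNG.DistType.Uniform, low, high);
 }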
Example 5
 /// <summary>
 /// Finds the geometric transform (warp) between two images in terms of the ECC criterion
 /// </summary>
 /// <param name="templateImage">single-channel template image; CV_8U or CV_32F array.</param>
 /// <param name="inputImage">single-channel input image which should be warped with the final warpMatrix in order to provide an image similar to templateImage, same type as temlateImage.</param>
 /// <param name="warpMatrix">floating-point 2×3 or 3×3 mapping matrix (warp).</param>
 /// <param name="motionType">Specifying the type of motion. Use Affine for default</param>
 /// <param name="criteria">specifying the termination criteria of the ECC algorithm; criteria.epsilon defines the threshold of the increment in the correlation coefficient between two iterations (a negative criteria.epsilon makes criteria.maxcount the only termination criterion). Default values can use 50 iteration and 0.001 eps.</param>
 /// <param name="inputMask">An optional mask to indicate valid values of inputImage.</param>
 /// <returns>The final enhanced correlation coefficient, that is the correlation coefficient between the template image and the final warped input image.</returns>
 public static double FindTransformECC(
     IInputArray templateImage, IInputArray inputImage,
     IInputOutputArray warpMatrix, CvEnum.MotionType motionType,
     MCvTermCriteria criteria,
     IInputArray inputMask = null)
 {
     using (InputArray iaTemplateImage = templateImage.GetInputArray())
         using (InputArray iaInputImage = inputImage.GetInputArray())
             using (InputOutputArray ioaWarpMatrix = warpMatrix.GetInputOutputArray())
                 using (InputArray iaInputMask = inputMask == null ? InputArray.GetEmpty() : inputMask.GetInputArray())
                 {
                      return cveFindTransformECC(iaTemplateImage, iaInputImage, ioaWarpMatrix, motionType, ref criteria, iaInputMask);
                 }
 }
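A minimal usage sketch: start from an identity 2x3 warp (the required shape for affine motion) and let the ECC iteration refine it against the template; the file names are hypothetical.

 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;

 using (Mat template = CvInvoke.Imread("template.png", ImreadModes.Grayscale))
 using (Mat input = CvInvoke.Imread("input.png", ImreadModes.Grayscale))
 using (Mat warp = Mat.Eye(2, 3, DepthType.Cv32F, 1))  // identity initial guess
 {
     // 50 iterations / 0.001 eps: the defaults suggested in the docs above.
     double cc = CvInvoke.FindTransformECC(
         template, input, warp,
         MotionType.Affine,
         new MCvTermCriteria(50, 0.001));
     // 'warp' now maps 'input' toward 'template'; cc is the final correlation.
 }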
Example 6
 /// <summary>
 /// The grab cut algorithm for segmentation
 /// </summary>
 /// <param name="img">The 8-bit 3-channel image to be segmented</param>
 /// <param name="mask">Input/output 8-bit single-channel mask. The mask is initialized by the function
 /// when mode is set to GC_INIT_WITH_RECT. Its elements may have one of following values:
 /// 0 (GC_BGD) defines an obvious background pixel.
 /// 1 (GC_FGD) defines an obvious foreground (object) pixel.
 /// 2 (GC_PR_BGD) defines a possible background pixel.
 /// 3 (GC_PR_FGD) defines a possible foreground pixel.
 ///</param>
 /// <param name="rect">The rectangle to initialize the segmentation</param>
 /// <param name="bgdModel">
 /// Temporary array for the background model. Do not modify it while you are
 /// processing the same image.
 /// </param>
 /// <param name="fgdModel">
 /// Temporary array for the foreground model. Do not modify it while you are
 /// processing the same image.
 /// </param>
 /// <param name="iterCount">The number of iterations</param>
 /// <param name="type">The initialization type</param>
 public static void GrabCut(
     IInputArray img,
     IInputOutputArray mask,
     Rectangle rect,
     IInputOutputArray bgdModel,
     IInputOutputArray fgdModel,
     int iterCount,
     CvEnum.GrabcutInitType type)
 {
     using (InputArray iaImg = img.GetInputArray())
         using (InputOutputArray ioaMask = mask == null ? InputOutputArray.GetEmpty() : mask.GetInputOutputArray())
             using (InputOutputArray ioaBgdModel = bgdModel.GetInputOutputArray())
                 using (InputOutputArray ioaFgdModel = fgdModel.GetInputOutputArray())
                     cveGrabCut(iaImg, ioaMask, ref rect, ioaBgdModel, ioaFgdModel, iterCount, type);
 }
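A sketch of rectangle-initialized segmentation; after the call, foreground pixels are those mask elements equal to 1 or 3 (GC_FGD / GC_PR_FGD). The image path and rectangle are hypothetical.

 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.CvEnum;

 using (Mat img = CvInvoke.Imread("photo.jpg", ImreadModes.Color))
 using (Mat mask = new Mat())       // allocated and initialized by GrabCut itself
 using (Mat bgdModel = new Mat())   // temporary model; reuse across calls on the same image
 using (Mat fgdModel = new Mat())
 {
     Rectangle roi = new Rectangle(50, 50, 200, 300);  // assumed object bounds
     CvInvoke.GrabCut(img, mask, roi, bgdModel, fgdModel,
         5,                              // iterations
         GrabcutInitType.InitWithRect);  // initialize the mask from 'roi'
 }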
Example 7
 /// <summary>
 /// Computes dense optical flow using Gunnar Farneback's algorithm
 /// </summary>
 /// <param name="prev0">The first 8-bit single-channel input image</param>
 /// <param name="next0">The second input image of the same size and the same type as prevImg</param>
 /// <param name="flow">The computed flow image; will have the same size as prevImg and type CV 32FC2</param>
 /// <param name="pyrScale">Specifies the image scale (!1) to build the pyramids for each image. pyrScale=0.5 means the classical pyramid, where each next layer is twice smaller than the previous</param>
 /// <param name="levels">The number of pyramid layers, including the initial image. levels=1 means that no extra layers are created and only the original images are used</param>
 /// <param name="winSize">The averaging window size; The larger values increase the algorithm robustness to image noise and give more chances for fast motion detection, but yield more blurred motion field</param>
 /// <param name="iterations">The number of iterations the algorithm does at each pyramid level</param>
 /// <param name="polyN">Size of the pixel neighborhood used to find polynomial expansion in each pixel. The larger values mean that the image will be approximated with smoother surfaces, yielding more robust algorithm and more blurred motion field. Typically, poly n=5 or 7</param>
 /// <param name="polySigma">Standard deviation of the Gaussian that is used to smooth derivatives that are used as a basis for the polynomial expansion. For poly n=5 you can set poly sigma=1.1, for poly n=7 a good value would be poly sigma=1.5</param>
 /// <param name="flags">The operation flags</param>
 public static void CalcOpticalFlowFarneback(
     IInputArray prev0,
     IInputArray next0,
     IInputOutputArray flow,
     double pyrScale,
     int levels,
     int winSize,
     int iterations,
     int polyN,
     double polySigma,
     CvEnum.OpticalflowFarnebackFlag flags)
 {
     using (InputArray iaPrev0 = prev0.GetInputArray())
         using (InputArray iaNext0 = next0.GetInputArray())
             using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
                 cveCalcOpticalFlowFarneback(iaPrev0, iaNext0, ioaFlow, pyrScale, levels, winSize, iterations, polyN, polySigma, flags);
 }
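A usage sketch with the parameter values commonly quoted in OpenCV's samples (0.5 pyramid scale, 3 levels, a 15-pixel window, 3 iterations, polyN=5, polySigma=1.1); the frame paths are hypothetical and the Default flag member is assumed from Emgu's CvEnum:

 using Emgu.CV;
 using Emgu.CV.CvEnum;

 using (Mat prev = CvInvoke.Imread("frame1.png", ImreadModes.Grayscale))
 using (Mat next = CvInvoke.Imread("frame2.png", ImreadModes.Grayscale))
 using (Mat flow = new Mat())
 {
     CvInvoke.CalcOpticalFlowFarneback(
         prev, next, flow,
         pyrScale: 0.5, levels: 3, winSize: 15,
         iterations: 3, polyN: 5, polySigma: 1.1,
         flags: OpticalflowFarnebackFlag.Default);
     // 'flow' is CV_32FC2: channel 0 holds dx, channel 1 holds dy per pixel.
 }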
Example 8
 /// <summary>
 /// Implements sparse iterative version of Lucas-Kanade optical flow in pyramids ([Bouguet00]). It calculates coordinates of the feature points on the current video frame given their coordinates on the previous frame. The function finds the coordinates with sub-pixel accuracy.
 /// </summary>
 /// <remarks>This remark describes the pyramid buffers of the legacy C implementation (cvCalcOpticalFlowPyrLK), whose prev_pyr and curr_pyr parameters comply with the following rules: if the image pointer is 0, the function allocates the buffer internally, calculates the pyramid, and releases the buffer after processing. Otherwise, the function calculates the pyramid and stores it in the buffer unless the flag CV_LKFLOW_PYR_A[B]_READY is set. The image should be large enough to fit the Gaussian pyramid data. After the function call both pyramids are calculated and the readiness flag for the corresponding image can be set in the next call (i.e., typically, for all the image pairs except the very first one CV_LKFLOW_PYR_A_READY is set). </remarks>
 /// <param name="prevImg">First frame, at time t. </param>
 /// <param name="nextImg">Second frame, at time t + dt .</param>
 /// <param name="prevPts">Array of points for which the flow needs to be found. </param>
 /// <param name="nextPts">Array of 2D points containing calculated new positions of input </param>
 /// <param name="winSize">Size of the search window of each pyramid level.</param>
 /// <param name="maxLevel">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc. </param>
 /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise.</param>
 /// <param name="err">Array of double numbers containing difference between patches around the original and moved points. Optional parameter; can be NULL </param>
 /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped.</param>
 /// <param name="flags">Miscellaneous flags</param>
 /// <param name="minEigThreshold">the algorithm calculates the minimum eigen value of a 2x2 normal matrix of optical flow equations (this matrix is called a spatial gradient matrix in [Bouguet00]), divided by number of pixels in a window; if this value is less than minEigThreshold, then a corresponding feature is filtered out and its flow is not processed, so it allows to remove bad points and get a performance boost.</param>
 public static void CalcOpticalFlowPyrLK(
     IInputArray prevImg,
     IInputArray nextImg,
     IInputArray prevPts,
     IInputOutputArray nextPts,
     IOutputArray status,
     IOutputArray err,
     Size winSize,
     int maxLevel,
     MCvTermCriteria criteria,
     CvEnum.LKFlowFlag flags = CvEnum.LKFlowFlag.Default,
     double minEigThreshold  = 1.0e-4)
 {
     using (InputArray iaPrevImg = prevImg.GetInputArray())
         using (InputArray iaNextImg = nextImg.GetInputArray())
             using (InputArray iaPrevPts = prevPts.GetInputArray())
                 using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
                     using (OutputArray oaStatus = status.GetOutputArray())
                         using (OutputArray oaErr = err.GetOutputArray())
                             cveCalcOpticalFlowPyrLK(iaPrevImg, iaNextImg, iaPrevPts, ioaNextPts, oaStatus, oaErr, ref winSize, maxLevel, ref criteria, flags, minEigThreshold);
 }
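A sketch that seeds the tracker with GoodFeaturesToTrack corners; a 21x21 window and 3 pyramid levels are common choices, and the frame paths are hypothetical.

 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;
 using Emgu.CV.Util;

 using (Mat prev = CvInvoke.Imread("frame1.png", ImreadModes.Grayscale))
 using (Mat next = CvInvoke.Imread("frame2.png", ImreadModes.Grayscale))
 {
     // Corners detected in the first frame become the points to track.
     PointF[] corners = CvInvoke.GoodFeaturesToTrack(prev, 200, 0.01, 10);

     using (VectorOfPointF prevPts = new VectorOfPointF(corners))
     using (VectorOfPointF nextPts = new VectorOfPointF())
     using (VectorOfByte status = new VectorOfByte())
     using (VectorOfFloat err = new VectorOfFloat())
     {
         CvInvoke.CalcOpticalFlowPyrLK(
             prev, next, prevPts, nextPts, status, err,
             new Size(21, 21), 3, new MCvTermCriteria(30, 0.01));
         // status[i] == 1 where the i-th point was tracked successfully.
     }
 }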
Example 9
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="opticalFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as prevImg.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found.Otherwise, it is set to 0.</param>
 /// <param name="error">Optional output vector that contains error response for each point (inverse confidence).</param>
 public static void Calc(
     this ISparseOpticalFlow opticalFlow,
     IInputArray prevImg, IInputArray nextImg,
     IInputArray prevPts, IInputOutputArray nextPts,
     IOutputArray status,
     IOutputArray error = null
     )
 {
     using (InputArray iaPreImg = prevImg.GetInputArray())
         using (InputArray iaNextImg = nextImg.GetInputArray())
             using (InputArray iaPrevPts = prevPts.GetInputArray())
                 using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
                     using (OutputArray oaStatus = status.GetOutputArray())
                         using (OutputArray oaError = error == null ? OutputArray.GetEmpty() : error.GetOutputArray())
                             CvInvoke.cveSparseOpticalFlowCalc(
                                 opticalFlow.SparseOpticalFlowPtr,
                                 iaPreImg, iaNextImg,
                                 iaPrevPts, ioaNextPts,
                                 oaStatus, oaError
                                 );
 }
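The extension lets any ISparseOpticalFlow implementation be driven the same way. A sketch assuming Emgu's SparsePyrLKOpticalFlow wrapper; its constructor arguments here are assumptions mirroring the cv::SparsePyrLKOpticalFlow defaults:

 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;
 using Emgu.CV.Util;

 using (Mat prev = CvInvoke.Imread("frame1.png", ImreadModes.Grayscale))
 using (Mat next = CvInvoke.Imread("frame2.png", ImreadModes.Grayscale))
 using (VectorOfPointF prevPts = new VectorOfPointF(
            CvInvoke.GoodFeaturesToTrack(prev, 200, 0.01, 10)))
 using (VectorOfPointF nextPts = new VectorOfPointF())
 using (VectorOfByte status = new VectorOfByte())
 using (SparsePyrLKOpticalFlow lk = new SparsePyrLKOpticalFlow(
            new Size(21, 21), 3, new MCvTermCriteria(30, 0.01)))
 {
     lk.Calc(prev, next, prevPts, nextPts, status);  // error output omitted
 }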
Example 10
 /// <summary>
 /// Performs camera calibration.
 /// </summary>
 /// <param name="objectPoints">vector of vectors of calibration pattern points in the calibration pattern coordinate space.</param>
 /// <param name="imagePoints">vector of vectors of the projections of calibration pattern points. imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to objectPoints[i].size() for each i.</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="K">Output 3x3 floating-point camera matrix. If UseIntrisicGuess is specified, some or all of fx, fy, cx, cy must be initialized before calling the function. </param>
 /// <param name="D">Output vector of distortion coefficients (k1,k2,k3,k4).</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view. That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the calibration pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Different flags</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 public static double Calibrate(IInputArray objectPoints, IInputArray imagePoints, Size imageSize,
                                IInputOutputArray K, IInputOutputArray D, IOutputArray rvecs, IOutputArray tvecs, CalibrationFlag flags,
                                MCvTermCriteria criteria)
 {
     using (InputArray iaObjectPoints = objectPoints.GetInputArray())
         using (InputArray iaImagePoints = imagePoints.GetInputArray())
             using (InputOutputArray ioaK = K.GetInputOutputArray())
                 using (InputOutputArray ioaD = D.GetInputOutputArray())
                     using (OutputArray oaRvecs = rvecs.GetOutputArray())
                         using (OutputArray oaTvecs = tvecs.GetOutputArray())
                         {
                              return CvInvoke.cveFisheyeCalibrate(
                                        iaObjectPoints,
                                        iaImagePoints,
                                        ref imageSize,
                                        ioaK,
                                        ioaD,
                                        oaRvecs,
                                        oaTvecs,
                                        (int)flags,
                                         ref criteria);
                         }
 }
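A single-camera counterpart of the stereo sketch above, again assuming the method and CalibrationFlag enum live on Emgu's Fisheye class; the point vectors must be filled from detected pattern corners before the call:

 using System.Drawing;
 using Emgu.CV;
 using Emgu.CV.Structure;
 using Emgu.CV.Util;

 var objPts = new VectorOfVectorOfPoint3D32F();  // fill: one view per entry
 var imgPts = new VectorOfVectorOfPointF();      // fill: matching projections
 Mat K = new Mat(), D = new Mat();

 using (VectorOfMat rvecs = new VectorOfMat())
 using (VectorOfMat tvecs = new VectorOfMat())
 {
     double rms = Fisheye.Calibrate(
         objPts, imgPts, new Size(1280, 720),
         K, D, rvecs, tvecs,
         Fisheye.CalibrationFlag.RecomputeExtrinsic,
         new MCvTermCriteria(100, 1e-6));
     // K is the 3x3 camera matrix; D holds (k1,k2,k3,k4).
 }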
Example 11
 /// <summary>
 /// Converts NaNs to the given number
 /// </summary>
 /// <param name="a">The array where NaN needs to be converted</param>
 /// <param name="val">The value to convert to</param>
 public static void PatchNaNs(IInputOutputArray a, double val = 0)
 {
     using (InputOutputArray ioaA = a.GetInputOutputArray())
         cvePatchNaNs(ioaA, val);
 }
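A tiny sketch: dividing a float matrix of zeros by itself yields NaN in every element (IEEE 0/0), which PatchNaNs then replaces in place.

 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;

 using (Mat m = new Mat(2, 2, DepthType.Cv32F, 1))
 {
     m.SetTo(new MCvScalar(0));
     CvInvoke.Divide(m, m, m);   // 0/0 -> NaN in every element
     CvInvoke.PatchNaNs(m, -1);  // every NaN becomes -1
 }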
Example 12
 /// <summary>
 /// Updates the motion history image as follows:
 /// mhi(x,y) = timestamp  if silhouette(x,y)!=0
 ///            0          if silhouette(x,y)=0 and mhi(x,y) &lt; timestamp-duration
 ///            mhi(x,y)   otherwise
 /// That is, MHI pixels where motion occurs are set to the current timestamp, while the pixels where motion happened far ago are cleared.
 /// </summary>
 /// <param name="silhouette">Silhouette mask that has non-zero pixels where the motion occurs. </param>
 /// <param name="mhi">Motion history image, that is updated by the function (single-channel, 32-bit floating-point) </param>
 /// <param name="timestamp">Current time in milliseconds or other units. </param>
 /// <param name="duration">Maximal duration of motion track in the same units as timestamp. </param>
 public static void UpdateMotionHistory(IInputArray silhouette, IInputOutputArray mhi, double timestamp, double duration)
 {
     using (InputArray iaSilhouette = silhouette.GetInputArray())
         using (InputOutputArray ioaMhi = mhi.GetInputOutputArray())
             cveUpdateMotionHistory(iaSilhouette, ioaMhi, timestamp, duration);
 }
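A sketch of the usual pipeline: a thresholded frame difference serves as the silhouette, and the 32-bit float MHI accumulates it at the current timestamp. The frame paths, timestamp, and 0.5 duration are illustrative.

 using Emgu.CV;
 using Emgu.CV.CvEnum;
 using Emgu.CV.Structure;

 using (Mat prev = CvInvoke.Imread("frame1.png", ImreadModes.Grayscale))
 using (Mat curr = CvInvoke.Imread("frame2.png", ImreadModes.Grayscale))
 using (Mat silhouette = new Mat())
 using (Mat mhi = new Mat(prev.Rows, prev.Cols, DepthType.Cv32F, 1))
 {
     mhi.SetTo(new MCvScalar(0));
     CvInvoke.AbsDiff(prev, curr, silhouette);
     CvInvoke.Threshold(silhouette, silhouette, 32, 255, ThresholdType.Binary);

     double timestamp = 1.0;  // e.g. seconds since capture started
     CvInvoke.UpdateMotionHistory(silhouette, mhi, timestamp, 0.5);
     // Pixels that moved within the last 0.5 time units stay "hot" in mhi.
 }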