Example #1
 public int EstimateTransform(IInputArrayOfArrays images, Rectangle[][] rois = null)
 {
     using (InputArray iaImages = images.GetInputArray())
         if (rois == null)
         {
             return StitchingInvoke.cveStitcherEstimateTransform1(_ptr, iaImages);
         }
         else
         {
             using (VectorOfVectorOfRect vvr = new VectorOfVectorOfRect(rois))
             {
                 return StitchingInvoke.cveStitcherEstimateTransform2(_ptr, iaImages, vvr);
             }
         }
 }
Example #2
 /// <summary>
 /// For each query descriptor, finds the training descriptors not farther than the specified distance.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">Found matches.</param>
 /// <param name="maxDistance">Threshold for the distance between matched descriptors. Distance means here metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in Pixels)!</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="compactResult">Parameter used when the mask (or masks) is not empty. If compactResult is false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the matches vector does not contain matches for fully masked-out query descriptors.</param>
 public void RadiusMatch(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     VectorOfVectorOfDMatch matches,
     float maxDistance,
     IInputArrayOfArrays mask = null,
     bool compactResult       = false)
 {
     using (InputArray iaQueryDescriptors = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptors = trainDescriptors.GetInputArray())
             using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             {
                 Features2DInvoke.cveDescriptorMatcherRadiusMatch1(_descriptorMatcherPtr, iaQueryDescriptors, iaTrainDescriptors, matches, maxDistance, iaMask, compactResult);
             }
 }
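A minimal usage sketch (not from the original snippet; queryDesc and trainDesc are hypothetical Mat descriptors computed elsewhere, e.g. by ORB's DetectAndCompute, which is why Hamming distance is used):

 using (BFMatcher matcher = new BFMatcher(DistanceType.Hamming))
 using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
 {
     // Collect, for each query descriptor, all train descriptors within Hamming distance 50.
     matcher.RadiusMatch(queryDesc, trainDesc, matches, 50);
     // matches[i] holds every train descriptor within the radius of query descriptor i.
 }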
Example #3
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;>). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     return CalibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize, cameraMatrix, distCoeffs,
                                   rvecs, tvecs, null, null, null, flags, criteria);
 }
Example #4
 /// <summary>
 /// Creates a 4-dimensional blob from a series of images. Optionally resizes and crops the images from the center, subtracts mean values, scales values by a scale factor, and swaps Blue and Red channels.
 /// </summary>
 /// <param name="images">input images (all with 1-, 3- or 4-channels).</param>
 /// <param name="blob">4-dimansional OutputArray with NCHW dimensions order.</param>
 /// <param name="scaleFactor">multiplier for images values.</param>
 /// <param name="size">spatial size for output image</param>
 /// <param name="mean">scalar with mean values which are subtracted from channels. Values are intended to be in (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true.</param>
 /// <param name="swapRB">flag which indicates that swap first and last channels in 3-channel image is necessary.</param>
 /// <param name="crop">	flag which indicates whether image will be cropped after resize or not</param>
 /// <param name="ddepth">Depth of output blob. Choose CV_32F or CV_8U.</param>
 public static void BlobFromImages(
     IInputArrayOfArrays images,
     IOutputArray blob,
     double scaleFactor      = 1.0,
     Size size               = new Size(),
     MCvScalar mean          = new MCvScalar(),
     bool swapRB             = false,
     bool crop               = false,
     CvEnum.DepthType ddepth = CvEnum.DepthType.Cv32F)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaBlob = blob.GetOutputArray())
         {
             cveDnnBlobFromImages(iaImages, oaBlob, scaleFactor, ref size, ref mean, swapRB, crop, ddepth);
         }
 }
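A short usage sketch, assuming the method above is exposed as DnnInvoke.BlobFromImages; the file names and the 224x224 network input size are hypothetical:

 using (Mat frame1 = CvInvoke.Imread("frame1.jpg"))
 using (Mat frame2 = CvInvoke.Imread("frame2.jpg"))
 using (VectorOfMat frames = new VectorOfMat(frame1, frame2))
 using (Mat blob = new Mat())
 {
     // Scale to [0,1], resize to 224x224, convert BGR to RGB; no mean subtraction, no crop.
     DnnInvoke.BlobFromImages(frames, blob, 1.0 / 255.0, new Size(224, 224),
                              new MCvScalar(), swapRB: true, crop: false);
     // blob now has NCHW shape 2 x 3 x 224 x 224.
 }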
Example #5
 /// <summary>
 /// Compute a wrapped phase map from sinusoidal patterns.
 /// </summary>
 /// <param name="patternImages">Input data to compute the wrapped phase map.</param>
 /// <param name="wrappedPhaseMap">Wrapped phase map obtained through one of the three methods.</param>
 /// <param name="shadowMask">Mask used to discard shadow regions.</param>
 /// <param name="fundamental">Fundamental matrix used to compute epipolar lines and ease the matching step.</param>
 public void ComputePhaseMap(
     IInputArrayOfArrays patternImages,
     IOutputArray wrappedPhaseMap,
     IOutputArray shadowMask = null,
     IInputArray fundamental = null)
 {
     using (InputArray iaPatternImages = patternImages.GetInputArray())
         using (OutputArray oaWrappedPhaseMap = wrappedPhaseMap.GetOutputArray())
             using (OutputArray oaShadowMask = shadowMask == null ? OutputArray.GetEmpty() : shadowMask.GetOutputArray())
                 using (InputArray iaFundamental = fundamental == null ? InputArray.GetEmpty() : fundamental.GetInputArray())
                 {
                     StructuredLightInvoke.cveSinusoidalPatternComputePhaseMap(
                         _ptr,
                         iaPatternImages,
                         oaWrappedPhaseMap,
                         oaShadowMask,
                         iaFundamental);
                 }
 }
Example #6
 /// <summary>
 /// Pose estimation for a board of markers.
 /// </summary>
 /// <param name="corners">Vector of already detected markers corners. For each marker, its four corners are provided, (e.g std::vector&gt;std::vector&gt;cv::Point2f&lt; &lt; ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="ids">List of identifiers for each marker in corners</param>
 /// <param name="board">Layout of markers in the board. The layout is composed by the marker identifiers and the positions of each marker corner in the board reference system.</param>
 /// <param name="cameraMatrix">Input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">Vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board (see cv::Rodrigues). Used as initial guess if not empty.</param>
 /// <param name="tvec">Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.</param>
 /// <param name="useExtrinsicGuess">Defines whether initial guess for rvec and tvec will be used or not. Used as initial guess if not empty.</param>
 /// <returns>The function returns the number of markers from the input employed for the board pose estimation. Note that returning a 0 means the pose has not been estimated.</returns>
 public static int EstimatePoseBoard(
     IInputArrayOfArrays corners,
     IInputArray ids,
     IBoard board,
     IInputArray cameraMatrix,
     IInputArray distCoeffs,
     IOutputArray rvec,
     IOutputArray tvec,
     bool useExtrinsicGuess = false)
 {
     using (InputArray iaCorners = corners.GetInputArray())
         using (InputArray iaIds = ids.GetInputArray())
             using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
                 using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
                     using (OutputArray oaRvec = rvec.GetOutputArray())
                         using (OutputArray oaTvec = tvec.GetOutputArray())
                         {
                              return cveArucoEstimatePoseBoard(iaCorners, iaIds, board.BoardPtr, iaCameraMatrix, iaDistCoeffs, oaRvec,
                                                               oaTvec, useExtrinsicGuess);
                         }
 }
Example #7
        // reference http://www.codeproject.com/Articles/196168/Contour-Analysis-for-Image-Recognition-in-C
        private static LineSegment2D[] GetCanny(IInputArrayOfArrays uimage)
        {
            #region Canny and edge detection

            var cannyThreshold        = 180;
            var cannyThresholdLinking = 120;
            var cannyEdges            = new UMat();

            CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);

            var lines = CvInvoke.HoughLinesP(
                cannyEdges,
                1,              //Distance resolution in pixel-related units
                Math.PI / 45.0, //Angle resolution measured in radians.
                22,             //threshold
                12,             //minimum line length
                10);            //maximum allowed gap between segments on the same line

            #endregion

            return lines;
        }
Example #8
 /// <summary>
 /// Decodes the structured light pattern, generating a disparity map.
 /// </summary>
 /// <param name="structuredLightPattern">The strucutred light pattern</param>
 /// <param name="patternImages">The acquired pattern images to decode VectorOfVectorOfMat), loaded as grayscale and previously rectified.</param>
 /// <param name="disparityMap">The decoding result: a CV_64F Mat at image resolution, storing the computed disparity map.</param>
 /// <param name="blackImages">The all-black images needed for shadowMasks computation.</param>
 /// <param name="whiteImages">The all-white images needed for shadowMasks computation.</param>
 /// <param name="flags">Flags setting decoding algorithms.</param>
 /// <returns>True if successful.</returns>
 public static bool Decode(
     this IStructuredLightPattern structuredLightPattern,
     VectorOfVectorOfMat patternImages,
     IOutputArray disparityMap,
     IInputArrayOfArrays blackImages = null,
     IInputArrayOfArrays whiteImages = null,
     DecodeFlag flags = DecodeFlag.Decode3dUnderworld)
 {
     using (OutputArray oaDisparityMap = disparityMap.GetOutputArray())
         using (InputArray iaBlackImages = blackImages == null ? InputArray.GetEmpty() : blackImages.GetInputArray())
             using (InputArray iaWhiteImages = whiteImages == null ? InputArray.GetEmpty() : whiteImages.GetInputArray())
             {
                 return cveStructuredLightPatternDecode(
                     structuredLightPattern.StructuredLightPatternPtr,
                     patternImages,
                     oaDisparityMap,
                     iaBlackImages,
                     iaWhiteImages,
                     flags);
             }
 }
Example #9
 /// <summary>
 /// Train the face recognizer with the specific images and labels
 /// </summary>
 /// <param name="images">The images used in the training. This can be a VectorOfMat</param>
 /// <param name="labels">The labels of the images. This can be a VectorOfInt</param>
 public void Train(IInputArrayOfArrays images, IInputArray labels)
 {
     using (InputArray iaImage = images.GetInputArray())
         using (InputArray iaLabels = labels.GetInputArray())
             FaceInvoke.cveFaceRecognizerTrain(_faceRecognizerPtr, iaImage, iaLabels);
 }
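A hypothetical training sketch with an LBPH recognizer; faceOfAlice and faceOfBob stand in for pre-cropped grayscale face Mats:

 using (LBPHFaceRecognizer recognizer = new LBPHFaceRecognizer())
 using (VectorOfMat faces = new VectorOfMat())
 using (VectorOfInt labels = new VectorOfInt())
 {
     faces.Push(faceOfAlice);              // label 0
     faces.Push(faceOfBob);                // label 1
     labels.Push(new int[] { 0, 1 });      // one label per training image
     recognizer.Train(faces, labels);
 }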
Example #10
 /// <summary>
 /// Tries to compose the given images (or images stored internally from other function calls) into the final pano, under the assumption that the image transformations were estimated beforehand.
 /// </summary>
 /// <param name="images">Input images</param>
 /// <param name="pano">Final pano.</param>
 /// <returns>Status code.</returns>
 public Stitcher.Status ComposePanorama(IInputArrayOfArrays images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return StitchingInvoke.cveStitcherComposePanorama2(_ptr, iaImages, oaPano);
 }
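A minimal end-to-end sketch combining this with EstimateTransform from Example #1 (file names are hypothetical; Stitcher constructor arguments vary across Emgu CV versions):

 using (Mat left = CvInvoke.Imread("left.jpg"))
 using (Mat right = CvInvoke.Imread("right.jpg"))
 using (VectorOfMat images = new VectorOfMat(left, right))
 using (Mat pano = new Mat())
 using (Stitcher stitcher = new Stitcher())
 {
     stitcher.EstimateTransform(images);           // estimate camera transforms first
     if (stitcher.ComposePanorama(images, pano) == Stitcher.Status.Ok)
         CvInvoke.Imwrite("pano.jpg", pano);       // composition succeeded
 }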
Example #11
 /// <summary>
 /// Draws contours outlines or filled contours.
 /// </summary>
 /// <param name="image">Image where the contours are to be drawn. Like in any other drawing function, the contours are clipped with the ROI</param>
 /// <param name="contours">All the input contours. Each contour is stored as a point vector.</param>
 /// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param>
 /// <param name="color">Color of the contours </param>
 /// <param name="maxLevel">Maximal level for drawn contours. If 0, only contour is drawn. If 1, the contour and all contours after it on the same level are drawn. If 2, all contours after and all contours one level below the contours are drawn, etc. If the value is negative, the function does not draw the contours following after contour but draws child contours of contour up to abs(maxLevel)-1 level. </param>
 /// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative the contour interiors are drawn</param>
 /// <param name="lineType">Type of the contour segments</param>
 /// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param>
 /// <param name="offset">Shift all the point coordinates by the specified value. It is useful in case if the contours retrieved in some image ROI and then the ROI offset needs to be taken into account during the rendering. </param>
 public static void DrawContours(
    IInputOutputArray image,
    IInputArrayOfArrays contours,
    int contourIdx,
    MCvScalar color,
    int thickness = 1,
    CvEnum.LineType lineType = LineType.EightConnected,
    IInputArray hierarchy = null,
    int maxLevel = int.MaxValue,
    Point offset = new Point())
 {
    using (InputOutputArray ioaImage = image.GetInputOutputArray())
    using (InputArray iaContours = contours.GetInputArray())
    using (InputArray iaHierarchy = hierarchy == null ? InputArray.GetEmpty() : hierarchy.GetInputArray())
       cveDrawContours(
          ioaImage,
          iaContours,
          contourIdx,
          ref color,
          thickness,
          lineType,
          iaHierarchy,
          maxLevel,
          ref offset);
 }
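A short usage sketch (mask.png is a hypothetical binary image): find the external contours, then draw them all in green:

 using (Mat binary = CvInvoke.Imread("mask.png", ImreadModes.Grayscale))
 using (Mat canvas = new Mat(binary.Size, DepthType.Cv8U, 3))
 using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
 {
     canvas.SetTo(new MCvScalar(0, 0, 0));
     CvInvoke.FindContours(binary, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
     // contourIdx = -1 draws every contour; positive thickness draws outlines rather than filling.
     CvInvoke.DrawContours(canvas, contours, -1, new MCvScalar(0, 255, 0), 2);
 }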
Example #12
      /// <summary>
      /// Find groups of Extremal Regions that are organized as text blocks.
      /// </summary>
      /// <param name="image">The image where ER grouping is to be perform on</param>
      /// <param name="channels">Array of single channel images from which the regions were extracted</param>
      /// <param name="erstats">Vector of ER’s retrieved from the ERFilter algorithm from each channel</param>
      /// <param name="groupingTrainedFileName">The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)</param>
      /// <param name="minProbability">The minimum probability for accepting a group.</param>
      /// <param name="groupMethods">The grouping methods</param>
      /// <returns>The output of the algorithm that indicates the text regions</returns>
      public static System.Drawing.Rectangle[] ERGrouping(IInputArray image, IInputArrayOfArrays channels, VectorOfERStat[] erstats, GroupingMethod groupMethods = GroupingMethod.OrientationHoriz, String groupingTrainedFileName = null, float minProbability = 0.5f)
      {
         IntPtr[] erstatPtrs = new IntPtr[erstats.Length];

         for (int i = 0; i < erstatPtrs.Length; i++)
         {
            erstatPtrs[i] = erstats[i].Ptr;
         }

         using (VectorOfVectorOfPoint regionGroups = new VectorOfVectorOfPoint())
         using (VectorOfRect groupsBoxes = new VectorOfRect())
         using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaChannels = channels.GetInputArray())
         using (CvString s = (groupingTrainedFileName == null ? new CvString() : new CvString(groupingTrainedFileName)))
         {
            GCHandle erstatsHandle = GCHandle.Alloc(erstatPtrs, GCHandleType.Pinned);
            CvERGrouping(
               iaImage, iaChannels,
               erstatsHandle.AddrOfPinnedObject(), erstatPtrs.Length,
               regionGroups, groupsBoxes,
               groupMethods,
               s, minProbability);

            erstatsHandle.Free();
            return groupsBoxes.ToArray();
         }
      }
Example #13
 /// <summary>
 /// Calculates a histogram of a set of arrays.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">List of the channels used to compute the histogram. </param>
 /// <param name="mask">Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram.</param>
 /// <param name="hist">Output histogram</param>
 /// <param name="histSize">Array of histogram sizes in each dimension.</param>
 /// <param name="ranges">Array of the dims arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="accumulate">Accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time.</param>
 public static void CalcHist(IInputArrayOfArrays images, int[] channels, IInputArray mask, IOutputArray hist, int[] histSize, float[] ranges, bool accumulate)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfInt histSizeVec = new VectorOfInt(histSize))
    using (VectorOfFloat rangesVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaHist = hist.GetOutputArray())
    {
       cveCalcHist(iaImages, channelsVec, iaMask, oaHist, histSizeVec, rangesVec, accumulate);
    }
 }
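A minimal usage sketch computing a 256-bin grayscale histogram (image.png is hypothetical):

 using (Mat gray = CvInvoke.Imread("image.png", ImreadModes.Grayscale))
 using (VectorOfMat images = new VectorOfMat(gray))
 using (Mat hist = new Mat())
 {
     // One channel (index 0), no mask, 256 bins over the value range [0, 256).
     CvInvoke.CalcHist(images, new int[] { 0 }, null, hist,
                       new int[] { 256 }, new float[] { 0, 256 }, false);
     // hist is a 256x1 CV_32F Mat of bin counts.
 }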
Example #14
 /// <summary>
 /// Calculates the back projection of a histogram.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">Number of source images.</param>
 /// <param name="hist">Input histogram that can be dense or sparse.</param>
 /// <param name="backProject">Destination back projection array that is a single-channel array of the same size and depth as images[0] .</param>
 /// <param name="ranges">Array of arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="scale"> Optional scale factor for the output back projection.</param>
 public static void CalcBackProject(IInputArrayOfArrays images, int[] channels, IInputArray hist, IOutputArray backProject, float[] ranges, double scale = 1.0)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfFloat rangeVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaHist = hist.GetInputArray())
    using (OutputArray oaBackProject = backProject.GetOutputArray())
    {
       cveCalcBackProject(iaImages, channelsVec, iaHist, oaBackProject, rangeVec, scale);
    }
 }
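A short sketch, assuming hueHist is a histogram computed beforehand (e.g. with CalcHist above) from the hue channel of an HSV image hsvFrame:

 using (VectorOfMat hsvFrames = new VectorOfMat(hsvFrame))
 using (Mat backProjection = new Mat())
 {
     // The hue channel (index 0) spans [0, 180) in OpenCV's 8-bit HSV representation.
     CvInvoke.CalcBackProject(hsvFrames, new int[] { 0 }, hueHist,
                              backProjection, new float[] { 0, 180 }, 1.0);
 }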
Example #15
 /// <summary>
 /// This function receives the detected markers and returns their pose estimation with respect to the camera individually. So for each marker, one rotation and translation vector is returned. The returned transformation is the one that transforms points from each marker coordinate system to the camera coordinate system. The marker coordinate system is centered on the middle of the marker, with the Z axis perpendicular to the marker plane. The coordinates of the four corners of the marker in its own coordinate system are: (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
 /// </summary>
 /// <param name="corners">vector of already detected markers corners. For each marker, its four corners are provided (e.g. VectorOfVectorOfPointF). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="markerLength">the length of the markers' side. The returned translation vectors will be in the same unit. Normally, unit is meters.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">array of output rotation vectors. Each element in rvecs corresponds to the specific marker in imgPoints.</param>
 /// <param name="tvecs">array of output translation vectors (e.g. VectorOfPoint3D32F ). Each element in tvecs corresponds to the specific marker in imgPoints.</param>
 public static void EstimatePoseSingleMarkers(IInputArrayOfArrays corners, float markerLength,
    IInputArray cameraMatrix, IInputArray distCoeffs,
    IOutputArrayOfArrays rvecs, IOutputArrayOfArrays tvecs)
 {
    using (InputArray iaCorners = corners.GetInputArray())
    using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
    using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
    using (OutputArray oaRvecs = rvecs.GetOutputArray())
    using (OutputArray oaTvecs = tvecs.GetOutputArray())
    {
       cveArucoEstimatePoseSingleMarkers(iaCorners, markerLength, iaCameraMatrix, iaDistCoeffs, oaRvecs, oaTvecs);
    }
 }
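A hypothetical detection-plus-pose sketch; image, camMat and distCoeffs are assumed inputs, and the output containers follow the parameter docs above (e.g. VectorOfPoint3D32F):

 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
 using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
 using (VectorOfInt ids = new VectorOfInt())
 using (VectorOfPoint3D32F rvecs = new VectorOfPoint3D32F())
 using (VectorOfPoint3D32F tvecs = new VectorOfPoint3D32F())
 {
     DetectorParameters p = DetectorParameters.GetDefault();
     ArucoInvoke.DetectMarkers(image, dict, corners, ids, p);
     if (ids.Size > 0)   // 0.05f: marker side length in meters
         ArucoInvoke.EstimatePoseSingleMarkers(corners, 0.05f, camMat, distCoeffs, rvecs, tvecs);
 }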
Example #16
 /// <summary>
 /// This function is the opposite of cvSplit. If the destination array has N channels, then if the first N input channels are not IntPtr.Zero, they are all copied to the destination array; otherwise, if only a single source channel of the first N is not IntPtr.Zero, this particular channel is copied into the destination array; otherwise an error is raised. The rest of the source channels (beyond the first N) must always be IntPtr.Zero. For IplImage, cvCopy with COI set can also be used to insert a single channel into the image.
 /// </summary>
 /// <param name="mv">Input vector of matrices to be merged; all the matrices in mv must have the same size and the same depth.</param>
 /// <param name="dst">output array of the same size and the same depth as mv[0]; The number of channels will be the total number of channels in the matrix array.</param>
 public static void Merge(IInputArrayOfArrays mv, IOutputArray dst)
 {
    using (InputArray iaMv = mv.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveMerge(iaMv, oaDst);
 }
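A round-trip sketch: Split produces the per-channel planes that Merge reassembles (image.png is hypothetical):

 using (Mat bgr = CvInvoke.Imread("image.png"))
 using (VectorOfMat planes = new VectorOfMat())
 using (Mat merged = new Mat())
 {
     CvInvoke.Split(bgr, planes);    // planes now holds three single-channel Mats
     CvInvoke.Merge(planes, merged); // merged is again a 3-channel image equal to bgr
 }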
Example #17
 /// <summary>
 /// The function cvMixChannels is a generalized form of cvSplit and cvMerge and some forms of cvCvtColor. It can be used to change the order of the planes, add/remove alpha channel, extract or insert a single plane or multiple planes etc.
 /// </summary>
 /// <param name="src">The array of input arrays.</param>
 /// <param name="dst">The array of output arrays</param>
 /// <param name="fromTo">The array of pairs of indices of the planes copied. from_to[k*2] is the 0-based index of the input plane, and from_to[k*2+1] is the index of the output plane, where the continuous numbering of the planes over all the input and over all the output arrays is used. When from_to[k*2] is negative, the corresponding output plane is filled with 0's.</param>
 /// <remarks>Unlike many other new-style C++ functions in OpenCV, mixChannels requires the output arrays to be pre-allocated before calling the function.</remarks>
 public static void MixChannels(
    IInputArrayOfArrays src,
    IInputOutputArray dst,
    int[] fromTo)
 {
    GCHandle handle = GCHandle.Alloc(fromTo, GCHandleType.Pinned);
    using (InputArray iaSrc = src.GetInputArray())
    using (InputOutputArray ioaDst = dst.GetInputOutputArray())
       cveMixChannels(iaSrc, ioaDst, handle.AddrOfPinnedObject(), fromTo.Length >> 1);
    handle.Free();
 }
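A usage sketch of the classic BGRA-to-BGR-plus-alpha shuffle from the OpenCV documentation; note that the destination Mats are pre-allocated, per the remark above:

 using (Mat bgra = new Mat(240, 320, DepthType.Cv8U, 4))
 using (Mat bgr = new Mat(bgra.Rows, bgra.Cols, DepthType.Cv8U, 3))
 using (Mat alpha = new Mat(bgra.Rows, bgra.Cols, DepthType.Cv8U, 1))
 using (VectorOfMat src = new VectorOfMat(bgra))
 using (VectorOfMat dst = new VectorOfMat(bgr, alpha))
 {
     // (from, to) pairs over the continuous plane numbering: B->plane 2, G->1, R->0
     // (i.e. channel-reversed bgr), and A->plane 3 (the separate alpha Mat).
     CvInvoke.MixChannels(src, dst, new int[] { 0, 2, 1, 1, 2, 0, 3, 3 });
 }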