Example #1
 /// <summary>
 /// Computes Hand-Eye calibration
 /// </summary>
 /// <param name="rGripper2base">
 /// Rotation part extracted from the homogeneous matrix that transforms a point expressed in the gripper frame to the robot base frame.
 /// This is a vector (vector&lt;Mat&gt;) that contains the rotation matrices for all the transformations from gripper frame to robot base frame.
 /// </param>
 /// <param name="tGripper2base">
 /// Translation part extracted from the homogeneous matrix that transforms a point expressed in the gripper frame to the robot base frame.
 /// This is a vector (vector&lt;Mat&gt;) that contains the translation vectors for all the transformations from gripper frame to robot base frame.
 /// </param>
 /// <param name="rTarget2cam">
 /// Rotation part extracted from the homogeneous matrix that transforms a point expressed in the target frame to the camera frame.
 /// This is a vector (vector&lt;Mat&gt;) that contains the rotation matrices for all the transformations from calibration target frame to camera frame.
 /// </param>
 /// <param name="tTarget2cam">
 /// Translation part extracted from the homogeneous matrix that transforms a point expressed in the target frame to the camera frame.
 /// This is a vector (vector&lt;Mat&gt;) that contains the translation vectors for all the transformations from calibration target frame to camera frame.
 /// </param>
 /// <param name="rCam2gripper">
 /// Estimated rotation part extracted from the homogeneous matrix that transforms a point expressed in the camera frame to the gripper frame.
 /// </param>
 /// <param name="tCam2gripper">
 /// Estimated translation part extracted from the homogeneous matrix that transforms a point expressed in the camera frame to the gripper frame.
 /// </param>
 /// <param name="method">One of the implemented Hand-Eye calibration methods</param>
 public static void CalibrateHandEye(
     IInputArrayOfArrays rGripper2base,
     IInputArrayOfArrays tGripper2base,
     IInputArrayOfArrays rTarget2cam,
     IInputArrayOfArrays tTarget2cam,
     IOutputArray rCam2gripper,
     IOutputArray tCam2gripper,
     CvEnum.HandEyeCalibrationMethod method)
 {
     using (InputArray iaRGripper2Base = rGripper2base.GetInputArray())
          using (InputArray iaTGripper2Base = tGripper2base.GetInputArray())
             using (InputArray iaRTarget2Cam = rTarget2cam.GetInputArray())
                 using (InputArray iaTTarget2Cam = tTarget2cam.GetInputArray())
                     using (OutputArray oaRCam2Gripper = rCam2gripper.GetOutputArray())
                         using (OutputArray oaTCam2Gripper = tCam2gripper.GetOutputArray())
                         {
                             cveCalibrateHandEye(
                                 iaRGripper2Base,
                                 iaTGripper2Base,
                                 iaRTarget2Cam,
                                 iaTTarget2Cam,
                                 oaRCam2Gripper,
                                 oaTCam2Gripper,
                                 method);
                         }
 }
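A usage sketch of the method above. It assumes the method is exposed on CvInvoke (as in Emgu CV), that the per-pose rotation matrices and translation vectors have already been collected from the robot controller and from solving the target pose, and that the Tsai variant is exposed as HandEyeCalibrationMethod.Tsai; treat that enum member name as an assumption.

 using (VectorOfMat rGripper2Base = new VectorOfMat())
 using (VectorOfMat tGripper2Base = new VectorOfMat())
 using (VectorOfMat rTarget2Cam = new VectorOfMat())
 using (VectorOfMat tTarget2Cam = new VectorOfMat())
 using (Mat rCam2Gripper = new Mat())
 using (Mat tCam2Gripper = new Mat())
 {
     // ... push one rotation Mat and one translation Mat per robot pose into the four vectors ...
     CvInvoke.CalibrateHandEye(
         rGripper2Base, tGripper2Base,
         rTarget2Cam, tTarget2Cam,
         rCam2Gripper, tCam2Gripper,
         HandEyeCalibrationMethod.Tsai);
 }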
Example #2
        /// <summary>
        /// Find groups of Extremal Regions that are organized as text blocks.
        /// </summary>
        /// <param name="image">The image on which ER grouping is to be performed</param>
        /// <param name="channels">Array of single channel images from which the regions were extracted</param>
        /// <param name="erstats">Vector of ERs retrieved from the ERFilter algorithm for each channel</param>
        /// <param name="groupingTrainedFileName">The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)</param>
        /// <param name="minProbability">The minimum probability for accepting a group.</param>
        /// <param name="groupMethods">The grouping methods</param>
        /// <returns>The output of the algorithm that indicates the text regions</returns>
        public static System.Drawing.Rectangle[] ERGrouping(IInputArray image, IInputArrayOfArrays channels, VectorOfERStat[] erstats, GroupingMethod groupMethods = GroupingMethod.OrientationHoriz, String groupingTrainedFileName = null, float minProbability = 0.5f)
        {
            IntPtr[] erstatPtrs = new IntPtr[erstats.Length];

            for (int i = 0; i < erstatPtrs.Length; i++)
            {
                erstatPtrs[i] = erstats[i].Ptr;
            }

            using (VectorOfVectorOfPoint regionGroups = new VectorOfVectorOfPoint())
                using (VectorOfRect groupsBoxes = new VectorOfRect())
                    using (InputArray iaImage = image.GetInputArray())
                        using (InputArray iaChannels = channels.GetInputArray())
                            using (CvString s = (groupingTrainedFileName == null ? new CvString() : new CvString(groupingTrainedFileName)))
                            {
                                GCHandle erstatsHandle = GCHandle.Alloc(erstatPtrs, GCHandleType.Pinned);
                                ContribInvoke.CvERGrouping(
                                    iaImage, iaChannels,
                                    erstatsHandle.AddrOfPinnedObject(), erstatPtrs.Length,
                                    regionGroups, groupsBoxes,
                                    groupMethods,
                                    s, minProbability);

                                erstatsHandle.Free();
                                return(groupsBoxes.ToArray());
                            }
        }
Example #3
 /// <summary>
 /// Train it on positive features and compute the MACE filter: h = D(-1) * X * (X(+) * D(-1) * X)(-1) * C. Also computes a minimal threshold for this class, the smallest self-similarity from the training images.
 /// </summary>
 /// <param name="images">A VectorOfMat with the train images</param>
 public void Train(IInputArrayOfArrays images)
 {
     using (InputArray iaImages = images.GetInputArray())
     {
         FaceInvoke.cveMaceTrain(_ptr, iaImages);
     }
 }
Example #4
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="stdDeviationsIntrinsics">Output vector of standard deviations estimated for intrinsic parameters. Order of deviation values: (fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,τx,τy). If one of the parameters is not estimated, its deviation is equal to zero.</param>
 /// <param name="stdDeviationsExtrinsics">Output vector of standard deviations estimated for extrinsic parameters. Order of deviations values: (R1,T1,…,RM,TM) where M is number of pattern views, Ri,Ti are concatenated 1x3 vectors.</param>
 /// <param name="perViewErrors">Output vector of average re-projection errors estimated for each pattern view.</param>
 /// <param name="flags">Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     IOutputArray stdDeviationsIntrinsics,
     IOutputArray stdDeviationsExtrinsics,
     IOutputArray perViewErrors,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
         using (InputArray iaCharucoIds = charucoIds.GetInputArray())
             using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
                 using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
                     using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
                         using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
                             using (OutputArray oaStdDeviationsIntrinsics = stdDeviationsIntrinsics == null ? OutputArray.GetEmpty() : stdDeviationsIntrinsics.GetOutputArray())
                                 using (OutputArray oaStdDeviationsExtrinsics = stdDeviationsExtrinsics == null ? OutputArray.GetEmpty() : stdDeviationsExtrinsics.GetOutputArray())
                                     using (OutputArray oaPerViewErrors = perViewErrors == null ? OutputArray.GetEmpty() : perViewErrors.GetOutputArray())
                                     {
                                         return(cveArucoCalibrateCameraCharuco(
                                                    iaCharucoCorners, iaCharucoIds, board.BoardPtr, ref imageSize,
                                                    ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs,
                                                    oaStdDeviationsIntrinsics, oaStdDeviationsExtrinsics, oaPerViewErrors,
                                                    flags, ref criteria));
                                     }
 }
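A usage sketch for the method above, assuming it is exposed on ArucoInvoke, that charucoCorners and charucoIds are VectorOfMat collections filled with one Mat per captured frame, that board is an existing CharucoBoard, and that imageSize holds the capture resolution:

 using (Mat cameraMatrix = new Mat())
 using (Mat distCoeffs = new Mat())
 using (VectorOfMat rvecs = new VectorOfMat())
 using (VectorOfMat tvecs = new VectorOfMat())
 {
     MCvTermCriteria criteria = new MCvTermCriteria(30, 1e-6);
     double reprojectionError = ArucoInvoke.CalibrateCameraCharuco(
         charucoCorners, charucoIds, board, imageSize,
         cameraMatrix, distCoeffs, rvecs, tvecs,
         null, null, null,          // standard deviations and per-view errors are not requested
         CalibType.Default, criteria);
 }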
Example #5
 /// <summary>
 /// Interpolate position of ChArUco board corners
 /// </summary>
 /// <param name="markerCorners">Vector of already detected marker corners. For each marker, its four corners are provided (e.g. VectorOfVectorOfPointF). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="markerIds">list of identifiers for each marker in corners</param>
 /// <param name="image">input image necessary for corner refinement. Note that markers are not detected and should be sent in the corners and ids parameters.</param>
 /// <param name="board">layout of ChArUco board.</param>
 /// <param name="charucoCorners">interpolated chessboard corners</param>
 /// <param name="charucoIds">interpolated chessboard corners identifiers</param>
 /// <param name="cameraMatrix">optional 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">optional vector of distortion coefficients, (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements </param>
 /// <param name="minMarkers">number of adjacent markers that must be detected to return a charuco corner</param>
 /// <returns>The number of interpolated corners.</returns>
 public static int InterpolateCornersCharuco(
     IInputArrayOfArrays markerCorners,
     IInputArray markerIds,
     IInputArray image,
     CharucoBoard board,
     IOutputArray charucoCorners,
     IOutputArray charucoIds,
     IInputArray cameraMatrix = null,
     IInputArray distCoeffs   = null,
     int minMarkers           = 2)
 {
     using (InputArray iaMarkerCorners = markerCorners.GetInputArray())
         using (InputArray iaMarkerIds = markerIds.GetInputArray())
             using (InputArray iaImage = image.GetInputArray())
                 using (OutputArray oaCharucoCorners = charucoCorners.GetOutputArray())
                     using (OutputArray oaCharucoIds = charucoIds.GetOutputArray())
                         using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                             using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                             {
                                 return(cveArucoInterpolateCornersCharuco(
                                            iaMarkerCorners, iaMarkerIds, iaImage, board,
                                            oaCharucoCorners, oaCharucoIds,
                                            iaCameraMatrix, iaDistCoeffs,
                                            minMarkers));
                             }
 }
Example #6
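 /// <summary>
 /// Creates a 4-dimensional blob from a series of images; see the fully documented overload in Example #18 below for the meaning of the parameters.
 /// </summary>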
 public static void BlobFromImages(IInputArrayOfArrays images, IOutputArray blob, double scaleFactor = 1.0, Size size = new Size(), MCvScalar mean = new MCvScalar(), bool swapRB = true, bool crop = true)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaBlob = blob.GetOutputArray())
         {
             cveDnnBlobFromImages(iaImages, oaBlob, scaleFactor, ref size, ref mean, swapRB, crop);
         }
 }
Example #7
 /// <summary>
 /// These functions try to match the given images and to estimate rotations of each camera.
 /// </summary>
 /// <param name="images">Input images.</param>
 /// <param name="masks">Masks for each input image specifying where to look for keypoints (optional).</param>
 /// <returns>Status code.</returns>
 public Stitcher.Status EstimateTransform(IInputArrayOfArrays images, IInputArrayOfArrays masks = null)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (InputArray iaMasks = masks == null ? InputArray.GetEmpty() : masks.GetInputArray())
         {
             return(StitchingInvoke.cveStitcherEstimateTransform(_ptr, iaImages, iaMasks));
         }
 }
Example #8
 /// <summary>
 /// Create a new instance of GMSD quality measurement.
 /// </summary>
 /// <param name="refImgs">vector of reference images, converted to internal type</param>
 public QualityGMSD(IInputArrayOfArrays refImgs)
 {
     using (InputArray iaRefImgs = refImgs.GetInputArray())
         _ptr = QualityInvoke.cveQualityGMSDCreate(
             iaRefImgs,
             ref _qualityBasePtr,
             ref _algorithmPtr,
             ref _sharedPtr);
 }
Example #9
 /// <summary>
 /// Aligns images.
 /// </summary>
 /// <param name="src">vector of input images</param>
 /// <param name="dst">vector of aligned images</param>
 /// <param name="times">vector of exposure time values for each image</param>
 /// <param name="response">256x1 matrix with inverse camera response function for each pixel value, it should have the same number of channels as images.</param>
 public void Process(IInputArrayOfArrays src, VectorOfMat dst, IInputArray times, IInputArray response)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (InputArray iaTimes = times.GetInputArray())
              using (InputArray iaResponse = response.GetInputArray())
             {
                 CvInvoke.cveAlignExposuresProcess(_alignExposuresPtr, iaSrc, dst, iaTimes, iaResponse);
             }
 }
Example #10
        /// <summary>
        /// Compute quality score per channel with the per-channel score in each element of the result
        /// </summary>
        /// <param name="qualityBase">The quality base object</param>
        /// <param name="cmpImgs">Comparison image(s), or image(s) to evaluate for no-reference quality algorithms</param>
        /// <returns>Quality score per channel</returns>
        public static MCvScalar Compute(
            this IQualityBase qualityBase,
            IInputArrayOfArrays cmpImgs)
        {
            MCvScalar score = new MCvScalar();

            using (InputArray iaCmpImgs = cmpImgs.GetInputArray())
                cveQualityBaseCompute(qualityBase.QualityBasePtr, iaCmpImgs, ref score);
            return(score);
        }
Example #11
 /// <summary>
 /// Given the input frame, create input blob, run net and return recognition result.
 /// </summary>
 /// <param name="frame">The input image</param>
 /// <param name="roiRects">Vector of text detection regions of interest (Rect, CV_32SC4). The ROIs will be cropped and used as the network inputs</param>
 /// <returns>A set of text recognition results.</returns>
 public String[] Recognize(IInputArray frame, IInputArrayOfArrays roiRects)
 {
     using (VectorOfCvString vs = new VectorOfCvString())
         using (InputArray iaFrame = frame.GetInputArray())
             using (InputArray iaRoiRects = roiRects.GetInputArray())
             {
                 DnnInvoke.cveDnnTextRecognitionModelRecognize2(_ptr, iaFrame, iaRoiRects, vs);
                 return(vs.ToArray());
             }
 }
Example #12
 /// <summary>
 /// Save multiple images to a specified file (e.g. ".tiff" that support multiple images).
 /// </summary>
 /// <param name="filename">Name of the file.</param>
 /// <param name="images">Images to be saved.</param>
 /// <param name="parameters">The parameters</param>
 /// <returns>true if success</returns>
 public static bool Imwritemulti(String filename, IInputArrayOfArrays images, params KeyValuePair <CvEnum.ImwriteFlags, int>[] parameters)
 {
     using (CvString strFilename = new CvString(filename))
         using (Util.VectorOfInt vec = new Util.VectorOfInt())
             using (InputArray iaImages = images.GetInputArray())
             {
                 PushParameters(vec, parameters);
                 return(cveImwritemulti(strFilename, iaImages, vec));
             }
 }
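A usage sketch writing two (placeholder) pages into a single multi-page TIFF, assuming the method is exposed on CvInvoke:

 using (Mat page1 = new Mat(480, 640, DepthType.Cv8U, 1))
 using (Mat page2 = new Mat(480, 640, DepthType.Cv8U, 1))
 using (VectorOfMat pages = new VectorOfMat(page1, page2))
 {
     bool success = CvInvoke.Imwritemulti("pages.tiff", pages);
 }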
Example #13
 /// <summary>
 /// Create an instance of peak signal to noise ratio (PSNR) algorithm
 /// </summary>
 /// <param name="refImgs">Input image(s) to use as the source for comparison</param>
 /// <param name="maxPixelValue">Maximum per-channel value for any individual pixel; e.g. 255 for an 8-bit (uint8) image</param>
 public QualityPSNR(IInputArrayOfArrays refImgs, double maxPixelValue = 255.0)
 {
     using (InputArray iaRefImgs = refImgs.GetInputArray())
         _ptr = QualityInvoke.cveQualityPSNRCreate(
             iaRefImgs,
             maxPixelValue,
             ref _qualityBasePtr,
             ref _algorithmPtr,
             ref _sharedPtr);
 }
Example #14
 /// <summary>
 /// Finds the best match for each descriptor from a query set. Train descriptors collection that was set by the Add function is used.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="matches">Found matches. If a query descriptor is masked out in mask, no match is added for this descriptor, so the matches size may be smaller than the query descriptors count.</param>
 /// <param name="masks">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 public void Match(
     IInputArray queryDescriptors,
     VectorOfDMatch matches,
     IInputArrayOfArrays masks = null
     )
 {
     using (InputArray iaQueryDesccriptor = queryDescriptors.GetInputArray())
         using (InputArray iaMasks = masks == null ? InputArray.GetEmpty() : masks.GetInputArray())
         {
             CvInvoke.cveDescriptorMatcherMatch2(_descriptorMatcherPtr, iaQueryDesccriptor, matches, iaMasks);
         }
 }
Example #15
 /// <summary>
 /// Draw a set of detected ChArUco Diamond markers
 /// </summary>
 /// <param name="image">input/output image. It must have 1 or 3 channels. The number of channels is not altered.</param>
 /// <param name="diamondCorners">positions of diamond corners in the same format returned by detectCharucoDiamond(). (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="diamondIds">vector of identifiers for diamonds in diamondCorners, in the same format returned by detectCharucoDiamond() (e.g. VectorOfMat ). Optional, if not provided, ids are not painted. </param>
 /// <param name="borderColor">color of marker borders. Rest of colors (text color and first corner color) are calculated based on this one.</param>
 public static void DrawDetectedDiamonds(
     IInputOutputArray image,
     IInputArrayOfArrays diamondCorners,
     IInputArray diamondIds,
     MCvScalar borderColor)
 {
     using (InputOutputArray ioaImage = image.GetInputOutputArray())
         using (InputArray iaDiamondCorners = diamondCorners.GetInputArray())
             using (InputArray iaDiamondIds = diamondIds == null ? InputArray.GetEmpty() : diamondIds.GetInputArray())
             {
                 cveArucoDrawDetectedDiamonds(ioaImage, iaDiamondCorners, iaDiamondIds, ref borderColor);
             }
 }
Example #16
 /// <summary>
 /// This function receives the detected markers and returns their pose estimation with respect to the camera individually. So for each marker, one rotation and translation vector is returned. The returned transformation is the one that transforms points from each marker coordinate system to the camera coordinate system. The marker coordinate system is centered on the middle of the marker, with the Z axis perpendicular to the marker plane. The coordinates of the four corners of the marker in its own coordinate system are: (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
 /// </summary>
 /// <param name="corners">vector of already detected markers corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="markerLength">the length of the markers' side. The returned translation vectors will be in the same unit. Normally, the unit is meters.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">array of output rotation vectors. Each element in rvecs corresponds to the specific marker in corners.</param>
 /// <param name="tvecs">array of output translation vectors (e.g. VectorOfPoint3D32F). Each element in tvecs corresponds to the specific marker in corners.</param>
 public static void EstimatePoseSingleMarkers(IInputArrayOfArrays corners, float markerLength,
                                              IInputArray cameraMatrix, IInputArray distCoeffs,
                                              IOutputArrayOfArrays rvecs, IOutputArrayOfArrays tvecs)
 {
     using (InputArray iaCorners = corners.GetInputArray())
         using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
             using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
                 using (OutputArray oaRvecs = rvecs.GetOutputArray())
                     using (OutputArray oaTvecs = tvecs.GetOutputArray())
                     {
                         cveArucoEstimatePoseSingleMarkers(iaCorners, markerLength, iaCameraMatrix, iaDistCoeffs, oaRvecs, oaTvecs);
                     }
 }
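A usage sketch, assuming corners holds marker corners from a prior ArucoInvoke.DetectMarkers call, cameraMatrix and distCoeffs come from a camera calibration, and the method is exposed on ArucoInvoke; the 0.05f marker side length is an illustrative value:

 using (VectorOfPoint3D32F rvecs = new VectorOfPoint3D32F())
 using (VectorOfPoint3D32F tvecs = new VectorOfPoint3D32F())
 {
     // One rotation vector and one translation vector is produced per detected marker.
     ArucoInvoke.EstimatePoseSingleMarkers(corners, 0.05f, cameraMatrix, distCoeffs, rvecs, tvecs);
 }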
Example #17
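 /// <summary>
 /// These functions try to match the given images and to estimate rotations of each camera, optionally restricting the search for features to the supplied regions of interest.
 /// </summary>
 /// <param name="images">Input images.</param>
 /// <param name="rois">Regions of interest, one array of rectangles per input image (optional).</param>
 /// <returns>Status code.</returns>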
 public int EstimateTransform(IInputArrayOfArrays images, Rectangle[][] rois = null)
 {
     using (InputArray iaImages = images.GetInputArray())
         if (rois == null)
         {
             return(StitchingInvoke.cveStitcherEstimateTransform1(_ptr, iaImages));
         }
         else
         {
             using (VectorOfVectorOfRect vvr = new VectorOfVectorOfRect(rois))
             {
                 return(StitchingInvoke.cveStitcherEstimateTransform2(_ptr, iaImages, vvr));
             }
         }
 }
Example #18
 /// <summary>
 /// Creates a 4-dimensional blob from a series of images. Optionally resizes and crops images from the center, subtracts mean values, scales values by a scale factor, and swaps the Blue and Red channels.
 /// </summary>
 /// <param name="images">input images (all with 1-, 3- or 4-channels).</param>
 /// <param name="blob">4-dimensional OutputArray with NCHW dimensions order.</param>
 /// <param name="scaleFactor">multiplier for images values.</param>
 /// <param name="size">spatial size for output image</param>
 /// <param name="mean">scalar with mean values which are subtracted from channels. Values are intended to be in (mean-R, mean-G, mean-B) order if image has BGR ordering and swapRB is true.</param>
 /// <param name="swapRB">flag which indicates that swap first and last channels in 3-channel image is necessary.</param>
 /// <param name="crop">flag which indicates whether the image will be cropped after resize or not</param>
 /// <param name="ddepth">Depth of output blob. Choose CV_32F or CV_8U.</param>
 public static void BlobFromImages(
     IInputArrayOfArrays images,
     IOutputArray blob,
     double scaleFactor      = 1.0,
     Size size               = new Size(),
     MCvScalar mean          = new MCvScalar(),
     bool swapRB             = false,
     bool crop               = false,
     CvEnum.DepthType ddepth = CvEnum.DepthType.Cv32F)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaBlob = blob.GetOutputArray())
         {
             cveDnnBlobFromImages(iaImages, oaBlob, scaleFactor, ref size, ref mean, swapRB, crop, ddepth);
         }
 }
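A usage sketch packing two existing BGR images (img1, img2) into one NCHW blob; the 224x224 network input size and the scale factor are illustrative values, and the method is assumed to live on DnnInvoke:

 using (VectorOfMat batch = new VectorOfMat(img1, img2))
 using (Mat blob = new Mat())
 {
     DnnInvoke.BlobFromImages(
         batch, blob,
         1.0 / 255.0,                // scale pixel values to [0, 1]
         new Size(224, 224),         // spatial size expected by the (hypothetical) network
         new MCvScalar(0, 0, 0),     // no mean subtraction
         true,                       // swap Blue and Red channels
         false,                      // no center crop
         DepthType.Cv32F);
 }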
Example #19
 /// <summary>
 /// Compute a wrapped phase map from sinusoidal patterns.
 /// </summary>
 /// <param name="patternImages">Input data to compute the wrapped phase map.</param>
 /// <param name="wrappedPhaseMap">Wrapped phase map obtained through one of the three methods.</param>
 /// <param name="shadowMask">Mask used to discard shadow regions.</param>
 /// <param name="fundamental">Fundamental matrix used to compute epipolar lines and ease the matching step.</param>
 public void ComputePhaseMap(
     IInputArrayOfArrays patternImages,
     IOutputArray wrappedPhaseMap,
     IOutputArray shadowMask = null,
     IInputArray fundamental = null)
 {
     using (InputArray iaPatternImages = patternImages.GetInputArray())
         using (OutputArray oaWrappedPhaseMap = wrappedPhaseMap.GetOutputArray())
             using (OutputArray oaShadowMask = shadowMask == null ? OutputArray.GetEmpty() : shadowMask.GetOutputArray())
                 using (InputArray iaFundamental = fundamental == null ? InputArray.GetEmpty() : fundamental.GetInputArray())
                 {
                     StructuredLightInvoke.cveSinusoidalPatternComputePhaseMap(
                         _ptr,
                         iaPatternImages,
                         oaWrappedPhaseMap,
                         oaShadowMask,
                         iaFundamental);
                 }
 }
Example #20
 /// <summary>
 /// Pose estimation for a board of markers.
 /// </summary>
 /// <param name="corners">Vector of already detected markers corners. For each marker, its four corners are provided (e.g. std::vector&lt;std::vector&lt;cv::Point2f&gt; &gt;). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="ids">List of identifiers for each marker in corners</param>
 /// <param name="board">Layout of markers in the board. The layout is composed by the marker identifiers and the positions of each marker corner in the board reference system.</param>
 /// <param name="cameraMatrix">Input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">Vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvec">Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board (see cv::Rodrigues). Used as initial guess if not empty.</param>
 /// <param name="tvec">Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.</param>
 /// <param name="useExtrinsicGuess">Defines whether the initial guess for rvec and tvec will be used or not.</param>
 /// <returns>The function returns the number of markers from the input employed for the board pose estimation. Note that returning a 0 means the pose has not been estimated.</returns>
 public static int EstimatePoseBoard(
     IInputArrayOfArrays corners,
     IInputArray ids,
     IBoard board,
     IInputArray cameraMatrix,
     IInputArray distCoeffs,
     IOutputArray rvec,
     IOutputArray tvec,
     bool useExtrinsicGuess = false)
 {
     using (InputArray iaCorners = corners.GetInputArray())
         using (InputArray iaIds = ids.GetInputArray())
             using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
                 using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
                     using (OutputArray oaRvec = rvec.GetOutputArray())
                         using (OutputArray oaTvec = tvec.GetOutputArray())
                         {
                             return(cveArucoEstimatePoseBoard(iaCorners, iaIds, board.BoardPtr, iaCameraMatrix, iaDistCoeffs, oaRvec,
                                                              oaTvec, useExtrinsicGuess));
                         }
 }
Example #21
 /// <summary>
 /// These functions try to compose the given images (or images stored internally from the other function calls) into the final pano under the assumption that the image transformations were estimated before.
 /// </summary>
 /// <param name="images">Input images</param>
 /// <param name="pano">Final pano.</param>
 /// <returns>Status code.</returns>
 public Stitcher.Status ComposePanorama(IInputArrayOfArrays images, IOutputArray pano)
 {
     using (InputArray iaImages = images.GetInputArray())
         using (OutputArray oaPano = pano.GetOutputArray())
             return(StitchingInvoke.cveStitcherComposePanorama2(_ptr, iaImages, oaPano));
 }
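A usage sketch combining this with EstimateTransform from Example #7; stitcher is assumed to be an already constructed Stitcher, img1 and img2 existing Mats, and the success value is assumed to be exposed as Stitcher.Status.Ok:

 using (VectorOfMat images = new VectorOfMat(img1, img2))
 using (Mat pano = new Mat())
 {
     Stitcher.Status status = stitcher.EstimateTransform(images);
     if (status == Stitcher.Status.Ok)
         status = stitcher.ComposePanorama(images, pano);
 }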
Example #22
 /// <summary>
 /// Decodes the structured light pattern, generating a disparity map.
 /// </summary>
 /// <param name="structuredLightPattern">The structured light pattern</param>
 /// <param name="patternImages">The acquired pattern images to decode (e.g. VectorOfVectorOfMat), loaded as grayscale and previously rectified.</param>
 /// <param name="disparityMap">The decoding result: a CV_64F Mat at image resolution, storing the computed disparity map.</param>
 /// <param name="blackImages">The all-black images needed for shadowMasks computation.</param>
 /// <param name="whiteImages">The all-white images needed for shadowMasks computation.</param>
 /// <param name="flags">Flags setting decoding algorithms.</param>
 /// <returns>True if successful.</returns>
 public static bool Decode(
     this IStructuredLightPattern structuredLightPattern,
     VectorOfVectorOfMat patternImages,
     IOutputArray disparityMap,
     IInputArrayOfArrays blackImages = null,
     IInputArrayOfArrays whiteImages = null,
     DecodeFlag flags = DecodeFlag.Decode3dUnderworld)
 {
     using (OutputArray oaDisparityMap = disparityMap.GetOutputArray())
         using (InputArray iaBlackImages = blackImages == null? InputArray.GetEmpty() : blackImages.GetInputArray())
             using (InputArray iaWhiteImages = whiteImages == null? InputArray.GetEmpty() : whiteImages.GetInputArray())
             {
                 return(cveStructuredLightPatternDecode(
                            structuredLightPattern.StructuredLightPatternPtr,
                            patternImages,
                            oaDisparityMap,
                            iaBlackImages,
                            iaWhiteImages,
                            flags
                            ));
             }
 }
Example #23
 /// <summary>
 /// For each query descriptor, finds the training descriptors not farther than the specified distance.
 /// </summary>
 /// <param name="queryDescriptors">Query set of descriptors.</param>
 /// <param name="trainDescriptors">Train set of descriptors. This set is not added to the train descriptors collection stored in the class object.</param>
 /// <param name="matches">Found matches.</param>
 /// <param name="maxDistance">Threshold for the distance between matched descriptors. Distance means here metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured in Pixels)!</param>
 /// <param name="mask">Mask specifying permissible matches between an input query and train matrices of descriptors.</param>
 /// <param name="compactResult">Parameter used when the mask (or masks) is not empty. If compactResult is false, the matches vector has the same size as queryDescriptors rows. If compactResult is true, the matches vector does not contain matches for fully masked-out query descriptors.</param>
 public void RadiusMatch(
     IInputArray queryDescriptors,
     IInputArray trainDescriptors,
     VectorOfVectorOfDMatch matches,
     float maxDistance,
     IInputArrayOfArrays mask = null,
     bool compactResult       = false)
 {
     using (InputArray iaQueryDesccriptor = queryDescriptors.GetInputArray())
         using (InputArray iaTrainDescriptot = trainDescriptors.GetInputArray())
             using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             {
                 CvInvoke.cveDescriptorMatcherRadiusMatch1(_descriptorMatcherPtr, iaQueryDesccriptor, iaTrainDescriptot, matches, maxDistance, iaMask, compactResult);
             }
 }
Example #24
 /// <summary>
 /// Train the face recognizer with the specific images and labels
 /// </summary>
 /// <param name="images">The images used in the training. This can be a VectorOfMat</param>
 /// <param name="labels">The labels of the images. This can be a VectorOfInt</param>
 public void Train(IInputArrayOfArrays images, IInputArray labels)
 {
     using (InputArray iaImage = images.GetInputArray())
         using (InputArray iaLabels = labels.GetInputArray())
             FaceInvoke.cveFaceRecognizerTrain(_faceRecognizerPtr, iaImage, iaLabels);
 }
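A usage sketch, assuming face1 and face2 are existing grayscale face crops of equal size and that the LBPH recognizer's constructor defaults are acceptable:

 using (LBPHFaceRecognizer recognizer = new LBPHFaceRecognizer())
 using (VectorOfMat faces = new VectorOfMat(face1, face2))
 using (VectorOfInt labels = new VectorOfInt(new int[] { 0, 1 }))
 {
     recognizer.Train(faces, labels);
 }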
Example #25
      /// <summary>
      /// Find groups of Extremal Regions that are organized as text blocks.
      /// </summary>
      /// <param name="image">The image on which ER grouping is to be performed</param>
      /// <param name="channels">Array of single channel images from which the regions were extracted</param>
      /// <param name="erstats">Vector of ERs retrieved from the ERFilter algorithm for each channel</param>
      /// <param name="groupingTrainedFileName">The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)</param>
      /// <param name="minProbability">The minimum probability for accepting a group.</param>
      /// <param name="groupMethods">The grouping methods</param>
      /// <returns>The output of the algorithm that indicates the text regions</returns>
      public static System.Drawing.Rectangle[] ERGrouping(IInputArray image, IInputArrayOfArrays channels, VectorOfERStat[] erstats, GroupingMethod groupMethods = GroupingMethod.OrientationHoriz, String groupingTrainedFileName = null, float minProbability = 0.5f)
      {
         IntPtr[] erstatPtrs = new IntPtr[erstats.Length];

         for (int i = 0; i < erstatPtrs.Length; i++)
         {
            erstatPtrs[i] = erstats[i].Ptr;
         }

         using (VectorOfVectorOfPoint regionGroups = new VectorOfVectorOfPoint())
         using (VectorOfRect groupsBoxes = new VectorOfRect())
         using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaChannels = channels.GetInputArray())
         using (CvString s = (groupingTrainedFileName == null ? new CvString() : new CvString(groupingTrainedFileName)))
         {
            GCHandle erstatsHandle = GCHandle.Alloc(erstatPtrs, GCHandleType.Pinned);
            CvERGrouping(
               iaImage, iaChannels,
               erstatsHandle.AddrOfPinnedObject(), erstatPtrs.Length,
               regionGroups, groupsBoxes,
               groupMethods,
               s, minProbability);

            erstatsHandle.Free();
            return groupsBoxes.ToArray();
         }
      }
Example #26
 /// <summary>
 /// Calculates a histogram of a set of arrays.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">List of the channels used to compute the histogram. </param>
 /// <param name="mask">Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram.</param>
 /// <param name="hist">Output histogram</param>
 /// <param name="histSize">Array of histogram sizes in each dimension.</param>
 /// <param name="ranges">Array of the dims arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="accumulate">Accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time.</param>
 public static void CalcHist(IInputArrayOfArrays images, int[] channels, IInputArray mask, IOutputArray hist, int[] histSize, float[] ranges, bool accumulate)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfInt histSizeVec = new VectorOfInt(histSize))
    using (VectorOfFloat rangesVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaHist = hist.GetOutputArray())
    {
       cveCalcHist(iaImages, channelsVec, iaMask, oaHist, histSizeVec, rangesVec, accumulate);
    }
 }
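A usage sketch computing a 256-bin intensity histogram of a single 8-bit grayscale image; gray is assumed to be an existing single-channel Mat:

 using (VectorOfMat images = new VectorOfMat(gray))
 using (Mat hist = new Mat())
 {
     CvInvoke.CalcHist(
         images,
         new int[] { 0 },          // histogram over channel 0
         null,                     // no mask
         hist,
         new int[] { 256 },        // 256 bins
         new float[] { 0, 256 },   // bin boundaries [0, 256)
         false);                   // do not accumulate into an existing histogram
 }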
Example #27
 /// <summary>
 /// Calculates the back projection of a histogram.
 /// </summary>
 /// <param name="images">Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.</param>
 /// <param name="channels">List of channels used to compute the back projection.</param>
 /// <param name="hist">Input histogram that can be dense or sparse.</param>
 /// <param name="backProject">Destination back projection array that is a single-channel array of the same size and depth as images[0] .</param>
 /// <param name="ranges">Array of arrays of the histogram bin boundaries in each dimension.</param>
 /// <param name="scale"> Optional scale factor for the output back projection.</param>
 public static void CalcBackProject(IInputArrayOfArrays images, int[] channels, IInputArray hist, IOutputArray backProject, float[] ranges, double scale = 1.0)
 {
    using (VectorOfInt channelsVec = new VectorOfInt(channels))
    using (VectorOfFloat rangeVec = new VectorOfFloat(ranges))
    using (InputArray iaImages = images.GetInputArray())
    using (InputArray iaHist = hist.GetInputArray())
    using (OutputArray oaBackProject = backProject.GetOutputArray())
    {
       cveCalcBackProject(iaImages, channelsVec, iaHist, oaBackProject, rangeVec, scale);
    }
 }
Example #28
 /// <summary>
 /// Draws contours outlines or filled contours.
 /// </summary>
 /// <param name="image">Image where the contours are to be drawn. Like in any other drawing function, the contours are clipped with the ROI</param>
 /// <param name="contours">All the input contours. Each contour is stored as a point vector.</param>
 /// <param name="contourIdx">Parameter indicating a contour to draw. If it is negative, all the contours are drawn.</param>
 /// <param name="color">Color of the contours </param>
 /// <param name="maxLevel">Maximal level for drawn contours. If 0, only contour is drawn. If 1, the contour and all contours after it on the same level are drawn. If 2, all contours after and all contours one level below the contours are drawn, etc. If the value is negative, the function does not draw the contours following after contour but draws child contours of contour up to abs(maxLevel)-1 level. </param>
 /// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative the contour interiors are drawn</param>
 /// <param name="lineType">Type of the contour segments</param>
 /// <param name="hierarchy">Optional information about hierarchy. It is only needed if you want to draw only some of the contours</param>
 /// <param name="offset">Shift all the point coordinates by the specified value. It is useful when the contours were retrieved from an image ROI and the ROI offset needs to be taken into account during rendering.</param>
 public static void DrawContours(
    IInputOutputArray image,
    IInputArrayOfArrays contours,
    int contourIdx,
    MCvScalar color,
    int thickness = 1,
    CvEnum.LineType lineType = LineType.EightConnected,
    IInputArray hierarchy = null,
    int maxLevel = int.MaxValue,
    Point offset = new Point())
 {
    using (InputOutputArray ioaImage = image.GetInputOutputArray())
    using (InputArray iaContours = contours.GetInputArray())
    using (InputArray iaHierarchy = hierarchy == null ? InputArray.GetEmpty() : hierarchy.GetInputArray())
       cveDrawContours(
          ioaImage,
          iaContours,
          contourIdx,
          ref color,
          thickness,
          lineType,
          iaHierarchy,
          maxLevel,
          ref offset);
 }
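A usage sketch: find the external contours of a binary mask and draw them onto a colour canvas; binary and canvas are assumed to be existing Mats of the same size:

 using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
 {
     CvInvoke.FindContours(binary, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
     // Draw every contour (index -1) with a 2-pixel red outline (BGR colour order).
     CvInvoke.DrawContours(canvas, contours, -1, new MCvScalar(0, 0, 255), 2);
 }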
Example #29
 /// <summary>
 /// This function is the opposite of cvSplit. If the destination array has N channels and the first N input channels are not IntPtr.Zero, they are all copied to the destination array; otherwise, if only a single source channel of the first N is not IntPtr.Zero, that particular channel is copied into the destination array; otherwise an error is raised. The rest of the source channels (beyond the first N) must always be IntPtr.Zero. For IplImage, cvCopy with COI set can also be used to insert a single channel into the image.
 /// </summary>
 /// <param name="mv">Input vector of matrices to be merged; all the matrices in mv must have the same size and the same depth.</param>
 /// <param name="dst">output array of the same size and the same depth as mv[0]; The number of channels will be the total number of channels in the matrix array.</param>
 public static void Merge(IInputArrayOfArrays mv, IOutputArray dst)
 {
    using (InputArray iaMv = mv.GetInputArray())
    using (OutputArray oaDst = dst.GetOutputArray())
       cveMerge(iaMv, oaDst);
 }
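A usage sketch: split an existing 3-channel image (bgr) into its planes and merge them back into a new Mat:

 using (VectorOfMat planes = new VectorOfMat())
 using (Mat merged = new Mat())
 {
     CvInvoke.Split(bgr, planes);    // one single-channel Mat per plane
     CvInvoke.Merge(planes, merged); // recombine into a 3-channel image
 }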
Example #30
 /// <summary>
 /// This function receives the detected markers and returns their pose estimation with respect to the camera individually. So for each marker, one rotation and translation vector is returned. The returned transformation is the one that transforms points from each marker coordinate system to the camera coordinate system. The marker coordinate system is centered on the middle of the marker, with the Z axis perpendicular to the marker plane. The coordinates of the four corners of the marker in its own coordinate system are: (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
 /// </summary>
 /// <param name="corners">vector of already detected markers corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="markerLength">the length of the markers' side. The returned translation vectors will be in the same unit. Normally, the unit is meters.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">array of output rotation vectors. Each element in rvecs corresponds to the specific marker in corners.</param>
 /// <param name="tvecs">array of output translation vectors (e.g. VectorOfPoint3D32F). Each element in tvecs corresponds to the specific marker in corners.</param>
 public static void EstimatePoseSingleMarkers(IInputArrayOfArrays corners, float markerLength,
    IInputArray cameraMatrix, IInputArray distCoeffs,
    IOutputArrayOfArrays rvecs, IOutputArrayOfArrays tvecs)
 {
    using (InputArray iaCorners = corners.GetInputArray())
    using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
    using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
    using (OutputArray oaRvecs = rvecs.GetOutputArray())
    using (OutputArray oaTvecs = tvecs.GetOutputArray())
    {
       cveArucoEstimatePoseSingleMarkers(iaCorners, markerLength, iaCameraMatrix, iaDistCoeffs, oaRvecs, oaTvecs);
    }
 }
Example #31
 /// <summary>
 /// The function cvMixChannels is a generalized form of cvSplit and cvMerge and some forms of cvCvtColor. It can be used to change the order of the planes, add/remove alpha channel, extract or insert a single plane or multiple planes etc.
 /// </summary>
 /// <param name="src">The array of input arrays.</param>
 /// <param name="dst">The array of output arrays</param>
 /// <param name="fromTo">The array of pairs of indices of the planes copied. from_to[k*2] is the 0-based index of the input plane, and from_to[k*2+1] is the index of the output plane, where the continuous numbering of the planes over all the input and over all the output arrays is used. When from_to[k*2] is negative, the corresponding output plane is filled with 0's.</param>
 /// <remarks>Unlike many other new-style C++ functions in OpenCV, mixChannels requires the output arrays to be pre-allocated before calling the function.</remarks>
 public static void MixChannels(
    IInputArrayOfArrays src,
    IInputOutputArray dst,
    int[] fromTo)
 {
    GCHandle handle = GCHandle.Alloc(fromTo, GCHandleType.Pinned);
    using (InputArray iaSrc = src.GetInputArray())
    using (InputOutputArray ioaDst = dst.GetInputOutputArray())
       cveMixChannels(iaSrc, ioaDst, handle.AddrOfPinnedObject(), fromTo.Length >> 1);
    handle.Free();
 }
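A usage sketch swapping the B and R planes of an existing 8-bit 3-channel Mat (bgr); as the remark above says, the destination must be pre-allocated, and a VectorOfMat is assumed to be acceptable for the dst parameter:

 using (Mat rgb = new Mat(bgr.Rows, bgr.Cols, DepthType.Cv8U, 3))
 using (VectorOfMat src = new VectorOfMat(bgr))
 using (VectorOfMat dst = new VectorOfMat(rgb))
 {
     // fromTo index pairs: source plane 0 -> destination plane 2, 1 -> 1, 2 -> 0
     CvInvoke.MixChannels(src, dst, new int[] { 0, 2, 1, 1, 2, 0 });
 }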