Example #1
 /// <summary>
 /// Generates the structured light pattern to project.
 /// </summary>
 /// <param name="structuredLightPattern">The strucutred light pattern</param>
 /// <param name="patternImages">
 /// The generated pattern: a VectorOfMat, in which each image is a CV_8U Mat at the projector's resolution.
 /// </param>
 /// <returns>True if successful.</returns>
 public static bool Generate(
     this IStructuredLightPattern structuredLightPattern,
     IOutputArrayOfArrays patternImages)
 {
     using (OutputArray oaPatternImages = patternImages.GetOutputArray())
          return cveStructuredLightPatternGenerate(structuredLightPattern.StructuredLightPatternPtr, oaPatternImages);
 }
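A minimal usage sketch for the extension above: it only uses the Generate wrapper shown here plus a VectorOfMat, and assumes the caller has already constructed an IStructuredLightPattern implementation (for example Emgu CV's GrayCodePattern); the helper name is illustrative.
 // Sketch: collect the generated pattern images into a VectorOfMat.
 // Namespaces assumed: System, Emgu.CV, Emgu.CV.Util, Emgu.CV.Structured_light.
 public static VectorOfMat GeneratePatternImages(IStructuredLightPattern pattern)
 {
     VectorOfMat patternImages = new VectorOfMat();
     if (!pattern.Generate(patternImages))
         throw new Exception("Failed to generate the structured light pattern.");
     // Each Mat is a CV_8U image at the projector's resolution.
     return patternImages;
 }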
Example #2
 /// <summary>
 /// Returns output quality map images that were generated during computation, if supported by the algorithm.
 /// </summary>
 /// <param name="qualityBase">The quality base object</param>
 /// <param name="dst">Output quality map images that were generated during computation, if supported by the algorithm.</param>
 public static void GetQualityMap(
     this IQualityBase qualityBase,
     IOutputArrayOfArrays dst)
 {
     using (OutputArray oaDst = dst.GetOutputArray())
         cveQualityBaseGetQualityMap(qualityBase.QualityBasePtr, oaDst);
 }
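A brief, hedged usage sketch: the helper assumes an IQualityBase implementation (one of the Emgu.CV.Quality algorithms) has already been created and run by the caller, and simply collects whatever quality maps the algorithm produced; the helper name is illustrative.
 // Sketch: gather the per-image quality maps from a quality-metric algorithm.
 public static VectorOfMat CollectQualityMaps(IQualityBase quality)
 {
     VectorOfMat maps = new VectorOfMat();
     quality.GetQualityMap(maps);   // stays empty if the algorithm does not produce quality maps
     return maps;
 }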
Example #3
 /// <summary>
 /// Parse a 4D blob and output the images it contains as 2D arrays (one Mat per image).
 /// </summary>
 /// <param name="blob">4D blob, e.g. as produced by BlobFromImage or BlobFromImages.</param>
 /// <param name="images">Output array where the extracted images are stored.</param>
 public static void ImagesFromBlob(Mat blob, IOutputArrayOfArrays images)
 {
     using (OutputArray oaImages = images.GetOutputArray())
     {
         cveDnnImagesFromBlob(blob, oaImages);
     }
 }
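A small round-trip sketch for the wrapper above, assuming DnnInvoke.BlobFromImage is available with its usual default arguments; the file name is illustrative.
 // Sketch: pack an image into a 4D blob, then unpack it again with ImagesFromBlob.
 using (Mat img = CvInvoke.Imread("sample.png"))
 using (Mat blob = DnnInvoke.BlobFromImage(img))      // NCHW blob with default scale/size/mean
 using (VectorOfMat images = new VectorOfMat())
 {
     DnnInvoke.ImagesFromBlob(blob, images);           // one Mat per image packed in the blob
     Console.WriteLine("Recovered {0} image(s)", images.Size);
 }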
Example #4
 /// <summary>
 /// Given the input frame, create input blob, run net and return the output blobs.
 /// </summary>
 /// <param name="frame">The input image.</param>
 /// <param name="outs">Allocated output blobs, which will store results of the computation.</param>
 public void Predict(IInputArray frame, IOutputArrayOfArrays outs)
 {
     using (InputArray iaFrame = frame.GetInputArray())
         using (OutputArray oaOuts = outs.GetOutputArray())
         {
             DnnInvoke.cveModelPredict(_model, iaFrame, oaOuts);
         }
 }
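A hedged usage sketch for Predict, assuming an Emgu.CV.Dnn.Model instance has already been constructed from the network files (construction omitted); the image path is illustrative.
 // Sketch: run one frame through an already-constructed Dnn Model.
 public static void RunModel(Model model, string imagePath)
 {
     using (Mat frame = CvInvoke.Imread(imagePath))
     using (VectorOfMat outs = new VectorOfMat())
     {
         model.Predict(frame, outs);   // one Mat per network output
         Console.WriteLine("The network produced {0} output blob(s)", outs.Size);
     }
 }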
Example #5
 /// <summary>
 /// Compute the different channels to be processed independently in the N&amp;M algorithm.
 /// </summary>
 /// <param name="src">Source image. Must be RGB CV_8UC3.</param>
 /// <param name="channels">Output vector of Mat where computed channels are stored.</param>
 /// <param name="mode">Mode of operation</param>
 public static void ComputeNMChannels(IInputArray src, IOutputArrayOfArrays channels, ERFilterNMMode mode = ERFilterNMMode.RGBLGrad)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaChannels = channels.GetOutputArray())
         {
             cveComputeNMChannels(iaSrc, oaChannels, mode);
         }
 }
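A short usage sketch, assuming the static class exposing ComputeNMChannels (Emgu CV's text module) is in scope; the image path is illustrative. Note that Imread returns BGR data while the documentation asks for an RGB CV_8UC3 image, so a channel swap may be needed first.
 // Sketch: compute the Neumann & Matas channels for an input image.
 using (Mat src = CvInvoke.Imread("scene.jpg"))        // 3-channel 8-bit image
 using (VectorOfMat channels = new VectorOfMat())
 {
     ComputeNMChannels(src, channels, ERFilterNMMode.RGBLGrad);
     Console.WriteLine("Computed {0} channels", channels.Size);
 }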
Example #6
 /// <summary>
 /// Runs forward pass to compute output of layer with name outputName.
 /// </summary>
 /// <param name="outputBlobs">Contains all output blobs for specified layer.</param>
 /// <param name="outputName">Name for layer which output is needed to get</param>
 public void Forward(IOutputArrayOfArrays outputBlobs, String outputName = "")
 {
     using (OutputArray oaOutputBlobs = outputBlobs.GetOutputArray())
         using (CvString outputNameStr = new CvString(outputName))
         {
             DnnInvoke.cveDnnNetForward2(_ptr, oaOutputBlobs, outputNameStr);
         }
 }
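A hedged sketch of calling Forward on a loaded network, assuming DnnInvoke.ReadNet, Net.SetInput and DnnInvoke.BlobFromImage behave as in Emgu CV's Dnn module; the file names and the layer name are illustrative.
 // Sketch: load a network, feed one blob, and fetch the output of a named layer.
 using (Net net = DnnInvoke.ReadNet("model.onnx"))
 using (Mat img = CvInvoke.Imread("input.jpg"))
 using (Mat blob = DnnInvoke.BlobFromImage(img))
 using (VectorOfMat outputBlobs = new VectorOfMat())
 {
     net.SetInput(blob);
     net.Forward(outputBlobs, "output");   // layer name is illustrative
     Console.WriteLine("Got {0} output blob(s)", outputBlobs.Size);
 }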
Example #7
 /// <summary>
 /// This function receives the detected markers and returns their pose estimation with respect to the camera individually. So for each marker, one rotation and translation vector is returned. The returned transformation is the one that transforms points from each marker coordinate system to the camera coordinate system. The marker coordinate system is centered on the middle of the marker, with the Z axis perpendicular to the marker plane. The coordinates of the four corners of the marker in its own coordinate system are: (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0), (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
 /// </summary>
 /// <param name="corners">vector of already detected markers corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array should be Nx4. The order of the corners should be clockwise.</param>
 /// <param name="markerLength">the length of the markers' side. The returning translation vectors will be in the same unit. Normally, unit is meters.</param>
 /// <param name="cameraMatrix">input 3x3 floating-point camera matrix</param>
 /// <param name="distCoeffs">vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">array of output rotation vectors. Each element in rvecs corresponds to the specific marker in imgPoints.</param>
 /// <param name="tvecs">array of output translation vectors (e.g. VectorOfPoint3D32F ). Each element in tvecs corresponds to the specific marker in imgPoints.</param>
 public static void EstimatePoseSingleMarkers(IInputArrayOfArrays corners, float markerLength,
                                              IInputArray cameraMatrix, IInputArray distCoeffs,
                                              IOutputArrayOfArrays rvecs, IOutputArrayOfArrays tvecs)
 {
     using (InputArray iaCorners = corners.GetInputArray())
         using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
             using (InputArray iaDistCoeffs = distCoeffs.GetInputArray())
                 using (OutputArray oaRvecs = rvecs.GetOutputArray())
                     using (OutputArray oaTvecs = tvecs.GetOutputArray())
                     {
                         cveArucoEstimatePoseSingleMarkers(iaCorners, markerLength, iaCameraMatrix, iaDistCoeffs, oaRvecs, oaTvecs);
                     }
 }
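A hedged usage sketch: the corners are assumed to come from a prior DetectMarkers call and the camera matrix / distortion coefficients from a prior calibration; the 0.05 m marker side length and the ArucoInvoke class name are assumptions.
 // Sketch: estimate one rotation/translation vector per detected marker.
 public static void EstimatePoses(VectorOfVectorOfPointF corners, Mat cameraMatrix, Mat distCoeffs)
 {
     using (VectorOfPoint3D32F rvecs = new VectorOfPoint3D32F())
     using (VectorOfPoint3D32F tvecs = new VectorOfPoint3D32F())
     {
         ArucoInvoke.EstimatePoseSingleMarkers(corners, 0.05f, cameraMatrix, distCoeffs, rvecs, tvecs);
         // rvecs[i]/tvecs[i] map marker i's coordinate system into the camera frame.
         Console.WriteLine("Estimated poses for {0} marker(s)", rvecs.Size);
     }
 }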
Example #8
 /// <summary>
 /// Performs marker detection in the input image. Only markers included in the specific dictionary are searched. For each detected marker, it returns the 2D position of its corner in the image and its corresponding identifier. Note that this function does not perform pose estimation.
 /// </summary>
 /// <param name="image">input image</param>
 /// <param name="dict">indicates the type of markers that will be searched</param>
 /// <param name="corners">	vector of detected marker corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array is Nx4. The order of the corners is clockwise.</param>
 /// <param name="ids">vector of identifiers of the detected markers. The identifier is of type int (e.g. VectorOfInt). For N detected markers, the size of ids is also N. The identifiers have the same order than the markers in the imgPoints array.</param>
 /// <param name="parameters">marker detection parameters</param>
 /// <param name="rejectedImgPoints">contains the imgPoints of those squares whose inner code has not a correct codification. Useful for debugging purposes.</param>
 public static void DetectMarkers(
     IInputArray image, Dictionary dict, IOutputArrayOfArrays corners,
     IOutputArray ids, DetectorParameters parameters,
     IOutputArrayOfArrays rejectedImgPoints = null
     )
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCorners = corners.GetOutputArray())
             using (OutputArray oaIds = ids.GetOutputArray())
                 using (OutputArray oaRejectedImgPoints = rejectedImgPoints != null ? rejectedImgPoints.GetOutputArray() : OutputArray.GetEmpty())
                 {
                     cveArucoDetectMarkers(iaImage, dict, oaCorners, oaIds, ref parameters, oaRejectedImgPoints);
                 }
 }
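A hedged detection sketch, assuming the predefined-dictionary constructor and DetectorParameters.GetDefault() exist as in Emgu CV's Aruco module; the image path and dictionary choice are illustrative.
 // Sketch: detect ArUco markers in a single image.
 using (Mat image = CvInvoke.Imread("board.jpg"))
 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
 using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
 using (VectorOfInt ids = new VectorOfInt())
 {
     DetectorParameters parameters = DetectorParameters.GetDefault();
     ArucoInvoke.DetectMarkers(image, dict, corners, ids, parameters);
     Console.WriteLine("Detected {0} marker(s)", ids.Size);
 }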
Example #9
 /// <summary>
 /// Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
 /// </summary>
 /// <param name="img">8-bit input image.</param>
 /// <param name="pyramid">Output pyramid.</param>
 /// <param name="winSize">Window size of optical flow algorithm. Must be not less than winSize argument of calcOpticalFlowPyrLK. It is needed to calculate required padding for pyramid levels.</param>
 /// <param name="maxLevel">0-based maximal pyramid level number.</param>
 /// <param name="withDerivatives">Set to precompute gradients for the every pyramid level. If pyramid is constructed without the gradients then calcOpticalFlowPyrLK will calculate them internally.</param>
 /// <param name="pyrBorder">The border mode for pyramid layers.</param>
 /// <param name="derivBorder">The border mode for gradients.</param>
 /// <param name="tryReuseInputImage">put ROI of input image into the pyramid if possible. You can pass false to force data copying.</param>
 /// <returns>Number of levels in constructed pyramid. Can be less than maxLevel.</returns>
 public static int BuildOpticalFlowPyramid(
     IInputArray img,
     IOutputArrayOfArrays pyramid,
     Size winSize,
     int maxLevel,
     bool withDerivatives          = true,
     CvEnum.BorderType pyrBorder   = CvEnum.BorderType.Reflect101,
     CvEnum.BorderType derivBorder = CvEnum.BorderType.Constant,
     bool tryReuseInputImage       = true)
 {
     using (InputArray iaImage = img.GetInputArray())
         using (OutputArray oaPyramid = pyramid.GetOutputArray())
         {
              return cveBuildOpticalFlowPyramid(iaImage, oaPyramid, ref winSize, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage);
         }
 }
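A short usage sketch, assuming the wrapper is exposed on CvInvoke as in Emgu CV; the file name, window size and level count are illustrative and mirror typical calcOpticalFlowPyrLK settings.
 // Sketch: build the pyramid once so it can be reused across calcOpticalFlowPyrLK calls.
 using (Mat frame = CvInvoke.Imread("frame.png"))
 using (Mat gray = new Mat())
 using (VectorOfMat pyramid = new VectorOfMat())
 {
     CvInvoke.CvtColor(frame, gray, ColorConversion.Bgr2Gray);    // 8-bit input required
     int levels = CvInvoke.BuildOpticalFlowPyramid(gray, pyramid, new Size(21, 21), 3);
     Console.WriteLine("Pyramid has {0} level(s)", levels);       // may be fewer than maxLevel
 }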
Example #10
 /// <summary>
 /// Both detects and decodes QR code.
 /// </summary>
 /// <param name="img">Supports grayscale or color (BGR) image</param>
 /// <param name="points">Optional output array of vertices of the found QR code quadrangle. Will be empty if not found.</param>
 /// <returns>The array of decoded strings.</returns>
 public String[] DetectAndDecode(
     IInputArray img,
     IOutputArrayOfArrays points)
 {
     using (InputArray iaImg = img.GetInputArray())
          using (OutputArray oaPoints = points == null ? OutputArray.GetEmpty() : points.GetOutputArray())
             using (VectorOfCvString result = new VectorOfCvString())
             {
                 WeChatQRCodeInvoke.cveWeChatQRCodeDetectAndDecode(
                     _ptr,
                     iaImg,
                     oaPoints,
                     result);
                  return result.ToArray();
             }
 }
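A hedged usage sketch: the detector is assumed to be constructed elsewhere (the WeChat QR code engine needs its detection and super-resolution model files), and the class name WeChatQRCode plus the image path are assumptions.
 // Sketch: decode every QR code in an image with an already-constructed detector.
 public static void DecodeQrCodes(WeChatQRCode detector, string imagePath)
 {
     using (Mat img = CvInvoke.Imread(imagePath))
     using (VectorOfMat points = new VectorOfMat())   // one quadrangle of vertices per decoded code
     {
         string[] texts = detector.DetectAndDecode(img, points);
         foreach (string text in texts)
             Console.WriteLine(text);
     }
 }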
Example #11
 /// <summary>
 /// Detect objects by template matching. Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.
 /// </summary>
 /// <param name="sources">Source images, one for each modality.</param>
 /// <param name="threshold">Similarity threshold, a percentage between 0 and 100.</param>
 /// <param name="matches">Template matches, sorted by similarity score.</param>
 /// <param name="classIds">If non-empty, only search for the desired object classes.</param>
 /// <param name="quantizedImages">Optionally return vector&lt;Mat&gt; of quantized images.</param>
 /// <param name="masks">The masks for consideration during matching. The masks should be CV_8UC1 where 255 represents a valid pixel. If non-empty, the vector must be the same size as sources. Each element must be empty or the same size as its corresponding source.</param>
 public void Match(
     VectorOfMat sources,
     float threshold,
     VectorOfLinemodMatch matches,
     VectorOfCvString classIds            = null,
     IOutputArrayOfArrays quantizedImages = null,
     VectorOfMat masks = null)
 {
     using (OutputArray oaQuantizedImages =
                quantizedImages == null ? OutputArray.GetEmpty() : quantizedImages.GetOutputArray())
     {
         LinemodInvoke.cveLinemodDetectorMatch(
             _ptr,
             sources,
             threshold,
             matches,
             classIds,
             oaQuantizedImages,
             masks
             );
     }
 }
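A hedged usage sketch: the detector (with templates already added) and the per-modality source images are assumed to exist, and the Detector type name is an assumption about the surrounding Emgu CV Linemod class.
 // Sketch: match the stored templates against one source image per modality.
 public static void FindMatches(Detector detector, VectorOfMat sources)
 {
     using (VectorOfLinemodMatch matches = new VectorOfLinemodMatch())
     {
         detector.Match(sources, 80.0f, matches);   // keep matches with similarity >= 80%
         Console.WriteLine("Found {0} match(es)", matches.Size);
     }
 }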
Example #12
 /// <summary>
 /// Runs forward pass to compute outputs of layers listed in outBlobNames.
 /// </summary>
 /// <param name="outputBlobs">Contains blobs for first outputs of specified layers.</param>
 /// <param name="outBlobNames">Names for layers which outputs are needed to get</param>
 public void Forward(IOutputArrayOfArrays outputBlobs, String[] outBlobNames)
 {
     using (OutputArray oaOutputBlobs = outputBlobs.GetOutputArray())
         using (VectorOfCvString vcs = new VectorOfCvString(outBlobNames))
             DnnInvoke.cveDnnNetForward3(_ptr, oaOutputBlobs, vcs);
 }
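A hedged sketch for multi-output networks, assuming Net exposes an UnconnectedOutLayersNames property as in Emgu CV's Dnn module; the file names, input size and scale are illustrative.
 // Sketch: run the network through every unconnected output layer in one pass.
 using (Net net = DnnInvoke.ReadNet("yolo.weights", "yolo.cfg"))
 using (Mat img = CvInvoke.Imread("street.jpg"))
 using (Mat blob = DnnInvoke.BlobFromImage(img, 1.0 / 255.0, new Size(416, 416)))
 using (VectorOfMat outputBlobs = new VectorOfMat())
 {
     net.SetInput(blob);
     string[] outNames = net.UnconnectedOutLayersNames;   // assumed property name
     net.Forward(outputBlobs, outNames);
     Console.WriteLine("Collected {0} output blob(s)", outputBlobs.Size);
 }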