Example #1
 /// <summary>
 /// Calculates a sparse optical flow.
 /// </summary>
 /// <param name="sparseFlow">The sparse optical flow</param>
 /// <param name="prevImg">First input image.</param>
 /// <param name="nextImg">Second input image of the same size and the same type as <paramref name="prevImg"/>.</param>
 /// <param name="prevPts">Vector of 2D points for which the flow needs to be found.</param>
 /// <param name="nextPts">Output vector of 2D points containing the calculated new positions of input features in the second image.</param>
 /// <param name="status">Output status vector. Each element of the vector is set to 1 if the flow for the corresponding features has been found. Otherwise, it is set to 0.</param>
 /// <param name="err">Optional output vector that contains error response for each point (inverse confidence).</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void Calc(this ICudaSparseOpticalFlow sparseFlow, IInputArray prevImg, IInputArray nextImg, IInputArray prevPts, IInputOutputArray nextPts, IOutputArray status = null, IOutputArray err = null, Stream stream = null)
 {
     using (InputArray iaPrevImg = prevImg.GetInputArray())
         using (InputArray iaNextImg = nextImg.GetInputArray())
             using (InputArray iaPrevPts = prevPts.GetInputArray())
                 using (InputOutputArray ioaNextPts = nextPts.GetInputOutputArray())
                     using (OutputArray oaStatus = (status == null ? OutputArray.GetEmpty() : status.GetOutputArray()))
                         using (OutputArray oaErr = (err == null ? OutputArray.GetEmpty() : err.GetOutputArray()))
                             cudaSparseOpticalFlowCalc(sparseFlow.SparseOpticalFlowPtr, iaPrevImg, iaNextImg, iaPrevPts, ioaNextPts,
                                                       oaStatus, oaErr, (stream == null) ? IntPtr.Zero : stream.Ptr);
 }
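For reference, a minimal calling sketch is shown below. It assumes Emgu CV's CudaSparsePyrLKOpticalFlow as the ICudaSparseOpticalFlow implementation; the file names, window size, and enum names are placeholders following recent Emgu releases.

 // Sketch only: prev.png / next.png are placeholder inputs, and prevPts must be
 // filled with feature points (e.g. from a GPU corner detector) before the call.
 using (CudaSparsePyrLKOpticalFlow flow = new CudaSparsePyrLKOpticalFlow(new Size(21, 21)))
 using (Mat prevFrame = CvInvoke.Imread("prev.png", ImreadModes.Grayscale))
 using (Mat nextFrame = CvInvoke.Imread("next.png", ImreadModes.Grayscale))
 using (GpuMat prevImg = new GpuMat())
 using (GpuMat nextImg = new GpuMat())
 using (GpuMat prevPts = new GpuMat())
 using (GpuMat nextPts = new GpuMat())
 using (GpuMat status = new GpuMat())
 {
     prevImg.Upload(prevFrame);
     nextImg.Upload(nextFrame);
     flow.Calc(prevImg, nextImg, prevPts, nextPts, status); // stream == null: synchronous
 }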
Example #2
 public bool TrainE(IInputArray samples, IInputArray means0, IInputArray covs0, IInputArray weights0,
                    IOutputArray loglikelihoods, IOutputArray labels, IOutputArray probs)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaMeans0 = means0.GetInputArray())
             using (InputArray iaCovs0 = covs0 == null ? InputArray.GetEmpty() : covs0.GetInputArray())
                 using (InputArray iaWeights = weights0 == null ? InputArray.GetEmpty() : weights0.GetInputArray())
                     using (OutputArray oaLogLikelihood = loglikelihoods == null ? OutputArray.GetEmpty() : loglikelihoods.GetOutputArray())
                         using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                             using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                                 return(MlInvoke.CvEMTrainE(_ptr, iaSamples, iaMeans0, iaCovs0, iaWeights, oaLogLikelihood, oaLabels, oaProbs));
 }
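A minimal calling sketch, assuming this TrainE lives on Emgu CV's Emgu.CV.ML.EM class and that samples and means0 hold real data (the dimensions here are placeholders); covs0, weights0 and the output arrays may be null:

 using (EM em = new EM())
 using (Mat samples = new Mat(100, 2, DepthType.Cv32F, 1)) // placeholder training data
 using (Mat means0 = new Mat(2, 2, DepthType.Cv32F, 1))    // one initial mean per cluster
 using (Mat labels = new Mat())
 {
     bool converged = em.TrainE(samples, means0, null, null, null, labels, null);
 }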
Example #3
        /// <summary>
        /// Predict the probability of the <paramref name="samples"/>
        /// </summary>
        /// <param name="samples">The input samples</param>
        /// <param name="probs">The prediction results, should have the same # of rows as the <paramref name="samples"/></param>
        public MCvPoint2D64f Predict(IInputArray samples, IOutputArray probs = null)
        {
            MCvPoint2D64f result = new MCvPoint2D64f();

            using (InputArray iaSamples = samples.GetInputArray())
                using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                    MlInvoke.CvEMPredict(
                        _ptr,
                        iaSamples,
                        ref result,
                        oaProbs);
            return(result);
        }
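A usage sketch under the same assumptions; per OpenCV's EM::predict2, the returned point is expected to carry the likelihood logarithm and the index of the most probable mixture component:

 using (EM em = new EM())
 using (Mat sample = new Mat(1, 2, DepthType.Cv32F, 1)) // a single feature vector
 using (Mat probs = new Mat())
 {
     // ... train em first, e.g. with TrainE from the previous example ...
     MCvPoint2D64f result = em.Predict(sample, probs);
 }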
Example #4
 /// <summary>
 /// Apply a transformation, given a pre-estimated transformation parameters.
 /// </summary>
 /// <param name="transformer">The shape transformer</param>
 /// <param name="input">Contour (set of points) to apply the transformation.</param>
 /// <param name="output">Output contour.</param>
 /// <returns></returns>
 public static float ApplyTransformation(
     this IShapeTransformer transformer,
     IInputArray input,
     IOutputArray output = null)
 {
     using (InputArray iaInput = input.GetInputArray())
         using (OutputArray oaOutput = output == null ? OutputArray.GetEmpty() : output.GetOutputArray())
         {
             return(cveShapeTransformerApplyTransformation(
                        transformer.ShapeTransformerPtr,
                        iaInput,
                        oaOutput));
         }
 }
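A minimal sketch, assuming Emgu.CV.Shape's ThinPlateSplineShapeTransformer as the IShapeTransformer; the points are placeholders, and the transformation must be estimated from matched shapes beforehand:

 using (ThinPlateSplineShapeTransformer tps = new ThinPlateSplineShapeTransformer())
 using (VectorOfPointF contour = new VectorOfPointF(
            new PointF[] { new PointF(0, 0), new PointF(10, 5) }))
 using (Mat transformed = new Mat())
 {
     // ... estimate the transformation from matched point sets first ...
     float cost = tps.ApplyTransformation(contour, transformed);
 }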
Example #5
 public bool CvEMTrainM(
     IInputArray samples,
     IInputArray probs0,
     IOutputArray logLikelihoods,
     IOutputArray labels,
     IOutputArray probs)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaProbs0 = probs0.GetInputArray())
             using (OutputArray oaLogLikelihood = logLikelihoods == null ? OutputArray.GetEmpty() : logLikelihoods.GetOutputArray())
                 using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                     using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                         return(MlInvoke.CvEMTrainM(_ptr, iaSamples, iaProbs0, oaLogLikelihood, oaLabels, oaProbs));
 }
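A calling sketch for the M-step variant, assuming the surrounding wrapper class is Emgu's EM; probs0 supplies an initial per-sample, per-cluster probability (placeholder sizes):

 using (EM em = new EM())
 using (Mat samples = new Mat(100, 2, DepthType.Cv32F, 1)) // placeholder samples
 using (Mat probs0 = new Mat(100, 2, DepthType.Cv32F, 1))  // initial probabilities, one column per cluster
 using (Mat labels = new Mat())
 {
     bool converged = em.CvEMTrainM(samples, probs0, null, labels, null);
 }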
Example #6
 public void trainE(IInputArray samples, IInputArray means0, IInputArray covs0, IInputArray weights0,
                    IOutputArray loglikelihoods, IOutputArray labels, IOutputArray probs)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaMeans0 = means0.GetInputArray())
             using (InputArray iaCovs0 = covs0 == null ? InputArray.GetEmpty() : covs0.GetInputArray())
                 using (InputArray iaWeights = weights0 == null ? InputArray.GetEmpty() : weights0.GetInputArray())
                     using (OutputArray oaLogLikelihood = loglikelihoods == null ? OutputArray.GetEmpty() : loglikelihoods.GetOutputArray())
                         using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                             using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                             {
                                 MlInvoke.CvEMTrainE(_ptr, iaSamples, iaMeans0, iaCovs0, iaWeights, oaLogLikelihood, oaLabels,
                                                     oaProbs, ref _statModel, ref _algorithm);
                             }
 }
Example #7
 public void TrainM(
     IInputArray samples,
     IInputArray probs0,
     IOutputArray logLikelihoods,
     IOutputArray labels,
     IOutputArray probs)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (InputArray iaProbs0 = probs0.GetInputArray())
             using (OutputArray oaLogLikelihood = logLikelihoods == null ? OutputArray.GetEmpty() : logLikelihoods.GetOutputArray())
                 using (OutputArray oaLabels = labels == null ? OutputArray.GetEmpty() : labels.GetOutputArray())
                     using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
                     {
                         MlInvoke.CvEMTrainM(_ptr, iaSamples, iaProbs0, oaLogLikelihood, oaLabels, oaProbs, ref _statModel, ref _algorithm);
                     }
 }
Example #8
 /// <summary>
 /// Calibrate a camera using aruco markers.
 /// </summary>
 /// <param name="corners">Vector of detected marker corners in all frames. The corners should have the same format returned by detectMarkers</param>
 /// <param name="ids">List of identifiers for each marker in corners</param>
 /// <param name="counter">Number of markers in each frame so that corners and ids can be split</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;>). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraAruco(
     IInputArray corners, IInputArray ids, IInputArray counter, IBoard board, Size imageSize,
     IInputOutputArray cameraMatrix, IInputOutputArray distCoeffs, IOutputArray rvecs, IOutputArray tvecs,
     CalibType flags, MCvTermCriteria criteria)
 {
     using (InputArray iaCorners = corners.GetInputArray())
         using (InputArray iaIds = ids.GetInputArray())
             using (InputArray iaCounter = counter.GetInputArray())
                 using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
                     using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
                         using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
                             using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
                             {
                                 return(cveArucoCalibrateCameraAruco(iaCorners, iaIds, iaCounter, board.BoardPtr, ref imageSize,
                                                                     ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs, flags, ref criteria));
                             }
 }
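A hedged usage sketch; GridBoard, Dictionary and the accumulation of corners/ids/counter across frames come from Emgu.CV.Aruco, and the board geometry and image size are illustrative:

 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
 using (GridBoard board = new GridBoard(4, 4, 80, 30, dict))
 using (VectorOfVectorOfPointF allCorners = new VectorOfVectorOfPointF())
 using (VectorOfInt allIds = new VectorOfInt())
 using (VectorOfInt markerCounterPerFrame = new VectorOfInt())
 using (Mat cameraMatrix = new Mat())
 using (Mat distCoeffs = new Mat())
 {
     // ... append the DetectMarkers output of every frame to allCorners / allIds,
     // and the per-frame marker count to markerCounterPerFrame ...
     double reprojError = ArucoInvoke.CalibrateCameraAruco(
         allCorners, allIds, markerCounterPerFrame, board, new Size(640, 480),
         cameraMatrix, distCoeffs, null, null, CalibType.Default,
         new MCvTermCriteria(30, 1e-3));
 }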
Example #9
 /// <summary>
 /// Compute a wrapped phase map from sinusoidal patterns.
 /// </summary>
 /// <param name="patternImages">Input data to compute the wrapped phase map.</param>
 /// <param name="wrappedPhaseMap">Wrapped phase map obtained through one of the three methods.</param>
 /// <param name="shadowMask">Mask used to discard shadow regions.</param>
 /// <param name="fundamental">Fundamental matrix used to compute epipolar lines and ease the matching step.</param>
 public void ComputePhaseMap(
     IInputArrayOfArrays patternImages,
     IOutputArray wrappedPhaseMap,
     IOutputArray shadowMask = null,
     IInputArray fundamental = null)
 {
     using (InputArray iaPatternImages = patternImages.GetInputArray())
         using (OutputArray oaWrappedPhaseMap = wrappedPhaseMap.GetOutputArray())
             using (OutputArray oaShadowMask = shadowMask == null ? OutputArray.GetEmpty() : shadowMask.GetOutputArray())
                 using (InputArray iaFundamental = fundamental == null ? InputArray.GetEmpty() : fundamental.GetInputArray())
                 {
                     StructuredLightInvoke.cveSinusoidalPatternComputePhaseMap(
                         _ptr,
                         iaPatternImages,
                         oaWrappedPhaseMap,
                         oaShadowMask,
                         iaFundamental);
                 }
 }
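A minimal sketch for this instance method; it assumes the SinusoidalPattern wrapper can be default-constructed (its pattern parameters are omitted here) and that patternImages is filled with the captured fringe images:

 using (SinusoidalPattern pattern = new SinusoidalPattern())
 using (VectorOfMat patternImages = new VectorOfMat()) // fill with captured pattern images
 using (Mat wrappedPhaseMap = new Mat())
 using (Mat shadowMask = new Mat())
 {
     pattern.ComputePhaseMap(patternImages, wrappedPhaseMap, shadowMask);
 }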
Example #10
 /// <summary>
 /// Finds the neighbors and predicts responses for input vectors.
 /// </summary>
 /// <param name="samples">Input samples stored by rows. It is a single-precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <param name="k">Number of used nearest neighbors. Should be greater than 1.</param>
 /// <param name="results">Vector with results of prediction (regression or classification) for each input sample. It is a single-precision floating-point vector with &lt;number_of_samples&gt; elements.</param>
 /// <param name="neighborResponses">Optional output values for corresponding neighbors. It is a single- precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <param name="dist">Optional output distances from the input vectors to the corresponding neighbors. It is a single-precision floating-point matrix of &lt;number_of_samples&gt; * k size.</param>
 /// <returns>If only a single input vector is passed, the predicted value is returned by the method.</returns>
 public float FindNearest(
     IInputArray samples,
     int k,
     IOutputArray results,
     IOutputArray neighborResponses = null,
     IOutputArray dist = null)
 {
     using (InputArray iaSamples = samples.GetInputArray())
         using (OutputArray oaResults = results.GetOutputArray())
             using (OutputArray oaNeighborResponses = neighborResponses == null ? OutputArray.GetEmpty() : neighborResponses.GetOutputArray())
                 using (OutputArray oaDist = dist == null ? OutputArray.GetEmpty() : dist.GetOutputArray())
                 {
                     return(MlInvoke.cveKNearestFindNearest(
                                _ptr,
                                iaSamples,
                                k,
                                oaResults,
                                oaNeighborResponses,
                                oaDist));
                 }
 }
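A short sketch, assuming Emgu.CV.ML's KNearest class and its Train extension method; the training data is a placeholder:

 using (KNearest knn = new KNearest())
 using (Mat trainData = new Mat(100, 2, DepthType.Cv32F, 1))   // placeholder features
 using (Mat trainLabels = new Mat(100, 1, DepthType.Cv32F, 1)) // placeholder responses
 using (Mat query = new Mat(1, 2, DepthType.Cv32F, 1))
 using (Mat results = new Mat())
 {
     knn.Train(trainData, DataLayoutType.Row, trainLabels);
     float response = knn.FindNearest(query, 3, results); // k = 3 nearest neighbors
 }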
Example #11
File: Detector.cs Project: zanker99/emgucv
 /// <summary>
 /// Detect objects by template matching. Matches globally at the lowest pyramid level, then refines locally stepping up the pyramid.
 /// </summary>
 /// <param name="sources">Source images, one for each modality.</param>
 /// <param name="threshold">Similarity threshold, a percentage between 0 and 100.</param>
 /// <param name="matches">Template matches, sorted by similarity score.</param>
 /// <param name="classIds">If non-empty, only search for the desired object classes.</param>
 /// <param name="quantizedImages">Optionally return vector&lt;Mat&gt; of quantized images.</param>
 /// <param name="masks">The masks for consideration during matching. The masks should be CV_8UC1 where 255 represents a valid pixel. If non-empty, the vector must be the same size as sources. Each element must be empty or the same size as its corresponding source.</param>
 public void Match(
     VectorOfMat sources,
     float threshold,
     VectorOfLinemodMatch matches,
     VectorOfCvString classIds            = null,
     IOutputArrayOfArrays quantizedImages = null,
     VectorOfMat masks = null)
 {
     using (OutputArray oaQuantizedImages =
                quantizedImages == null ? OutputArray.GetEmpty() : quantizedImages.GetOutputArray())
     {
         LinemodInvoke.cveLinemodDetectorMatch(
             _ptr,
             sources,
             threshold,
             matches,
             classIds,
             oaQuantizedImages,
             masks
             );
     }
 }
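A hedged sketch; constructing and populating the LINE-MOD detector with templates is elided here because it depends on the chosen modalities:

 // detector is a configured Emgu.CV.Linemod detector with registered templates
 using (VectorOfMat sources = new VectorOfMat())                   // one image per modality
 using (VectorOfLinemodMatch matches = new VectorOfLinemodMatch())
 {
     detector.Match(sources, 80.0f, matches); // keep matches of at least 80% similarity
 }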
Example #12
 /// <summary>
 /// Refine not detected markers based on the already detected and the board layout.
 /// </summary>
 /// <param name="image">Input image</param>
 /// <param name="board">Layout of markers in the board.</param>
 /// <param name="detectedCorners">Vector of already detected marker corners.</param>
 /// <param name="detectedIds">Vector of already detected marker identifiers.</param>
 /// <param name="rejectedCorners">Vector of rejected candidates during the marker detection process</param>
 /// <param name="cameraMatrix">Optional input 3x3 floating-point camera matrix </param>
 /// <param name="distCoeffs">Optional vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="minRepDistance">Minimum distance between the corners of the rejected candidate and the reprojected marker in order to consider it as a correspondence. (default 10)</param>
 /// <param name="errorCorrectionRate">Rate of allowed erroneous bits respect to the error correction capability of the used dictionary. -1 ignores the error correction step. (default 3)</param>
 /// <param name="checkAllOrders">Consider the four posible corner orders in the rejectedCorners array. If it set to false, only the provided corner order is considered (default true).</param>
 /// <param name="recoveredIdxs">Optional array to returns the indexes of the recovered candidates in the original rejectedCorners array.</param>
 /// <param name="parameters">marker detection parameters</param>
 public static void RefineDetectedMarkers(
     IInputArray image, IBoard board, IInputOutputArray detectedCorners,
     IInputOutputArray detectedIds, IInputOutputArray rejectedCorners,
     IInputArray cameraMatrix, IInputArray distCoeffs,
     float minRepDistance, float errorCorrectionRate,
     bool checkAllOrders,
     IOutputArray recoveredIdxs, DetectorParameters parameters)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputOutputArray ioaDetectedCorners = detectedCorners.GetInputOutputArray())
             using (InputOutputArray ioaDetectedIds = detectedIds.GetInputOutputArray())
                 using (InputOutputArray ioaRejectedCorners = rejectedCorners.GetInputOutputArray())
                     using (InputArray iaCameraMatrix = cameraMatrix == null ? InputArray.GetEmpty() : cameraMatrix.GetInputArray())
                         using (InputArray iaDistCoeffs = distCoeffs == null ? InputArray.GetEmpty() : distCoeffs.GetInputArray())
                              using (OutputArray oaRecoveredIdx = recoveredIdxs == null ? OutputArray.GetEmpty() : recoveredIdxs.GetOutputArray())
                              {
                                  cveArucoRefineDetectedMarkers(iaImage, board.BoardPtr, ioaDetectedCorners, ioaDetectedIds, ioaRejectedCorners,
                                                                iaCameraMatrix, iaDistCoeffs, minRepDistance, errorCorrectionRate, checkAllOrders, oaRecoveredIdx, ref parameters);
                             }
 }
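RefineDetectedMarkers is typically called straight after marker detection, reusing its outputs. A sketch, assuming image, board, corners, ids and rejected came from a prior DetectMarkers pass (see Example #14 below), with the documented default tuning values:

 DetectorParameters p = DetectorParameters.GetDefault();
 ArucoInvoke.RefineDetectedMarkers(
     image, board, corners, ids, rejected,
     null, null,      // no camera matrix / distortion coefficients
     10f, 3f, true,   // minRepDistance, errorCorrectionRate, checkAllOrders defaults
     null, p);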
Example #13
 /// <summary>
 /// Calculates Optical Flow using NVIDIA Optical Flow SDK.
 /// NVIDIA GPUs starting with Turing contain a dedicated hardware accelerator for computing optical flow vectors between pairs of images.
 /// The optical flow hardware accelerator generates block-based optical flow vectors.
 /// The size of the block depends on hardware in use, and can be queried using the function getGridSize().
 /// The block-based flow vectors generated by the hardware can be converted to dense representation (i.e. per-pixel flow vectors) using the upSampler() helper function, if needed.
 /// The flow vectors are stored in CV_16SC2 format with x and y components of each flow vector in 16-bit signed fixed point representation S10.5.
 /// </summary>
 /// <param name="nvidiaOpticalFlow">The nvidia optical flow object</param>
 /// <param name="inputImage">Input image</param>
 /// <param name="referenceImage">Reference image of the same size and the same type as input image.</param>
 /// <param name="flow">A buffer consisting of inputImage.Size() / getGridSize() flow vectors in CV_16SC2 format.</param>
 /// <param name="stream">Stream for the asynchronous version.</param>
 /// <param name="hint">Hint buffer if client provides external hints. Must have same size as flow buffer. Caller can provide flow vectors as hints for optical flow calculation.</param>
 /// <param name="cost">Cost buffer contains numbers indicating the confidence associated with each of the generated flow vectors. Higher the cost, lower the confidence. Cost buffer is of type CV_32SC1.</param>
 public static void Calc(
     this INvidiaOpticalFlow nvidiaOpticalFlow,
     IInputArray inputImage,
     IInputArray referenceImage,
     IInputOutputArray flow,
     Stream stream     = null,
     IInputArray hint  = null,
     IOutputArray cost = null)
 {
     using (InputArray iaInputImage = inputImage.GetInputArray())
         using (InputArray iaReferenceImage = referenceImage.GetInputArray())
             using (InputOutputArray ioaFlow = flow.GetInputOutputArray())
                 using (InputArray iaHint = (hint == null ? InputArray.GetEmpty() : hint.GetInputArray()))
                     using (OutputArray oaCost = (cost == null ? OutputArray.GetEmpty() : cost.GetOutputArray()))
                         cudaNvidiaOpticalFlowCalc(
                             nvidiaOpticalFlow.NvidiaOpticalFlowPtr,
                             iaInputImage,
                             iaReferenceImage,
                             ioaFlow,
                             (stream == null) ? IntPtr.Zero : stream.Ptr,
                             iaHint,
                             oaCost);
 }
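A sketch for the NVIDIA hardware flow; it assumes Emgu's NvidiaOpticalFlow_1_0 wrapper and Turing-or-later hardware, with placeholder file names (the constructor arguments shown are illustrative):

 using (NvidiaOpticalFlow_1_0 nvof = new NvidiaOpticalFlow_1_0(new Size(640, 480)))
 using (Mat frame0 = CvInvoke.Imread("frame0.png", ImreadModes.Grayscale))
 using (Mat frame1 = CvInvoke.Imread("frame1.png", ImreadModes.Grayscale))
 using (GpuMat flow = new GpuMat())
 {
     nvof.Calc(frame0, frame1, flow); // block-based CV_16SC2 flow vectors
 }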
Example #14
 /// <summary>
 /// Performs marker detection in the input image. Only markers included in the specific dictionary are searched. For each detected marker, it returns the 2D position of its corner in the image and its corresponding identifier. Note that this function does not perform pose estimation.
 /// </summary>
 /// <param name="image">input image</param>
 /// <param name="dict">indicates the type of markers that will be searched</param>
 /// <param name="corners">	vector of detected marker corners. For each marker, its four corners are provided, (e.g VectorOfVectorOfPointF ). For N detected markers, the dimensions of this array is Nx4. The order of the corners is clockwise.</param>
 /// <param name="ids">vector of identifiers of the detected markers. The identifier is of type int (e.g. VectorOfInt). For N detected markers, the size of ids is also N. The identifiers have the same order than the markers in the imgPoints array.</param>
 /// <param name="parameters">marker detection parameters</param>
 /// <param name="rejectedImgPoints">contains the imgPoints of those squares whose inner code has not a correct codification. Useful for debugging purposes.</param>
 public static void DetectMarkers(
     IInputArray image, Dictionary dict, IOutputArrayOfArrays corners,
     IOutputArray ids, DetectorParameters parameters,
     IOutputArrayOfArrays rejectedImgPoints = null
     )
 {
     using (InputArray iaImage = image.GetInputArray())
         using (OutputArray oaCorners = corners.GetOutputArray())
             using (OutputArray oaIds = ids.GetOutputArray())
                 using (OutputArray oaRejectedImgPoints = rejectedImgPoints != null ? rejectedImgPoints.GetOutputArray() : OutputArray.GetEmpty())
                 {
                     cveArucoDetectMarkers(iaImage, dict, oaCorners, oaIds, ref parameters, oaRejectedImgPoints);
                 }
 }
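A minimal end-to-end sketch (the file name and dictionary choice are placeholders):

 using (Mat image = CvInvoke.Imread("markers.png"))
 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
 using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
 using (VectorOfInt ids = new VectorOfInt())
 {
     DetectorParameters p = DetectorParameters.GetDefault();
     ArucoInvoke.DetectMarkers(image, dict, corners, ids, p);
     // corners.Size == ids.Size == number of detected markers
 }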
Example #15
 /// <summary>
 /// Calibrate a camera using Charuco corners.
 /// </summary>
 /// <param name="charucoCorners">Vector of detected charuco corners per frame</param>
 /// <param name="charucoIds">List of identifiers for each corner in charucoCorners per frame</param>
 /// <param name="board">Marker Board layout</param>
 /// <param name="imageSize">Size of the image used only to initialize the intrinsic camera matrix.</param>
 /// <param name="cameraMatrix">Output 3x3 floating-point camera matrix. </param>
 /// <param name="distCoeffs">Output vector of distortion coefficients (k1,k2,p1,p2[,k3[,k4,k5,k6],[s1,s2,s3,s4]]) of 4, 5, 8 or 12 elements</param>
 /// <param name="rvecs">Output vector of rotation vectors (see Rodrigues ) estimated for each board view (e.g. std::vector&lt;cv::Mat&gt;). That is, each k-th rotation vector together with the corresponding k-th translation vector (see the next output parameter description) brings the board pattern from the model coordinate space (in which object points are specified) to the world coordinate space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).</param>
 /// <param name="tvecs">Output vector of translation vectors estimated for each pattern view.</param>
 /// <param name="stdDeviationsIntrinsics">Output vector of standard deviations estimated for intrinsic parameters. Order of deviations values: (fx,fy,cx,cy,k1,k2,p1,p2,k3,k4,k5,k6,s1,s2,s3,s4,τx,τy) If one of parameters is not estimated, it's deviation is equals to zero.</param>
 /// <param name="stdDeviationsExtrinsics">Output vector of standard deviations estimated for extrinsic parameters. Order of deviations values: (R1,T1,…,RM,TM) where M is number of pattern views, Ri,Ti are concatenated 1x3 vectors.</param>
 /// <param name="perViewErrors">Output vector of average re-projection errors estimated for each pattern view.</param>
 /// <param name="flags">Flags Different flags for the calibration process</param>
 /// <param name="criteria">Termination criteria for the iterative optimization algorithm.</param>
 /// <returns>The final re-projection error.</returns>
 public static double CalibrateCameraCharuco(
     IInputArrayOfArrays charucoCorners,
     IInputArrayOfArrays charucoIds,
     CharucoBoard board,
     Size imageSize,
     IInputOutputArray cameraMatrix,
     IInputOutputArray distCoeffs,
     IOutputArray rvecs,
     IOutputArray tvecs,
     IOutputArray stdDeviationsIntrinsics,
     IOutputArray stdDeviationsExtrinsics,
     IOutputArray perViewErrors,
     CalibType flags,
     MCvTermCriteria criteria)
 {
     using (InputArray iaCharucoCorners = charucoCorners.GetInputArray())
         using (InputArray iaCharucoIds = charucoIds.GetInputArray())
             using (InputOutputArray ioaCameraMatrix = cameraMatrix.GetInputOutputArray())
                 using (InputOutputArray ioaDistCoeffs = distCoeffs.GetInputOutputArray())
                     using (OutputArray oaRvecs = rvecs == null ? OutputArray.GetEmpty() : rvecs.GetOutputArray())
                         using (OutputArray oaTvecs = tvecs == null ? OutputArray.GetEmpty() : tvecs.GetOutputArray())
                             using (OutputArray oaStdDeviationsIntrinsics = stdDeviationsIntrinsics == null ? OutputArray.GetEmpty() : stdDeviationsIntrinsics.GetOutputArray())
                                 using (OutputArray oaStdDeviationsExtrinsics = stdDeviationsExtrinsics == null ? OutputArray.GetEmpty() : stdDeviationsExtrinsics.GetOutputArray())
                                     using (OutputArray oaPerViewErrors = perViewErrors == null ? OutputArray.GetEmpty() : perViewErrors.GetOutputArray())
                                     {
                                         return(cveArucoCalibrateCameraCharuco(
                                                    iaCharucoCorners, iaCharucoIds, board.BoardPtr, ref imageSize,
                                                    ioaCameraMatrix, ioaDistCoeffs, oaRvecs, oaTvecs,
                                                    oaStdDeviationsIntrinsics, oaStdDeviationsExtrinsics, oaPerViewErrors,
                                                    flags, ref criteria));
                                     }
 }
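A usage sketch mirroring the aruco variant; the CharucoBoard geometry and image size are illustrative, and interpolating the charuco corners per frame is elided:

 using (Dictionary dict = new Dictionary(Dictionary.PredefinedDictionaryName.Dict4X4_50))
 using (CharucoBoard board = new CharucoBoard(5, 7, 80, 50, dict))
 using (VectorOfVectorOfPointF allCharucoCorners = new VectorOfVectorOfPointF())
 using (VectorOfVectorOfInt allCharucoIds = new VectorOfVectorOfInt())
 using (Mat cameraMatrix = new Mat())
 using (Mat distCoeffs = new Mat())
 {
     // ... interpolate charuco corners for every frame and append them here ...
     double err = ArucoInvoke.CalibrateCameraCharuco(
         allCharucoCorners, allCharucoIds, board, new Size(640, 480),
         cameraMatrix, distCoeffs, null, null, null, null, null,
         CalibType.Default, new MCvTermCriteria(30, 1e-3));
 }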