//javadoc: calibrate(objectPoints, imagePoints, size, K, xi, D, rvecs, tvecs, flags, criteria)
public static double calibrate(List<Mat> objectPoints, List<Mat> imagePoints, Size size, Mat K, Mat xi, Mat D, List<Mat> rvecs, List<Mat> tvecs, int flags, TermCriteria criteria)
{
    // Reject already-disposed native handles before touching nativeObj.
    if (K != null) K.ThrowIfDisposed();
    if (xi != null) xi.ThrowIfDisposed();
    if (D != null) D.ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Pack the managed lists into single Mats for the native call.
    Mat objectPointsMat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePointsMat = Converters.vector_Mat_to_Mat(imagePoints);
    // Output containers the native side fills in.
    Mat rvecsMat = new Mat();
    Mat tvecsMat = new Mat();
    double rms = ccalib_Ccalib_calibrate_11(objectPointsMat.nativeObj, imagePointsMat.nativeObj, size.width, size.height, K.nativeObj, xi.nativeObj, D.nativeObj, rvecsMat.nativeObj, tvecsMat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon);
    // Copy the outputs back into the caller's lists, then free the temporaries.
    Converters.Mat_to_vector_Mat(rvecsMat, rvecs);
    rvecsMat.release();
    Converters.Mat_to_vector_Mat(tvecsMat, tvecs);
    tvecsMat.release();
    return rms;
#else
    return -1;
#endif
}
//
// C++: double cv::omnidir::stereoCalibrate(vector_Mat& objectPoints, vector_Mat& imagePoints1, vector_Mat& imagePoints2, Size imageSize1, Size imageSize2, Mat& K1, Mat& xi1, Mat& D1, Mat& K2, Mat& xi2, Mat& D2, Mat& rvec, Mat& tvec, vector_Mat& rvecsL, vector_Mat& tvecsL, int flags, TermCriteria criteria, Mat& idx = Mat())
//
//javadoc: stereoCalibrate(objectPoints, imagePoints1, imagePoints2, imageSize1, imageSize2, K1, xi1, D1, K2, xi2, D2, rvec, tvec, rvecsL, tvecsL, flags, criteria, idx)
public static double stereoCalibrate(List<Mat> objectPoints, List<Mat> imagePoints1, List<Mat> imagePoints2, Size imageSize1, Size imageSize2, Mat K1, Mat xi1, Mat D1, Mat K2, Mat xi2, Mat D2, Mat rvec, Mat tvec, List<Mat> rvecsL, List<Mat> tvecsL, int flags, TermCriteria criteria, Mat idx)
{
    // Guard every native Mat handle against use-after-dispose.
    if (K1 != null) K1.ThrowIfDisposed();
    if (xi1 != null) xi1.ThrowIfDisposed();
    if (D1 != null) D1.ThrowIfDisposed();
    if (K2 != null) K2.ThrowIfDisposed();
    if (xi2 != null) xi2.ThrowIfDisposed();
    if (D2 != null) D2.ThrowIfDisposed();
    if (rvec != null) rvec.ThrowIfDisposed();
    if (tvec != null) tvec.ThrowIfDisposed();
    if (idx != null) idx.ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Marshal the managed lists into single Mats for the native call.
    Mat objectPointsMat = Converters.vector_Mat_to_Mat(objectPoints);
    Mat imagePoints1Mat = Converters.vector_Mat_to_Mat(imagePoints1);
    Mat imagePoints2Mat = Converters.vector_Mat_to_Mat(imagePoints2);
    Mat rvecsLMat = new Mat();
    Mat tvecsLMat = new Mat();
    double rms = ccalib_Ccalib_stereoCalibrate_10(objectPointsMat.nativeObj, imagePoints1Mat.nativeObj, imagePoints2Mat.nativeObj, imageSize1.width, imageSize1.height, imageSize2.width, imageSize2.height, K1.nativeObj, xi1.nativeObj, D1.nativeObj, K2.nativeObj, xi2.nativeObj, D2.nativeObj, rvec.nativeObj, tvec.nativeObj, rvecsLMat.nativeObj, tvecsLMat.nativeObj, flags, criteria.type, criteria.maxCount, criteria.epsilon, idx.nativeObj);
    // The point vectors are in/out parameters on the C++ side, so copy
    // every one of them back before releasing the temporaries.
    Converters.Mat_to_vector_Mat(objectPointsMat, objectPoints);
    objectPointsMat.release();
    Converters.Mat_to_vector_Mat(imagePoints1Mat, imagePoints1);
    imagePoints1Mat.release();
    Converters.Mat_to_vector_Mat(imagePoints2Mat, imagePoints2);
    imagePoints2Mat.release();
    Converters.Mat_to_vector_Mat(rvecsLMat, rvecsL);
    rvecsLMat.release();
    Converters.Mat_to_vector_Mat(tvecsLMat, tvecsL);
    tvecsLMat.release();
    return rms;
#else
    return -1;
#endif
}
//
// C++: vector_Mat cv::face::LBPHFaceRecognizer::getHistograms()
//
//javadoc: LBPHFaceRecognizer::getHistograms()
public List<Mat> getHistograms()
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Unpack the native vector<Mat> return value into a managed list.
    List<Mat> histograms = new List<Mat>();
    Mat packed = new Mat(face_LBPHFaceRecognizer_getHistograms_10(nativeObj));
    Converters.Mat_to_vector_Mat(packed, histograms);
    return histograms;
#else
    return null;
#endif
}
//
// C++: vector_Mat getProjections()
//
//javadoc: BasicFaceRecognizer::getProjections()
public List<Mat> getProjections()
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
    // Unpack the native vector<Mat> return value into a managed list.
    List<Mat> projections = new List<Mat>();
    Mat packed = new Mat(face_BasicFaceRecognizer_getProjections_10(nativeObj));
    Converters.Mat_to_vector_Mat(packed, projections);
    return projections;
#else
    return null;
#endif
}
//javadoc: imreadmulti(filename, mats)
public static bool imreadmulti(string filename, List<Mat> mats)
{
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // The native side fills one packed Mat with all pages of the image file.
    Mat pagesMat = new Mat();
    bool loaded = imgcodecs_Imgcodecs_imreadmulti_11(filename, pagesMat.nativeObj);
    Converters.Mat_to_vector_Mat(pagesMat, mats);
    pagesMat.release();
    return loaded;
#else
    return false;
#endif
}
//
// C++: vector_Mat getTrainDescriptors()
//
//javadoc: javaDescriptorMatcher::getTrainDescriptors()
public List<Mat> getTrainDescriptors()
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
    // Unpack the native vector<Mat> return value into a managed list.
    List<Mat> descriptors = new List<Mat>();
    Mat packed = new Mat(features2d_DescriptorMatcher_getTrainDescriptors_10(nativeObj));
    Converters.Mat_to_vector_Mat(packed, descriptors);
    return descriptors;
#else
    return null;
#endif
}
//
// C++: void getCovs(vector_Mat& covs)
//
//javadoc: EM::getCovs(covs)
public void getCovs(List<Mat> covs)
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
    // Native side writes the covariance matrices into one packed Mat.
    Mat covsMat = new Mat();
    ml_EM_getCovs_10(nativeObj, covsMat.nativeObj);
    Converters.Mat_to_vector_Mat(covsMat, covs);
    covsMat.release();
#endif
}
//
// C++: bool cv::structured_light::StructuredLightPattern::decode(vector_vector_Mat patternImages, Mat& disparityMap, vector_Mat blackImages = vector_Mat(), vector_Mat whiteImages = vector_Mat(), int flags = DECODE_3D_UNDERWORLD)
//
// Unknown type 'vector_vector_Mat' (I), skipping the function
//
// C++: bool cv::structured_light::StructuredLightPattern::generate(vector_Mat& patternImages)
//
//javadoc: StructuredLightPattern::generate(patternImages)
public bool generate(List<Mat> patternImages)
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Native side fills one packed Mat with the generated pattern images.
    Mat patternsMat = new Mat();
    bool generated = structured_1light_StructuredLightPattern_generate_10(nativeObj, patternsMat.nativeObj);
    Converters.Mat_to_vector_Mat(patternsMat, patternImages);
    patternsMat.release();
    return generated;
#else
    return false;
#endif
}
//
// C++: vector_Mat cv::dnn::Layer::finalize(vector_Mat inputs)
//
//javadoc: Layer::finalize(inputs)
public List<Mat> finalize(List<Mat> inputs)
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Pack the inputs for the native call, then unpack the returned vector<Mat>.
    Mat inputsMat = Converters.vector_Mat_to_Mat(inputs);
    List<Mat> outputs = new List<Mat>();
    Mat packed = new Mat(dnn_Layer_finalize_10(nativeObj, inputsMat.nativeObj));
    Converters.Mat_to_vector_Mat(packed, outputs);
    return outputs;
#else
    return null;
#endif
}
//
// C++: vector_Mat Algorithm::getMatVector(string name)
//
public List<Mat> getMatVector(string name)
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
    // Unpack the native vector<Mat> parameter value into a managed list.
    List<Mat> mats = new List<Mat>();
    Mat packed = new Mat(core_Algorithm_getMatVector_10(nativeObj, name));
    Converters.Mat_to_vector_Mat(packed, mats);
    return mats;
#else
    return null;
#endif
}
//javadoc: Net::forward(outputBlobs)
public void forward(List<Mat> outputBlobs)
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Native side writes all output blobs into one packed Mat.
    Mat blobsMat = new Mat();
    dnn_Net_forward_13(nativeObj, blobsMat.nativeObj);
    Converters.Mat_to_vector_Mat(blobsMat, outputBlobs);
    blobsMat.release();
#endif
}
//
// C++: void cv::dnn::Layer::finalize(vector_Mat inputs, vector_Mat& outputs)
//
//javadoc: Layer::finalize(inputs, outputs)
public void finalize(List<Mat> inputs, List<Mat> outputs)
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Pack inputs for the native call; outputs come back in a packed Mat.
    Mat inputsMat = Converters.vector_Mat_to_Mat(inputs);
    Mat outputsMat = new Mat();
    dnn_Layer_finalize_11(nativeObj, inputsMat.nativeObj, outputsMat.nativeObj);
    Converters.Mat_to_vector_Mat(outputsMat, outputs);
    outputsMat.release();
#endif
}
//
// C++: void cv::dnn::imagesFromBlob(Mat blob_, vector_Mat& images_)
//
//javadoc: imagesFromBlob(blob_, images_)
public static void imagesFromBlob(Mat blob_, List<Mat> images_)
{
    // Guard against a disposed native handle before dereferencing nativeObj.
    if (blob_ != null) blob_.ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Native side splits the blob into images inside one packed Mat.
    Mat imagesMat = new Mat();
    dnn_Dnn_imagesFromBlob_10(blob_.nativeObj, imagesMat.nativeObj);
    Converters.Mat_to_vector_Mat(imagesMat, images_);
    imagesMat.release();
#endif
}
//javadoc: buildOpticalFlowPyramid(img, pyramid, winSize, maxLevel)
public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel)
{
    // Guard against a disposed native handle before dereferencing nativeObj.
    if (img != null) img.ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Native side writes the pyramid levels into one packed Mat.
    Mat pyramidMat = new Mat();
    int levels = video_Video_buildOpticalFlowPyramid_11(img.nativeObj, pyramidMat.nativeObj, winSize.width, winSize.height, maxLevel);
    Converters.Mat_to_vector_Mat(pyramidMat, pyramid);
    pyramidMat.release();
    return levels;
#else
    return -1;
#endif
}
//javadoc: computeNMChannels(_src, _channels)
public static void computeNMChannels(Mat _src, List<Mat> _channels)
{
    // Guard against a disposed native handle before dereferencing nativeObj.
    if (_src != null) _src.ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Native side writes the computed channels into one packed Mat.
    Mat channelsMat = new Mat();
    text_Text_computeNMChannels_11(_src.nativeObj, channelsMat.nativeObj);
    Converters.Mat_to_vector_Mat(channelsMat, _channels);
    channelsMat.release();
#endif
}
//
// C++: int buildOpticalFlowPyramid(Mat img, vector_Mat& pyramid, Size winSize, int maxLevel, bool withDerivatives = true, int pyrBorder = BORDER_REFLECT_101, int derivBorder = BORDER_CONSTANT, bool tryReuseInputImage = true)
//
//javadoc: buildOpticalFlowPyramid(img, pyramid, winSize, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage)
public static int buildOpticalFlowPyramid(Mat img, List<Mat> pyramid, Size winSize, int maxLevel, bool withDerivatives, int pyrBorder, int derivBorder, bool tryReuseInputImage)
{
    // Guard against a disposed native handle before dereferencing nativeObj.
    if (img != null)
    {
        img.ThrowIfDisposed();
    }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
    // Native side writes the pyramid levels into one packed Mat.
    Mat pyramid_mat = new Mat();
    int retVal = video_Video_buildOpticalFlowPyramid_10(img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel, withDerivatives, pyrBorder, derivBorder, tryReuseInputImage);
    Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
    // FIX: release the temporary packed Mat. The 4-argument overload releases
    // it after unpacking, but this overload leaked the native allocation until
    // the Mat finalizer eventually ran.
    pyramid_mat.release();
    return (retVal);
#else
    return (-1);
#endif
}
//
// C++: void javaDescriptorExtractor::compute(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat& descriptors)
//
/**
 * <p>Computes the descriptors for a set of keypoints detected in an image set.</p>
 *
 * @param images Image set.
 * @param keypoints Input collection of keypoints. Keypoints for which a
 * descriptor cannot be computed are removed and the remaining ones may be
 * reordered. Sometimes new keypoints can be added, for example:
 * <code>SIFT</code> duplicates a keypoint with several dominant orientations
 * (for each orientation).
 * @param descriptors Computed descriptors. <code>descriptors[i]</code> holds
 * the descriptors computed for <code>keypoints[i]</code>; row <code>j</code>
 * is the descriptor for the <code>j</code>-th keypoint.
 *
 * @see <a href="http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html#descriptorextractor-compute">org.opencv.features2d.DescriptorExtractor.compute</a>
 */
public void compute(List<Mat> images, List<MatOfKeyPoint> keypoints, List<Mat> descriptors)
{
    ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
    // Pack the images and the nested keypoint lists for the native call.
    Mat imagesMat = Converters.vector_Mat_to_Mat(images);
    List<Mat> keypointBuffers = new List<Mat>((keypoints != null) ? keypoints.Count : 0);
    Mat keypointsMat = Converters.vector_vector_KeyPoint_to_Mat(keypoints, keypointBuffers);
    Mat descriptorsMat = new Mat();
    features2d_DescriptorExtractor_compute_11(nativeObj, imagesMat.nativeObj, keypointsMat.nativeObj, descriptorsMat.nativeObj);
    // Keypoints may be filtered/reordered natively, so copy both outputs back.
    Converters.Mat_to_vector_vector_KeyPoint(keypointsMat, keypoints);
    Converters.Mat_to_vector_Mat(descriptorsMat, descriptors);
#endif
}
//
// C++: void cv::structured_light::SinusoidalPattern::findProCamMatches(Mat projUnwrappedPhaseMap, Mat camUnwrappedPhaseMap, vector_Mat& matches)
//
//javadoc: SinusoidalPattern::findProCamMatches(projUnwrappedPhaseMap, camUnwrappedPhaseMap, matches)
public void findProCamMatches(Mat projUnwrappedPhaseMap, Mat camUnwrappedPhaseMap, List<Mat> matches)
{
    ThrowIfDisposed();
    // Guard both input handles against use-after-dispose.
    if (projUnwrappedPhaseMap != null) projUnwrappedPhaseMap.ThrowIfDisposed();
    if (camUnwrappedPhaseMap != null) camUnwrappedPhaseMap.ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
    // Native side writes the matches into one packed Mat.
    Mat matchesMat = new Mat();
    structured_1light_SinusoidalPattern_findProCamMatches_10(nativeObj, projUnwrappedPhaseMap.nativeObj, camUnwrappedPhaseMap.nativeObj, matchesMat.nativeObj);
    Converters.Mat_to_vector_Mat(matchesMat, matches);
    matchesMat.release();
#endif
}