예제 #1
0
        //
        // C++:  Mat cv::dnn::blobFromImages(vector_Mat images, double scalefactor = 1.0, Size size = Size(), Scalar mean = Scalar(), bool swapRB = true, bool crop = true)
        //

        //javadoc: blobFromImages(images, scalefactor, size, mean, swapRB, crop)
        public static Mat blobFromImages(List <Mat> images, double scalefactor, Size size, Scalar mean, bool swapRB, bool crop)
        {
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Flatten the managed list into a single container Mat so it can cross the interop boundary.
            var imagesPacked = Converters.vector_Mat_to_Mat(images);
            // Wrap the native pointer returned by the extern call in a managed Mat.
            return new Mat(dnn_Dnn_blobFromImages_10(imagesPacked.nativeObj, scalefactor, size.width, size.height, mean.val[0], mean.val[1], mean.val[2], mean.val[3], swapRB, crop));
#else
            return null;
#endif
        }
예제 #2
0
        //javadoc: blobFromImages(images, scalefactor, size)
        public static Mat blobFromImages(List <Mat> images, double scalefactor, Size size)
        {
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Pack the image list into one native Mat for the interop call.
            var packed  = Converters.vector_Mat_to_Mat(images);
            var blobPtr = dnn_Dnn_blobFromImages_13(packed.nativeObj, scalefactor, size.width, size.height);
            return new Mat(blobPtr);
#else
            return null;
#endif
        }
예제 #3
0
        //javadoc: imreadmulti(filename, mats)
        public static bool imreadmulti(string filename, List <Mat> mats)
        {
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // The page list is marshalled to native code through a single container Mat.
            var pagesPacked = Converters.vector_Mat_to_Mat(mats);
            return imgcodecs_Imgcodecs_imreadmulti_11(filename, pagesPacked.nativeObj);
#else
            return false;
#endif
        }
예제 #4
0
        //javadoc: blobFromImages(images)
        public static Mat blobFromImages(List <Mat> images)
        {
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Default-parameter overload: only the packed image list crosses into native code.
            var packed = Converters.vector_Mat_to_Mat(images);
            return new Mat(dnn_Dnn_blobFromImages_15(packed.nativeObj));
#else
            return null;
#endif
        }
예제 #5
0
        //
        // C++:  void add(vector_Mat descriptors)
        //

        //javadoc: javaDescriptorMatcher::add(descriptors)
        public void add(List <Mat> descriptors)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // Hand the whole descriptor list to native code as one packed Mat.
            var packed = Converters.vector_Mat_to_Mat(descriptors);
            features2d_DescriptorMatcher_add_10(nativeObj, packed.nativeObj);
#endif
        }
예제 #6
0
        //
        // C++:  void process(vector_Mat src, Mat& dst)
        //

        //javadoc: MergeMertens::process(src, dst)
        public void process(List <Mat> src, Mat dst)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // The exposure stack crosses the interop boundary as one packed Mat; the result lands in dst.
            var srcPacked = Converters.vector_Mat_to_Mat(src);
            photo_MergeMertens_process_11(nativeObj, srcPacked.nativeObj, dst.nativeObj);
#endif
        }
예제 #7
0
        //
        // C++: void Layer::blobs
        //

        //javadoc: Layer::set_blobs(blobs)
        public void set_blobs(List <Mat> blobs)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Property setter: marshal the blob list as a single container Mat.
            var blobsPacked = Converters.vector_Mat_to_Mat(blobs);
            dnn_Layer_set_1blobs_10(nativeObj, blobsPacked.nativeObj);
#endif
        }
예제 #8
0
        //
        // C++:  bool getProjPixel(vector_Mat patternImages, int x, int y, Point projPix)
        //

        //javadoc: GrayCodePattern::getProjPixel(patternImages, x, y, projPix)
        public bool getProjPixel(List <Mat> patternImages, int x, int y, Point projPix)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Pattern images are packed into one Mat; the projector pixel is passed as its components.
            var patternsPacked = Converters.vector_Mat_to_Mat(patternImages);
            return structured_1light_GrayCodePattern_getProjPixel_10(nativeObj, patternsPacked.nativeObj, x, y, projPix.x, projPix.y);
#else
            return false;
#endif
        }
예제 #9
0
        //
        // C++:  void cv::face::MACE::train(vector_Mat images)
        //

        //javadoc: MACE::train(images)
        public void train(List <Mat> images)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Marshal the training images as one packed Mat for the native trainer.
            var imagesPacked = Converters.vector_Mat_to_Mat(images);
            face_MACE_train_10(nativeObj, imagesPacked.nativeObj);
#endif
        }
예제 #10
0
        //
        // C++:  void computeSignatures(vector_Mat images, vector_Mat signatures)
        //

        //javadoc: PCTSignatures::computeSignatures(images, signatures)
        public void computeSignatures(List <Mat> images, List <Mat> signatures)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Both Mat lists are packed into single container Mats for the interop call.
            var imagesPacked     = Converters.vector_Mat_to_Mat(images);
            var signaturesPacked = Converters.vector_Mat_to_Mat(signatures);
            xfeatures2d_PCTSignatures_computeSignatures_10(nativeObj, imagesPacked.nativeObj, signaturesPacked.nativeObj);
#endif
        }
예제 #11
0
        //
        // C++:  void process(vector_Mat src, vector_Mat dst, Mat times, Mat response)
        //

        //javadoc: AlignExposures::process(src, dst, times, response)
        public virtual void process(List <Mat> src, List <Mat> dst, Mat times, Mat response)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // Source and destination stacks are each packed into one Mat before the native call.
            var srcPacked = Converters.vector_Mat_to_Mat(src);
            var dstPacked = Converters.vector_Mat_to_Mat(dst);
            photo_AlignExposures_process_10(nativeObj, srcPacked.nativeObj, dstPacked.nativeObj, times.nativeObj, response.nativeObj);
#endif
        }
예제 #12
0
        //
        // C++:  vector_Mat cv::dnn::Layer::finalize(vector_Mat inputs)
        //

        //javadoc: Layer::finalize(inputs)
        public List <Mat> finalize(List <Mat> inputs)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Pack the input list into one container Mat for the interop call.
            Mat        inputs_mat = Converters.vector_Mat_to_Mat(inputs);
            List <Mat> retVal     = new List <Mat>();
            // The native call returns a container Mat holding the finalized outputs.
            Mat        retValMat  = new Mat(dnn_Layer_finalize_10(nativeObj, inputs_mat.nativeObj));
            Converters.Mat_to_vector_Mat(retValMat, retVal);
            // Release the temporary container once its contents are unpacked into
            // retVal, matching the finalize(inputs, outputs) overload, instead of
            // waiting for the finalizer to free the native memory.
            retValMat.release();
            return(retVal);
#else
            return(null);
#endif
        }
예제 #13
0
        //javadoc: javaFeatureDetector::detect(images, keypoints)
        public void detect(List <Mat> images, List <MatOfKeyPoint> keypoints)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // Pack the image list; keypoints come back through a temporary container Mat.
            Mat images_mat    = Converters.vector_Mat_to_Mat(images);
            Mat keypoints_mat = new Mat();
            features2d_FeatureDetector_detect_13(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj);
            Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints);
            // Release the temporary container once converted — the Feature2D.detect
            // wrapper does the same — rather than leaving it to the finalizer.
            keypoints_mat.release();
            return;
#else
            return;
#endif
        }
예제 #14
0
        //
        // C++:  void Algorithm::setMatVector(string name, vector_Mat value)
        //

        public void setMatVector(string name, List <Mat> value)
        {
            ThrowIfDisposed();

#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // Marshal the Mat list into one container Mat, keyed by the parameter name.
            var valuePacked = Converters.vector_Mat_to_Mat(value);
            core_Algorithm_setMatVector_10(nativeObj, name, valuePacked.nativeObj);
#endif
        }
예제 #15
0
        //
        // C++:  void process(vector_Mat src, vector_Mat dst)
        //

        //javadoc: AlignMTB::process(src, dst)
        public void process(List <Mat> src, List <Mat> dst)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Both image stacks are packed into single container Mats before the native call.
            var srcPacked = Converters.vector_Mat_to_Mat(src);
            var dstPacked = Converters.vector_Mat_to_Mat(dst);
            photo_AlignMTB_process_11(nativeObj, srcPacked.nativeObj, dstPacked.nativeObj);
#endif
        }
예제 #16
0
        //
        // C++:  void cv::dnn::Layer::finalize(vector_Mat inputs, vector_Mat& outputs)
        //

        //javadoc: Layer::finalize(inputs, outputs)
        public void finalize(List <Mat> inputs, List <Mat> outputs)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Inputs are packed into one Mat; outputs come back through a temporary container.
            var inputsPacked = Converters.vector_Mat_to_Mat(inputs);
            var outputsMat   = new Mat();
            dnn_Layer_finalize_11(nativeObj, inputsPacked.nativeObj, outputsMat.nativeObj);
            // Unpack into the caller's list, then free the temporary container immediately.
            Converters.Mat_to_vector_Mat(outputsMat, outputs);
            outputsMat.release();
#endif
        }
예제 #17
0
        //javadoc: denoise_TVL1(observations, result)
        public static void denoise_TVL1(List <Mat> observations, Mat result)
        {
            if (result != null)
            {
                result.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // The observation stack crosses into native code as one packed Mat.
            var obsPacked = Converters.vector_Mat_to_Mat(observations);
            photo_Photo_denoise_1TVL1_11(obsPacked.nativeObj, result.nativeObj);
#endif
        }
예제 #18
0
        //javadoc: fastNlMeansDenoisingMulti(srcImgs, dst, imgToDenoiseIndex, temporalWindowSize)
        public static void fastNlMeansDenoisingMulti(List <Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize)
        {
            if (dst != null)
            {
                dst.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // The frame sequence is packed into one container Mat for the native call.
            var framesPacked = Converters.vector_Mat_to_Mat(srcImgs);
            photo_Photo_fastNlMeansDenoisingMulti_11(framesPacked.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize);
#endif
        }
예제 #19
0
        //
        // C++:  void cv::Feature2D::detect(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat masks = vector_Mat())
        //

        //javadoc: Feature2D::detect(images, keypoints, masks)
        public void detect(List <Mat> images, List <MatOfKeyPoint> keypoints, List <Mat> masks)
        {
            ThrowIfDisposed();
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Pack both Mat lists; detected keypoints come back through a temporary container Mat.
            var imagesPacked = Converters.vector_Mat_to_Mat(images);
            var masksPacked  = Converters.vector_Mat_to_Mat(masks);
            var keypointsMat = new Mat();
            features2d_Feature2D_detect_12(nativeObj, imagesPacked.nativeObj, keypointsMat.nativeObj, masksPacked.nativeObj);
            // Unpack into the caller's list and free the temporary container right away.
            Converters.Mat_to_vector_vector_KeyPoint(keypointsMat, keypoints);
            keypointsMat.release();
#endif
        }
예제 #20
0
        //javadoc: SinusoidalPattern::computePhaseMap(patternImages, wrappedPhaseMap)
        public void computePhaseMap(List <Mat> patternImages, Mat wrappedPhaseMap)
        {
            ThrowIfDisposed();
            if (wrappedPhaseMap != null)
            {
                wrappedPhaseMap.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Pattern images are packed into one container Mat; the result lands in wrappedPhaseMap.
            var patternsPacked = Converters.vector_Mat_to_Mat(patternImages);
            structured_1light_SinusoidalPattern_computePhaseMap_12(nativeObj, patternsPacked.nativeObj, wrappedPhaseMap.nativeObj);
#endif
        }
예제 #21
0
        //
        // C++:  void update(vector_Mat src, Mat labels)
        //

        //javadoc: FaceRecognizer::update(src, labels)
        public void update(List <Mat> src, Mat labels)
        {
            ThrowIfDisposed();
            if (labels != null)
            {
                labels.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Marshal the face images as one packed Mat; labels go through as-is.
            var srcPacked = Converters.vector_Mat_to_Mat(src);
            face_FaceRecognizer_update_10(nativeObj, srcPacked.nativeObj, labels.nativeObj);
#endif
        }
예제 #22
0
        //
        // C++:  void fastNlMeansDenoisingColoredMulti(vector_Mat srcImgs, Mat& dst, int imgToDenoiseIndex, int temporalWindowSize, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21)
        //

        //javadoc: fastNlMeansDenoisingColoredMulti(srcImgs, dst, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize)
        public static void fastNlMeansDenoisingColoredMulti(List <Mat> srcImgs, Mat dst, int imgToDenoiseIndex, int temporalWindowSize, float h, float hColor, int templateWindowSize, int searchWindowSize)
        {
            if (dst != null)
            {
                dst.ThrowIfDisposed();
            }

#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // The frame sequence is packed into one container Mat for the native call.
            var framesPacked = Converters.vector_Mat_to_Mat(srcImgs);
            photo_Photo_fastNlMeansDenoisingColoredMulti_10(framesPacked.nativeObj, dst.nativeObj, imgToDenoiseIndex, temporalWindowSize, h, hColor, templateWindowSize, searchWindowSize);
#endif
        }
예제 #23
0
        //javadoc: SinusoidalPattern::unwrapPhaseMap(wrappedPhaseMap, unwrappedPhaseMap, camSize)
        public void unwrapPhaseMap(List <Mat> wrappedPhaseMap, Mat unwrappedPhaseMap, Size camSize)
        {
            ThrowIfDisposed();
            if (unwrappedPhaseMap != null)
            {
                unwrappedPhaseMap.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // The wrapped phase maps cross into native code as one packed Mat; camSize is split into components.
            var wrappedPacked = Converters.vector_Mat_to_Mat(wrappedPhaseMap);
            structured_1light_SinusoidalPattern_unwrapPhaseMap_11(nativeObj, wrappedPacked.nativeObj, unwrappedPhaseMap.nativeObj, camSize.width, camSize.height);
#endif
        }
예제 #24
0
        //
        // C++:  void denoise_TVL1(vector_Mat observations, Mat result, double lambda = 1.0, int niters = 30)
        //

        //javadoc: denoise_TVL1(observations, result, lambda, niters)
        public static void denoise_TVL1(List <Mat> observations, Mat result, double lambda, int niters)
        {
            if (result != null)
            {
                result.ThrowIfDisposed();
            }

#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // The observation stack is packed into one container Mat before the native call.
            var obsPacked = Converters.vector_Mat_to_Mat(observations);
            photo_Photo_denoise_1TVL1_10(obsPacked.nativeObj, result.nativeObj, lambda, niters);
#endif
        }
예제 #25
0
        //
        // C++:  void FaceRecognizer::train(vector_Mat src, Mat labels)
        //

        /**
         * <p>Trains a FaceRecognizer with given data and associated labels.</p>
         *
         * <p>The following source code snippet shows you how to learn a Fisherfaces model
         * on a given set of images. The images are read with "imread" and pushed into a
         * <code>std.vector<Mat></code>. The labels of each image are stored within a
         * <code>std.vector<int></code> (you could also use a "Mat" of type
         * "CV_32SC1"). Think of the label as the subject (the person) this image
         * belongs to, so same subjects (persons) should have the same label. For the
         * available "FaceRecognizer" you don't have to pay any attention to the order
         * of the labels, just make sure same persons have the same label: // holds
         * images and labels <code></p>
         *
         * <p>// C++ code:</p>
         *
         * <p>vector<Mat> images;</p>
         *
         * <p>vector<int> labels;</p>
         *
         * <p>// images for first person</p>
         *
         * <p>images.push_back(imread("person0/0.jpg", CV_LOAD_IMAGE_GRAYSCALE));
         * labels.push_back(0);</p>
         *
         * <p>images.push_back(imread("person0/1.jpg", CV_LOAD_IMAGE_GRAYSCALE));
         * labels.push_back(0);</p>
         *
         * <p>images.push_back(imread("person0/2.jpg", CV_LOAD_IMAGE_GRAYSCALE));
         * labels.push_back(0);</p>
         *
         * <p>// images for second person</p>
         *
         * <p>images.push_back(imread("person1/0.jpg", CV_LOAD_IMAGE_GRAYSCALE));
         * labels.push_back(1);</p>
         *
         * <p>images.push_back(imread("person1/1.jpg", CV_LOAD_IMAGE_GRAYSCALE));
         * labels.push_back(1);</p>
         *
         * <p>images.push_back(imread("person1/2.jpg", CV_LOAD_IMAGE_GRAYSCALE));
         * labels.push_back(1);</p>
         *
         * <p>Now that you have read some images, we can create a new "FaceRecognizer". In
         * this example I'll create a Fisherfaces model and decide to keep all of the
         * possible Fisherfaces: </code></p>
         *
         * <p>// Create a new Fisherfaces model and retain all available Fisherfaces,
         * <code></p>
         *
         * <p>// C++ code:</p>
         *
         * <p>// this is the most common usage of this specific FaceRecognizer:</p>
         *
         * <p>//</p>
         *
         * <p>Ptr<FaceRecognizer> model = createFisherFaceRecognizer();</p>
         *
         * <p>And finally train it on the given dataset (the face images and labels):
         * </code></p>
         *
         * <p>// This is the common interface to train all of the available
         * cv.FaceRecognizer <code></p>
         *
         * <p>// C++ code:</p>
         *
         * <p>// implementations:</p>
         *
         * <p>//</p>
         *
         * <p>model->train(images, labels);</p>
         *
         * @param src The training images, that means the faces you want to learn. The
         * data has to be given as a <code>vector<Mat></code>.
         * @param labels The labels corresponding to the images have to be given either
         * as a <code>vector<int></code> or a <code>Mat</code> of type <code>CV_32SC1</code>.
         *
         * @see <a href="http://docs.opencv.org/modules/contrib/doc/facerec_api.html#facerecognizer-train">org.opencv.contrib.FaceRecognizer.train</a>
         */
        public void train(List <Mat> src, Mat labels)
        {
            // Validate the labels Mat first, then this recognizer instance
            // (same check order as the generated original).
            if (labels != null)
            {
                labels.ThrowIfDisposed();
            }
            ThrowIfDisposed();

#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // Marshal the face images as one packed Mat for the native trainer.
            var srcPacked = Converters.vector_Mat_to_Mat(src);
            contrib_FaceRecognizer_train_10(nativeObj, srcPacked.nativeObj, labels.nativeObj);
#endif
        }
예제 #26
0
        //
        // C++:  void javaDescriptorExtractor::compute(vector_Mat images, vector_vector_KeyPoint& keypoints, vector_Mat& descriptors)
        //

        /**
         * <p>Computes the descriptors for a set of keypoints detected in an image (first
         * variant) or image set (second variant).</p>
         *
         * @param images Image set.
         * @param keypoints Input collection of keypoints. Keypoints for which a
         * descriptor cannot be computed are removed and the remaining ones may be
         * reordered. Sometimes new keypoints can be added, for example:
         * <code>SIFT</code> duplicates a keypoint with several dominant orientations
         * (for each orientation).
         * @param descriptors Computed descriptors. In the second variant of the method
         * <code>descriptors[i]</code> are descriptors computed for a <code>keypoints[i]</code>.
         * Row <code>j</code> of <code>descriptors</code> (or <code>descriptors[i]</code>)
         * is the descriptor for the <code>j</code>-th keypoint.
         *
         * @see <a href="http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_descriptor_extractors.html#descriptorextractor-compute">org.opencv.features2d.DescriptorExtractor.compute</a>
         */
        public void compute(List <Mat> images, List <MatOfKeyPoint> keypoints, List <Mat> descriptors)
        {
            ThrowIfDisposed();

#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS) && !UNITY_EDITOR) || UNITY_5
            // Marshal inputs: images as one packed Mat, keypoints via a temporary list of per-image Mats.
            var imagesPacked   = Converters.vector_Mat_to_Mat(images);
            var keypointsTmp   = new List <Mat> ((keypoints != null) ? keypoints.Count : 0);
            var keypointsMat   = Converters.vector_vector_KeyPoint_to_Mat(keypoints, keypointsTmp);
            var descriptorsMat = new Mat();
            features2d_DescriptorExtractor_compute_11(nativeObj, imagesPacked.nativeObj, keypointsMat.nativeObj, descriptorsMat.nativeObj);
            // Copy the (possibly filtered/reordered) keypoints and the computed descriptors back out.
            Converters.Mat_to_vector_vector_KeyPoint(keypointsMat, keypoints);
            Converters.Mat_to_vector_Mat(descriptorsMat, descriptors);
#endif
        }
        //javadoc: DescriptorMatcher::radiusMatch(queryDescriptors, matches, maxDistance, masks)
        public void radiusMatch(Mat queryDescriptors, List <MatOfDMatch> matches, float maxDistance, List <Mat> masks)
        {
            ThrowIfDisposed();
            if (queryDescriptors != null)
            {
                queryDescriptors.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Masks are packed for interop; matches come back through a temporary container Mat.
            var masksPacked = Converters.vector_Mat_to_Mat(masks);
            var matchesMat  = new Mat();
            features2d_DescriptorMatcher_radiusMatch_14(nativeObj, queryDescriptors.nativeObj, matchesMat.nativeObj, maxDistance, masksPacked.nativeObj);
            // Unpack into the caller's list, then free the temporary container immediately.
            Converters.Mat_to_vector_vector_DMatch(matchesMat, matches);
            matchesMat.release();
#endif
        }
        //
        // C++:  void cv::DescriptorMatcher::knnMatch(Mat queryDescriptors, vector_vector_DMatch& matches, int k, vector_Mat masks = vector_Mat(), bool compactResult = false)
        //

        //javadoc: DescriptorMatcher::knnMatch(queryDescriptors, matches, k, masks, compactResult)
        public void knnMatch(Mat queryDescriptors, List <MatOfDMatch> matches, int k, List <Mat> masks, bool compactResult)
        {
            ThrowIfDisposed();
            if (queryDescriptors != null)
            {
                queryDescriptors.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Masks are packed for interop; matches come back through a temporary container Mat.
            var masksPacked = Converters.vector_Mat_to_Mat(masks);
            var matchesMat  = new Mat();
            features2d_DescriptorMatcher_knnMatch_13(nativeObj, queryDescriptors.nativeObj, matchesMat.nativeObj, k, masksPacked.nativeObj, compactResult);
            // Unpack into the caller's list, then free the temporary container immediately.
            Converters.Mat_to_vector_vector_DMatch(matchesMat, matches);
            matchesMat.release();
#endif
        }
예제 #29
0
        //
        // C++: static Ptr_Board cv::aruco::Board::create(vector_Mat objPoints, Ptr_Dictionary dictionary, Mat ids)
        //

        //javadoc: Board::create(objPoints, dictionary, ids)
        public static Board create(List <Mat> objPoints, Dictionary dictionary, Mat ids)
        {
            if (dictionary != null)
            {
                dictionary.ThrowIfDisposed();
            }
            if (ids != null)
            {
                ids.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // Pack the per-marker object points into one container Mat, then wrap the returned native Ptr_Board.
            var cornersPacked = Converters.vector_Mat_to_Mat(objPoints);
            return Board.__fromPtr__(aruco_Board_create_10(cornersPacked.nativeObj, dictionary.getNativeObjAddr(), ids.nativeObj));
#else
            return null;
#endif
        }
예제 #30
0
        //
        // C++:  void cv::MergeRobertson::process(vector_Mat src, Mat& dst, Mat times)
        //

        //javadoc: MergeRobertson::process(src, dst, times)
        public void process(List <Mat> src, Mat dst, Mat times)
        {
            ThrowIfDisposed();
            if (dst != null)
            {
                dst.ThrowIfDisposed();
            }
            if (times != null)
            {
                times.ThrowIfDisposed();
            }
#if UNITY_PRO_LICENSE || ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            // The exposure stack is packed into one container Mat; the merged HDR result lands in dst.
            var srcPacked = Converters.vector_Mat_to_Mat(src);
            photo_MergeRobertson_process_11(nativeObj, srcPacked.nativeObj, dst.nativeObj, times.nativeObj);
#endif
        }