Example #1
        /**
         * Encodes an image into a memory buffer.
         *
         * The function imencode compresses the image and stores it in the memory buffer that is resized to fit the
         * result. See cv::imwrite for the list of supported formats and flags description.
         *
         * param ext File extension that defines the output format.
         * param img Image to be written.
         * param buf Output buffer resized to fit the compressed image.
         * return true if the image was encoded successfully; false otherwise.
         */
        public static bool imencode(string ext, Mat img, MatOfByte buf)
        {
            if (img != null)
            {
                img.ThrowIfDisposed();
            }
            if (buf != null)
            {
                buf.ThrowIfDisposed();
            }
            Mat buf_mat = buf;

            return(imgcodecs_Imgcodecs_imencode_11(ext, img.nativeObj, buf_mat.nativeObj));
        }
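
For context, here is a minimal usage sketch of this overload, an assumption-laden illustration rather than part of the wrapper itself: the OpenCVForUnity namespaces below vary between plugin versions, and the solid-color test image is hypothetical.

// Minimal sketch (not part of the wrapper): encode a solid-color test image
// into an in-memory JPEG buffer. Namespaces are assumptions; adjust them to
// the OpenCVForUnity version in use.
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.ImgcodecsModule;

public static class ImencodeSketch
{
    public static byte[] EncodeToJpeg()
    {
        // 64x64 three-channel image filled with a single color.
        using (Mat img = new Mat(64, 64, CvType.CV_8UC3, new Scalar(0, 128, 255)))
        using (MatOfByte buf = new MatOfByte())
        {
            // The extension selects the codec; buf is resized to hold the result.
            bool ok = Imgcodecs.imencode(".jpg", img, buf);
            return ok ? buf.toArray() : null;
        }
    }
}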
Example #2
        //
        // C++: static Net cv::dnn::Net::readFromModelOptimizer(vector_uchar bufferModelConfig, vector_uchar bufferWeights)
        //

        /**
         * Create a network from Intel's Model Optimizer in-memory buffers with intermediate representation (IR).
         * param bufferModelConfig buffer with model's configuration.
         * param bufferWeights buffer with model's trained weights.
         * return Net object.
         */
        public static Net readFromModelOptimizer(MatOfByte bufferModelConfig, MatOfByte bufferWeights)
        {
            if (bufferModelConfig != null)
            {
                bufferModelConfig.ThrowIfDisposed();
            }
            if (bufferWeights != null)
            {
                bufferWeights.ThrowIfDisposed();
            }
            Mat bufferModelConfig_mat = bufferModelConfig;
            Mat bufferWeights_mat     = bufferWeights;

            return(new Net(dnn_Net_readFromModelOptimizer_11(bufferModelConfig_mat.nativeObj, bufferWeights_mat.nativeObj)));
        }
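
A hedged usage sketch for this overload follows; the file paths, helper name, and namespaces are illustrative assumptions. It simply wraps the raw IR bytes (.xml configuration and .bin weights) in MatOfByte buffers before handing them to the overload above.

// Sketch only: build a Net from in-memory OpenVINO IR buffers.
// Paths and namespaces are assumptions for illustration.
using System.IO;
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.DnnModule;

public static class ModelOptimizerSketch
{
    public static Net LoadFromBytes(string xmlPath, string binPath)
    {
        // Wrap the raw bytes in MatOfByte buffers, as the overload above expects.
        using (MatOfByte config = new MatOfByte(File.ReadAllBytes(xmlPath)))
        using (MatOfByte weights = new MatOfByte(File.ReadAllBytes(binPath)))
        {
            return Net.readFromModelOptimizer(config, weights);
        }
    }
}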
Example #3
        //javadoc: readNetFromTensorflow(bufferModel)
        public static Net readNetFromTensorflow(MatOfByte bufferModel)
        {
            if (bufferModel != null)
            {
                bufferModel.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat bufferModel_mat = bufferModel;
            Net retVal          = new Net(dnn_Dnn_readNetFromTensorflow_13(bufferModel_mat.nativeObj));

            return(retVal);
#else
            return(null);
#endif
        }
Example #4
        //javadoc: readNetFromDarknet(bufferCfg)
        public static Net readNetFromDarknet(MatOfByte bufferCfg)
        {
            if (bufferCfg != null)
            {
                bufferCfg.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat bufferCfg_mat = bufferCfg;
            Net retVal        = new Net(dnn_Dnn_readNetFromDarknet_13(bufferCfg_mat.nativeObj));

            return(retVal);
#else
            return(null);
#endif
        }
Example #5
        //
        // C++:  bool cv::imencode(String ext, Mat img, vector_uchar& buf, vector_int _params = std::vector<int>())
        //

        /**
         * Encodes an image into a memory buffer.
         *
         * The function imencode compresses the image and stores it in the memory buffer that is resized to fit the
         * result. See cv::imwrite for the list of supported formats and flags description.
         *
         * param ext File extension that defines the output format.
         * param img Image to be written.
         * param buf Output buffer resized to fit the compressed image.
         * param _params Format-specific parameters encoded as pairs (paramId_1, paramValue_1, ...); see cv::ImwriteFlags.
         * return true if the image was encoded successfully; false otherwise.
         */
        public static bool imencode(string ext, Mat img, MatOfByte buf, MatOfInt _params)
        {
            if (img != null)
            {
                img.ThrowIfDisposed();
            }
            if (buf != null)
            {
                buf.ThrowIfDisposed();
            }
            if (_params != null)
            {
                _params.ThrowIfDisposed();
            }
            Mat buf_mat     = buf;
            Mat _params_mat = _params;

            return(imgcodecs_Imgcodecs_imencode_10(ext, img.nativeObj, buf_mat.nativeObj, _params_mat.nativeObj));
        }
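
A short sketch of the parameterized overload, assuming the usual OpenCVForUnity namespaces; it passes a JPEG quality setting through the _params buffer.

// Sketch only: same encode call, but with format-specific parameters
// (here JPEG quality). Namespaces are assumptions.
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.ImgcodecsModule;

public static class ImencodeParamsSketch
{
    public static byte[] EncodeJpeg(Mat img, int quality)
    {
        using (MatOfByte buf = new MatOfByte())
        // Parameters are passed as (id, value) pairs, e.g. IMWRITE_JPEG_QUALITY.
        using (MatOfInt p = new MatOfInt(Imgcodecs.IMWRITE_JPEG_QUALITY, quality))
        {
            return Imgcodecs.imencode(".jpg", img, buf, p) ? buf.toArray() : null;
        }
    }
}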
Example #6
        //javadoc: imencode(ext, img, buf)
        public static bool imencode(string ext, Mat img, MatOfByte buf)
        {
            if (img != null)
            {
                img.ThrowIfDisposed();
            }
            if (buf != null)
            {
                buf.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat  buf_mat = buf;
            bool retVal  = imgcodecs_Imgcodecs_imencode_11(ext, img.nativeObj, buf_mat.nativeObj);

            return(retVal);
#else
            return(false);
#endif
        }
Example #7
        //
        // C++:  void cv::drawMatches(Mat img1, vector_KeyPoint keypoints1, Mat img2, vector_KeyPoint keypoints2, vector_DMatch matches1to2, Mat& outImg, Scalar matchColor = Scalar::all(-1), Scalar singlePointColor = Scalar::all(-1), vector_char matchesMask = std::vector<char>(), DrawMatchesFlags flags = DrawMatchesFlags::DEFAULT)
        //

        //javadoc: drawMatches(img1, keypoints1, img2, keypoints2, matches1to2, outImg, matchColor, singlePointColor, matchesMask)
        public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, MatOfByte matchesMask)
        {
            if (img1 != null)
            {
                img1.ThrowIfDisposed();
            }
            if (keypoints1 != null)
            {
                keypoints1.ThrowIfDisposed();
            }
            if (img2 != null)
            {
                img2.ThrowIfDisposed();
            }
            if (keypoints2 != null)
            {
                keypoints2.ThrowIfDisposed();
            }
            if (matches1to2 != null)
            {
                matches1to2.ThrowIfDisposed();
            }
            if (outImg != null)
            {
                outImg.ThrowIfDisposed();
            }
            if (matchesMask != null)
            {
                matchesMask.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat keypoints1_mat  = keypoints1;
            Mat keypoints2_mat  = keypoints2;
            Mat matches1to2_mat = matches1to2;
            Mat matchesMask_mat = matchesMask;
            features2d_Features2d_drawMatches_10(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj);

            return;
#else
            return;
#endif
        }
Example #8
        //
        // C++:  Net cv::dnn::readNet(String framework, vector_uchar bufferModel, vector_uchar bufferConfig = std::vector<uchar>())
        //

        //javadoc: readNet(framework, bufferModel, bufferConfig)
        public static Net readNet(string framework, MatOfByte bufferModel, MatOfByte bufferConfig)
        {
            if (bufferModel != null)
            {
                bufferModel.ThrowIfDisposed();
            }
            if (bufferConfig != null)
            {
                bufferConfig.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat bufferModel_mat  = bufferModel;
            Mat bufferConfig_mat = bufferConfig;
            Net retVal           = new Net(dnn_Dnn_readNet_10(framework, bufferModel_mat.nativeObj, bufferConfig_mat.nativeObj));

            return(retVal);
#else
            return(null);
#endif
        }
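
A possible usage sketch for this overload; the "onnx" framework tag, the file path, and the namespaces are assumptions chosen for illustration.

// Sketch only: load a model from raw byte buffers with an explicit framework tag.
// The "onnx" tag and the path are illustrative; namespaces are assumptions.
using System.IO;
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.DnnModule;

public static class ReadNetSketch
{
    public static Net LoadOnnxFromBytes(string onnxPath)
    {
        using (MatOfByte model = new MatOfByte(File.ReadAllBytes(onnxPath)))
        using (MatOfByte config = new MatOfByte()) // empty: ONNX needs no separate config
        {
            return Dnn.readNet("onnx", model, config);
        }
    }
}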
Example #9
        //javadoc: calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err)
        public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err)
        {
            if (prevImg != null)
            {
                prevImg.ThrowIfDisposed();
            }
            if (nextImg != null)
            {
                nextImg.ThrowIfDisposed();
            }
            if (prevPts != null)
            {
                prevPts.ThrowIfDisposed();
            }
            if (nextPts != null)
            {
                nextPts.ThrowIfDisposed();
            }
            if (status != null)
            {
                status.ThrowIfDisposed();
            }
            if (err != null)
            {
                err.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat prevPts_mat = prevPts;
            Mat nextPts_mat = nextPts;
            Mat status_mat  = status;
            Mat err_mat     = err;
            video_Video_calcOpticalFlowPyrLK_15(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj);

            return;
#else
            return;
#endif
        }
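
A brief tracking sketch built on this overload; the origin of prevPts (for example Imgproc.goodFeaturesToTrack) and the namespaces are assumptions.

// Sketch only: track sparse points between two grayscale frames.
// Namespaces are assumptions; prevPts comes from some feature detector.
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.VideoModule;

public static class PyrLKSketch
{
    public static MatOfPoint2f Track(Mat prevGray, Mat nextGray, MatOfPoint2f prevPts)
    {
        MatOfPoint2f nextPts = new MatOfPoint2f();
        MatOfByte status = new MatOfByte();  // 1 where the flow for a point was found
        MatOfFloat err = new MatOfFloat();   // per-point tracking error
        Video.calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);
        return nextPts;
    }
}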
Example #10
        //javadoc: calcOpticalFlowPyrLK(prevImg, nextImg, prevPts, nextPts, status, err, winSize, maxLevel, criteria)
        public static void calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err, Size winSize, int maxLevel, TermCriteria criteria)
        {
            if (prevImg != null)
            {
                prevImg.ThrowIfDisposed();
            }
            if (nextImg != null)
            {
                nextImg.ThrowIfDisposed();
            }
            if (prevPts != null)
            {
                prevPts.ThrowIfDisposed();
            }
            if (nextPts != null)
            {
                nextPts.ThrowIfDisposed();
            }
            if (status != null)
            {
                status.ThrowIfDisposed();
            }
            if (err != null)
            {
                err.ThrowIfDisposed();
            }
#if ((UNITY_ANDROID || UNITY_IOS || UNITY_WEBGL) && !UNITY_EDITOR) || UNITY_5 || UNITY_5_3_OR_NEWER
            Mat prevPts_mat = prevPts;
            Mat nextPts_mat = nextPts;
            Mat status_mat  = status;
            Mat err_mat     = err;
            video_Video_calcOpticalFlowPyrLK_12(prevImg.nativeObj, nextImg.nativeObj, prevPts_mat.nativeObj, nextPts_mat.nativeObj, status_mat.nativeObj, err_mat.nativeObj, winSize.width, winSize.height, maxLevel, criteria.type, criteria.maxCount, criteria.epsilon);

            return;
#else
            return;
#endif
        }
Example #11
        /**
         * Draws the found matches of keypoints from two images.
         *
         * param img1 First source image.
         * param keypoints1 Keypoints from the first source image.
         * param img2 Second source image.
         * param keypoints2 Keypoints from the second source image.
         * param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
         * has a corresponding point in keypoints2[matches[i]] .
         * param outImg Output image. Its content depends on the flags value defining what is drawn in the
         * output image. See possible flags bit values below.
         * param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)
         * , the color is generated randomly.
         * param singlePointColor Color of single keypoints (circles), which means that keypoints do not
         * have the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
         * param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
         * drawn.
         *
         * This function draws matches of keypoints from two images in the output image. Match is a line
         * connecting two keypoints (circles). See cv::DrawMatchesFlags.
         */
        public static void drawMatches(Mat img1, MatOfKeyPoint keypoints1, Mat img2, MatOfKeyPoint keypoints2, MatOfDMatch matches1to2, Mat outImg, Scalar matchColor, Scalar singlePointColor, MatOfByte matchesMask)
        {
            if (img1 != null)
            {
                img1.ThrowIfDisposed();
            }
            if (keypoints1 != null)
            {
                keypoints1.ThrowIfDisposed();
            }
            if (img2 != null)
            {
                img2.ThrowIfDisposed();
            }
            if (keypoints2 != null)
            {
                keypoints2.ThrowIfDisposed();
            }
            if (matches1to2 != null)
            {
                matches1to2.ThrowIfDisposed();
            }
            if (outImg != null)
            {
                outImg.ThrowIfDisposed();
            }
            if (matchesMask != null)
            {
                matchesMask.ThrowIfDisposed();
            }
            Mat keypoints1_mat  = keypoints1;
            Mat keypoints2_mat  = keypoints2;
            Mat matches1to2_mat = matches1to2;
            Mat matchesMask_mat = matchesMask;

            features2d_Features2d_drawMatches_11(img1.nativeObj, keypoints1_mat.nativeObj, img2.nativeObj, keypoints2_mat.nativeObj, matches1to2_mat.nativeObj, outImg.nativeObj, matchColor.val[0], matchColor.val[1], matchColor.val[2], matchColor.val[3], singlePointColor.val[0], singlePointColor.val[1], singlePointColor.val[2], singlePointColor.val[3], matchesMask_mat.nativeObj);
        }
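
To round off, a hedged end-to-end sketch that feeds detector and matcher output into this overload; ORB, the brute-force Hamming matcher, and the namespaces are assumptions rather than anything mandated by the wrapper.

// Sketch only: detect ORB keypoints in two images, match them, and visualize
// the result with the drawMatches overload above. Namespaces are assumptions.
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.Features2dModule;

public static class DrawMatchesSketch
{
    public static Mat Visualize(Mat img1, Mat img2)
    {
        ORB orb = ORB.create();
        MatOfKeyPoint kp1 = new MatOfKeyPoint(), kp2 = new MatOfKeyPoint();
        Mat desc1 = new Mat(), desc2 = new Mat();
        orb.detectAndCompute(img1, new Mat(), kp1, desc1);
        orb.detectAndCompute(img2, new Mat(), kp2, desc2);

        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
        MatOfDMatch matches = new MatOfDMatch();
        matcher.match(desc1, desc2, matches);

        Mat outImg = new Mat();
        // Scalar.all(-1) lets OpenCV pick random colors; an empty mask draws everything.
        Features2d.drawMatches(img1, kp1, img2, kp2, matches, outImg,
                               Scalar.all(-1), Scalar.all(-1), new MatOfByte());
        return outImg;
    }
}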