Example #1
        /// <summary>
        /// Performs chamfer matching of a template edge map against an edge image.
        /// </summary>
        /// <param name="img">Edge image (for example, the output of Canny) in which to search.</param>
        /// <param name="templ">Edge map of the template to search for.</param>
        /// <param name="results">Output matched contours; each contour is an array of points.</param>
        /// <param name="cost">Output matching cost for each contour in <paramref name="results"/>.</param>
        /// <param name="templScale">Scale applied to the template.</param>
        /// <param name="maxMatches">Maximum number of matches to return.</param>
        /// <param name="minMatchDistance">Minimum distance between separate matches.</param>
        /// <param name="padX">Horizontal padding.</param>
        /// <param name="padY">Vertical padding.</param>
        /// <param name="scales">Number of scales searched.</param>
        /// <param name="minScale">Minimum template scale.</param>
        /// <param name="maxScale">Maximum template scale.</param>
        /// <param name="orientationWeight">Weight of the edge orientation term in the matching cost.</param>
        /// <param name="truncate">Truncation value for the distance transform.</param>
        /// <returns>Index of the best match in <paramref name="results"/>, or -1 if nothing was found.</returns>
        public static int ChamferMatching(
            Mat img, Mat templ,
                                  out Point[][] results, out float[] cost,
                                  double templScale=1, int maxMatches = 20,
                                  double minMatchDistance = 1.0, int padX = 3,
                                  int padY = 3, int scales = 5, double minScale = 0.6, double maxScale = 1.6,
                                  double orientationWeight = 0.5, double truncate = 20)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            if (templ == null)
                throw new ArgumentNullException("templ");
            img.ThrowIfDisposed();
            templ.ThrowIfDisposed();
            
            using (var resultsVec = new VectorOfVectorPoint())
            using (var costVec = new VectorOfFloat())
            {
                int ret = NativeMethods.contrib_chamerMatching(
                    img.CvPtr, templ.CvPtr, resultsVec.CvPtr, costVec.CvPtr, 
                    templScale, maxMatches, minMatchDistance,
                    padX, padY, scales, minScale, maxScale, orientationWeight, truncate);
                GC.KeepAlive(img);
                GC.KeepAlive(templ);

                results = resultsVec.ToArray();
                cost = costVec.ToArray();

                return ret;
            }
        }
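A minimal usage sketch, assuming 8-bit grayscale Mats 'scene' and 'template' loaded elsewhere and that this static lives on the Cv2 class as in OpenCvSharp; chamfer matching works on edge maps, so Canny is applied first:

        Mat sceneEdges = new Mat();
        Mat templEdges = new Mat();
        Cv2.Canny(scene, sceneEdges, 80, 160);      // binary edge map of the scene
        Cv2.Canny(template, templEdges, 80, 160);   // binary edge map of the template

        Point[][] results;
        float[] costs;
        int best = Cv2.ChamferMatching(sceneEdges, templEdges, out results, out costs);
        if (best >= 0)
            Console.WriteLine("best match has {0} points, cost {1}", results[best].Length, costs[best]);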
Example #2
        /// <summary>
        /// Draw keypoints.
        /// </summary>
        /// <param name="image"></param>
        /// <param name="keypoints"></param>
        /// <param name="outImage"></param>
        /// <param name="color"></param>
        /// <param name="flags"></param>
        public static void DrawKeypoints(Mat image, IEnumerable<KeyPoint> keypoints, Mat outImage,
            Scalar? color = null, DrawMatchesFlags flags = DrawMatchesFlags.Default)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (outImage == null)
                throw new ArgumentNullException("outImage");
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");
            image.ThrowIfDisposed();
            outImage.ThrowIfDisposed();

            KeyPoint[] keypointsArray = EnumerableEx.ToArray(keypoints);
            Scalar color0 = color.GetValueOrDefault(Scalar.All(-1));
            NativeMethods.features2d_drawKeypoints(image.CvPtr, keypointsArray, keypointsArray.Length,
                outImage.CvPtr, color0, (int)flags);
        }
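A minimal usage sketch, assuming an 8-bit Mat 'image' loaded elsewhere, that this static lives on Cv2, and that an ORB detector (just one possible choice) is available:

        var orb = new ORB();
        KeyPoint[] keypoints = orb.Detect(image);   // detect some keypoints to draw
        var canvas = new Mat();
        Cv2.DrawKeypoints(image, keypoints, canvas, Scalar.All(-1),
            DrawMatchesFlags.DrawRichKeypoints);    // also draw keypoint size and orientation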
Example #3
#if LANG_JP
        /// <summary>
        /// 学習データを与えて初期化
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="tflag"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="varType"></param>
        /// <param name="missingMask"></param>
        /// <param name="param"></param>
#else
        /// <summary>
        /// Training constructor
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="tflag"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="varType"></param>
        /// <param name="missingMask"></param>
        /// <param name="param"></param>
#endif
        public CvBoost(
            Mat trainData,
            DTreeDataLayout tflag,
            Mat responses,
            Mat varIdx          = null,
            Mat sampleIdx       = null,
            Mat varType         = null,
            Mat missingMask     = null,
            CvBoostParams param = null)
        {
            if (trainData == null)
            {
                throw new ArgumentNullException("trainData");
            }
            if (responses == null)
            {
                throw new ArgumentNullException("responses");
            }
            trainData.ThrowIfDisposed();
            responses.ThrowIfDisposed();

            if (param == null)
            {
                param = new CvBoostParams();
            }

            ptr = NativeMethods.ml_CvBoost_new_Mat(
                trainData.CvPtr,
                (int)tflag,
                responses.CvPtr,
                Cv2.ToPtr(varIdx),
                Cv2.ToPtr(sampleIdx),
                Cv2.ToPtr(varType),
                Cv2.ToPtr(missingMask),
                param.CvPtr
                );
        }
Example #4
#if LANG_JP
        /// <summary>
        /// 決定木を学習する
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="tflag"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="varType"></param>
        /// <param name="missingMask"></param>
        /// <param name="param"></param>
        /// <returns></returns>
#else
        /// <summary>
        /// Trains decision tree
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="tflag"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="varType"></param>
        /// <param name="missingMask"></param>
        /// <param name="param"></param>
        /// <returns></returns>
#endif
        public virtual bool Train(
            Mat trainData,
            DTreeDataLayout tflag,
            Mat responses,
            Mat varIdx,
            Mat sampleIdx,
            Mat varType,
            Mat missingMask,
            CvDTreeParams param)
        {
            if (trainData == null)
            {
                throw new ArgumentNullException("trainData");
            }
            if (responses == null)
            {
                throw new ArgumentNullException("responses");
            }
            trainData.ThrowIfDisposed();
            responses.ThrowIfDisposed();

            if (param == null)
            {
                param = new CvDTreeParams();
            }

            return(NativeMethods.ml_CvDTree_train_Mat(
                       ptr,
                       trainData.CvPtr,
                       (int)tflag,
                       responses.CvPtr,
                       Cv2.ToPtr(varIdx),
                       Cv2.ToPtr(sampleIdx),
                       Cv2.ToPtr(varType),
                       Cv2.ToPtr(missingMask),
                       param.CvPtr) != 0);
        }
Example #5
        /// <summary>
        /// Detects objects of different sizes in the input image. The detected objects are returned as a list of rectangles.
        /// </summary>
        /// <param name="image">Matrix of the type CV_8U containing an image where objects are detected.</param>
        /// <param name="rejectLevels"></param>
        /// <param name="levelWeights"></param>
        /// <param name="scaleFactor">Parameter specifying how much the image size is reduced at each image scale.</param>
        /// <param name="minNeighbors">Parameter specifying how many neighbors each candidate rectangle should have to retain it.</param>
        /// <param name="flags">Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects.
        /// It is not used for a new cascade.</param>
        /// <param name="minSize">Minimum possible object size. Objects smaller than that are ignored.</param>
        /// <param name="maxSize">Maximum possible object size. Objects larger than that are ignored.</param>
        /// <param name="outputRejectLevels"></param>
        /// <returns>Vector of rectangles where each rectangle contains the detected object.</returns>
        public virtual Rect[] DetectMultiScale(
            Mat image,
            out int[] rejectLevels,
            out double[] levelWeights,
            double scaleFactor      = 1.1,
            int minNeighbors        = 3,
            HaarDetectionType flags = HaarDetectionType.Zero,
            Size?minSize            = null,
            Size?maxSize            = null,
            bool outputRejectLevels = false)
        {
            if (disposed)
            {
                throw new ObjectDisposedException("CascadeClassifier");
            }
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            image.ThrowIfDisposed();

            Size minSize0 = minSize.GetValueOrDefault(new Size());
            Size maxSize0 = maxSize.GetValueOrDefault(new Size());

            using (var objectsVec = new VectorOfRect())
                using (var rejectLevelsVec = new VectorOfInt32())
                    using (var levelWeightsVec = new VectorOfDouble())
                    {
                        NativeMethods.objdetect_CascadeClassifier_detectMultiScale(
                            ptr, image.CvPtr, objectsVec.CvPtr, rejectLevelsVec.CvPtr, levelWeightsVec.CvPtr,
                            scaleFactor, minNeighbors, (int)flags, minSize0, maxSize0, outputRejectLevels ? 1 : 0);

                        rejectLevels = rejectLevelsVec.ToArray();
                        levelWeights = levelWeightsVec.ToArray();
                        return(objectsVec.ToArray());
                    }
        }
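A minimal usage sketch, assuming a grayscale Mat 'gray' loaded elsewhere; the cascade file name is hypothetical, and reject levels are only filled when outputRejectLevels is true:

        var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml");
        int[] rejectLevels;
        double[] levelWeights;
        Rect[] found = cascade.DetectMultiScale(gray, out rejectLevels, out levelWeights,
            scaleFactor: 1.1, minNeighbors: 3, outputRejectLevels: true);
        Console.WriteLine("found {0} objects", found.Length);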
Example #6
        /// <summary>
        /// Draw keypoints.
        /// </summary>
        /// <param name="image"></param>
        /// <param name="keypoints"></param>
        /// <param name="outImage"></param>
        /// <param name="color"></param>
        /// <param name="flags"></param>
        public static void DrawKeypoints(Mat image, IEnumerable <KeyPoint> keypoints, Mat outImage,
                                         Scalar?color = null, DrawMatchesFlags flags = DrawMatchesFlags.Default)
        {
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            if (outImage == null)
            {
                throw new ArgumentNullException(nameof(outImage));
            }
            if (keypoints == null)
            {
                throw new ArgumentNullException(nameof(keypoints));
            }
            image.ThrowIfDisposed();
            outImage.ThrowIfDisposed();

            KeyPoint[] keypointsArray = EnumerableEx.ToArray(keypoints);
            Scalar     color0         = color.GetValueOrDefault(Scalar.All(-1));

            NativeMethods.features2d_drawKeypoints(image.CvPtr, keypointsArray, keypointsArray.Length,
                                                   outImage.CvPtr, color0, (int)flags);
        }
Example #7
#if LANG_JP
        /// <summary>
        /// MLPの学習と更新
        /// </summary>
        /// <param name="inputs">入力ベクトルの浮動小数点の行列で,1行で1ベクトル.</param>
		/// <param name="outputs">対応する出力ベクトルの浮動小数点の行列で,1行で1ベクトル.</param>
        /// <param name="sampleWeights">(RPROPのみ)各サンプルの重みを指定する浮動小数点のベクトル.オプション. 学習において,幾つかのサンプルは他のものより重要な場合がある. 例えば検出率と誤検出率間の適切なバランスを探すために,あるクラスの重みを増加させたい場合など.</param>
        /// <param name="sampleIdx">用いるサンプルを表す整数のベクトル(すなわち_inputsと_outputsの行).</param>
        /// <param name="param">学習パラメータ</param>
		/// <param name="flags">学習アルゴリズムを制御する様々なパラメータ</param>
		/// <returns>ネットワークの重みを計算/調整した繰り返し回数.</returns>
#else
        /// <summary>
        /// Trains/updates MLP
        /// </summary>
        /// <param name="inputs">A floating-point matrix of input vectors, one vector per row. </param>
        /// <param name="outputs">A floating-point matrix of the corresponding output vectors, one vector per row. </param>
        /// <param name="sampleWeights">(RPROP only) The optional floating-point vector of weights for each sample. Some samples may be more important than others for training, e.g. user may want to gain the weight of certain classes to find the right balance between hit-rate and false-alarm rate etc. </param>
        /// <param name="sampleIdx">The optional integer vector indicating the samples (i.e. rows of _inputs and _outputs) that are taken into account. </param>
        /// <param name="param">The training params.</param>
        /// <param name="flags">The various parameters to control the training algorithm.</param>
        /// <returns>the number of done iterations.</returns>
#endif
        public virtual int Train(Mat inputs, Mat outputs, Mat sampleWeights,
            Mat sampleIdx = null,
            CvANN_MLP_TrainParams param = null,
            MLPTrainingFlag flags = MLPTrainingFlag.Zero)
        {
            if (inputs == null)
                throw new ArgumentNullException("inputs");
            if (outputs == null)
                throw new ArgumentNullException("outputs");
            inputs.ThrowIfDisposed();
            outputs.ThrowIfDisposed();

            if (param == null)
                param = new CvANN_MLP_TrainParams();

            return NativeMethods.ml_CvANN_MLP_train_CvMat(
                ptr,
                inputs.CvPtr,
                outputs.CvPtr,
                Cv2.ToPtr(sampleWeights),
                Cv2.ToPtr(sampleIdx),
                param.NativeStruct,
                (int)flags
            );
        }
Example #8
 /// <summary>
 /// replicates the input matrix the specified number of times in the horizontal and/or vertical direction
 /// </summary>
 /// <param name="src"></param>
 /// <param name="ny"></param>
 /// <param name="nx"></param>
 /// <returns></returns>
 public static Mat Repeat(Mat src, int ny, int nx)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     src.ThrowIfDisposed();
     IntPtr matPtr = NativeMethods.core_repeat(src.CvPtr, ny, nx);
     return new Mat(matPtr);
 }
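A minimal usage sketch, assuming a Mat 'src' loaded elsewhere and that this static lives on Cv2:

     Mat tiled = Cv2.Repeat(src, 2, 3);   // 2x vertically, 3x horizontally: (2*src.Rows) x (3*src.Cols)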
Example #9
#if LANG_JP
        /// <summary>
        /// 指定したトポロジーでMLPを構築する
        /// </summary>
        /// <param name="layerSizes">入出力層を含む各層のニューロン数を指定する整数のベクトル</param>
        /// <param name="activFunc">各ニューロンの活性化関数</param>
        /// <param name="fParam1">活性化関数のフリーパラメータα</param>
        /// <param name="fParam2">活性化関数のフリーパラメータβ</param>
#else
        /// <summary>
        /// Constructs the MLP with the specified topology
        /// </summary>
        /// <param name="layerSizes">The integer vector specifies the number of neurons in each layer including the input and output layers. </param>
        /// <param name="activFunc">Specifies the activation function for each neuron</param>
        /// <param name="fParam1">Free parameter α of the activation function</param>
        /// <param name="fParam2">Free parameter β of the activation function</param>
#endif
        public void Create(
            Mat layerSizes,
            MLPActivationFunc activFunc = MLPActivationFunc.SigmoidSym,
            double fParam1 = 0, double fParam2 = 0)
        {
            if (disposed)
                throw new ObjectDisposedException("StatModel");
            if (layerSizes == null)
                throw new ArgumentNullException("layerSizes");
            layerSizes.ThrowIfDisposed();

            NativeMethods.ml_CvANN_MLP_create_Mat(
                ptr, layerSizes.CvPtr, (int)activFunc, fParam1, fParam2);
        }
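A minimal usage sketch for a 2-input / 10-hidden / 1-output topology; the array-backed Mat constructor and the CvANN_MLP default constructor are assumed as in OpenCvSharp:

        var layerSizes = new Mat(3, 1, MatType.CV_32SC1, new int[] { 2, 10, 1 });  // neurons per layer
        var mlp = new CvANN_MLP();
        mlp.Create(layerSizes, MLPActivationFunc.SigmoidSym, 0, 0);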
Example #10
        /// <summary>
        /// evaluate specified ROI and return confidence value for each location in multiple scales
        /// </summary>
        /// <param name="img"></param>
        /// <param name="foundLocations"></param>
        /// <param name="locations"></param>
        /// <param name="hitThreshold"></param>
        /// <param name="groupThreshold"></param>
        public void DetectMultiScaleROI(
            Mat img,
            out Rect[] foundLocations,
            out DetectionROI[] locations,
            double hitThreshold = 0,
            int groupThreshold = 0)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            using (var flVec = new VectorOfRect())
            using (var scalesVec = new VectorOfDouble())
            using (var locationsVec = new VectorOfVectorPoint())
            using (var confidencesVec = new VectorOfVectorDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectMultiScaleROI(
                    ptr, img.CvPtr, flVec.CvPtr, 
                    scalesVec.CvPtr, locationsVec.CvPtr, confidencesVec.CvPtr,
                    hitThreshold, groupThreshold);
                foundLocations = flVec.ToArray();

                double[] s = scalesVec.ToArray();
                Point[][] l = locationsVec.ToArray();
                double[][] c = confidencesVec.ToArray();

                if(s.Length != l.Length || l.Length != c.Length)
                    throw new OpenCvSharpException("Invalid result data 'locations'");
                locations = new DetectionROI[s.Length];
                for (int i = 0; i < s.Length; i++)
                {
                    locations[i] = new DetectionROI
                    {
                        Scale = s[i],
                        Locations = l[i],
                        Confidences = c[i]
                    };
                }
            }
        }
Example #11
        /// <summary>
        /// Draws matches of keypoints from two images on the output image.
        /// </summary>
        /// <param name="img1"></param>
        /// <param name="keypoints1"></param>
        /// <param name="img2"></param>
        /// <param name="keypoints2"></param>
        /// <param name="matches1To2"></param>
        /// <param name="outImg"></param>
        /// <param name="matchColor"></param>
        /// <param name="singlePointColor"></param>
        /// <param name="matchesMask"></param>
        /// <param name="flags"></param>
        public static void DrawMatches(Mat img1, IEnumerable <KeyPoint> keypoints1,
                                       Mat img2, IEnumerable <KeyPoint> keypoints2,
                                       IEnumerable <IEnumerable <DMatch> > matches1To2, Mat outImg,
                                       Scalar?matchColor = null, Scalar?singlePointColor = null,
                                       IEnumerable <IEnumerable <byte> > matchesMask = null,
                                       DrawMatchesFlags flags = DrawMatchesFlags.Default)
        {
            if (img1 == null)
            {
                throw new ArgumentNullException(nameof(img1));
            }
            if (img2 == null)
            {
                throw new ArgumentNullException(nameof(img2));
            }
            if (outImg == null)
            {
                throw new ArgumentNullException(nameof(outImg));
            }
            if (keypoints1 == null)
            {
                throw new ArgumentNullException(nameof(keypoints1));
            }
            if (keypoints2 == null)
            {
                throw new ArgumentNullException(nameof(keypoints2));
            }
            if (matches1To2 == null)
            {
                throw new ArgumentNullException(nameof(matches1To2));
            }
            img1.ThrowIfDisposed();
            img2.ThrowIfDisposed();
            outImg.ThrowIfDisposed();

            KeyPoint[] keypoints1Array  = EnumerableEx.ToArray(keypoints1);
            KeyPoint[] keypoints2Array  = EnumerableEx.ToArray(keypoints2);
            DMatch[][] matches1To2Array = EnumerableEx.SelectToArray(matches1To2, EnumerableEx.ToArray);
            int        matches1To2Size1 = matches1To2Array.Length;

            int[]  matches1To2Size2  = EnumerableEx.SelectToArray(matches1To2Array, dm => dm.Length);
            Scalar matchColor0       = matchColor.GetValueOrDefault(Scalar.All(-1));
            Scalar singlePointColor0 = singlePointColor.GetValueOrDefault(Scalar.All(-1));

            using (var matches1To2Ptr = new ArrayAddress2 <DMatch>(matches1To2Array))
            {
                if (matchesMask == null)
                {
                    NativeMethods.features2d_drawMatches2(img1.CvPtr, keypoints1Array, keypoints1Array.Length,
                                                          img2.CvPtr, keypoints2Array, keypoints2Array.Length,
                                                          matches1To2Ptr, matches1To2Size1, matches1To2Size2,
                                                          outImg.CvPtr, matchColor0, singlePointColor0,
                                                          null, 0, null, (int)flags);
                }
                else
                {
                    byte[][] matchesMaskArray = EnumerableEx.SelectToArray(matchesMask, EnumerableEx.ToArray);
                    int      matchesMaskSize1 = matches1To2Array.Length;
                    int[]    matchesMaskSize2 = EnumerableEx.SelectToArray(matchesMaskArray, dm => dm.Length);
                    using (var matchesMaskPtr = new ArrayAddress2 <byte>(matchesMaskArray))
                    {
                        NativeMethods.features2d_drawMatches2(img1.CvPtr, keypoints1Array, keypoints1Array.Length,
                                                              img2.CvPtr, keypoints2Array, keypoints2Array.Length,
                                                              matches1To2Ptr.Pointer, matches1To2Size1, matches1To2Size2,
                                                              outImg.CvPtr, matchColor0, singlePointColor0,
                                                              matchesMaskPtr, matchesMaskSize1, matchesMaskSize2, (int)flags);
                    }
                }
            }
        }
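A minimal usage sketch, assuming the images, keypoints and k-nearest-neighbour matches (a DMatch[][], e.g. from a descriptor matcher's KnnMatch) were computed elsewhere, and that this static lives on Cv2:

        var output = new Mat();
        Cv2.DrawMatches(img1, keypoints1, img2, keypoints2, knnMatches, output);
        // match and single-point colors default to Scalar.All(-1), i.e. random colors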
Example #12
 /// <summary>
 /// computes per-element maximum of array and scalar (dst = max(src1, src2))
 /// </summary>
 /// <param name="src1"></param>
 /// <param name="src2"></param>
 /// <param name="dst"></param>
 public static void Max(Mat src1, double src2, Mat dst)
 {
     if (src1 == null)
         throw new ArgumentNullException("src1");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src1.ThrowIfDisposed();
     dst.ThrowIfDisposed();
     NativeMethods.core_max_MatDouble(src1.CvPtr, src2, dst.CvPtr);
 }
Example #13
#if LANG_JP
        /// <summary>
        /// SVMを最適なパラメータで学習する
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="param"></param>
        /// <param name="kFold">交差検定(Cross-validation)パラメータ.学習集合は,k_foldの部分集合に分割され,一つの部分集合がモデルの学習に用いられ,その他の部分集合はテスト集合となる.つまり,SVM アルゴリズムは,k_fold回実行される.</param>
        /// <param name="cGrid"></param>
        /// <param name="gammaGrid"></param>
        /// <param name="pGrid"></param>
        /// <param name="nuGrid"></param>
        /// <param name="coefGrid"></param>
        /// <param name="degreeGrid"></param>
        /// <param name="balanced"></param>
        /// <returns></returns>
#else
        /// <summary>
        /// Trains SVM with optimal parameters
        /// </summary>
        /// <param name="trainData"></param>
        /// <param name="responses"></param>
        /// <param name="varIdx"></param>
        /// <param name="sampleIdx"></param>
        /// <param name="param"></param>
        /// <param name="kFold">Cross-validation parameter. The training set is divided into k_fold subsets, one subset being used to train the model, the others forming the test set. So, the SVM algorithm is executed k_fold times. </param>
        /// <param name="cGrid"></param>
        /// <param name="gammaGrid"></param>
        /// <param name="pGrid"></param>
        /// <param name="nuGrid"></param>
        /// <param name="coefGrid"></param>
        /// <param name="degreeGrid"></param>
        /// <param name="balanced"></param>
        /// <returns></returns>
#endif
        public virtual bool TrainAuto(
            Mat trainData,
            Mat responses,
            Mat varIdx,
            Mat sampleIdx,
            CvSVMParams param,
            int kFold              = 10,
            CvParamGrid?cGrid      = null,
            CvParamGrid?gammaGrid  = null,
            CvParamGrid?pGrid      = null,
            CvParamGrid?nuGrid     = null,
            CvParamGrid?coefGrid   = null,
            CvParamGrid?degreeGrid = null,
            bool balanced          = false)
        {
            if (trainData == null)
            {
                throw new ArgumentNullException("trainData");
            }
            if (responses == null)
            {
                throw new ArgumentNullException("responses");
            }
            if (varIdx == null)
            {
                throw new ArgumentNullException("varIdx");
            }
            if (sampleIdx == null)
            {
                throw new ArgumentNullException("sampleIdx");
            }
            trainData.ThrowIfDisposed();
            responses.ThrowIfDisposed();
            varIdx.ThrowIfDisposed();
            sampleIdx.ThrowIfDisposed();

            if (param == null)
            {
                param = new CvSVMParams();
            }
            var cGrid0      = cGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.C));
            var gammaGrid0  = gammaGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Gamma));
            var pGrid0      = pGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.P));
            var nuGrid0     = nuGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Nu));
            var coefGrid0   = coefGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Coef));
            var degreeGrid0 = degreeGrid.GetValueOrDefault(GetDefaultGrid(SVMParamType.Degree));

            return(NativeMethods.ml_CvSVM_train_auto_CvMat(
                       ptr,
                       trainData.CvPtr,
                       responses.CvPtr,
                       varIdx.CvPtr,
                       sampleIdx.CvPtr,
                       param.NativeStruct,
                       kFold,
                       cGrid0,
                       gammaGrid0,
                       pGrid0,
                       nuGrid0,
                       coefGrid0,
                       degreeGrid0,
                       balanced ? 1 : 0) != 0);
        }
Example #14
        /// <summary>
        /// Builds a Gaussian scale-space pyramid from the base image (SIFT helper).
        /// </summary>
        /// <param name="baseMat">Base image of the pyramid.</param>
        /// <param name="nOctaves">Number of octaves to build.</param>
        /// <returns>The images that make up the pyramid.</returns>
        public Mat[] BuildGaussianPyramid(Mat baseMat, int nOctaves)
        {
            ThrowIfDisposed();
            if (baseMat == null)
                throw new ArgumentNullException("baseMat");
            baseMat.ThrowIfDisposed();

            using (VectorOfMat pyrVec = new VectorOfMat())
            {
                NativeMethods.nonfree_SIFT_buildGaussianPyramid(ptr, baseMat.CvPtr, pyrVec.CvPtr, nOctaves);
                return pyrVec.ToArray();
            }
        }
Example #15
        /// <summary>
        /// Find rectangular regions in the given image that are likely to contain objects of 
        /// loaded classes (models) and corresponding confidence levels.
        /// </summary>
        /// <param name="image">An image.</param>
        /// <param name="overlapThreshold">Threshold for the non-maximum suppression algorithm.</param>
        /// <param name="numThreads">Number of threads used in parallel version of the algorithm.</param>
        /// <returns>The detections: rectangulars, scores and class IDs.</returns>
        public virtual ObjectDetection[] Detect(Mat image,
            float overlapThreshold = 0.5f, int numThreads = -1)
        {
            if (disposed)
                throw new ObjectDisposedException("LatentSvmDetector");
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            using (var odVec = new VectorOfVec6d())
            {
                NativeMethods.objdetect_LatentSvmDetector_detect(
                    ptr, image.CvPtr, odVec.CvPtr, overlapThreshold, numThreads);

                return EnumerableEx.SelectToArray(odVec.ToArray(), v => 
                    new ObjectDetection
                    {
                        Rect = new Rect((int)v.Item0, (int)v.Item1, (int)v.Item2, (int)v.Item3),
                        Score = (float)v.Item4,
                        ClassId = (int)v.Item5
                    }
                );
            }
        }
Example #16
#if LANG_JP
        /// <summary>
        /// 枠だけの楕円,楕円弧,もしくは塗りつぶされた扇形の楕円を描画する
        /// </summary>
        /// <param name="img">楕円が描画される画像</param>
        /// <param name="center">楕円の中心</param>
        /// <param name="axes">楕円の軸の長さ</param>
        /// <param name="angle">回転角度</param>
        /// <param name="startAngle">楕円弧の開始角度</param>
        /// <param name="endAngle">楕円弧の終了角度</param>
        /// <param name="color">楕円の色</param>
        /// <param name="thickness">楕円弧の線の幅 [既定値は1]</param>
        /// <param name="lineType">楕円弧の線の種類 [既定値はLineType.Link8]</param>
        /// <param name="shift">中心座標と軸の長さの小数点以下の桁を表すビット数 [既定値は0]</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="center">Center of the ellipse. </param>
        /// <param name="axes">Length of the ellipse axes. </param>
        /// <param name="angle">Rotation angle. </param>
        /// <param name="startAngle">Starting angle of the elliptic arc. </param>
        /// <param name="endAngle">Ending angle of the elliptic arc. </param>
        /// <param name="color">Ellipse color. </param>
        /// <param name="thickness">Thickness of the ellipse arc. [By default this is 1]</param>
        /// <param name="lineType">Type of the ellipse boundary. [By default this is LineType.Link8]</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and axes' values. [By default this is 0]</param>
#endif
        public static void Ellipse(Mat img, Point center, Size axes, double angle, double startAngle, double endAngle, Scalar color,
            int thickness = 1, LineType lineType = LineType.Link8, int shift = 0)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();
            NativeMethods.core_ellipse(img.CvPtr, center, axes, angle, startAngle, endAngle, color, thickness, (int)lineType, shift);
        }
Example #17
#if LANG_JP
        /// <summary>
        /// 枠だけの楕円,もしくは塗りつぶされた楕円を描画する
        /// </summary>
        /// <param name="img">楕円が描かれる画像.</param>
        /// <param name="box">描画したい楕円を囲む矩形領域.</param>
        /// <param name="color">楕円の色.</param>
        /// <param name="thickness">楕円境界線の幅.[既定値は1]</param>
        /// <param name="lineType">楕円境界線の種類.[既定値はLineType.Link8]</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="box">The enclosing box of the ellipse drawn </param>
        /// <param name="color">Ellipse color. </param>
        /// <param name="thickness">Thickness of the ellipse boundary. [By default this is 1]</param>
        /// <param name="lineType">Type of the ellipse boundary. [By default this is LineType.Link8]</param>
#endif
        public static void Ellipse(Mat img, RotatedRect box, Scalar color, 
            int thickness = 1, LineType lineType = LineType.Link8)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();
            NativeMethods.core_ellipse(img.CvPtr, box, color, thickness, (int)lineType);
        }
Example #18
#if LANG_JP
        /// <summary>
        /// 円を描画する
        /// </summary>
        /// <param name="img">画像</param>
        /// <param name="center">円の中心</param>
        /// <param name="radius">円の半径</param>
        /// <param name="color">円の色</param>
        /// <param name="thickness">線の幅.負の値を指定した場合は塗りつぶされる.[既定値は1]</param>
        /// <param name="lineType">線の種類. [既定値はLineType.Link8]</param>
        /// <param name="shift">中心座標と半径の小数点以下の桁を表すビット数. [既定値は0]</param>
#else
        /// <summary>
        /// Draws a circle
        /// </summary>
        /// <param name="img">Image where the circle is drawn. </param>
        /// <param name="center">Center of the circle. </param>
        /// <param name="radius">Radius of the circle. </param>
        /// <param name="color">Circle color. </param>
        /// <param name="thickness">Thickness of the circle outline if positive, otherwise indicates that a filled circle has to be drawn. [By default this is 1]</param>
        /// <param name="lineType">Type of the circle boundary. [By default this is LineType.Link8]</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and radius value. [By default this is 0]</param>
#endif
        public static void Circle(Mat img, Point center, int radius, Scalar color, 
            int thickness = 1, LineType lineType = LineType.Link8, int shift = 0)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();
            NativeMethods.core_circle(img.CvPtr, center, radius, color, thickness, (int)lineType, shift);
        }
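A minimal sketch combining the three drawing calls above on a blank canvas; the Mat fill-constructor is assumed as in OpenCvSharp:

        var canvas = new Mat(400, 400, MatType.CV_8UC3, new Scalar(0, 0, 0));          // black image
        Cv2.Circle(canvas, new Point(200, 200), 80, new Scalar(0, 0, 255), 2);          // red circle outline
        Cv2.Ellipse(canvas, new Point(200, 200), new Size(150, 60), 30, 0, 360,
            new Scalar(0, 255, 0));                                                     // full green ellipse arc
        Cv2.Ellipse(canvas, new RotatedRect(new Point2f(200, 200), new Size2f(120, 40), -30),
            new Scalar(255, 0, 0), -1);                                                 // filled blue rotated ellipse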
Example #19
 /// <summary>
 /// Computes the absolute value of each matrix element.
 /// </summary>
 /// <param name="src">Source matrix.</param>
 /// <returns>A matrix expression representing abs(src).</returns>
 public static MatExpr Abs(Mat src)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     src.ThrowIfDisposed();
     IntPtr retPtr = NativeMethods.core_abs_Mat(src.CvPtr);
     return new MatExpr(retPtr);
 }
Example #20
 /// <summary>
 /// computes covariation matrix of a set of samples
 /// </summary>
 /// <param name="samples"></param>
 /// <param name="covar"></param>
 /// <param name="mean"></param>
 /// <param name="flags"></param>
 /// <param name="ctype"></param>
 public static void CalcCovarMatrix(Mat[] samples, Mat covar, Mat mean,
     CovarMatrixFlag flags, MatType ctype)
 {
     if (samples == null)
         throw new ArgumentNullException("samples");
     if (covar == null)
         throw new ArgumentNullException("covar");
     if (mean == null)
         throw new ArgumentNullException("mean");
     covar.ThrowIfDisposed();
     mean.ThrowIfDisposed();
     IntPtr[] samplesPtr = EnumerableEx.SelectPtrs(samples);
     NativeMethods.core_calcCovarMatrix_Mat(samplesPtr, samples.Length, covar.CvPtr, mean.CvPtr, (int)flags, ctype);
 }
Example #21
#if LANG_JP
        /// <summary>
        /// 入力サンプルに対する応答を予測する
        /// </summary>
        /// <param name="inputs">入力サンプル</param>
		/// <param name="outputs"></param>
        /// <returns></returns>
#else
        /// <summary>
        /// Predicts response for the input sample
        /// </summary>
        /// <param name="inputs">The input sample. </param>
        /// <param name="outputs"></param>
        /// <returns></returns>
#endif
        public float Predict(Mat inputs, Mat outputs)
        {
            if (inputs == null)
                throw new ArgumentNullException("inputs");
            if (outputs == null)
                throw new ArgumentNullException("outputs");
            inputs.ThrowIfDisposed();
            outputs.ThrowIfDisposed();

            return NativeMethods.ml_CvANN_MLP_predict_CvMat(ptr, inputs.CvPtr, outputs.CvPtr);
        }
Example #22
        /// <summary>
        /// computes dense optical flow using Simple Flow algorithm
        /// </summary>
        /// <param name="from">First 8-bit 3-channel image.</param>
        /// <param name="to">Second 8-bit 3-channel image</param>
        /// <param name="flow">Estimated flow</param>
        /// <param name="layers">Number of layers</param>
        /// <param name="averagingBlockSize">Size of block through which we sum up when calculate cost function for pixel</param>
        /// <param name="maxFlow">maximal flow that we search at each level</param>
        /// <param name="sigmaDist">vector smooth spatial sigma parameter</param>
        /// <param name="sigmaColor">vector smooth color sigma parameter</param>
        /// <param name="postprocessWindow">window size for postprocess cross bilateral filter</param>
        /// <param name="sigmaDistFix">spatial sigma for postprocess cross bilateralf filter</param>
        /// <param name="sigmaColorFix">color sigma for postprocess cross bilateral filter</param>
        /// <param name="occThr">threshold for detecting occlusions</param>
        /// <param name="upscaleAveragingRadius">window size for bilateral upscale operation</param>
        /// <param name="upscaleSigmaDist">spatial sigma for bilateral upscale operation</param>
        /// <param name="upscaleSigmaColor">color sigma for bilateral upscale operation</param>
        /// <param name="speedUpThr">threshold to detect point with irregular flow - where flow should be recalculated after upscale</param>
        public static void calcOpticalFlowSF(
            Mat from,
            Mat to,
            Mat flow,
            int layers,
            int averagingBlockSize,
            int maxFlow,
            double sigmaDist,
            double sigmaColor,
            int postprocessWindow,
            double sigmaDistFix,
            double sigmaColorFix,
            double occThr,
            int upscaleAveragingRadius,
            double upscaleSigmaDist,
            double upscaleSigmaColor,
            double speedUpThr)
        {
            if (from == null)
                throw new ArgumentNullException("from");
            if (to == null)
                throw new ArgumentNullException("to");
            if (flow == null)
                throw new ArgumentNullException("flow");
            from.ThrowIfDisposed();
            to.ThrowIfDisposed();
            flow.ThrowIfDisposed();

            NativeMethods.video_calcOpticalFlowSF2(
                from.CvPtr, to.CvPtr, flow.CvPtr,
                layers, averagingBlockSize, maxFlow,
                sigmaDist, sigmaColor, postprocessWindow, sigmaDistFix,
                sigmaColorFix, occThr, upscaleAveragingRadius,
                upscaleSigmaDist, upscaleSigmaColor, speedUpThr);
        }
Example #23
#if LANG_JP
        /// <summary>
        /// 塗りつぶされた凸ポリゴンを描きます.
        /// </summary>
        /// <param name="img">画像</param>
        /// <param name="pts">ポリゴンの頂点.</param>
        /// <param name="color">ポリゴンの色.</param>
        /// <param name="lineType">ポリゴンの枠線の種類,</param>
        /// <param name="shift">ポリゴンの頂点座標において,小数点以下の桁を表すビット数.</param>
#else
        /// <summary>
        /// Fills a convex polygon.
        /// </summary>
        /// <param name="img">Image</param>
        /// <param name="pts">The polygon vertices</param>
        /// <param name="color">Polygon color</param>
        /// <param name="lineType">Type of the polygon boundaries</param>
        /// <param name="shift">The number of fractional bits in the vertex coordinates</param>
#endif
        public static void FillConvexPoly(Mat img, IEnumerable<Point> pts, Scalar color, 
            LineType lineType = LineType.Link8, int shift = 0)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Point[] ptsArray = Util.ToArray(pts);
            NativeMethods.core_fillConvexPoly(img.CvPtr, ptsArray, ptsArray.Length, color, (int)lineType, shift);
        }
Example #24
 /// <summary>
 /// computes per-element minimum of two arrays (dst = min(src1, src2))
 /// </summary>
 /// <param name="src1"></param>
 /// <param name="src2"></param>
 /// <param name="dst"></param>
 public static void Min(Mat src1, Mat src2, Mat dst)
 {
     if (src1 == null)
         throw new ArgumentNullException("src1");
     if (src2 == null)
         throw new ArgumentNullException("src2");
     if (dst == null)
         throw new ArgumentNullException("dst");
     src1.ThrowIfDisposed();
     src2.ThrowIfDisposed();
     dst.ThrowIfDisposed();
     NativeMethods.core_min_MatMat(src1.CvPtr, src2.CvPtr, dst.CvPtr);
 }
Example #25
 /// <summary>
 /// Applies a perspective transformation to an image.
 /// </summary>
 /// <param name="src">Source image.</param>
 /// <param name="dst">Destination image; it has the size dsize and the same type as src.</param>
 /// <param name="m">3x3 transformation matrix.</param>
 /// <param name="dsize">Size of the destination image.</param>
 /// <param name="flags">Interpolation method.</param>
 /// <param name="borderMode">Pixel extrapolation method.</param>
 /// <param name="borderValue">Value used in case of a constant border.</param>
 public static void WarpPerspective(InputArray src, OutputArray dst, Mat m, Size dsize,
     Interpolation flags = Interpolation.Linear, BorderType borderMode = BorderType.Constant, Scalar? borderValue = null)
 {
     if (src == null)
         throw new ArgumentNullException("src");
     if (dst == null)
         throw new ArgumentNullException("dst");
     if (m == null)
         throw new ArgumentNullException("m");
     src.ThrowIfDisposed();
     dst.ThrowIfDisposed();
     m.ThrowIfDisposed();
     CvScalar borderValue0 = borderValue.GetValueOrDefault(CvScalar.ScalarAll(0));
     NativeMethods.imgproc_warpPerspective(src.CvPtr, dst.CvPtr, m.CvPtr, dsize, (int)flags, (int)borderMode, borderValue0);
     dst.Fix();
 }
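A minimal usage sketch, assuming a Mat 'src' loaded elsewhere; GetPerspectiveTransform from two point quadrilaterals is assumed as in OpenCvSharp, and the corner coordinates are made up:

     Point2f[] srcQuad = { new Point2f(0, 0), new Point2f(639, 0), new Point2f(639, 479), new Point2f(0, 479) };
     Point2f[] dstQuad = { new Point2f(40, 20), new Point2f(600, 10), new Point2f(620, 470), new Point2f(10, 450) };
     Mat m = Cv2.GetPerspectiveTransform(srcQuad, dstQuad);   // 3x3 homography mapping srcQuad onto dstQuad
     var warped = new Mat();
     Cv2.WarpPerspective(src, warped, m, new Size(640, 480));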
Example #26
#if LANG_JP
        /// <summary>
        /// 1つ,または複数のポリゴンで区切られた領域を塗りつぶします.
        /// </summary>
        /// <param name="img">画像</param>
        /// <param name="pts">ポリゴンの配列.各要素は,点の配列で表現されます.</param>
        /// <param name="color">ポリゴンの色.</param>
        /// <param name="lineType">ポリゴンの枠線の種類,</param>
        /// <param name="shift">ポリゴンの頂点座標において,小数点以下の桁を表すビット数.</param>
        /// <param name="offset"></param>
#else
        /// <summary>
        /// Fills the area bounded by one or more polygons
        /// </summary>
        /// <param name="img">Image</param>
        /// <param name="pts">Array of polygons, each represented as an array of points</param>
        /// <param name="color">Polygon color</param>
        /// <param name="lineType">Type of the polygon boundaries</param>
        /// <param name="shift">The number of fractional bits in the vertex coordinates</param>
        /// <param name="offset"></param>
#endif
        public static void FillPoly(Mat img, IEnumerable<IEnumerable<Point>> pts, Scalar color, 
            LineType lineType = LineType.Link8, int shift = 0, Point? offset = null)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();
            Point offset0 = offset.GetValueOrDefault(new Point());

            List<Point[]> ptsList = new List<Point[]>();
            List<int> nptsList = new List<int>();
            foreach (IEnumerable<Point> pts1 in pts)
            {
                Point[] pts1Arr = Util.ToArray(pts1);
                ptsList.Add(pts1Arr);
                nptsList.Add(pts1Arr.Length);
            }
            Point[][] ptsArr = ptsList.ToArray();
            int[] npts = nptsList.ToArray();
            int ncontours = ptsArr.Length;
            using (ArrayAddress2<Point> ptsPtr = new ArrayAddress2<Point>(ptsArr))
            {
                NativeMethods.core_fillPoly(img.CvPtr, ptsPtr.Pointer, npts, ncontours, color, (int)lineType, shift, offset0);
            }
        }
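A minimal usage sketch: fill a triangle and a square in a single call on a blank canvas, assuming this static lives on Cv2:

        var canvas = new Mat(300, 300, MatType.CV_8UC3, new Scalar(0, 0, 0));
        Point[] triangle = { new Point(50, 250), new Point(150, 50), new Point(250, 250) };
        Point[] square   = { new Point(20, 20), new Point(80, 20), new Point(80, 80), new Point(20, 80) };
        Cv2.FillPoly(canvas, new[] { triangle, square }, new Scalar(0, 255, 255));   // both polygons, same color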
Example #27
        /// <summary>
        /// computes dense optical flow using Simple Flow algorithm
        /// </summary>
        /// <param name="from">First 8-bit 3-channel image.</param>
        /// <param name="to">Second 8-bit 3-channel image</param>
        /// <param name="flow">Estimated flow</param>
        /// <param name="layers">Number of layers</param>
        /// <param name="averagingBlockSize">Size of block through which we sum up when calculate cost function for pixel</param>
        /// <param name="maxFlow">maximal flow that we search at each level</param>
        public static void CalcOpticalFlowSF(
            Mat from,
            Mat to,
            Mat flow,
            int layers,
            int averagingBlockSize,
            int maxFlow)
        {
            if (from == null)
                throw new ArgumentNullException("from");
            if (to == null)
                throw new ArgumentNullException("to");
            if (flow == null)
                throw new ArgumentNullException("flow");
            from.ThrowIfDisposed();
            to.ThrowIfDisposed();
            flow.ThrowIfDisposed();

            NativeMethods.video_calcOpticalFlowSF1(
                from.CvPtr, to.CvPtr, flow.CvPtr,
                layers, averagingBlockSize, maxFlow);
        }
Example #28
        /// <summary>
        /// Performs object detection without a multi-scale window.
        /// </summary>
        /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param>
        /// <param name="weights"></param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane. 
        /// Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient). 
        /// But if the free coefficient is omitted (which is allowed), you can specify it manually here.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="searchLocations"></param>
        /// <returns>Left-top corner points of detected objects boundaries.</returns>
        public virtual Point[] Detect(Mat img, out double[] weights, 
            double hitThreshold = 0, Size? winStride = null, Size? padding = null, Point[] searchLocations = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfPoint())
            using (var weightsVec = new VectorOfDouble())
            {
                int slLength = (searchLocations != null) ? searchLocations.Length : 0;
                NativeMethods.objdetect_HOGDescriptor_detect(ptr, img.CvPtr, flVec.CvPtr, weightsVec.CvPtr,
                    hitThreshold, winStride0, padding0, searchLocations, slLength);
                weights = weightsVec.ToArray();
                return flVec.ToArray();
            }
        }
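A minimal usage sketch, assuming an 8-bit Mat 'img' loaded elsewhere; GetDefaultPeopleDetector and SetSVMDetector are assumed to be available on HOGDescriptor as in OpenCvSharp:

        var hog = new HOGDescriptor();
        hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());   // built-in people-detector coefficients
        double[] weights;
        Point[] hits = hog.Detect(img, out weights, winStride: new Size(8, 8));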
Example #29
        /// <summary>
        /// draws one or more polygonal curves
        /// </summary>
        /// <param name="img"></param>
        /// <param name="pts"></param>
        /// <param name="isClosed"></param>
        /// <param name="color"></param>
        /// <param name="thickness"></param>
        /// <param name="lineType"></param>
        /// <param name="shift"></param>
        public static void Polylines(Mat img, IEnumerable<IEnumerable<Point>> pts, bool isClosed, Scalar color, 
            int thickness = 1, LineType lineType = LineType.Link8, int shift = 0)
        {
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            List<Point[]> ptsList = new List<Point[]>();
            List<int> nptsList = new List<int>();
            foreach (IEnumerable<Point> pts1 in pts)
            {
                Point[] pts1Arr = Util.ToArray(pts1);
                ptsList.Add(pts1Arr);
                nptsList.Add(pts1Arr.Length);
            }
            Point[][] ptsArr = ptsList.ToArray();
            int[] npts = nptsList.ToArray();
            int ncontours = ptsArr.Length;
            using (ArrayAddress2<Point> ptsPtr = new ArrayAddress2<Point>(ptsArr))
            {
                NativeMethods.core_polylines(img.CvPtr, ptsPtr.Pointer, npts, ncontours, isClosed ? 1 : 0, color, thickness, (int)lineType, shift);
            }
        }
Example #30
        /// <summary>
        /// Computes gradient magnitudes and quantized gradient orientations for the image.
        /// </summary>
        /// <param name="img">Source image.</param>
        /// <param name="grad">Output matrix of gradient magnitudes.</param>
        /// <param name="angleOfs">Output matrix of quantized gradient orientation bins.</param>
        /// <param name="paddingTL">Top-left padding.</param>
        /// <param name="paddingBR">Bottom-right padding.</param>
        public virtual void ComputeGradient(Mat img, Mat grad, Mat angleOfs, Size? paddingTL = null, Size? paddingBR = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            if (grad == null)
                throw new ArgumentNullException("grad");
            if (angleOfs == null)
                throw new ArgumentNullException("angleOfs");
            img.ThrowIfDisposed();
            grad.ThrowIfDisposed();
            angleOfs.ThrowIfDisposed();

            Size paddingTL0 = paddingTL.GetValueOrDefault(new Size());
            Size paddingBR0 = paddingBR.GetValueOrDefault(new Size());
            NativeMethods.objdetect_HOGDescriptor_computeGradient(ptr, img.CvPtr, grad.CvPtr, angleOfs.CvPtr, paddingTL0, paddingBR0);
        }
Example #31
        /// <summary>
        /// Performs object detection with a multi-scale window.
        /// </summary>
        /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param>
        /// <param name="foundWeights"></param>
        /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane.</param>
        /// <param name="winStride">Window stride. It must be a multiple of block stride.</param>
        /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param>
        /// <param name="scale">Coefficient of the detection window increase.</param>
        /// <param name="groupThreshold">Coefficient to regulate the similarity threshold. 
        /// When detected, some objects can be covered by many rectangles. 0 means not to perform grouping.</param>
        /// <returns>Detected objects boundaries.</returns>
        public virtual Rect[] DetectMultiScale(Mat img, out double[] foundWeights,
            double hitThreshold = 0, Size? winStride = null, Size? padding = null, double scale = 1.05, int groupThreshold = 2)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfRect())
            using (var foundWeightsVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectMultiScale(ptr, img.CvPtr, flVec.CvPtr, foundWeightsVec.CvPtr,
                    hitThreshold, winStride0, padding0, scale, groupThreshold);
                foundWeights = foundWeightsVec.ToArray();
                return flVec.ToArray();
            }
        }
Example #32
 /// <summary>
 /// renders text string in the image
 /// </summary>
 /// <param name="img"></param>
 /// <param name="text"></param>
 /// <param name="org"></param>
 /// <param name="fontFace"></param>
 /// <param name="fontScale"></param>
 /// <param name="color"></param>
 /// <param name="thickness"></param>
 /// <param name="lineType"></param>
 /// <param name="bottomLeftOrigin"></param>
 public static void PutText(Mat img, string text, Point org,
     FontFace fontFace, double fontScale, Scalar color,
     int thickness = 1, LineType lineType = LineType.Link8, bool bottomLeftOrigin = false) 
 {
     if (img == null)
         throw new ArgumentNullException("img");
     if (String.IsNullOrEmpty(text))
         throw new ArgumentNullException("text");
     img.ThrowIfDisposed();
     NativeMethods.core_putText(img.CvPtr, text, org, (int)fontFace, fontScale, color, 
         thickness, (int)lineType, bottomLeftOrigin ? 1 : 0);
 }
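A minimal usage sketch, assuming a Mat 'img' loaded elsewhere and that this static lives on Cv2:

     Cv2.PutText(img, "hello", new Point(20, 40), FontFace.HersheySimplex, 1.0,
         new Scalar(255, 255, 255), thickness: 2);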
Example #33
        /// <summary>
        /// evaluate specified ROI and return confidence value for each location
        /// </summary>
        /// <param name="img"></param>
        /// <param name="locations"></param>
        /// <param name="foundLocations"></param>
        /// <param name="confidences"></param>
        /// <param name="hitThreshold"></param>
        /// <param name="winStride"></param>
        /// <param name="padding"></param>
        public void DetectROI(
            Mat img, Point[] locations, out Point[] foundLocations, out double[] confidences,
            double hitThreshold = 0, Size? winStride = null, Size? padding = null)
        {
            if (disposed)
                throw new ObjectDisposedException("HOGDescriptor");
            if (img == null)
                throw new ArgumentNullException("img");
            if (locations == null)
                throw new ArgumentNullException("locations");
            img.ThrowIfDisposed();

            Size winStride0 = winStride.GetValueOrDefault(new Size());
            Size padding0 = padding.GetValueOrDefault(new Size());
            using (var flVec = new VectorOfPoint())
            using (var cVec = new VectorOfDouble())
            {
                NativeMethods.objdetect_HOGDescriptor_detectROI(ptr, img.CvPtr, locations, locations.Length,
                    flVec.CvPtr, cVec.CvPtr, hitThreshold, winStride0, padding0);
                foundLocations = flVec.ToArray();
                confidences = cVec.ToArray();
            }
        }
Example #34
        /// <summary>
        /// makes multi-channel array out of several single-channel arrays
        /// </summary>
        /// <param name="mv"></param>
        /// <param name="dst"></param>
        public static void Merge(Mat[] mv, Mat dst)
        {
            if (mv == null)
                throw new ArgumentNullException("mv");
            if (mv.Length == 0)
                throw new ArgumentException("mv.Length == 0");
            if (dst == null)
                throw new ArgumentNullException("dst");
            foreach (Mat m in mv)
            {
                if(m == null)
                    throw new ArgumentException("mv contains null element");
                m.ThrowIfDisposed();
            }
            dst.ThrowIfDisposed();

            var mvPtr = new IntPtr[mv.Length];
            for (int i = 0; i < mv.Length; i++)
            {
                mvPtr[i] = mv[i].CvPtr;
            }
            NativeMethods.core_merge(mvPtr, (uint)mvPtr.Length, dst.CvPtr);
        }
Example #35
#if LANG_JP
        /// <summary>
        /// StarDetectorアルゴリズムによりキーポイントを取得する
        /// </summary>
        /// <param name="image">8ビット グレースケールの入力画像</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Retrieves keypoints using the StarDetector algorithm.
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image</param>
        /// <returns></returns>
#endif
        public KeyPoint[] Run(Mat image)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            IntPtr keypoints;
            NativeMethods.features2d_StarDetector_detect(ptr, image.CvPtr, out keypoints);

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                return keypointsVec.ToArray();
            }
        }
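A minimal usage sketch, assuming an 8-bit grayscale Mat 'gray' loaded elsewhere and a StarDetector constructed with its default parameters:

        var star = new StarDetector();
        KeyPoint[] keypoints = star.Run(gray);
        Console.WriteLine("{0} keypoints detected", keypoints.Length);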
Example #36
        /// <summary>
        /// Copies each plane of a multi-channel array to a dedicated array
        /// </summary>
        /// <param name="src"></param>
        /// <param name="mv"></param>
        public static void Split(Mat src, out Mat[] mv)
        {
            if (src == null)
                throw new ArgumentNullException("src");
            src.ThrowIfDisposed();

            IntPtr mvPtr;
            NativeMethods.core_split(src.CvPtr, out mvPtr);

            using (var vec = new VectorOfMat(mvPtr))
            {
                mv = vec.ToArray();
            }
        }
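A minimal round-trip sketch combining Split with Merge from example #34, assuming a 3-channel Mat 'src' loaded elsewhere and that both statics live on Cv2:

        Mat[] planes;
        Cv2.Split(src, out planes);
        Mat tmp = planes[0]; planes[0] = planes[2]; planes[2] = tmp;   // swap the first and last channels
        var swapped = new Mat();
        Cv2.Merge(planes, swapped);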