Example #1
 public CvLevMarq()
 {
     Mask = PrevParam = Param = J = Err = JtJ = JtJN = JtErr = JtJV = JtJW = null;
     LambdaLg10 = 0; 
     State = LevMarqState.Done;
     Criteria = new CvTermCriteria(0, 0);
     Iters = 0;
     CompleteSymmFlag = false;
 }
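A quick note on the type these examples revolve around: CvTermCriteria bundles an iteration limit and/or an accuracy threshold. A minimal sketch of the constructor forms that appear throughout this page (the numeric values here are illustrative only):

 // Iteration + epsilon in one shot: stop after 100 iterations or once the change drops below 0.001.
 CvTermCriteria both = new CvTermCriteria(100, 0.001);
 // Explicitly choose which condition(s) apply via CriteriaType flags.
 CvTermCriteria epsOnly  = new CvTermCriteria(CriteriaType.Epsilon, 1000, 1e-6);
 CvTermCriteria combined = new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 30, 0.01);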
Example #2
    // Use this for initialization
    void Start()
    {
        cascade = CvHaarClassifierCascade.FromFile(@"./Assets/haarcascade_frontalface_alt.xml");
        capture = Cv.CreateCameraCapture(0);
        Cv.SetCaptureProperty(capture, CaptureProperty.FrameWidth, CAPTURE_WIDTH);
        Cv.SetCaptureProperty(capture, CaptureProperty.FrameHeight, CAPTURE_HEIGHT);
        IplImage frame = Cv.QueryFrame(capture);
        Cv.NamedWindow("FaceDetect");

        CvSVM svm = new CvSVM();
        CvTermCriteria criteria = new CvTermCriteria(CriteriaType.Epsilon, 1000, double.Epsilon);
        CvSVMParams param = new CvSVMParams(CvSVM.C_SVC, CvSVM.RBF, 10.0, 8.0, 1.0, 10.0, 0.5, 0.1, null, criteria);
    }
Example #3
        void Awake()
        {
            _cap = new CvCapture(0);

            _capImage = _cap.QueryFrame();
            _capRgbImage = new IplImage(_capImage.Width, _capImage.Height, BitDepth.U8, 3);
            Debug.Log(string.Format("Capture info: size={0}", _capImage.Size));
            _capGrayImage0 = new IplImage(_capImage.Size, BitDepth.U8, 1);
            _capGrayImage1 = new IplImage(_capImage.Size, BitDepth.U8, 1);
            _pyramidImage0 = new IplImage(new CvSize(_capImage.Width + 8, _capImage.Height/3), BitDepth.U8, 1);
            _pyramidImage1 = new IplImage(new CvSize(_capImage.Width + 8, _capImage.Height/3), BitDepth.U8, 1);
            _eigImage = new IplImage(_capImage.Size, BitDepth.F32, 1);
            _tmpImage = new IplImage(_capImage.Size, BitDepth.F32, 1);
            Cv.ConvertImage(_capImage, _capGrayImage0, 0);
            width = _capImage.Width;
            height = _capImage.Height;

            _opticalFlowWinSize = new CvSize(opticalFlowWinSize, opticalFlowWinSize);
            _opticalFlowCrit = new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, ofCritIterations, ofCritError);

            _prevTime = _currTime = UnityEngine.Time.time;
        }
Example #4
 public static extern int cvKMeans2(IntPtr samples, int cluster_count, IntPtr labels, CvTermCriteria termcrit,
     int attempts, ref UInt64 rng, [MarshalAs(UnmanagedType.I4)] KMeansFlag flags, IntPtr _centers, out double compactness);
Example #5
 public static extern void cvStereoCalibrate(IntPtr object_points, IntPtr image_points1, IntPtr image_points2, IntPtr npoints,
                        IntPtr camera_matrix1, IntPtr dist_coeffs1, IntPtr camera_matrix2, IntPtr dist_coeffs2,
                        CvSize image_size, IntPtr R, IntPtr T, IntPtr E, IntPtr F,
                        CvTermCriteria term_crit, CalibrationFlag flags);
Example #6
 public static extern void cvPyrMeanShiftFiltering(IntPtr src, IntPtr dst, double sp, double sr, int max_level, CvTermCriteria termcrit);
Example #7
 public static extern void cvCalcEigenObjects(int nObjects, CvCallback input, CvCallback output, [MarshalAs(UnmanagedType.I4)] EigenObjectsIOFlag ioFlags,
                  int ioBufSize, IntPtr userData, ref CvTermCriteria calcLimit, IntPtr avg, [In] float[] eigVals);
Example #8
 public static extern void cvSnakeImage(IntPtr image, IntPtr points, int length,
     [MarshalAs(UnmanagedType.LPArray)] float[] alpha, [MarshalAs(UnmanagedType.LPArray)] float[] beta, [MarshalAs(UnmanagedType.LPArray)] float[] gamma,
     int coeffUsage, CvSize win, CvTermCriteria criteria, [MarshalAs(UnmanagedType.Bool)] bool calc_gradient);
Example #9
 public void Init(int nparams, int nerrs, CvTermCriteria criteria)
 {
     Init(nparams, nerrs, criteria, false);
 }
Example #10
 public static extern int cvMeanShift(IntPtr prob_image, CvRect window, CvTermCriteria criteria, IntPtr comp);
Example #11
        /// <summary>
        /// Updates the snake so that the total of its internal and external energy is minimized.
        /// The internal energy depends on the contour shape (the smoother the contour, the smaller the internal energy);
        /// the external energy depends on the energy field and, for an image gradient, is minimal at local minima that correspond to image edges.
        /// </summary>
        /// <param name="points">Contour points (snake). The computation starts from these points and the refined contour is written back to the same array.</param>
        /// <param name="alpha">Weight of continuity energy, applied to every contour point.</param>
        /// <param name="beta">Weight of curvature energy, similar to alpha.</param>
        /// <param name="gamma">Weight of image energy, similar to alpha.</param>
        /// <param name="win">Size of the neighborhood of every point used to search the minimum; both win.Width and win.Height must be odd.</param>
        /// <param name="criteria">Termination criteria.</param>
        public void SnakeImage(CvPoint[] points, float alpha, float beta, float gamma, CvSize win, CvTermCriteria criteria)
        {
            Cv.SnakeImage(this, points, alpha, beta, gamma, win, criteria);
        }
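A minimal usage sketch for the overload above (not taken from the library sources): it assumes a plain 8-bit, single-channel image and an initial circular contour; the weights, window size and criteria values are illustrative only.

 using (IplImage gray = new IplImage(320, 240, BitDepth.U8, 1))
 {
     // Initial snake: 40 points on a circle around the image center.
     CvPoint[] snake = new CvPoint[40];
     for (int i = 0; i < snake.Length; i++)
     {
         double angle = 2 * Cv.PI * i / snake.Length;
         snake[i] = new CvPoint(160 + (int)(90 * Math.Cos(angle)), 120 + (int)(90 * Math.Sin(angle)));
     }
     // Stop after 100 iterations or once the update becomes smaller than 0.1; the 15x15 window must have odd sides.
     CvTermCriteria crit = new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 100, 0.1);
     gray.SnakeImage(snake, 0.45f, 0.35f, 0.2f, new CvSize(15, 15), crit);
 }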
Example #12
        /// <summary>
        /// Implements the POSIT algorithm.
        /// </summary>
        /// <param name="image_points">Projections of the object points onto the 2D image plane.</param>
        /// <param name="focal_length">Focal length of the camera used.</param>
        /// <param name="criteria">Termination criteria of the iterative POSIT algorithm.</param>
        /// <param name="rotation_matrix">Rotation matrix.</param>
        /// <param name="translation_vector">Translation vector.</param>
        public void POSIT(CvPoint2D32f[] image_points, double focal_length,
              CvTermCriteria criteria, out float[,] rotation_matrix, out float[] translation_vector)
        {
            Cv.POSIT(this, image_points, focal_length, criteria, out rotation_matrix, out translation_vector);
        }
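A hedged usage sketch for the overload above. It assumes the method lives on the POSIT-object wrapper (CvPOSITObject, mirroring the native cvCreatePOSITObject) and that the wrapper can be built directly from the 3D model points; all coordinates and the focal length are placeholder values.

 // Non-coplanar 4-point model in object coordinates; placeholder image measurements in the same order.
 CvPoint3D32f[] modelPoints =
 {
     new CvPoint3D32f(0, 0, 0), new CvPoint3D32f(10, 0, 0),
     new CvPoint3D32f(0, 10, 0), new CvPoint3D32f(0, 0, 10),
 };
 CvPoint2D32f[] imagePoints =
 {
     new CvPoint2D32f(160, 120), new CvPoint2D32f(221, 118),
     new CvPoint2D32f(158, 62),  new CvPoint2D32f(163, 126),
 };
 using (CvPOSITObject positObject = new CvPOSITObject(modelPoints))
 {
     float[,] rotation;
     float[] translation;
     // Iterate at most 100 times, or until the pose update falls below 1e-5.
     CvTermCriteria crit = new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 100, 1e-5);
     positObject.POSIT(imagePoints, 760.0, crit, out rotation, out translation);
 }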
Example #13
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of clusterCount clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="clusterCount">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <returns></returns>
        public static int KMeans2(CvArr samples, int clusterCount, CvArr labels, CvTermCriteria termcrit)
        {
            double compactness;

            return(KMeans2(samples, clusterCount, labels, termcrit, 1, null, KMeansFlag.Zero, null, out compactness));
        }
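A minimal usage sketch for the overload above. It assumes the static KMeans2 overloads live on the Cv facade class (as in OpenCvSharp) and that CvMat(rows, cols, type) allocates a new matrix; the data is random and purely illustrative.

 const int N = 100, Dims = 2, K = 3;
 float[] data = new float[N * Dims];
 Random rand = new Random();
 for (int i = 0; i < data.Length; i++)
     data[i] = (float)rand.NextDouble();

 using (CvMat samples = new CvMat(N, Dims, MatrixType.F32C1, data))
 using (CvMat labels = new CvMat(N, 1, MatrixType.S32C1))
 {
     // At most 10 iterations, or stop once the centers move by less than 0.01 between iterations.
     Cv.KMeans2(samples, K, labels, new CvTermCriteria(10, 0.01));
 }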
Example #14
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of clusterCount clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="clusterCount">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <param name="attemps">Number of times the algorithm is run with different initial labellings.</param>
        /// <param name="rng">Random number generator used to produce the initial labellings.</param>
        /// <param name="flag">Operation flags.</param>
        /// <returns></returns>
        public static int KMeans2(CvArr samples, int clusterCount, CvArr labels, CvTermCriteria termcrit, int attemps, CvRNG rng, KMeansFlag flag)
        {
            double compactness;

            return(KMeans2(samples, clusterCount, labels, termcrit, attemps, rng, flag, null, out compactness));
        }
Example #15
 public void SnakeImage(CvPoint[] points, float[] alpha, float[] beta, float[] gamma, CvSize win,
                        CvTermCriteria criteria, bool calc_gradient);
Example #16
 public CvLevMarq(int nparams, int nerrs, CvTermCriteria criteria, bool completeSymmFlag)
 {
     Mask = PrevParam = Param = J = Err = JtJ = JtJN = JtErr = JtJV = JtJW = null;
     Init(nparams, nerrs, criteria, completeSymmFlag);
 }
Example #17
 public CvLevMarq(int nparams, int nerrs, CvTermCriteria criteria)
     : this(nparams, nerrs, criteria, false)
 {
 }
Example #18
 public void SnakeImage(CvPoint[] points, float[] alpha, float[] beta, float[] gamma, CvSize win,
                        CvTermCriteria criteria);
Example #19
 public static extern void cvCalcOpticalFlowPyrLK(IntPtr prev, IntPtr curr, IntPtr prev_pyr, IntPtr curr_pyr,
     [In] CvPoint2D32f[] prev_features, [Out] CvPoint2D32f[] curr_features, int count, CvSize winSize,
     int level, [Out] sbyte[] status, [Out] float[] track_error, CvTermCriteria criteria, [MarshalAs(UnmanagedType.I4)] LKFlowFlag flags);
Example #20
        /// <summary>
        /// SVM
        /// </summary>
        /// <param name="dataFilename"></param>
        /// <param name="filenameToSave"></param>
        /// <param name="filenameToLoad"></param>
        private void BuildSvmClassifier(string dataFilename, string filenameToSave, string filenameToLoad)
        {
            // Parameter for C_SVC
            const float SvmC = 1000;
            // Parameter for the RBF kernel
            const float SvmGamma = 0.1f;

            CvMat data = null;
            CvMat responses = null;
            CvMat sampleIdx = null;

            int nsamplesAll = 0, ntrainSamples = 0;
            double trainHr = 0, testHr = 0;

            CvSVM svm = new CvSVM();
            CvTermCriteria criteria = new CvTermCriteria(100, 0.001);

            try
            {
                ReadNumClassData(dataFilename, 16, out data, out responses);
            }
            catch
            {
                Console.WriteLine("Could not read the database {0}", dataFilename);
                return;
            }
            Console.WriteLine("The database {0} is loaded.", dataFilename);

            nsamplesAll = data.Rows;
            ntrainSamples = (int)(nsamplesAll * 0.2);

            // Create or load the SVM classifier
            if (filenameToLoad != null)
            {
                // load classifier from the specified file
                svm.Load(filenameToLoad);
                ntrainSamples = 0;
                if (svm.GetSupportVectorCount() == 0)
                {
                    Console.WriteLine("Could not read the classifier {0}", filenameToLoad);
                    return;
                }
                Console.WriteLine("The classifier {0} is loaded.", filenameToLoad);
            }
            else
            {
                // create classifier by using <data> and <responses>
                Console.Write("Training the classifier ...");

                // 2. create sample_idx
                sampleIdx = new CvMat(1, nsamplesAll, MatrixType.U8C1);
                {
                    CvMat mat;
                    Cv.GetCols(sampleIdx, out mat, 0, ntrainSamples);
                    mat.Set(CvScalar.RealScalar(1));

                    Cv.GetCols(sampleIdx, out mat, ntrainSamples, nsamplesAll);
                    mat.SetZero();
                }

                // 3. train classifier
                // Parameters that are not used by the chosen method/kernel can be left as 0,
                // and the class weights can be null.
                svm.Train(data, responses, null, sampleIdx, new CvSVMParams(CvSVM.C_SVC, CvSVM.RBF, 0, SvmGamma, 0, SvmC, 0, 0, null, criteria));
                Console.WriteLine();
            }

            
            // compute prediction error on train and test data            
            for (int i = 0; i < nsamplesAll; i++)
            {
                double r;
                CvMat sample;
                Cv.GetRow(data, out sample, i);

                r = svm.Predict(sample);
                // compare results
                Console.WriteLine(
                    "predict: {0}, responses: {1}, {2}",
                    (char)r,
                    (char)responses.DataArraySingle[i],
                    Math.Abs((double)r - responses.DataArraySingle[i]) <= float.Epsilon ? "Good!" : "Bad!"
                );
                r = Math.Abs((double)r - responses.DataArraySingle[i]) <= float.Epsilon ? 1 : 0;

                if (i < ntrainSamples)
                    trainHr += r;
                else
                    testHr += r;
            }

            testHr /= (double)(nsamplesAll - ntrainSamples);
            trainHr /= (double)ntrainSamples;
            Console.WriteLine("Gamma={0:F5}, C={1:F5}", SvmGamma, SvmC);
            if (filenameToLoad != null)
            {
                Console.WriteLine("Recognition rate: test = {0:F1}%", testHr * 100.0);
            }
            else
            {
                Console.WriteLine("Recognition rate: train = {0:F1}%, test = {1:F1}%", trainHr * 100.0, testHr * 100.0);
            }

            Console.WriteLine("Number of Support Vector: {0}", svm.GetSupportVectorCount());
            // Save SVM classifier to file if needed
            if (filenameToSave != null)
            {
                svm.Save(filenameToSave);
            }


            Console.Read();


            if (sampleIdx != null) sampleIdx.Dispose();
            data.Dispose();
            responses.Dispose();
            svm.Dispose();
        }
Example #21
        /// <summary>
        /// Implements the POSIT algorithm.
        /// </summary>
        /// <param name="imagePoints">Projections of the object points onto the 2D image plane.</param>
        /// <param name="focalLength">Focal length of the camera used.</param>
        /// <param name="criteria">Termination criteria of the iterative POSIT algorithm.</param>
        /// <param name="rotationMatrix">Rotation matrix.</param>
        /// <param name="translationVector">Translation vector.</param>
        public void POSIT(CvPoint2D32f[] imagePoints, double focalLength,
                          CvTermCriteria criteria, out float[,] rotationMatrix, out float[] translationVector)
        {
            Cv.POSIT(this, imagePoints, focalLength, criteria, out rotationMatrix, out translationVector);
        }
Example #22
        /// <summary>
        /// Updates the snake so that the total of its internal and external energy is minimized.
        /// The internal energy depends on the contour shape (the smoother the contour, the smaller the internal energy);
        /// the external energy depends on the energy field and, for an image gradient, is minimal at local minima that correspond to image edges.
        /// </summary>
        /// <param name="points">Contour points (snake). The computation starts from these points and the refined contour is written back to the same array.</param>
        /// <param name="alpha">Weights of continuity energy: a floating-point array of the same length as points, one element per contour point.</param>
        /// <param name="beta">Weights of curvature energy, similar to alpha.</param>
        /// <param name="gamma">Weights of image energy, similar to alpha.</param>
        /// <param name="win">Size of the neighborhood of every point used to search the minimum; both win.Width and win.Height must be odd.</param>
        /// <param name="criteria">Termination criteria.</param>
        /// <param name="calcGradient">Gradient flag. If true, the function calculates the gradient magnitude for every image pixel and treats it as the energy field; otherwise the input image itself is treated as the energy field.</param>
        public void SnakeImage(CvPoint[] points, float[] alpha, float[] beta, float[] gamma, CvSize win, CvTermCriteria criteria, bool calcGradient)
        {
            Cv.SnakeImage(this, points, alpha, beta, gamma, win, criteria, calcGradient);
        }
Example #23
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of clusterCount clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="samplesType">Element type of the samples matrix.</param>
        /// <param name="clusterCount">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <returns></returns>
        public static int KMeans2(Array samples, MatrixType samplesType, int clusterCount, int[] labels, CvTermCriteria termcrit)
        {
            double compactness;
            return KMeans2(samples, samplesType, clusterCount, labels, termcrit, 1, null, KMeansFlag.Zero, null, out compactness);
        }
Example #24
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of clusterCount clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="samplesType">Element type of the samples matrix.</param>
        /// <param name="clusterCount">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <param name="attemps">Number of times the algorithm is run with different initial labellings.</param>
        /// <param name="rng">Random number generator used to produce the initial labellings.</param>
        /// <param name="flag">Operation flags.</param>
        /// <param name="centers">Optional output array of cluster centers, one row per cluster.</param>
        /// <returns></returns>
        public static int KMeans2(Array samples, MatrixType samplesType, int clusterCount, int[] labels, CvTermCriteria termcrit, int attemps, CvRNG rng, KMeansFlag flag, CvArr centers)
        {
            double compactness;
            return KMeans2(samples, samplesType, clusterCount, labels, termcrit, attemps, rng, flag, centers, out compactness);
        }
Example #25
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of clusterCount clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="samplesType">Element type of the samples matrix.</param>
        /// <param name="clusterCount">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <param name="attemps">Number of times the algorithm is run with different initial labellings.</param>
        /// <param name="rng">Random number generator used to produce the initial labellings.</param>
        /// <param name="flag">Operation flags.</param>
        /// <param name="centers">Optional output array of cluster centers, one row per cluster.</param>
        /// <param name="compactness">Returns the sum of squared distances between each sample and the center of its cluster.</param>
        /// <returns></returns>
        public static int KMeans2(Array samples, MatrixType samplesType, int clusterCount, int[] labels, CvTermCriteria termcrit, int attemps, CvRNG rng, KMeansFlag flag, CvArr centers, out double compactness)
        {
            if (samples == null)
                throw new ArgumentNullException("samples");

            using (CvMat samplesMat = new CvMat(labels.Length, 1, samplesType, samples, false))
            using (CvMat labelsMat = new CvMat(labels.Length, 1, MatrixType.S32C1, labels, false))
            {
                return KMeans2(samplesMat, clusterCount, labelsMat, termcrit, attemps, rng, flag, centers, out compactness);
            }
        }
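Because this overload wraps the raw Array in a labels.Length x 1 matrix of the given samplesType, multi-dimensional samples have to be packed as a multi-channel element type. A hedged sketch (random data, illustrative values only), again assuming the overloads live on the Cv class:

 int n = 50, k = 3;
 float[] points = new float[n * 2];          // interleaved x0, y0, x1, y1, ... -> one F32C2 element per sample
 int[] labels = new int[n];
 Random rand = new Random();
 for (int i = 0; i < points.Length; i++)
     points[i] = (float)rand.NextDouble();

 double compactness;
 Cv.KMeans2(points, MatrixType.F32C2, k, labels, new CvTermCriteria(10, 0.01),
            5, new CvRNG((ulong)DateTime.Now.Ticks), KMeansFlag.Zero, null, out compactness);
 // compactness: sum of squared distances from each sample to its cluster center.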
Example #26
 public static extern int cvCamShift(IntPtr prob_image, CvRect window, CvTermCriteria criteria, IntPtr comp, ref CvBox2D box);
Example #27
        /// <summary>
        /// Iterates to find the sub-pixel accurate locations of corners (radial saddle points).
        /// </summary>
        /// <param name="image">Input image.</param>
        /// <param name="corners">Initial coordinates of the input corners; refined coordinates on output.</param>
        /// <param name="count">Number of corners.</param>
        /// <param name="win">Half size of the search window. For example, win=(5,5) means a (5*2+1) x (5*2+1) = 11 x 11 search window is used.</param>
        /// <param name="zeroZone">Half size of the dead region in the middle of the search zone over which the summation is not done. It is sometimes used to avoid possible singularities of the autocorrelation matrix. A value of (-1,-1) indicates that there is no such region.</param>
        /// <param name="criteria">Criteria for termination of the iterative corner refinement: the process stops either after the given number of iterations or when the required accuracy is achieved. The criteria may specify either or both of the maximum number of iterations and the required accuracy.</param>
        public static void FindCornerSubPix(CvArr image, CvPoint2D32f[] corners, int count, CvSize win, CvSize zeroZone, CvTermCriteria criteria)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (corners == null)
                throw new ArgumentNullException("corners");

            NativeMethods.cvFindCornerSubPix(image.CvPtr, corners, count, win, zeroZone, criteria);
        }
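A minimal usage sketch of the method above (illustrative values only). It assumes gray is an already-loaded 8-bit, single-channel image; the rough corner coordinates would normally come from a detector such as FindChessboardCorners, as in the chessboard example further below.

 CvPoint2D32f[] corners =
 {
     new CvPoint2D32f(100.0f, 80.0f),
     new CvPoint2D32f(215.0f, 143.0f),
 };
 // 5x5 half-window -> an 11x11 search window; (-1,-1) means no dead zone in the middle.
 CvTermCriteria crit = new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 30, 0.01);
 Cv.FindCornerSubPix(gray, corners, corners.Length, new CvSize(5, 5), new CvSize(-1, -1), crit);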
Example #28
 public static extern void cvCalcOpticalFlowHS(IntPtr prev, IntPtr curr, [MarshalAs(UnmanagedType.Bool)] bool use_previous,
     IntPtr velx, IntPtr vely, double lambda, CvTermCriteria criteria);
Example #29
        /// <summary>
        /// Finds the corners in the chessboard image set via SetImage
        /// </summary>
        /// <param name="corners">The corners that were found</param>
        /// <returns>Indicates whether the found corners match the CornersPattern pattern</returns>
        public bool FindCorners(out CvPoint2D32f[] corners)
        {
            bool result;
            int numCorners;
            ChessboardFlag flags = ChessboardFlag.AdaptiveThresh | ChessboardFlag.NormalizeImage | ChessboardFlag.FilterQuads;
            CvTermCriteria criteria = new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, MaxIterations, Epsilon);

            // Find the corners
            result = chessBoard.FindChessboardCorners(CornersPattern, out corners, out numCorners, flags);

            // Refine the corner positions
            chessBoard.CvtColor(grayChessBoard, ColorConversion.RgbToGray);
            grayChessBoard.FindCornerSubPix(corners, corners.Length, new CvSize(11, 11), new CvSize(-1, -1), criteria);

            return result;
        }
Example #30
 public static extern IntPtr cvContourFromContourTree(IntPtr tree, IntPtr storage, CvTermCriteria criteria);
Example #31
        /// <summary>
        /// Restores a contour from its tree representation.
        /// </summary>
        /// <param name="storage">Container for the reconstructed contour.</param>
        /// <param name="criteria">Criteria determining when to stop the reconstruction.</param>
        /// <returns></returns>
        public CvSeq ContourFromContourTree(CvMemStorage storage, CvTermCriteria criteria)
        {
            return Cv.ContourFromContourTree(this, storage, criteria);
        }
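A hedged usage sketch for the method above: it assumes tree is a CvContourTree that was previously built from a contour (cvCreateContourTree on the native side) and that storage is an existing CvMemStorage; the criteria value is illustrative.

 // Iteration-only criterion: bound how long the reconstruction runs before stopping.
 CvTermCriteria crit = new CvTermCriteria(CriteriaType.Iteration, 5, 0);
 CvSeq restored = tree.ContourFromContourTree(storage, crit);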
Example #32
 public static extern void cvPOSIT(IntPtr posit_object, [MarshalAs(UnmanagedType.LPArray)]  CvPoint2D32f[] image_points, double focal_length,
     CvTermCriteria criteria, [MarshalAs(UnmanagedType.LPArray)] float[,] rotation_matrix, [MarshalAs(UnmanagedType.LPArray)] float[] translation_vector);
Example #33
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of cluster_count clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="cluster_count">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <param name="attemps">Number of times the algorithm is run with different initial labellings.</param>
        /// <returns></returns>
        public static int KMeans2(CvArr samples, int cluster_count, CvArr labels, CvTermCriteria termcrit, int attemps)
        {
            double compactness;
            return KMeans2(samples, cluster_count, labels, termcrit, attemps, null, KMeansFlag.Zero, null, out compactness);
        }
Example #34
 public static extern void cvFindCornerSubPix(IntPtr image, IntPtr[] corners, int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria);
Example #35
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of cluster_count clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="cluster_count">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <param name="attemps">Number of times the algorithm is run with different initial labellings.</param>
        /// <param name="rng">Random number generator used to produce the initial labellings.</param>
        /// <param name="flag">Operation flags.</param>
        /// <param name="_centers">Optional output array of cluster centers, one row per cluster.</param>
        /// <returns></returns>
        public static int KMeans2(CvArr samples, int cluster_count, CvArr labels, CvTermCriteria termcrit, int attemps, CvRNG rng, KMeansFlag flag, CvArr _centers)
        {
            double compactness;
            return KMeans2(samples, cluster_count, labels, termcrit, attemps, rng, flag, _centers, out compactness);
        }
Example #36
 public static extern CvTermCriteria cvCheckTermCriteria(CvTermCriteria criteria, double default_eps, int default_max_iters);
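cvCheckTermCriteria validates a criteria value and substitutes the supplied defaults for whichever condition the caller did not set, so downstream loops can always test both the iteration count and the epsilon. A minimal C# re-implementation of that behavior (an illustration of the semantics, not the P/Invoke wrapper above; it assumes the managed struct exposes Type, MaxIter and Epsilon):

 static CvTermCriteria NormalizeCriteria(CvTermCriteria criteria, double defaultEps, int defaultMaxIters)
 {
     // Keep the caller's values where the corresponding flag is set, otherwise fall back to the defaults.
     int maxIter = (criteria.Type & CriteriaType.Iteration) != 0 ? criteria.MaxIter : defaultMaxIters;
     double eps  = (criteria.Type & CriteriaType.Epsilon)   != 0 ? criteria.Epsilon : defaultEps;
     return new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, maxIter, eps);
 }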
Example #37
        /// <summary>
        /// Splits a set of vectors into a given number of clusters.
        /// Implements the k-means algorithm, which finds the centers of cluster_count clusters and groups the input samples around them.
        /// On output, labels(i) contains the index of the cluster to which the sample in the i-th row of samples belongs.
        /// </summary>
        /// <param name="samples">Floating-point matrix of input samples, one row per sample.</param>
        /// <param name="cluster_count">Number of clusters to split the set by.</param>
        /// <param name="labels">Output integer vector storing cluster indices for every sample.</param>
        /// <param name="termcrit">Specifies the maximum number of iterations and/or the accuracy (distance the centers move by between subsequent iterations).</param>
        /// <param name="attemps">Number of times the algorithm is run with different initial labellings.</param>
        /// <param name="rng">Random number generator used to produce the initial labellings.</param>
        /// <param name="flag">Operation flags.</param>
        /// <param name="_centers">Optional output array of cluster centers, one row per cluster.</param>
        /// <param name="compactness">Returns the sum of squared distances between each sample and the center of its cluster.</param>
        /// <returns></returns>
        public static int KMeans2(CvArr samples, int cluster_count, CvArr labels, CvTermCriteria termcrit, int attemps, CvRNG rng, KMeansFlag flag, CvArr _centers, out double compactness)
        {
            if (samples == null)
                throw new ArgumentNullException("samples");
            if (labels == null)
                throw new ArgumentNullException("labels");

            if (rng == null)
            {
                rng = new CvRNG();
            }
            IntPtr centersPtr = (_centers != null) ? _centers.CvPtr : IntPtr.Zero;

            UInt64 rngValue = rng.Seed;
            int result = CvInvoke.cvKMeans2(samples.CvPtr, cluster_count, labels.CvPtr, termcrit, attemps, ref rngValue, flag, centersPtr, out compactness);
            rng.Seed = rngValue;

            return result;
        }
Example #38
        public SVM()
        {
            // CvSVM
            // Solve a three-class classification problem on 2D vectors using an SVM

            const int S = 1000;
            const int SIZE = 400;
            CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);

            // (1) Allocate and initialize the image
            using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
            {
                img.Zero();
                // (2) Generate the training data
                CvPoint[] pts = new CvPoint[S];
                int[] res = new int[S];
                for (int i = 0; i < S; i++)
                {
                    pts[i].X = (int)(rng.RandInt() % SIZE);
                    pts[i].Y = (int)(rng.RandInt() % SIZE);
                    if (pts[i].Y > 50 * Math.Cos(pts[i].X * Cv.PI / 100) + 200)
                    {
                        img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                        img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                        res[i] = 1;
                    }
                    else
                    {
                        if (pts[i].X > 200)
                        {
                            img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                            img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                            res[i] = 2;
                        }
                        else
                        {
                            img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                            img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                            res[i] = 3;
                        }
                    }
                }

                // (3) Display the training data
                Cv.NamedWindow("SVM", WindowMode.AutoSize);
                Cv.ShowImage("SVM", img);
                Cv.WaitKey(0);

                // (4) Prepare the training samples
                float[] data = new float[S * 2];
                for (int i = 0; i < S; i++)
                {
                    data[i * 2] = ((float)pts[i].X) / SIZE;
                    data[i * 2 + 1] = ((float)pts[i].Y) / SIZE;
                }

                // (5) Train the SVM
                using (CvSVM svm = new CvSVM())
                {
                    CvMat data_mat = new CvMat(S, 2, MatrixType.F32C1, data);
                    CvMat res_mat = new CvMat(S, 1, MatrixType.S32C1, res);
                    CvTermCriteria criteria = new CvTermCriteria(1000, float.Epsilon);
                    CvSVMParams param = new CvSVMParams(SVMType.CSvc, SVMKernelType.Rbf, 10.0, 8.0, 1.0, 10.0, 0.5, 0.1, null, criteria);
                    svm.Train(data_mat, res_mat, null, null, param);

                    // (6) Draw the training results
                    for (int i = 0; i < SIZE; i++)
                    {
                        for (int j = 0; j < SIZE; j++)
                        {
                            float[] a = { (float)j / SIZE, (float)i / SIZE };
                            CvMat m = new CvMat(1, 2, MatrixType.F32C1, a);
                            float ret = svm.Predict(m);
                            CvColor color = new CvColor();
                            switch ((int)ret)
                            {
                                case 1:
                                    color = new CvColor(100, 0, 0); break;
                                case 2:
                                    color = new CvColor(0, 100, 0); break;
                                case 3:
                                    color = new CvColor(0, 0, 100); break;
                            }
                            img[i, j] = color;
                        }
                    }

                    // (7) Redraw the training data
                    for (int i = 0; i < S; i++)
                    {
                        CvColor color = new CvColor();
                        switch (res[i])
                        {
                            case 1:
                                color = new CvColor(255, 0, 0); break;
                            case 2:
                                color = new CvColor(0, 255, 0); break;
                            case 3:
                                color = new CvColor(0, 0, 255); break;
                        }
                        img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), color);
                        img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), color);
                    }

                    // (8) Draw the support vectors
                    int sv_num = svm.GetSupportVectorCount();
                    for (int i = 0; i < sv_num; i++)
                    {
                        var support = svm.GetSupportVector(i);
                        img.Circle(new CvPoint((int)(support[0] * SIZE), (int)(support[1] * SIZE)), 5, new CvColor(200, 200, 200));
                    }

                    // (9) Show the image
                    Cv.NamedWindow("SVM", WindowMode.AutoSize);
                    Cv.ShowImage("SVM", img);
                    Cv.WaitKey(0);
                    Cv.DestroyWindow("SVM");

                }
            }

        }
Example #39
        /// <summary>
        /// Restores a contour from its tree representation.
        /// </summary>
        /// <param name="storage">Container for the reconstructed contour.</param>
        /// <param name="criteria">Criteria determining when to stop the reconstruction.</param>
        /// <returns></returns>
        public CvSeq ContourFromContourTree(CvMemStorage storage, CvTermCriteria criteria)
        {
            return(Cv.ContourFromContourTree(this, storage, criteria));
        }