/// <summary>
/// Initializes a k-means input description by storing the samples and the
/// clustering parameters; no computation is performed here.
/// </summary>
/// <param name="samples">Sample data to be clustered.</param>
/// <param name="samplesType">Matrix element type describing the layout of <paramref name="samples"/>.</param>
/// <param name="nClusters">Number of clusters (k) to partition the samples into.</param>
/// <param name="criteria">Termination criteria for the k-means iteration.</param>
public KmeansInput(Array samples, MatrixType samplesType, int nClusters, CvTermCriteria criteria)
 {
     Samples = samples;
     SamplesType = samplesType;
     NClusters = nClusters;
     Criteria = criteria;
 }
Exemple #2
0
        /// <summary>
        /// 学習データを与えて初期化
        /// </summary>
		/// <param name="max_depth">このパラメータは木が取りうる最大の深さを決定する.学習アルゴリズムは,ノードの深さが max_depth  よりも小さいならば,それを分岐させようとする.他の終了条件が満たされた場合や(セクション始めにある学習手続きの概要を参照), あるいは/さらに,木が刈り込まれた場合など,実際の深さはもっと浅いかもしれない.</param>
		/// <param name="min_sample_count">あるノードに対するサンプル数がこのパラメータ値よりも少ない場合,そのノードは分岐しない.</param>
		/// <param name="regression_accuracy">別の終了条件 - 回帰木の場合のみ. 推定されたノード値が,そのノードの学習サンプルの応答に対して,このパラメータ値よりも低い精度を持つ場合,ノードはそれ以上分岐しなくなる.</param>
		/// <param name="use_surrogates">trueの場合,代理分岐が構築される. 代理分岐は観測値データの欠損を処理する場合や,変数の重要度の推定に必要である.</param>
		/// <param name="max_categories">学習手続きが分岐を作るときの離散変数が max_categoriesよりも多くの値を取ろうとするならば, (アルゴリズムが指数関数的であるので)正確な部分集合推定を行う場合に非常に時間がかかる可能性がある. 代わりに,(MLを含む)多くの決定木エンジンが,全サンプルを max_categories 個のクラスタに分類することによって (つまりいくつかのカテゴリは一つにマージされる),この場合の次善最適分岐を見つけようとする.このテクニックは,N(>2)-クラス分類問題においてのみ適用されることに注意する. 回帰および 2-クラス分類の場合は,このような手段をとらなくても効率的に最適分岐を見つけることができるので,このパラメータは使用されない.</param>
		/// <param name="priors">クラスラベル値によって保存されたクラス事前確率の配列. このパラメータは,ある特定のクラスに対する決定木の優先傾向を調整するために用いられる. 例えば,もしユーザがなんらかの珍しい例外的発生を検出したいと考えた場合,学習データは,おそらく例外的なケースよりもずっと多くの正常なケースを含んでいるので, 全ケースが正常であるとみなすだけで,非常に優れた分類性能が実現されるだろう. このように例外ケースを無視して分類性能を上げることを避けるために,事前確率を指定することができる. 例外的なケースの確率を人工的に増加させる(0.5 まで,あるいはそれ以上に)ことで,分類に失敗した例外の重みがより大きくなり,木は適切に調節される. </param>
		/// <param name="calc_var_importance">セットされている場合,変数の重要度が学習の際に計算される. 計算した変数の重要度の配列を取り出すためには,CvRTrees::get_var_importance()を呼びだす. </param>
		/// <param name="nactive_vars">変数の数.それぞれのツリーでランダムに選択され,最適な分割を求めるために使用される.</param>
		/// <param name="term_crit">forestの成長に対する終了条件. term_crit.max_iterは,forestの中のツリーの最大数 (コンストラクタのパラメータであるmax_tree_countも参照する,デフォルトでは50).term_crit.epsilonは,満足される精度を表す(OOB error). </param>
#else
		/// <summary>
		/// Training constructor
		/// </summary>
		/// <param name="maxDepth">This parameter specifies the maximum possible depth of the tree. That is the training algorithms attempts to split a node while its depth is less than max_depth. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned. </param>
		/// <param name="minSampleCount">A node is not split if the number of samples directed to the node is less than the parameter value. </param>
		/// <param name="regressionAccuracy">Another stop criteria - only for regression trees. As soon as the estimated node value differs from the node training samples responses by less than the parameter value, the node is not split further. </param>
		/// <param name="useSurrogates">If true, surrogate splits are built. Surrogate splits are needed to handle missing measurements and for variable importance estimation. </param>
		/// <param name="maxCategories">If a discrete variable, on which the training procedure tries to make a split, takes more than max_categories values, the precise best subset estimation may take a very long time (as the algorithm is exponential). Instead, many decision trees engines (including ML) try to find sub-optimal split in this case by clustering all the samples into max_categories clusters (i.e. some categories are merged together). Note that this technique is used only in N(>2)-class classification problems. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases. </param>
		/// <param name="priors">The array of a priori class probabilities, sorted by the class label value. The parameter can be used to tune the decision tree preferences toward a certain class. For example, if users want to detect some rare anomaly occurrence, the training base will likely contain much more normal cases than anomalies, so a very good classification performance will be achieved just by considering every case as normal. To avoid this, the priors can be specified, where the anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is adjusted properly. </param>
		/// <param name="calcVarImportance">If it is set, then variable importance is computed by the training procedure. To retrieve the computed variable importance array, call the method CvRTrees::get_var_importance(). </param>
		/// <param name="nactiveVars">The number of variables that are randomly selected at each tree node and that are used to find the best split(s). </param>
		/// <param name="termCrit">Termination criteria for growing the forest: term_crit.max_iter is the maximum number of trees in the forest (see also max_tree_count parameter of the constructor, by default it is set to 50) term_crit.epsilon is the sufficient accuracy (OOB error). </param>
#endif
        public CvRTParams(int maxDepth, int minSampleCount, float regressionAccuracy, bool useSurrogates, int maxCategories, float[] priors,
            bool calcVarImportance, int nactiveVars, CvTermCriteria termCrit)
            : base(maxDepth, minSampleCount, regressionAccuracy, useSurrogates, maxCategories, 0, false, false, priors)
        {
            // When a priors array is supplied, pin it so its address stays
            // stable while the native CvRTParams object holds a pointer to it.
            IntPtr pinnedPriors;
            if (priors == null)
            {
                pinnedPriors = IntPtr.Zero;
            }
            else
            {
                _handle = GCHandle.Alloc(priors, GCHandleType.Pinned);
                pinnedPriors = _handle.AddrOfPinnedObject();
            }
            _priors = priors;

            // Create the native random-trees parameter object.
            ptr = MLInvoke.CvRTParams_construct(
                maxDepth, minSampleCount, regressionAccuracy, useSurrogates,
                maxCategories, pinnedPriors, calcVarImportance, nactiveVars,
                termCrit.MaxIter, (float)termCrit.Epsilon, (int)termCrit.Type);
        }
Exemple #3
0
        /// <summary>
        /// 学習データを与えて初期化
        /// </summary>
        /// <param name="maxDepth">このパラメータは木が取りうる最大の深さを決定する.学習アルゴリズムは,ノードの深さが max_depth  よりも小さいならば,それを分岐させようとする.他の終了条件が満たされた場合や(セクション始めにある学習手続きの概要を参照), あるいは/さらに,木が刈り込まれた場合など,実際の深さはもっと浅いかもしれない.</param>
        /// <param name="minSampleCount">あるノードに対するサンプル数がこのパラメータ値よりも少ない場合,そのノードは分岐しない.</param>
        /// <param name="regressionAccuracy">別の終了条件 - 回帰木の場合のみ. 推定されたノード値が,そのノードの学習サンプルの応答に対して,このパラメータ値よりも低い精度を持つ場合,ノードはそれ以上分岐しなくなる.</param>
        /// <param name="useSurrogates">trueの場合,代理分岐が構築される. 代理分岐は観測値データの欠損を処理する場合や,変数の重要度の推定に必要である.</param>
        /// <param name="maxCategories">学習手続きが分岐を作るときの離散変数が max_categoriesよりも多くの値を取ろうとするならば, (アルゴリズムが指数関数的であるので)正確な部分集合推定を行う場合に非常に時間がかかる可能性がある. 代わりに,(MLを含む)多くの決定木エンジンが,全サンプルを max_categories 個のクラスタに分類することによって (つまりいくつかのカテゴリは一つにマージされる),この場合の次善最適分岐を見つけようとする.このテクニックは,N(>2)-クラス分類問題においてのみ適用されることに注意する. 回帰および 2-クラス分類の場合は,このような手段をとらなくても効率的に最適分岐を見つけることができるので,このパラメータは使用されない.</param>
		/// <param name="priors">クラスラベル値によって保存されたクラス事前確率の配列. このパラメータは,ある特定のクラスに対する決定木の優先傾向を調整するために用いられる. 例えば,もしユーザがなんらかの珍しい例外的発生を検出したいと考えた場合,学習データは,おそらく例外的なケースよりもずっと多くの正常なケースを含んでいるので, 全ケースが正常であるとみなすだけで,非常に優れた分類性能が実現されるだろう. このように例外ケースを無視して分類性能を上げることを避けるために,事前確率を指定することができる. 例外的なケースの確率を人工的に増加させる(0.5 まで,あるいはそれ以上に)ことで,分類に失敗した例外の重みがより大きくなり,木は適切に調節される. </param>
        /// <param name="calcVarImportance">セットされている場合,変数の重要度が学習の際に計算される. 計算した変数の重要度の配列を取り出すためには,CvRTrees::get_var_importance()を呼びだす. </param>
        /// <param name="nactiveVars">変数の数.それぞれのツリーでランダムに選択され,最適な分割を求めるために使用される.</param>
        /// <param name="termCrit">forestの成長に対する終了条件. term_crit.max_iterは,forestの中のツリーの最大数 (コンストラクタのパラメータであるmax_tree_countも参照する,デフォルトでは50).term_crit.epsilonは,満足される精度を表す(OOB error). </param>
#else
		/// <summary>
		/// Training constructor
		/// </summary>
		/// <param name="maxDepth">This parameter specifies the maximum possible depth of the tree. That is the training algorithms attempts to split a node while its depth is less than max_depth. The actual depth may be smaller if the other termination criteria are met (see the outline of the training procedure in the beginning of the section), and/or if the tree is pruned. </param>
		/// <param name="minSampleCount">A node is not split if the number of samples directed to the node is less than the parameter value. </param>
		/// <param name="regressionAccuracy">Another stop criteria - only for regression trees. As soon as the estimated node value differs from the node training samples responses by less than the parameter value, the node is not split further. </param>
		/// <param name="useSurrogates">If true, surrogate splits are built. Surrogate splits are needed to handle missing measurements and for variable importance estimation. </param>
		/// <param name="maxCategories">If a discrete variable, on which the training procedure tries to make a split, takes more than max_categories values, the precise best subset estimation may take a very long time (as the algorithm is exponential). Instead, many decision trees engines (including ML) try to find sub-optimal split in this case by clustering all the samples into max_categories clusters (i.e. some categories are merged together). Note that this technique is used only in N(>2)-class classification problems. In case of regression and 2-class classification the optimal split can be found efficiently without employing clustering, thus the parameter is not used in these cases. </param>
		/// <param name="priors">The array of a priori class probabilities, sorted by the class label value. The parameter can be used to tune the decision tree preferences toward a certain class. For example, if users want to detect some rare anomaly occurrence, the training base will likely contain much more normal cases than anomalies, so a very good classification performance will be achieved just by considering every case as normal. To avoid this, the priors can be specified, where the anomaly probability is artificially increased (up to 0.5 or even greater), so the weight of the misclassified anomalies becomes much bigger, and the tree is adjusted properly. </param>
		/// <param name="calcVarImportance">If it is set, then variable importance is computed by the training procedure. To retrieve the computed variable importance array, call the method CvRTrees::get_var_importance(). </param>
		/// <param name="nactiveVars">The number of variables that are randomly selected at each tree node and that are used to find the best split(s). </param>
		/// <param name="termCrit">Termination criteria for growing the forest: term_crit.max_iter is the maximum number of trees in the forest (see also max_tree_count parameter of the constructor, by default it is set to 50) term_crit.epsilon is the sufficient accuracy (OOB error). </param>
#endif
        public CvRTParams(
            int maxDepth, int minSampleCount, float regressionAccuracy, 
            bool useSurrogates, int maxCategories, float[] priors,
            bool calcVarImportance, int nactiveVars, CvTermCriteria termCrit)
            : base(IntPtr.Zero)
        {
            // Pin the optional priors array so its address remains valid for
            // the lifetime of the native parameter object.
            IntPtr nativePriors = IntPtr.Zero;
            if (priors != null)
            {
                handle = GCHandle.Alloc(priors, GCHandleType.Pinned);
                nativePriors = handle.AddrOfPinnedObject();
            }
            this.priors = priors;

            // The native API takes C-style int flags instead of booleans.
            int surrogatesFlag = useSurrogates ? 1 : 0;
            int varImportanceFlag = calcVarImportance ? 1 : 0;

            ptr = NativeMethods.ml_CvRTParams_new2(
                maxDepth, minSampleCount, regressionAccuracy, surrogatesFlag,
                maxCategories, nativePriors, varImportanceFlag, nactiveVars,
                termCrit.MaxIter, (float)termCrit.Epsilon, (int)termCrit.Type);
        }
Exemple #4
0
        /// <summary>
        /// Scans all frames of a motion-data recording for a chessboard, builds
        /// per-corner depth samples, and keeps/displays the frame whose adjacent
        /// corner spacing has the smallest standard deviation.
        /// </summary>
        private void buttonTest0_Click(object sender, RoutedEventArgs e)
        {
            // Read the chessboard layout (inner-corner grid size and physical
            // edge lengths) from the UI; abort if the fields do not parse.
            int    cols, rows;
            double horizLength, vertLength;

            if (!parseChessboardParameters(out cols, out rows, out horizLength, out vertLength))
            {
                return;
            }

            // Modified from here on
            MotionDataHandler handler;
            string            path;

            if (openMotionData(out handler, out path))
            {
                CvMat displayMat1 = null;   // current frame with overlay annotations
                CvMat displayMat3 = null;   // fallback display when no chessboard is found
                CvMat displayMat4 = null;   // best frame so far (lowest corner-spacing stdev)
                CvMat gray        = null;   // reusable grayscale buffer
                int   length      = handler.FrameCount;
                if (length == 0)
                {
                    return;
                }

                CvSize boardSize        = new CvSize(cols, rows);
                CvSize imageSize        = new CvSize();
                double minVarDistance2d = double.MaxValue;

                // Pair color and depth frames index-by-index.
                IEnumerable <CvMat> colorImages, depthImages;
                Utility.LoadImages(handler.GetColorImagePaths(), out colorImages);
                Utility.LoadImages(handler.GetDepthImagePaths(), out depthImages);
                var images = colorImages.Zip(depthImages, (first, second) => Tuple.Create(first, second));

                foreach (Tuple <CvMat, CvMat> imagePair in images)
                {
                    CvMat imageMat = imagePair.Item1;
                    CvMat depthMat = imagePair.Item2;

                    if (displayMat4 == null)
                    {
                        displayMat4 = CvEx.InitCvMat(imageMat);
                    }

                    imageSize = new CvSize(imageMat.Cols, imageMat.Rows);
                    CvSize depthUserSize = new CvSize(depthMat.Cols, depthMat.Rows);

                    // Detect the chessboard in a grayscale copy of the color frame.
                    CvPoint2D32f[] corners;
                    int            count;
                    CvEx.InitCvMat(ref gray, imageMat, MatrixType.U8C1);
                    imageMat.CvtColor(gray, ColorConversion.RgbToGray);
                    if (gray.FindChessboardCorners(boardSize, out corners, out count, ChessboardFlag.AdaptiveThresh))
                    {
                        CvEx.CloneCvMat(ref displayMat1, imageMat);
                        // Refine detected corners to sub-pixel accuracy.
                        CvTermCriteria criteria = new CvTermCriteria(50, 0.01);
                        gray.FindCornerSubPix(corners, count, new CvSize(3, 3), new CvSize(-1, -1), criteria);
                        // Build an optional 3D point per corner from the undistorted depth.
                        CvPoint3D32f?[] cornerPoints = new CvPoint3D32f?[corners.Length];
                        for (int j = 0; j < corners.Length; j++)
                        {
                            // NOTE(review): depth is sampled 10px up-left of the detected
                            // corner — presumably to avoid unreliable depth at the corner
                            // itself; confirm against the sensor's behavior.
                            CvPoint2D32f corner = new CvPoint2D32f(corners[j].X - 10, corners[j].Y - 10);
                            double?      value  = CvEx.Get2DSubPixel(depthMat, corner, 0);
                            if (value.HasValue)
                            {
                                double depth = UndistortionData.UndistortDepth(corner.X, corner.Y, value.Value, depthUserSize);
                                cornerPoints[j] = new CvPoint3D32f(corner.X, corner.Y, depth);
                            }
                        }
                        // Collect 2D distances between horizontally and vertically
                        // adjacent corners; their spread scores the capture quality.
                        List <double> distance2dList = new List <double>();
                        for (int x = 0; x < cols; x++)
                        {
                            for (int y = 0; y < rows; y++)
                            {
                                if (!cornerPoints[x + y * cols].HasValue)
                                {
                                    continue;
                                }
                                int nextX = x + 1;
                                if (nextX < cols)
                                {
                                    if (!cornerPoints[nextX + y * cols].HasValue)
                                    {
                                        continue;
                                    }
                                    CvPoint3D32f point     = cornerPoints[x + y * cols].Value;
                                    CvPoint3D32f nextPoint = cornerPoints[nextX + y * cols].Value;
                                    distance2dList.Add(Math.Sqrt(Math.Pow(point.X - nextPoint.X, 2) + Math.Pow(point.Y - nextPoint.Y, 2)));
                                }
                                int nextY = y + 1;
                                if (nextY < rows)
                                {
                                    if (!cornerPoints[x + nextY * cols].HasValue)
                                    {
                                        continue;
                                    }
                                    CvPoint3D32f point     = cornerPoints[x + y * cols].Value;
                                    CvPoint3D32f nextPoint = cornerPoints[x + nextY * cols].Value;
                                    distance2dList.Add(Math.Sqrt(Math.Pow(point.X - nextPoint.X, 2) + Math.Pow(point.Y - nextPoint.Y, 2)));
                                }
                            }
                        }
                        if (distance2dList.Count >= 2)
                        {
                            // Overlay the current/best spread and each corner's depth
                            // deviation from the board's average depth.
                            double stdevDistance2d = CalcEx.GetStdDev(distance2dList);
                            displayMat1.PutText(string.Format("{0:0.00}/{1:0.00}", stdevDistance2d, minVarDistance2d), new CvPoint(0, 20), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 0));
                            double avgDepth = cornerPoints.Where(p => p.HasValue).Select(p => p.Value.Z).Average();
                            for (int x = 0; x < cols; x++)
                            {
                                for (int y = 0; y < rows; y++)
                                {
                                    if (!cornerPoints[x + y * cols].HasValue)
                                    {
                                        continue;
                                    }
                                    CvPoint3D32f point = cornerPoints[x + y * cols].Value;
                                    displayMat1.PutText((point.Z - avgDepth).ToString("0.00"), new CvPoint((int)point.X, (int)point.Y), new CvFont(FontFace.HersheyPlain, 0.6, 0.6), new CvScalar(255, 0, 0));
                                    displayMat1.PutText(((point.Z - avgDepth) / avgDepth * 100).ToString("0.000"), new CvPoint((int)point.X, (int)point.Y + 12), new CvFont(FontFace.HersheyPlain, 0.6, 0.6), new CvScalar(0, 255, 255));
                                }
                            }
                            //displayMat1.DrawChessboardCorners(boardSize, corners, true);

                            // Remember the frame with the smallest corner-spacing stdev.
                            if (stdevDistance2d < minVarDistance2d)
                            {
                                minVarDistance2d = stdevDistance2d;
                                CvEx.CloneCvMat(ref displayMat4, displayMat1);
                            }
                            //System.Threading.Thread.Sleep(500);
                        }
                        putImage(displayMat1, PixelFormats.Rgb24);
                    }
                    else
                    {
                        // No chessboard found: just show the raw frame.
                        CvEx.CloneCvMat(ref displayMat3, imageMat);
                        putImage(displayMat3, PixelFormats.Rgb24);
                    }
                }

                // Finally show the best frame found across the whole sequence.
                putImage(displayMat4, PixelFormats.Rgb24);
                displayLabels();
            }
        }
Exemple #5
0
        /// <summary>
	    /// 初期化
	    /// </summary>
	    /// <param name="_svm_type">SVMの種類</param>
	    /// <param name="_kernel_type">SVMカーネルの種類</param>
	    /// <param name="_degree">poly 用</param>
	    /// <param name="_gamma">poly/rbf/sigmoid 用</param>
	    /// <param name="_coef0">poly/sigmoid 用</param>
	    /// <param name="_C">SVMType.CSvc, SVMType.EpsSvr, SVMType.NuSvr 用</param>
	    /// <param name="_nu">SVMType.NuSvc, SVMType.OneClass, SVMType.NuSvr 用</param>
	    /// <param name="p">SVMType.EpsSvr 用</param>
	    /// <param name="_class_weights">SVMType.CSvc 用</param>
	    /// <param name="_term_crit">終了条件</param>
#else
		/// <summary>
	    /// Constructor
	    /// </summary>
	    /// <param name="_svm_type">Type of SVM</param>
	    /// <param name="_kernel_type">The kernel type</param>
	    /// <param name="_degree">for poly</param>
	    /// <param name="_gamma">for poly/rbf/sigmoid</param>
	    /// <param name="_coef0">for poly/sigmoid</param>
	    /// <param name="_C">for SVMType.CSvc, SVMType.EpsSvr and SVMType.NuSvr</param>
	    /// <param name="_nu">for SVMType.NuSvc, SVMType.OneClass and SVMType.NuSvr</param>
	    /// <param name="_p">for SVMType.EpsSvr</param>
	    /// <param name="_class_weights">for SVMType.CSvc</param>
	    /// <param name="_term_crit">Termination criteria</param>
#endif
		public CvSVMParams(SVMType _svm_type, SVMKernelType _kernel_type, double _degree, double _gamma, double _coef0, 
            double _C, double _nu, double _p, CvMat _class_weights, CvTermCriteria _term_crit )
	    {
            _data = new WCvSVMParams();

            // Class weights are optional; hand the native layer a null pointer
            // when the caller did not supply any.
            IntPtr weightsPtr = IntPtr.Zero;
            if (_class_weights != null)
            {
                weightsPtr = _class_weights.CvPtr;
            }

            MLInvoke.CvSVMParams_construct(
                ref _data, (int)_svm_type, (int)_kernel_type, _degree, _gamma, _coef0,
                _C, _nu, _p, weightsPtr, _term_crit);
	    }
 public static extern void ml_ANN_MLP_TrainParams_new2(
     CvTermCriteria termCrit, int trainMethod, double param1, double param2,
     out WCvANN_MLP_TrainParams result);
        /// <summary>
        /// 学習データを与えて初期化
        /// </summary>
		/// <param name="term_crit">学習アルゴリズムの終了条件.アルゴリズムにより何度繰り返されるか (逐次型の誤差逆伝播アルゴリズムでは,この数は学習データセットのサイズと掛け合わされる)と,1ターンで重みをどの程度変更するかを指定する.</param>
		/// <param name="train_method">用いる学習アルゴリズム</param>
		/// <param name="param1"></param>
		/// <param name="param2"></param>
#else
		/// <summary>
        /// Training constructor
        /// </summary>
		/// <param name="term_crit">The termination criteria for the training algorithm. It identifies how many iterations is done by the algorithm (for sequential backpropagation algorithm the number is multiplied by the size of the training set) and how much the weights could change between the iterations to make the algorithm continue. </param>
		/// <param name="train_method">The training algorithm to use</param>
		/// <param name="param1"></param>
		/// <param name="param2"></param>
#endif
		public CvANN_MLP_TrainParams(CvTermCriteria term_crit, MLPTrainingMethod train_method, double param1, double param2)
		{
            // Construct the native parameter object, copy its contents into the
            // managed struct, then destroy the native allocation. The destruct
            // call sits in a finally block so the native object is released even
            // if marshaling throws (the original leaked it in that case).
            IntPtr p = MLInvoke.CvANN_MLP_TrainParams_construct(term_crit, (int)train_method, param1, param2);
            try
            {
                _data = (WCvANN_MLP_TrainParams)Marshal.PtrToStructure(p, typeof(WCvANN_MLP_TrainParams));
            }
            finally
            {
                MLInvoke.CvANN_MLP_TrainParams_destruct(p);
            }
		}
 public static extern int video_meanShift(
     IntPtr probImage, ref CvRect window, CvTermCriteria criteria);
Exemple #9
0
 public static extern void cvFindCornerSubPix(IntPtr image, IntPtr corners,
                                              int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria);
 public static extern void imgproc_cornerSubPix(IntPtr image, IntPtr corners,
     Size winSize, Size zeroZone, CvTermCriteria criteria);
        /// <summary>
        /// Refines the given corner locations to sub-pixel accuracy by
        /// maximizing a cornerness criterion around each point.
        /// </summary>
        /// <param name="image">Input image.</param>
        /// <param name="inputCorners">Initial corner coordinates; the refined coordinates are returned (the input collection is not modified).</param>
        /// <param name="winSize">Half of the side length of the search window.</param>
        /// <param name="zeroZone">Half of the size of the dead region in the middle of the search zone
        /// where the summation is skipped, used to avoid possible singularities of the
        /// autocorrelation matrix. (-1,-1) means no such region.</param>
        /// <param name="criteria">Termination criteria for the iterative refinement: stop after
        /// criteria.maxCount iterations or once a corner moves by less than criteria.epsilon.</param>
        /// <returns>The refined corner positions.</returns>
        public static Point2f[] CornerSubPix(InputArray image, IEnumerable<Point2f> inputCorners,
            Size winSize, Size zeroZone, CvTermCriteria criteria)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (inputCorners == null)
                throw new ArgumentNullException("inputCorners");
            image.ThrowIfDisposed();

            // Refine a private copy so the caller's corner data is never mutated in place.
            var sourceCorners = Util.ToArray(inputCorners);
            var workingCorners = new Point2f[sourceCorners.Length];
            Array.Copy(sourceCorners, workingCorners, sourceCorners.Length);

            using (var cornerVec = new VectorOfPoint2f(workingCorners))
            {
                NativeMethods.imgproc_cornerSubPix(image.CvPtr, cornerVec.CvPtr, winSize, zeroZone, criteria);
                return cornerVec.ToArray();
            }
        }
 public static extern double calib3d_stereoCalibrate_array(
     IntPtr[] objectPoints, int opSize1, int[] opSizes2,
     IntPtr[] imagePoints1, int ip1Size1, int[] ip1Sizes2,
     IntPtr[] imagePoints2, int ip2Size1, int[] ip2Sizes2,
     [In, Out] double[,] cameraMatrix1,
     [In, Out] double[] distCoeffs1, int dc1Size,
     [In, Out] double[,] cameraMatrix2,
     [In, Out] double[] distCoeffs2, int dc2Size,
     CvSize imageSize,
     IntPtr R, IntPtr T,
     IntPtr E, IntPtr F,
     CvTermCriteria criteria, int flags);
Exemple #13
0
        /// <summary>
        /// Scores the calibration's scaling: for each pair of adjacent chessboard
        /// corners it compares the measured 3D distance against the known physical
        /// square length, draws a color-coded line per pair, and prints summary
        /// statistics of the measured/expected ratio.
        /// </summary>
        private void buttonScalingScore_Click(object sender, RoutedEventArgs e)
        {
            // Chessboard layout (grid size, physical square lengths) from the UI.
            int    cols, rows;
            double horizLength, vertLength;

            if (!parseChessboardParameters(out cols, out rows, out horizLength, out vertLength))
            {
                return;
            }

            // Modified from here on
            MotionDataHandler handler;
            string            path;

            if (openMotionData(out handler, out path))
            {
                CvMat displayMat1 = null;   // last frame with a detected chessboard
                CvMat displayMat3 = null;   // fallback display when no chessboard is found
                CvMat displayMat4 = null;   // accumulated scale-visualization overlay
                CvMat gray        = null;   // reusable grayscale buffer

                int length = handler.FrameCount;
                if (length == 0)
                {
                    return;
                }

                CvSize boardSize = new CvSize(cols, rows);
                CvSize imageSize = new CvSize();
                // (measured distance, expected physical distance) per adjacent corner pair
                List <Tuple <double, double> > pairs = new List <Tuple <double, double> >();
                CvPoint2D32f[] lastCorners           = null;

                // Pair color and depth frames index-by-index.
                IEnumerable <CvMat> colorImages, depthImages;
                Utility.LoadImages(handler.GetColorImagePaths(), out colorImages);
                Utility.LoadImages(handler.GetDepthImagePaths(), out depthImages);
                var images = colorImages.Zip(depthImages, (first, second) => Tuple.Create(first, second));

                foreach (Tuple <CvMat, CvMat> imagePair in images)
                {
                    CvMat imageMat = imagePair.Item1;
                    CvMat depthMat = imagePair.Item2;

                    if (displayMat4 == null)
                    {
                        displayMat4 = CvEx.InitCvMat(imageMat);
                    }

                    imageSize = new CvSize(imageMat.Cols, imageMat.Rows);
                    CvPoint2D32f[] corners;
                    int            count;
                    CvEx.InitCvMat(ref gray, imageMat, MatrixType.U8C1);
                    imageMat.CvtColor(gray, ColorConversion.RgbToGray);
                    if (gray.FindChessboardCorners(boardSize, out corners, out count, ChessboardFlag.AdaptiveThresh))
                    {
                        CvEx.CloneCvMat(ref displayMat1, imageMat);
                        // Refine corners to sub-pixel accuracy.
                        CvTermCriteria criteria = new CvTermCriteria(50, 0.01);
                        gray.FindCornerSubPix(corners, count, new CvSize(3, 3), new CvSize(-1, -1), criteria);
                        // Estimate a depth per refined corner via bilateral filtering
                        // of the depth map at the corner position.
                        CvPoint3D32f?[] cornerPoints = new CvPoint3D32f?[corners.Length];
                        for (int j = 0; j < corners.Length; j++)
                        {
                            CvPoint2D32f corner = corners[j];
                            double?      value  = CalcEx.BilateralFilterDepthMatSinglePixel(corner, depthMat, 100, 4, 9);
                            if (value.HasValue)
                            {
                                cornerPoints[j] = new CvPoint3D32f(corner.X, corner.Y, value.Value);
                            }
                        }
                        for (int x = 0; x < cols; x++)
                        {
                            for (int y = 0; y < rows; y++)
                            {
                                if (!cornerPoints[x + y * cols].HasValue)
                                {
                                    continue;
                                }
                                CvPoint3D32f point1          = cornerPoints[x + y * cols].Value;
                                CvPoint3D64f undistortPoint1 = this.UndistortionData.GetRealFromScreenPos(point1, imageSize);
                                // Compare against the right neighbor (horizontal spacing)
                                // and the bottom neighbor (vertical spacing).
                                foreach (var offset in new[] { new { X = 1, Y = 0, D = horizLength }, new { X = 0, Y = 1, D = vertLength } })
                                {
                                    int dx = x + offset.X;
                                    int dy = y + offset.Y;
                                    if (dx >= cols || dy >= rows)
                                    {
                                        continue;
                                    }
                                    if (!cornerPoints[dx + dy * cols].HasValue)
                                    {
                                        continue;
                                    }

                                    CvPoint3D32f point2          = cornerPoints[dx + dy * cols].Value;
                                    CvPoint3D64f undistortPoint2 = this.UndistortionData.GetRealFromScreenPos(point2, imageSize);
                                    double       distance        = Math.Sqrt(CvEx.GetDistanceSq(undistortPoint1, undistortPoint2));
                                    // scale == 1 means the measured spacing matches the
                                    // physical square length exactly.
                                    double       scale           = distance / offset.D;
                                    CvColor      color           = CalcEx.HSVtoRGB(Math.Max(0, Math.Min(300, scale * 600 - 450)), scale, 2 - scale);
                                    displayMat4.DrawLine((int)point1.X, (int)point1.Y, (int)point2.X, (int)point2.Y, new CvScalar(color.R, color.G, color.B), 1, LineType.AntiAlias);
                                    pairs.Add(new Tuple <double, double>(distance, offset.D));
                                }
                            }
                        }
                        CvEx.DrawChessboardCornerFrame(displayMat1, boardSize, corners, new CvScalar(64, 128, 64));
                        displayMat1.DrawChessboardCorners(boardSize, corners, true);
                        lastCorners = corners;
                        putImage(displayMat1, PixelFormats.Rgb24);
                    }
                    else
                    {
                        CvEx.CloneCvMat(ref displayMat3, imageMat);
                        putImage(displayMat3, PixelFormats.Rgb24);
                    }
                }

                // NOTE(review): if no frame contained a detectable chessboard,
                // displayMat1 is still null and pairs is empty here — InitCvMat /
                // Undistort2 and pairs.Min would throw. Consider guarding.
                CvMat displayMat2 = CvEx.InitCvMat(displayMat1);
                displayMat1.Undistort2(displayMat2, this.UndistortionData.CameraStruct.CreateCvMat(), this.UndistortionData.DistortStruct.CreateCvMat(true));
                if (lastCorners != null)
                {
                    drawUndistortedCornerFrame(displayMat2, lastCorners, boardSize);
                }
                // Summary statistics of the measured/expected ratio over all pairs.
                displayMat2.PutText(string.Format("Min: {0}", pairs.Min(x => x.Item1 / x.Item2)), new CvPoint(20, 20), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                displayMat2.PutText(string.Format("Max: {0}", pairs.Max(x => x.Item1 / x.Item2)), new CvPoint(20, 40), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                displayMat2.PutText(string.Format("Avg: {0}", pairs.Average(x => x.Item1 / x.Item2)), new CvPoint(20, 60), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                displayMat2.PutText(string.Format("Med: {0}", CalcEx.GetMedian(pairs.Select(x => x.Item1 / x.Item2).ToList())), new CvPoint(20, 80), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                putImage(displayMat4, PixelFormats.Rgb24);
                displayLabels();
            }
        }
 /// <summary>
 /// Native binding: creates a new EM (Expectation-Maximization) model instance
 /// and returns an opaque pointer to it.
 /// </summary>
 // NOTE(review): the [DllImport] attribute is outside this chunk — presumably on the
 // containing NativeMethods class declaration; confirm.
 public static extern IntPtr ml_EM_new(
     int nclusters, int covMatType, CvTermCriteria termCrit);
Exemple #15
0
        /// <summary>
        /// Click handler: scans every recorded color frame for a chessboard, attaches a
        /// depth value to each detected corner, then calibrates the real-world scale and
        /// depth offset (UndistortionData.CalibrateRealScaleAndOffset) and shows the
        /// undistorted result with the computed XScale/YScale/ZOffset overlaid.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Routed event args (unused).</param>
        private void buttonCalibrateScaleOffset_Click(object sender, RoutedEventArgs e)
        {
            int    cols, rows;
            double horizLength, vertLength;

            // Abort if the chessboard parameters entered in the UI are invalid.
            if (!parseChessboardParameters(out cols, out rows, out horizLength, out vertLength))
            {
                return;
            }

            // Modified below (original comment: 以下改造)
            MotionDataHandler handler;
            string            path;

            if (openMotionData(out handler, out path))
            {
                CvMat displayMat1 = null;
                CvMat displayMat3 = null;
                CvMat gray        = null;

                if (ProgressData.DoAction(progress =>
                {
                    int length = handler.FrameCount;
                    if (length == 0)
                    {
                        return;
                    }
                    progress.InitProgress("Find Chessboard...", length * 2);

                    CvSize boardSize = new CvSize(cols, rows);
                    List <CvPoint3D32f?[]> list = new List <CvPoint3D32f?[]>();
                    CvSize imageSize = new CvSize();
                    CvPoint2D32f[] lastCorners = null;

                    // Walk the color and depth image sequences in lockstep.
                    IEnumerable <CvMat> colorImages, depthImages;
                    Utility.LoadImages(handler.GetColorImagePaths(), out colorImages);
                    Utility.LoadImages(handler.GetDepthImagePaths(), out depthImages);
                    var images = colorImages.Zip(depthImages, (first, second) => Tuple.Create(first, second));

                    foreach (Tuple <CvMat, CvMat> imagePair in images)
                    {
                        progress.CurrentValue++;

                        CvMat imageMat = imagePair.Item1;
                        CvMat depthMat = imagePair.Item2;
                        imageSize = new CvSize(imageMat.Cols, imageMat.Rows);
                        CvPoint2D32f[] corners;
                        int count;
                        CvEx.InitCvMat(ref gray, imageMat, MatrixType.U8C1);
                        imageMat.CvtColor(gray, ColorConversion.RgbToGray);
                        if (gray.FindChessboardCorners(boardSize, out corners, out count, ChessboardFlag.AdaptiveThresh))
                        {
                            CvEx.CloneCvMat(ref displayMat1, imageMat);
                            // Refine detected corners to sub-pixel accuracy.
                            CvTermCriteria criteria = new CvTermCriteria(50, 0.01);
                            gray.FindCornerSubPix(corners, count, new CvSize(3, 3), new CvSize(-1, -1), criteria);
                            // Attach a (bilateral-filtered) depth sample to each corner;
                            // the entry stays null when no depth value is available there.
                            CvPoint3D32f?[] cornerPoints = new CvPoint3D32f?[corners.Length];
                            for (int j = 0; j < corners.Length; j++)
                            {
                                CvPoint2D32f corner = corners[j];
                                double?value = CalcEx.BilateralFilterDepthMatSinglePixel(corner, depthMat, 100, 4, 9);
                                if (value.HasValue)
                                {
                                    cornerPoints[j] = new CvPoint3D32f(corner.X, corner.Y, value.Value);
                                }
                            }
                            list.Add(cornerPoints);
                            CvEx.DrawChessboardCornerFrame(displayMat1, boardSize, corners, new CvScalar(64, 128, 64));
                            displayMat1.DrawChessboardCorners(boardSize, corners, true);
                            lastCorners = corners;
                            //putImage(displayMat1, PixelFormats.Bgr24);
                        }
                        else
                        {
                            CvEx.CloneCvMat(ref displayMat3, imageMat);
                            //putImage(displayMat3, PixelFormats.Bgr24);
                        }
                    }
                    progress.SetProgress("Scale Offset Calibrating...", length);

                    this.UndistortionData.CalibrateRealScaleAndOffset(list, cols, rows, horizLength, vertLength, imageSize);
                    // Show the undistorted view of the last successful frame with results overlaid.
                    // NOTE(review): displayMat1 stays null when no frame contained a chessboard;
                    // confirm CvEx.InitCvMat tolerates that.
                    CvMat displayMat2 = CvEx.InitCvMat(displayMat1);
                    displayMat1.Undistort2(displayMat2, this.UndistortionData.CameraStruct.CreateCvMat(), this.UndistortionData.DistortStruct.CreateCvMat(true));
                    if (lastCorners != null)
                    {
                        drawUndistortedCornerFrame(displayMat2, lastCorners, boardSize);
                    }

                    displayMat2.PutText(string.Format("XScale: {0}", this.UndistortionData.XScale), new CvPoint(20, 20), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                    displayMat2.PutText(string.Format("YScale: {0}", this.UndistortionData.YScale), new CvPoint(20, 40), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                    displayMat2.PutText(string.Format("Zoffset: {0}", this.UndistortionData.ZOffset), new CvPoint(20, 60), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                    putImage(displayMat2, PixelFormats.Bgr24);
                }, "Calibrate Scale Offset", true))
                {
                    displayLabels();
                }
            }
        }
Exemple #16
0
        /// <summary>
        /// Click handler: finds chessboard corners in every recorded color frame and runs
        /// intrinsic camera calibration (UndistortionData.CalibrateCamera), then displays
        /// the undistorted last frame with the detected corner frame overlaid.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Routed event args (unused).</param>
        private void buttonCameraCalibration_Click(object sender, RoutedEventArgs e)
        {
            int    cols, rows;
            double horizLength, vertLength;

            // Abort if the chessboard parameters entered in the UI are invalid.
            if (!parseChessboardParameters(out cols, out rows, out horizLength, out vertLength))
            {
                return;
            }

            int imageNum;

            // Number of images used for calibration, taken from the UI; must be positive.
            if (!int.TryParse(textCalibrationCameraIteration.Text, out imageNum) || imageNum <= 0)
            {
                System.Windows.MessageBox.Show(string.Format("キャリブレーションに使用するイメージ数が不正です: {0}", textCalibrationCameraIteration.Text));
                return;
            }

            // Modified below (original comment: 以下改造)
            MotionDataHandler handler;
            string            path;

            if (openMotionData(out handler, out path))
            {
                CvMat displayMat1 = null;
                CvMat displayMat3 = null;
                CvMat gray        = null;
                if (ProgressData.DoAction(progress =>
                {
                    int length = handler.FrameCount;
                    if (length == 0)
                    {
                        return;
                    }
                    progress.InitProgress("Find Chessboard...", length * 2);
                    CvSize boardSize = new CvSize(cols, rows);
                    List <CvPoint2D32f[]> list = new List <CvPoint2D32f[]>();
                    CvSize imageSize = new CvSize();
                    CvPoint2D32f[] lastCorners = null;

                    IEnumerable <CvMat> colorImages;
                    Utility.LoadImages(handler.GetColorImagePaths(), out colorImages);

                    foreach (CvMat imageMat in colorImages)
                    {
                        progress.CurrentValue++;
                        using (imageMat)
                        {
                            imageSize = new CvSize(imageMat.Cols, imageMat.Rows);
                            CvPoint2D32f[] corners;
                            int count;
                            CvEx.InitCvMat(ref gray, imageMat, MatrixType.U8C1);
                            imageMat.CvtColor(gray, ColorConversion.RgbToGray);
                            if (gray.FindChessboardCorners(boardSize, out corners, out count, ChessboardFlag.AdaptiveThresh))
                            {
                                CvEx.CloneCvMat(ref displayMat1, imageMat);
                                // Refine detected corners to sub-pixel accuracy.
                                CvTermCriteria criteria = new CvTermCriteria(20, 0.1);
                                gray.FindCornerSubPix(corners, count, new CvSize(11, 11), new CvSize(-1, -1), criteria);
                                list.Add(corners);
                                CvEx.DrawChessboardCornerFrame(displayMat1, boardSize, corners, new CvScalar(64, 128, 64));
                                displayMat1.DrawChessboardCorners(boardSize, corners, true);
                                lastCorners = corners;
                                putImage(displayMat1, PixelFormats.Bgr24);
                            }
                            else
                            {
                                CvEx.CloneCvMat(ref displayMat3, imageMat);
                                putImage(displayMat3, PixelFormats.Bgr24);
                            }
                        }
                    }

                    progress.SetProgress("Calibrating...", length);

                    // Square size is approximated as the mean of the two entered edge lengths.
                    this.UndistortionData.CalibrateCamera(list, cols, rows, (horizLength + vertLength) / 2, imageSize, imageNum, path);
                    // NOTE(review): displayMat1 stays null when no frame contained a chessboard;
                    // confirm CvEx.InitCvMat tolerates that.
                    CvMat displayMat2 = CvEx.InitCvMat(displayMat1);
                    displayMat1.Undistort2(displayMat2, this.UndistortionData.CameraStruct.CreateCvMat(), this.UndistortionData.DistortStruct.CreateCvMat(true));
                    if (lastCorners != null)
                    {
                        drawUndistortedCornerFrame(displayMat2, lastCorners, boardSize);
                    }

                    putImage(displayMat2, PixelFormats.Bgr24);
                }, "Camera Calib", true))
                {
                    displayLabels();
                }
            }
        }
        /// <summary>
        /// Worker-thread loop: grabs frames from camera index 1, detects a 5x4 chessboard,
        /// and — depending on _option — warps a still image (1) or a video frame (2) onto
        /// the board via a perspective transform, or simply draws the detected corners.
        /// The composited frame is shown in pictureBox1; runs until _thread is cleared.
        /// </summary>
        private void Run()
        {
            // NOTE(review): camera index and media file names are hard-coded.
            CvCapture cap = Cv.CreateCameraCapture(1);
            CvCapture vid = CvCapture.FromFile("trailer.avi");
            IplImage  pic = new IplImage("pic.jpg", LoadMode.AnyColor | LoadMode.AnyDepth);

            Cv.Flip(pic, pic, FlipMode.Y);

            // Chessboard geometry: 5x4 inner corners = 20 points.
            int    b_width   = 5;
            int    b_height  = 4;
            int    b_squares = 20;
            CvSize b_size    = new CvSize(b_width, b_height);

            CvMat warp_matrix = Cv.CreateMat(3, 3, MatrixType.F32C1);

            CvPoint2D32f[] corners = new CvPoint2D32f[b_squares];

            IplImage img;
            IplImage frame;
            IplImage disp;
            IplImage cpy_img;
            IplImage neg_img;

            int corner_count;

            while (_thread != null)
            {
                // NOTE(review): QueryFrame can return null on capture failure — not checked here.
                img = Cv.QueryFrame(cap);

                Cv.Flip(img, img, FlipMode.Y);

                // NOTE(review): these images are re-allocated every iteration and never
                // released ('disp' is never used at all) — likely native memory leak; confirm.
                disp    = Cv.CreateImage(Cv.GetSize(img), BitDepth.U8, 3);
                cpy_img = Cv.CreateImage(Cv.GetSize(img), BitDepth.U8, 3);
                neg_img = Cv.CreateImage(Cv.GetSize(img), BitDepth.U8, 3);

                IplImage gray  = Cv.CreateImage(Cv.GetSize(img), img.Depth, 1);
                bool     found = Cv.FindChessboardCorners(img, b_size, out corners, out corner_count, ChessboardFlag.AdaptiveThresh | ChessboardFlag.FilterQuads);

                Cv.CvtColor(img, gray, ColorConversion.BgrToGray);

                // Refine corners to sub-pixel accuracy.
                // NOTE(review): runs even when detection failed (corner_count may be 0).
                CvTermCriteria criteria = new CvTermCriteria(CriteriaType.Epsilon, 30, 0.1);
                Cv.FindCornerSubPix(gray, corners, corner_count, new CvSize(11, 11), new CvSize(-1, -1), criteria);

                // Only proceed when the complete board was found.
                if (corner_count == b_squares)
                {
                    if (_option == 1)
                    {
                        // Option 1: warp the still image onto the detected board.
                        CvPoint2D32f[] p = new CvPoint2D32f[4];
                        CvPoint2D32f[] q = new CvPoint2D32f[4];

                        IplImage blank = Cv.CreateImage(Cv.GetSize(pic), BitDepth.U8, 3);
                        Cv.Zero(blank);

                        // q = the four corners of the source picture.
                        q[0].X = (float)pic.Width * 0;
                        q[0].Y = (float)pic.Height * 0;
                        q[1].X = (float)pic.Width;
                        q[1].Y = (float)pic.Height * 0;

                        q[2].X = (float)pic.Width;
                        q[2].Y = (float)pic.Height;
                        q[3].X = (float)pic.Width * 0;
                        q[3].Y = (float)pic.Height;

                        // p = the four outer chessboard corners (indices 0, 4, 19, 15 on a 5x4 grid).
                        p[0].X = corners[0].X;
                        p[0].Y = corners[0].Y;
                        p[1].X = corners[4].X;
                        p[1].Y = corners[4].Y;

                        p[2].X = corners[19].X;
                        p[2].Y = corners[19].Y;
                        p[3].X = corners[15].X;
                        p[3].Y = corners[15].Y;

                        Cv.GetPerspectiveTransform(q, p, out warp_matrix);

                        Cv.Zero(neg_img);
                        Cv.Zero(cpy_img);

                        // Warp the picture and a mask, then composite onto the camera frame:
                        // AND clears the board area, OR pastes the warped picture into it.
                        Cv.WarpPerspective(pic, neg_img, warp_matrix);
                        Cv.WarpPerspective(blank, cpy_img, warp_matrix);
                        Cv.Not(cpy_img, cpy_img);

                        Cv.And(cpy_img, img, cpy_img);
                        Cv.Or(cpy_img, neg_img, img);

                        Cv.Flip(img, img, FlipMode.Y);
                        //Cv.ShowImage("video", img);
                        Bitmap bm = BitmapConverter.ToBitmap(img);
                        bm.SetResolution(pictureBox1.Width, pictureBox1.Height);
                        pictureBox1.Image = bm;
                    }
                    else if (_option == 2)
                    {
                        // Option 2: warp the next video frame onto the detected board.
                        CvPoint2D32f[] p = new CvPoint2D32f[4];
                        CvPoint2D32f[] q = new CvPoint2D32f[4];

                        frame = Cv.QueryFrame(vid);

                        Cv.Flip(frame, frame, FlipMode.Y);

                        IplImage blank = Cv.CreateImage(Cv.GetSize(frame), BitDepth.U8, 3);
                        Cv.Zero(blank);
                        Cv.Not(blank, blank);

                        // q = the four corners of the video frame.
                        q[0].X = (float)frame.Width * 0;
                        q[0].Y = (float)frame.Height * 0;
                        q[1].X = (float)frame.Width;
                        q[1].Y = (float)frame.Height * 0;

                        q[2].X = (float)frame.Width;
                        q[2].Y = (float)frame.Height;
                        q[3].X = (float)frame.Width * 0;
                        q[3].Y = (float)frame.Height;

                        // p = the four outer chessboard corners (same indices as option 1).
                        p[0].X = corners[0].X;
                        p[0].Y = corners[0].Y;
                        p[1].X = corners[4].X;
                        p[1].Y = corners[4].Y;

                        p[2].X = corners[19].X;
                        p[2].Y = corners[19].Y;
                        p[3].X = corners[15].X;
                        p[3].Y = corners[15].Y;

                        Cv.GetPerspectiveTransform(q, p, out warp_matrix);

                        Cv.Zero(neg_img);
                        Cv.Zero(cpy_img);

                        Cv.WarpPerspective(frame, neg_img, warp_matrix);
                        Cv.WarpPerspective(blank, cpy_img, warp_matrix);
                        Cv.Not(cpy_img, cpy_img);

                        Cv.And(cpy_img, img, cpy_img);
                        Cv.Or(cpy_img, neg_img, img);

                        Cv.Flip(img, img, FlipMode.Y);
                        //Cv.ShowImage("video", img);
                        Bitmap bm = BitmapConverter.ToBitmap(img);
                        bm.SetResolution(pictureBox1.Width, pictureBox1.Height);
                        pictureBox1.Image = bm;
                    }
                    else
                    {/*
                      * CvPoint[] p = new CvPoint[4];
                      *
                      * p[0].X = (int)corners[0].X;
                      * p[0].Y = (int)corners[0].Y;
                      * p[1].X = (int)corners[4].X;
                      * p[1].Y = (int)corners[4].Y;
                      *
                      * p[2].X = (int)corners[19].X;
                      * p[2].Y = (int)corners[19].Y;
                      * p[3].X = (int)corners[15].X;
                      * p[3].Y = (int)corners[15].Y;
                      *
                      * Cv.Line(img, p[0], p[1], CvColor.Red, 2);
                      * Cv.Line(img, p[1], p[2], CvColor.Green, 2);
                      * Cv.Line(img, p[2], p[3], CvColor.Blue, 2);
                      * Cv.Line(img, p[3], p[0], CvColor.Yellow, 2);
                      */
                        //or
                        // Default: just visualize the detected corners.
                        Cv.DrawChessboardCorners(img, b_size, corners, found);
                        Cv.Flip(img, img, FlipMode.Y);

                        //Cv.ShowImage("video", img);
                        Bitmap bm = BitmapConverter.ToBitmap(img);
                        bm.SetResolution(pictureBox1.Width, pictureBox1.Height);
                        pictureBox1.Image = bm;
                    }
                }
                else
                {
                    // Board not (fully) found: show the grayscale frame instead.
                    Cv.Flip(gray, gray, FlipMode.Y);
                    //Cv.ShowImage("video", gray);
                    Bitmap bm = BitmapConverter.ToBitmap(gray);
                    bm.SetResolution(pictureBox1.Width, pictureBox1.Height);
                    pictureBox1.Image = bm;
                }
            }
        }
Exemple #18
0
        /// <summary>
        ///
        /// </summary>
        /// <param name="termCrit"></param>
        /// <returns></returns>
#else
        /// <summary>
        ///
        /// </summary>
        /// <param name="termCrit"></param>
        /// <returns></returns>
#endif
        /// <summary>
        /// Grows the forest of trees until the supplied termination criteria are met.
        /// </summary>
        /// <param name="termCrit">Termination criteria for forest growth.</param>
        /// <returns>True when the native call reports success (non-zero).</returns>
        protected bool GrowForest(CvTermCriteria termCrit)
        {
            int result = NativeMethods.ml_CvERTrees_grow_forest(ptr, termCrit);
            return result != 0;
        }
 /// <summary>
 /// Native binding: grows the CvERTrees forest for the given model pointer;
 /// returns non-zero on success.
 /// </summary>
 public static extern int ml_CvERTrees_grow_forest(
     IntPtr obj, CvTermCriteria termCrit);
Exemple #20
0
        /// <summary>
        /// 初期化
        /// </summary>
        /// <param name="nclusters"></param>
        /// <param name="covMatType"></param>
        /// <param name="termCrit"></param>
#else
        /// <summary>
        /// Training constructor
        /// </summary>
        /// <param name="nclusters"></param>
        /// <param name="covMatType"></param>
        /// <param name="termCrit"></param>
#endif
        /// <summary>
        /// Training constructor — allocates the underlying native EM model.
        /// </summary>
        /// <param name="nclusters">Number of mixture components.</param>
        /// <param name="covMatType">Type of the mixture covariance matrices.</param>
        /// <param name="termCrit">Termination criteria of the EM procedure.</param>
        public CvEM(int nclusters, EMCovMatType covMatType, CvTermCriteria termCrit)
        {
            // Allocate the unmanaged model first, then inform the GC of its footprint.
            IntPtr handle = MLInvoke.cv_EM_new(nclusters, covMatType, termCrit);
            ptr = handle;
            NotifyMemoryPressure(SizeOf);
        }
Exemple #21
0
 /// <summary>
 /// Native binding: runs pyramid mean-shift filtering (cv::pyrMeanShiftFiltering)
 /// with spatial radius sp, color radius sr, and the given pyramid depth.
 /// </summary>
 public static extern void imgproc_pyrMeanShiftFiltering(IntPtr src, IntPtr dst,
                                                         double sp, double sr, int maxLevel, CvTermCriteria termcrit);
    /// <summary>
    /// Uses the CamShift algorithm, combined with a Kalman filter, to track the region
    /// matching the base histogram (_histogramToTrack) through succeeding frames.
    /// Updates _rectToTrack / rotatedBoxToTrack and draws tracking overlays on the frame.
    /// </summary>
    /// <param name="_image">Current frame; overlays are drawn directly onto it.</param>
    void CalculateCamShift(CvMat _image)
    {
        CvMat _backProject = CalculateBackProjection(_image, _histogramToTrack);

        // Create convolution kernel for erosion and dilation
        // NOTE(review): these structuring elements are created per call and never
        // released — confirm whether the wrapper frees them, else this leaks.
        IplConvKernel elementErode  = Cv.CreateStructuringElementEx(10, 10, 5, 5, ElementShape.Rect, null);
        IplConvKernel elementDilate = Cv.CreateStructuringElementEx(4, 4, 2, 2, ElementShape.Rect, null);

        // Try eroding and then dilating the back projection
        // Hopefully this will get rid of the noise in favor of the blob objects.
        Cv.Erode(_backProject, _backProject, elementErode, 1);
        Cv.Dilate(_backProject, _backProject, elementDilate, 1);


        if (backprojWindowFlag)
        {
            Cv.ShowImage("Back Projection", _backProject);
        }

        // Parameters returned by Camshift algorithm
        CvBox2D         _outBox;
        CvConnectedComp _connectComp;

        // Set the criteria for the CamShift algorithm
        // Maximum 10 iterations and at least 1 pixel change in centroid
        CvTermCriteria term_criteria = Cv.TermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 10, 1);

        // Draw object center based on Kalman filter prediction
        CvMat _kalmanPrediction = _kalman.Predict();

        int predictX = Mathf.FloorToInt((float)_kalmanPrediction.GetReal2D(0, 0));
        int predictY = Mathf.FloorToInt((float)_kalmanPrediction.GetReal2D(1, 0));

        // Run the CamShift algorithm
        if (Cv.CamShift(_backProject, _rectToTrack, term_criteria, out _connectComp, out _outBox) > 0)
        {
            // Use the CamShift estimate of the object center to update the Kalman model
            CvMat _kalmanMeasurement = Cv.CreateMat(2, 1, MatrixType.F32C1);
            // Update Kalman model with raw data from Camshift estimate
            _kalmanMeasurement.Set2D(0, 0, _outBox.Center.X); // Raw X position
            _kalmanMeasurement.Set2D(1, 0, _outBox.Center.Y); // Raw Y position
                                                              //_kalmanMeasurement.Set2D (2, 0, _outBox.Center.X - lastPosition.X);
                                                              //_kalmanMeasurement.Set2D (3, 0, _outBox.Center.Y - lastPosition.Y);

            lastPosition.X = Mathf.FloorToInt(_outBox.Center.X);
            lastPosition.Y = Mathf.FloorToInt(_outBox.Center.Y);

            _kalman.Correct(_kalmanMeasurement); // Correct Kalman model with raw data

            // CamShift function returns two values: _connectComp and _outBox.

            //	_connectComp contains is the newly estimated position and size
            //  of the region of interest. This is passed into the subsequent
            // call to CamShift
            // Update the ROI rectangle with CamShift's new estimate of the ROI
            _rectToTrack = CheckROIBounds(_connectComp.Rect);

            // Draw a rectangle over the tracked ROI
            // This method will draw the rectangle but won't rotate it.
            _image.DrawRect(_rectToTrack, CvColor.Aqua);
            _image.DrawMarker(predictX, predictY, CvColor.Aqua);

            // _outBox contains a rotated rectangle esimating the position, size, and orientation
            // of the object we want to track (specified by the initial region of interest).
            // We then take this estimation and draw a rotated bounding box.
            // This method will draw the rotated rectangle
            rotatedBoxToTrack = _outBox;

            // Draw a rotated rectangle representing Camshift's estimate of the
            // object's position, size, and orientation.
            _image.DrawPolyLine(rectangleBoxPoint(_outBox.BoxPoints()), true, CvColor.Red);
        }
        else
        {
            //Debug.Log ("Object lost by Camshift tracker");

            // Tracker lost the object: fall back to the Kalman prediction and
            // recentre the search window there for the next frame.
            _image.DrawMarker(predictX, predictY, CvColor.Purple, MarkerStyle.CircleLine);

            _rectToTrack = CheckROIBounds(new CvRect(predictX - Mathf.FloorToInt(_rectToTrack.Width / 2),
                                                     predictY - Mathf.FloorToInt(_rectToTrack.Height / 2),
                                                     _rectToTrack.Width, _rectToTrack.Height));
            _image.DrawRect(_rectToTrack, CvColor.Purple);
        }

        if (trackWindowFlag)
        {
            Cv.ShowImage("Image", _image);
        }
    }
Exemple #23
0
 /// <summary>
 /// Native binding: refines corner locations to sub-pixel accuracy (cv::cornerSubPix).
 /// </summary>
 public static extern void imgproc_cornerSubPix(IntPtr image, IntPtr corners,
                                                Size winSize, Size zeroZone, CvTermCriteria criteria);
 /// <summary>
 /// Native binding: k-means clustering (cv::kmeans); returns the compactness measure.
 /// </summary>
 public static extern double core_kmeans(IntPtr data, int k, IntPtr bestLabels,
                                         CvTermCriteria criteria, int attempts, int flags, IntPtr centers);
Exemple #25
0
        // Builds the training data and trains the classifier ("creates the training file").
        /// <summary>
        /// Converts the given feature list into OpenCV matrices, visualizes/dumps the
        /// training data for debugging, runs a LibSVM experiment on wine.txt, and finally
        /// trains the OpenCV SVM (svm.Train) with fixed RBF parameters.
        /// </summary>
        /// <param name="FeatureList">Feature values (two floats per sample) with class IDs.</param>
        public void TrainingExec(List <FaceFeature.FeatureValue> FeatureList)
        {
            // Copy the feature values into a Mat; two values form one point.
            // Array of (2 floats) * (size of the list).
            double[] feature_array = new double[2 * FeatureList.Count];

            // Flatten the feature list into an array the SVM can consume.
            SetFeatureListToArray(FeatureList, ref feature_array);
            CvPoint2D32f[] feature_points = new CvPoint2D32f[feature_array.Length / 2];
            int            id             = 0;

            // NOTE(review): 'id' always equals 'i' here — redundant counter.
            for (int i = 0; i < feature_array.Length / 2; i++)
            {
                feature_points[id].X = (float)feature_array[i * 2];
                feature_points[id].Y = (float)feature_array[i * 2 + 1];
                id++;
            }
            CvMat dataMat = new CvMat(feature_points.Length, 2, MatrixType.F32C1, feature_points, true);

            // These are the label (class ID) numbers.
            int[] id_array = new int[FeatureList.Count];
            for (int i = 0; i < id_array.Length; i++)
            {
                id_array[i] = FeatureList[i].ID;
            }
            CvMat resMat = new CvMat(id_array.Length, 1, MatrixType.S32C1, id_array, true);


            // Visualize the data and responses.
            CvPoint2D32f[] points = new CvPoint2D32f[id_array.Length];
            int            idx    = 0;

            for (int i = 0; i < id_array.Length; i++)
            {
                points[idx].X = (float)feature_array[i * 2];
                points[idx].Y = (float)feature_array[i * 2 + 1];
                idx++;
            }

            // Plot the training data.
            Debug_DrawInputFeature(points, id_array);

            // Debug: dump the features and IDs being trained on.
            OutPut_FeatureAndID(points, id_array);

            // LibSVM experiment.
            // Load the training data.
            // NOTE(review): this whole LibSVM block trains/evaluates on wine.txt and its
            // result ('accuracy') is never used — looks like leftover debug code; confirm.
            SVMProblem problem     = SVMProblemHelper.Load(@"wine.txt");
            SVMProblem testProblem = SVMProblemHelper.Load(@"wine.txt");

            SVMParameter parameter = new SVMParameter();

            parameter.Type   = LibSVMsharp.SVMType.C_SVC;
            parameter.Kernel = LibSVMsharp.SVMKernelType.RBF;
            parameter.C      = 1;
            parameter.Gamma  = 1;

            SVMModel model = SVM.Train(problem, parameter);

            double[] target = new double[testProblem.Length];


            for (int i = 0; i < testProblem.Length; i++)
            {
                target[i] = SVM.Predict(model, testProblem.X[i]);
            }
            double accuracy = SVMHelper.EvaluateClassificationProblem(testProblem, target);


            // Set up the OpenCV SVM.
            CvTermCriteria criteria = new CvTermCriteria(1000, 0.000001);
            CvSVMParams    param    = new CvSVMParams(
                OpenCvSharp.CPlusPlus.SVMType.CSvc,
                OpenCvSharp.CPlusPlus.SVMKernelType.Rbf,
                10.0,            // degree
                100.0,           // gamma (tune)
                1.0,             // coeff0
                10.0,            // c (tune)
                0.5,             // nu
                0.1,             // p
                null,
                criteria);

            // Run training.
            svm.Train(dataMat, resMat, null, null, param);

            Debug_DispPredict();
        }
 /// <summary>
 /// Native binding: pyramid mean-shift filtering (cv::pyrMeanShiftFiltering).
 /// </summary>
 public static extern void imgproc_pyrMeanShiftFiltering(IntPtr src, IntPtr dst,
     double sp, double sr, int maxLevel, CvTermCriteria termcrit);
Exemple #27
0
 /// <summary>
 /// Native binding: sets the term_crit field of a CvRTParams instance.
 /// </summary>
 public static extern void ml_CvRTParams_term_crit_set(IntPtr obj, CvTermCriteria value);
 /// <summary>
 /// Native binding: fills a WCvSVMParams struct with the given SVM parameters.
 /// </summary>
 public static extern void ml_CvSVMParams_new2(ref WCvSVMParams result, 
     int svmType, int kernelType, double degree, double gamma, double coef0,
     double c, double nu, double p, IntPtr classWeights, CvTermCriteria termCrit);
Exemple #29
0
 /// <summary>
 /// Native binding: pyramidal Lucas-Kanade sparse optical flow (legacy C API).
 /// </summary>
 public static extern void cvCalcOpticalFlowPyrLK(IntPtr prev, IntPtr curr, IntPtr prev_pyr, IntPtr curr_pyr, IntPtr prev_features, IntPtr curr_features, int count, CvSize win_size, int level, IntPtr status, IntPtr track_error, CvTermCriteria criteria, int flags);
 /// <summary>
 /// Native binding: CamShift object tracking (cv::CamShift); updates the search
 /// window in place and returns the resulting rotated box.
 /// </summary>
 public static extern CvBox2D video_CamShift(
     IntPtr probImage, ref CvRect window, CvTermCriteria criteria);
Exemple #31
0
 /// <summary>
 /// Native binding: CamShift tracking (legacy cvCamShift C API).
 /// </summary>
 public static extern int cvCamShift(IntPtr prob_image, CvRect window, CvTermCriteria criteria, IntPtr comp, IntPtr box);
 /// <summary>
 /// Native binding: pyramidal Lucas-Kanade optical flow over point vectors
 /// (cv::calcOpticalFlowPyrLK).
 /// </summary>
 public static extern void video_calcOpticalFlowPyrLK_vector(
     IntPtr prevImg, IntPtr nextImg,
     Point2f[] prevPts, int prevPtsSize,
     IntPtr nextPts, IntPtr status, IntPtr err,
     CvSize winSize, int maxLevel, CvTermCriteria criteria,
     int flags, double minEigThreshold);
Exemple #33
0
 /// <summary>
 /// Native binding: restores a contour from its binary tree representation
 /// (legacy cvContourFromContourTree C API).
 /// </summary>
 public static extern IntPtr cvContourFromContourTree(IntPtr tree, IntPtr storage, CvTermCriteria criteria);
Exemple #34
0
 /// <summary>
 /// Initializes a new TermCriteria from a legacy CvTermCriteria value,
 /// copying its type, iteration limit and epsilon.
 /// </summary>
 /// <param name="criteria">Legacy criteria to convert.</param>
 public TermCriteria(CvTermCriteria criteria)
 {
     Epsilon = criteria.Epsilon;
     MaxCount = criteria.MaxIter;
     Type = criteria.Type;
 }
Exemple #35
0
 /// <summary>
 /// Native binding: sub-pixel corner refinement (legacy cvFindCornerSubPix C API).
 /// </summary>
 public static extern void cvFindCornerSubPix(IntPtr image, IntPtr corners, int count, CvSize win, CvSize zero_zone, CvTermCriteria criteria);
Exemple #36
0
        /// <summary>
        /// 初期化
        /// </summary>
        /// <param name="_nclusters">混合数</param>
        /// <param name="_cov_mat_type">混合分布共変動行列のタイプ</param>
        /// <param name="_start_step">アルゴリズムをスタートする最初のステップ</param>
        /// <param name="_term_crit">処理の終了条件</param>
        /// <param name="_probs">確率p_i,kの初期値. start_step=EMStartStep.Eのときのみ使用する(その場合はnullであってはならない).</param>
        /// <param name="_weights">混合分布の重みπ_kの初期値. start_step=EMStartStep.Eのときのみ(nullでない場合は)使用する. </param>
        /// <param name="_means">混合分布の平均 a_kの初期値. start_step=EMStartStep.Eのときのみ使用する(その場合はnullであってはならない).</param>
        /// <param name="_covs">混合分布の共変動行列Skの初期値. start_step=EMStartStep.Eのときのみ(nullでない場合は)使用する.</param>
#else
		/// <summary>
        /// Constructor
        /// </summary>
        /// <param name="_nclusters">The number of mixtures. Some of EM implementation could determine the optimal number of mixtures within a specified value range, but that is not the case in ML yet. </param>
        /// <param name="_cov_mat_type">The type of the mixture covariation matrices</param>
        /// <param name="_start_step">The initial step the algorithm starts from</param>
        /// <param name="_term_crit">Termination criteria of the procedure. </param>
        /// <param name="_probs">Initial probabilities p_i,k; are used (and must be not null) only when start_step=EMStartStep.E. </param>
        /// <param name="_weights">Initial mixture weights π_k; are used (if not null) only when start_step=EMStartStep.E. </param>
        /// <param name="_means">Initial mixture means a_k; are used (and must be not null) only when start_step=EMStartStep.E. </param>
        /// <param name="_covs">Initial mixture covariation matrices S_k; are used (if not null) only when start_step=EMStartStep.E. </param>
#endif
        /// <summary>
        /// Constructor: fills the wrapped native WCvEMParams struct with the given EM
        /// parameters; optional matrices may be null (required only for start_step = E).
        /// </summary>
		public CvEMParams(int _nclusters, EMCovMatType _cov_mat_type, EMStartStep _start_step, CvTermCriteria _term_crit, 
            CvMat _probs, CvMat _weights, CvMat _means, CvMat[] _covs ) 
        {
            unsafe
            {
                // Raw pointers are stored in the interop struct; null CvMats map to null pointers.
                _data = new WCvEMParams()
                {
                    nclusters = _nclusters,
                    cov_mat_type = (int)_cov_mat_type,
                    start_step = (int)_start_step,
                    term_crit = _term_crit,
                    probs = (_probs == null) ? null : _probs.CvPtr.ToPointer(),
                    weights = (_weights == null) ? null : _weights.CvPtr.ToPointer(),
                    means = (_means == null) ? null : _means.CvPtr.ToPointer(),
                    //covs = (_means == null) ? null : (void**)_means.CvPtr,
                };
            }
            // Covariance matrices are kept on the managed side instead of the raw struct.
            Covs = _covs;
        }
Exemple #37
0
 public static extern int cvMeanShift(IntPtr prob_image, CvRect window, CvTermCriteria criteria, IntPtr comp);
Exemple #38
0
        /// <summary>
        /// Grows the forest by delegating to the native CvERTrees grow_forest routine.
        /// </summary>
        /// <param name="termCrit">Termination criteria for growing the forest.</param>
        /// <returns>The boolean result of the native call — presumably true on success; TODO confirm.</returns>
#else
        /// <summary>
        /// Grows the forest by delegating to the native CvERTrees grow_forest routine.
        /// </summary>
        /// <param name="termCrit">Termination criteria for growing the forest.</param>
        /// <returns>The boolean result of the native call — presumably true on success; TODO confirm.</returns>
#endif
        protected bool GrowForest(CvTermCriteria termCrit)
        {
            // Thin wrapper: forwards the native object pointer and the criteria unchanged.
            return MLInvoke.CvERTrees_grow_forest(ptr, termCrit);
        }
 /// <summary>
 /// P/Invoke binding — presumably wraps cv::calibrateCamera taking InputArray-style pointer
 /// arguments; the double return is presumably the reprojection error — TODO confirm.
 /// </summary>
 public static extern double calib3d_calibrateCamera_InputArray(
     IntPtr[] objectPoints, int objectPointsSize,
     IntPtr[] imagePoints, int imagePointsSize,
     CvSize imageSize,
     IntPtr cameraMatrix,IntPtr distCoeffs,
     IntPtr rvecs, IntPtr tvecs,
     int flags, CvTermCriteria criteria);
Exemple #40
0
        public static CvPoint2D32f[] CornerSubPix(Mat image, CvSize winSize, CvSize zeroZone, CvTermCriteria criteria)
        {
            if (image == null)
                throw new ArgumentNullException("image");

            // Native call fills a std::vector of corner coordinates, which is
            // then copied into a managed array; the vector wrapper is always disposed.
            StdVectorVec2f corners = new StdVectorVec2f();
            try
            {
                CppInvoke.cv_cornerSubPix(image.CvPtr, corners.CvPtr, winSize, zeroZone, criteria);
                return corners.ToArray<CvPoint2D32f>();
            }
            finally
            {
                corners.Dispose();
            }
        }
 /// <summary>
 /// P/Invoke binding — presumably the vector-overload counterpart of
 /// calib3d_calibrateCamera_InputArray, passing the camera matrix and distortion
 /// coefficients as managed arrays; the double return is presumably the
 /// reprojection error — TODO confirm.
 /// </summary>
 public static extern double calib3d_calibrateCamera_vector(
     IntPtr[] objectPoints, int opSize1, int[] opSize2,
     IntPtr[] imagePoints, int ipSize1, int[] ipSize2,
     CvSize imageSize,
     [In, Out] double[,] cameraMatrix,
     [In, Out] double[] distCoeffs, int distCoeffsSize,
     IntPtr rvecs, IntPtr tvecs,
     int flags, CvTermCriteria criteria);
Exemple #42
0
 /// <summary>
 /// P/Invoke binding — presumably wraps cv::kmeans (k clusters, multiple attempts);
 /// the double return is presumably the compactness measure — TODO confirm.
 /// </summary>
 public static extern double core_kmeans(IntPtr data, int k, IntPtr bestLabels,
     CvTermCriteria criteria, int attempts, int flags, IntPtr centers);
 /// <summary>
 /// P/Invoke binding — presumably wraps cv::stereoCalibrate with InputArray-style
 /// pointer arguments (R/T: inter-camera rotation/translation, E/F: essential and
 /// fundamental matrices); the double return is presumably the reprojection
 /// error — TODO confirm.
 /// </summary>
 public static extern double calib3d_stereoCalibrate_InputArray(
     IntPtr[] objectPoints, int opSize,
     IntPtr[] imagePoints1, int ip1Size,
     IntPtr[] imagePoints2, int ip2Size,
     IntPtr cameraMatrix1,
     IntPtr distCoeffs1,
     IntPtr cameraMatrix2,
     IntPtr distCoeffs2,
     CvSize imageSize,
     IntPtr R, IntPtr T,
     IntPtr E, IntPtr F,
     CvTermCriteria criteria, int flags);
Exemple #44
0
 /// <summary>
 /// adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria
 /// </summary>
 /// <param name="inputCorners">Initial coordinates of the input corners and refined coordinates provided for output.</param>
 /// <param name="winSize">Half of the side length of the search window.</param>
 /// <param name="zeroZone">Half of the size of the dead region in the middle of the search zone 
 /// over which the summation in the formula below is not done. It is used sometimes to avoid possible singularities 
 /// of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such a size.</param>
 /// <param name="criteria">Criteria for termination of the iterative process of corner refinement. 
 /// That is, the process of corner position refinement stops either after criteria.maxCount iterations 
 /// or when the corner position moves by less than criteria.epsilon on some iteration.</param>
 /// <returns>The refined corner coordinates.</returns>
 public Point2f[] CornerSubPix(IEnumerable<Point2f> inputCorners,
     Size winSize, Size zeroZone, CvTermCriteria criteria)
 {
     // Convenience overload: delegates to Cv2.CornerSubPix with this image as the source.
     return Cv2.CornerSubPix(this, inputCorners, winSize, zeroZone, criteria);
 }
Exemple #45
0
        /// <summary>
        /// Trains (or loads) an SVM classifier from a numeric-class sample database,
        /// reports the per-sample predictions and recognition rates, and optionally
        /// saves the trained model.
        /// </summary>
        /// <param name="dataFilename">Path to the sample database consumed by ReadNumClassData.</param>
        /// <param name="filenameToSave">If non-null, the trained classifier is saved to this file.</param>
        /// <param name="filenameToLoad">If non-null, a previously trained classifier is loaded
        /// from this file instead of training a new one.</param>
        private void BuildSvmClassifier(string dataFilename, string filenameToSave, string filenameToLoad)
        {
            // C_SVC penalty parameter
            const float SvmC = 1000;
            // RBF kernel parameter
            const float SvmGamma = 0.1f;

            CvMat data      = null;
            CvMat responses = null;
            CvMat sampleIdx = null;

            CvSVM          svm      = new CvSVM();
            CvTermCriteria criteria = new CvTermCriteria(100, 0.001);

            // Everything below runs inside try/finally so the native CvMat/CvSVM
            // resources are disposed even on the early returns and on exceptions
            // (the original code leaked them on those paths).
            try
            {
                try
                {
                    ReadNumClassData(dataFilename, 16, out data, out responses);
                }
                catch
                {
                    Console.WriteLine("Could not read the database {0}", dataFilename);
                    return;
                }
                Console.WriteLine("The database {0} is loaded.", dataFilename);

                int nsamplesAll = data.Rows;
                // NOTE(review): only 20% of the samples are used for training here;
                // confirm this ratio is intentional.
                int ntrainSamples = (int)(nsamplesAll * 0.2);
                double trainHr = 0, testHr = 0;

                // Create or load the SVM classifier
                if (filenameToLoad != null)
                {
                    // load classifier from the specified file
                    svm.Load(filenameToLoad);
                    ntrainSamples = 0;
                    if (svm.GetSupportVectorCount() == 0)
                    {
                        Console.WriteLine("Could not read the classifier {0}", filenameToLoad);
                        return;
                    }
                    Console.WriteLine("The classifier {0} is loaded.", filenameToLoad);
                }
                else
                {
                    // create classifier by using <data> and <responses>
                    Console.Write("Training the classifier ...");

                    // 2. create sample_idx: first ntrainSamples columns flagged 1
                    //    (training set), the remainder 0 (test set)
                    sampleIdx = new CvMat(1, nsamplesAll, MatrixType.U8C1);
                    {
                        CvMat mat;
                        Cv.GetCols(sampleIdx, out mat, 0, ntrainSamples);
                        mat.Set(CvScalar.RealScalar(1));

                        Cv.GetCols(sampleIdx, out mat, ntrainSamples, nsamplesAll);
                        mat.SetZero();
                    }

                    // 3. train classifier
                    // Parameters unused by the chosen method/kernel may stay 0,
                    // and the class weights may stay null.
                    svm.Train(data, responses, null, sampleIdx, new CvSVMParams(CvSVM.C_SVC, CvSVM.RBF, 0, SvmGamma, 0, SvmC, 0, 0, null, criteria));
                    Console.WriteLine();
                }

                // compute prediction error on train and test data
                for (int i = 0; i < nsamplesAll; i++)
                {
                    CvMat sample;
                    Cv.GetRow(data, out sample, i);

                    double r = svm.Predict(sample);
                    // compare results (class labels are integer-valued, so the
                    // epsilon comparison amounts to an exact match)
                    Console.WriteLine(
                        "predict: {0}, responses: {1}, {2}",
                        (char)r,
                        (char)responses.DataArraySingle[i],
                        Math.Abs((double)r - responses.DataArraySingle[i]) <= float.Epsilon ? "Good!" : "Bad!"
                        );
                    r = Math.Abs((double)r - responses.DataArraySingle[i]) <= float.Epsilon ? 1 : 0;

                    if (i < ntrainSamples)
                    {
                        trainHr += r;
                    }
                    else
                    {
                        testHr += r;
                    }
                }

                testHr /= (double)(nsamplesAll - ntrainSamples);
                // Guard the 0/0 case when a pre-trained classifier was loaded
                // (ntrainSamples == 0); trainHr is not printed in that branch anyway.
                trainHr = (ntrainSamples > 0) ? trainHr / ntrainSamples : 0;
                Console.WriteLine("Gamma={0:F5}, C={1:F5}", SvmGamma, SvmC);
                if (filenameToLoad != null)
                {
                    Console.WriteLine("Recognition rate: test = {0:F1}%", testHr * 100.0);
                }
                else
                {
                    Console.WriteLine("Recognition rate: train = {0:F1}%, test = {1:F1}%", trainHr * 100.0, testHr * 100.0);
                }

                Console.WriteLine("Number of Support Vector: {0}", svm.GetSupportVectorCount());
                // Save SVM classifier to file if needed
                if (filenameToSave != null)
                {
                    svm.Save(filenameToSave);
                }

                Console.Read();
            }
            finally
            {
                if (sampleIdx != null)
                {
                    sampleIdx.Dispose();
                }
                if (data != null)
                {
                    data.Dispose();
                }
                if (responses != null)
                {
                    responses.Dispose();
                }
                svm.Dispose();
            }
        }