/// <summary>
/// 決定木を学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="subsampleIdx"></param>
/// <param name="ensemble"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains a decision tree as a weak classifier of a boosted ensemble.
/// </summary>
/// <param name="trainData">Pre-processed training data.</param>
/// <param name="subsampleIdx">Indices of the training subset to use.</param>
/// <param name="ensemble">The boosting ensemble this tree belongs to (may be null).</param>
/// <returns>true if training succeeded.</returns>
#endif
public virtual bool Train(CvDTreeTrainData trainData, CvMat subsampleIdx, CvBoost ensemble)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (subsampleIdx == null)
        throw new ArgumentNullException(nameof(subsampleIdx));

    // ensemble may be null; Cv2.ToPtr maps null to IntPtr.Zero.
    return NativeMethods.ml_CvBoostTree_train(
        ptr, trainData.CvPtr, subsampleIdx.CvPtr, Cv2.ToPtr(ensemble)) != 0;
}
/// <summary>
/// サンプル集合からガウス混合パラメータを推定する
/// </summary>
/// <param name="samples"></param>
/// <param name="logLikelihoods"></param>
/// <param name="labels"></param>
/// <param name="probs"></param>
#else
/// <summary>
/// Estimates Gaussian mixture parameters from the sample set
/// </summary>
/// <param name="samples">Input samples, one sample per row.</param>
/// <param name="logLikelihoods">Optional output of per-sample log-likelihoods.</param>
/// <param name="labels">Optional output of per-sample component labels.</param>
/// <param name="probs">Optional output of per-sample posterior probabilities.</param>
/// <returns>true if the estimation converged successfully.</returns>
#endif
public virtual bool Train(
    InputArray samples,
    OutputArray logLikelihoods = null,
    OutputArray labels = null,
    OutputArray probs = null)
{
    if (disposed)
        throw new ObjectDisposedException("EM");
    if (samples == null)
        throw new ArgumentNullException(nameof(samples));
    samples.ThrowIfDisposed();

    // Optional outputs must be writable when supplied.
    if (logLikelihoods != null)
        logLikelihoods.ThrowIfNotReady();
    if (labels != null)
        labels.ThrowIfNotReady();
    if (probs != null)
        probs.ThrowIfNotReady();

    int ret = NativeMethods.ml_EM_train(
        ptr, samples.CvPtr,
        Cv2.ToPtr(logLikelihoods), Cv2.ToPtr(labels), Cv2.ToPtr(probs));

    // Propagate the native results back into the managed OutputArray wrappers.
    if (logLikelihoods != null)
        logLikelihoods.Fix();
    if (labels != null)
        labels.Fix();
    if (probs != null)
        probs.Fix();

    return ret != 0;
}
/// <summary>
/// Reads a SparseMat value from this file node.
/// </summary>
/// <param name="defaultMat">Value to fall back on when the node is empty (may be null).</param>
/// <returns>The matrix read from the node.</returns>
public SparseMat ReadSparseMat(SparseMat defaultMat = null)
{
    var result = new SparseMat();
    try
    {
        NativeMethods.core_FileNode_read_SparseMat(ptr, result.CvPtr, Cv2.ToPtr(defaultMat));
        return result;
    }
    catch
    {
        // Do not leak the native matrix when the native read throws.
        result.Dispose();
        throw;
    }
}
/// <summary>
/// 高速なマルチスケール Hesian 検出器を用いて keypoint を検出します.
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <returns></returns>
#else
/// <summary>
/// detects keypoints using fast multi-scale Hessian detector
/// </summary>
/// <param name="img">Input image.</param>
/// <param name="mask">Optional region-of-interest mask (may be null).</param>
/// <returns>Detected keypoints.</returns>
#endif
public KeyPoint[] Run(InputArray img, Mat mask)
{
    ThrowIfDisposed();
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    img.ThrowIfDisposed();

    using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.nonfree_SURF_run1(ptr, img.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr);
        return keypointsVec.ToArray();
    }
}
/// <summary>
/// Compute the BRISK features on an image
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="mask">Optional region-of-interest mask (may be null).</param>
/// <returns>Detected keypoints.</returns>
public KeyPoint[] Run(InputArray image, InputArray mask = null)
{
    ThrowIfDisposed();
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfDisposed();

    using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_BRISK_run1(ptr, image.CvPtr, Cv2.ToPtr(mask), keyPointsVec.CvPtr);
        return keyPointsVec.ToArray();
    }
}
/// <summary>
/// 二つの学習サンプル間の近さを取り出す
/// </summary>
/// <param name="sample1"></param>
/// <param name="sample2"></param>
/// <param name="missing1"></param>
/// <param name="missing2"></param>
/// <returns></returns>
#else
/// <summary>
/// Retrieves proximity measure between two training samples
/// </summary>
/// <param name="sample1">First sample.</param>
/// <param name="sample2">Second sample.</param>
/// <param name="missing1">Optional missing-data mask for the first sample.</param>
/// <param name="missing2">Optional missing-data mask for the second sample.</param>
/// <returns>Fraction of trees that put both samples into the same leaf.</returns>
#endif
public virtual float GetProximity(
    CvMat sample1, CvMat sample2,
    CvMat missing1 = null, CvMat missing2 = null)
{
    if (sample1 == null)
        throw new ArgumentNullException(nameof(sample1));
    if (sample2 == null)
        throw new ArgumentNullException(nameof(sample2));

    return NativeMethods.ml_CvRTrees_get_proximity(
        ptr, sample1.CvPtr, sample2.CvPtr, Cv2.ToPtr(missing1), Cv2.ToPtr(missing2));
}
/// <summary>
/// MSERのすべての輪郭情報を抽出する
/// </summary>
/// <param name="image"></param>
/// <param name="mask"></param>
/// <returns></returns>
#else
/// <summary>
/// Extracts the contours of Maximally Stable Extremal Regions
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="mask">Optional region-of-interest mask (may be null).</param>
/// <returns>One point array per detected region contour.</returns>
#endif
public Point[][] Run(Mat image, Mat mask)
{
    ThrowIfDisposed();
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    image.ThrowIfDisposed();

    IntPtr msers;
    NativeMethods.features2d_MSER_detect(ptr, image.CvPtr, out msers, Cv2.ToPtr(mask));

    // Takes ownership of the native vector-of-vector and converts it to managed arrays.
    using (VectorOfVectorPoint msersVec = new VectorOfVectorPoint(msers))
    {
        return msersVec.ToArray();
    }
}
/// <summary>
/// Find one best match for each query descriptor (if mask is empty).
/// </summary>
/// <param name="queryDescriptors">Query descriptor matrix.</param>
/// <param name="trainDescriptors">Train descriptor matrix.</param>
/// <param name="mask">Optional mask of permissible query/train pairs (may be null).</param>
/// <returns>Best match per query descriptor.</returns>
public DMatch[] Match(Mat queryDescriptors, Mat trainDescriptors, Mat mask = null)
{
    ThrowIfDisposed();
    if (queryDescriptors == null)
        throw new ArgumentNullException(nameof(queryDescriptors));
    if (trainDescriptors == null)
        throw new ArgumentNullException(nameof(trainDescriptors));

    using (var matches = new VectorOfDMatch())
    {
        NativeMethods.features2d_DescriptorMatcher_match1(
            ptr,
            queryDescriptors.CvPtr,
            trainDescriptors.CvPtr,
            matches.CvPtr,
            Cv2.ToPtr(mask));
        return matches.ToArray();
    }
}
/// <summary>
/// Sets the training data for this CvDTreeTrainData instance.
/// </summary>
/// <param name="trainData">Training sample matrix.</param>
/// <param name="tflag">Layout of samples in <paramref name="trainData"/> (rows or columns).</param>
/// <param name="responses">Response values for the samples.</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="varType">Optional variable type mask (may be null).</param>
/// <param name="missingMask">Optional missing-measurement mask (may be null).</param>
/// <param name="param">Tree parameters; defaults are used when null.</param>
/// <param name="shared">Whether the data is shared between trees.</param>
/// <param name="addLabels">Whether class labels are added.</param>
/// <param name="updateData">Whether to update existing data rather than replace it.</param>
#else
/// <summary>
/// Sets the training data for this CvDTreeTrainData instance.
/// </summary>
/// <param name="trainData">Training sample matrix.</param>
/// <param name="tflag">Layout of samples in <paramref name="trainData"/> (rows or columns).</param>
/// <param name="responses">Response values for the samples.</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="varType">Optional variable type mask (may be null).</param>
/// <param name="missingMask">Optional missing-measurement mask (may be null).</param>
/// <param name="param">Tree parameters; defaults are used when null.</param>
/// <param name="shared">Whether the data is shared between trees.</param>
/// <param name="addLabels">Whether class labels are added.</param>
/// <param name="updateData">Whether to update existing data rather than replace it.</param>
#endif
public void SetData(
    CvMat trainData,
    DTreeDataLayout tflag,
    CvMat responses,
    CvMat varIdx = null,
    CvMat sampleIdx = null,
    CvMat varType = null,
    CvMat missingMask = null,
    CvDTreeParams param = null,
    bool shared = false,
    bool addLabels = false,
    bool updateData = false)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));

    // Fall back to the default parameter set when none is given.
    var effectiveParam = param ?? new CvDTreeParams();

    NativeMethods.ml_CvDTreeTrainData_set_data(
        ptr,
        trainData.CvPtr,
        (int)tflag,
        responses.CvPtr,
        Cv2.ToPtr(varIdx),
        Cv2.ToPtr(sampleIdx),
        Cv2.ToPtr(varType),
        Cv2.ToPtr(missingMask),
        effectiveParam.CvPtr,
        shared ? 1 : 0,
        addLabels ? 1 : 0,
        updateData ? 1 : 0);
}
/// <summary>
/// keypoint を検出し,その SURF ディスクリプタを計算します.[useProvidedKeypoints = true]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
/// <param name="useProvidedKeypoints"></param>
#else
/// <summary>
/// detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
/// </summary>
/// <param name="img">Input image.</param>
/// <param name="mask">Optional region-of-interest mask (may be null).</param>
/// <param name="keypoints">Detected (or refined) keypoints.</param>
/// <param name="descriptors">Flattened descriptor values.</param>
/// <param name="useProvidedKeypoints">When true, compute descriptors for caller-supplied keypoints.</param>
#endif
public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints,
    out float[] descriptors, bool useProvidedKeypoints = false)
{
    ThrowIfDisposed();
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    img.ThrowIfDisposed();

    using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
    using (VectorOfFloat descriptorsVec = new VectorOfFloat())
    {
        NativeMethods.nonfree_SURF_run2_vector(ptr, img.CvPtr, Cv2.ToPtr(mask),
            keypointsVec.CvPtr, descriptorsVec.CvPtr, useProvidedKeypoints ? 1 : 0);
        keypoints = keypointsVec.ToArray();
        descriptors = descriptorsVec.ToArray();
    }
}
/// <summary>
/// Find k best matches for each query descriptor (in increasing order of distances).
/// compactResult is used when mask is not empty. If compactResult is false matches
/// vector will have the same size as queryDescriptors rows. If compactResult is true
/// matches vector will not contain matches for fully masked out query descriptors.
/// </summary>
/// <param name="queryDescriptors">Query descriptor matrix.</param>
/// <param name="trainDescriptors">Train descriptor matrix.</param>
/// <param name="k">Number of best matches to find per query descriptor.</param>
/// <param name="mask">Optional mask of permissible query/train pairs (may be null).</param>
/// <param name="compactResult">Omit entries for fully masked-out query descriptors.</param>
/// <returns>Up to k matches per query descriptor.</returns>
public DMatch[][] KnnMatch(Mat queryDescriptors, Mat trainDescriptors,
    int k, Mat mask = null, bool compactResult = false)
{
    ThrowIfDisposed();
    if (queryDescriptors == null)
        throw new ArgumentNullException(nameof(queryDescriptors));
    if (trainDescriptors == null)
        throw new ArgumentNullException(nameof(trainDescriptors));

    using (VectorOfVectorDMatch matchesVec = new VectorOfVectorDMatch())
    {
        NativeMethods.features2d_DescriptorMatcher_knnMatch(
            ptr, queryDescriptors.CvPtr, trainDescriptors.CvPtr,
            matchesVec.CvPtr, k, Cv2.ToPtr(mask), compactResult ? 1 : 0);
        return matchesVec.ToArray();
    }
}
/// <summary>
/// 入力ベクトルに対する決定木の葉ノードを返す
/// </summary>
/// <param name="sample"></param>
/// <param name="missingDataMask"></param>
/// <param name="preprocessedInput">falseは通常の入力を意味する.true の場合,このメソッドは離散入力変数の全ての値があらかじめ, 0..&lt;num_of_categories_i&gt;-1 の範囲に正規化されていることを仮定する(決定木は内部的にはこのような正規化された表現を用いている).これは決定木集合の高速な予測に役立つ.連続変数の入力変数に対しては,このフラグは利用されない.</param>
/// <returns></returns>
#else
/// <summary>
/// Returns the leaf node of decision tree corresponding to the input vector
/// </summary>
/// <param name="sample">Input sample vector.</param>
/// <param name="missingDataMask">Optional mask of missing measurements (may be null).</param>
/// <param name="preprocessedInput">When true, categorical inputs are assumed already normalized.</param>
/// <returns>The matching leaf node, or null if prediction failed.</returns>
#endif
public virtual CvDTreeNode Predict(
    Mat sample, Mat missingDataMask = null, bool preprocessedInput = false)
{
    if (sample == null)
        throw new ArgumentNullException(nameof(sample));
    sample.ThrowIfDisposed();

    IntPtr node = NativeMethods.ml_CvDTree_predict_Mat(
        ptr, sample.CvPtr, Cv2.ToPtr(missingDataMask), preprocessedInput ? 1 : 0);

    // A null native pointer means no node could be produced.
    return node == IntPtr.Zero ? null : new CvDTreeNode(node);
}
/// <summary>
/// Find best matches for each query descriptor which have distance less than
/// maxDistance (in increasing order of distances).
/// </summary>
/// <param name="queryDescriptors">Query descriptor matrix.</param>
/// <param name="trainDescriptors">Train descriptor matrix.</param>
/// <param name="maxDistance">Distance threshold for accepted matches.</param>
/// <param name="mask">Optional mask of permissible query/train pairs (may be null).</param>
/// <param name="compactResult">Omit entries for fully masked-out query descriptors.</param>
/// <returns>Matches within the radius, per query descriptor.</returns>
public DMatch[][] RadiusMatch(Mat queryDescriptors, Mat trainDescriptors,
    float maxDistance, Mat mask = null, bool compactResult = false)
{
    ThrowIfDisposed();
    if (queryDescriptors == null)
        throw new ArgumentNullException(nameof(queryDescriptors));
    if (trainDescriptors == null)
        throw new ArgumentNullException(nameof(trainDescriptors));

    using (var matches = new VectorOfVectorDMatch())
    {
        NativeMethods.features2d_DescriptorMatcher_radiusMatch1(
            ptr,
            queryDescriptors.CvPtr,
            trainDescriptors.CvPtr,
            matches.CvPtr,
            maxDistance,
            Cv2.ToPtr(mask),
            compactResult ? 1 : 0);
        return matches.ToArray();
    }
}
/// <summary>
/// 学習データを与えて初期化
/// </summary>
/// <param name="trainData">既知のサンプル (m*n)</param>
/// <param name="responses">既知のサンプルのクラス (m*1)</param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <returns></returns>
#else
/// <summary>
/// Bayes classifier for normally distributed data
/// </summary>
/// <param name="trainData">Known samples (m*n)</param>
/// <param name="responses">Classes for known samples (m*1)</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <returns></returns>
#endif
public CvNormalBayesClassifier(
    Mat trainData, Mat responses, Mat varIdx = null, Mat sampleIdx = null)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    ptr = NativeMethods.ml_CvNormalBayesClassifier_new2_Mat(
        trainData.CvPtr, responses.CvPtr, Cv2.ToPtr(varIdx), Cv2.ToPtr(sampleIdx));
}
/// <summary>
/// 学習データを与えて初期化
/// </summary>
/// <param name="trainData"></param>
/// <param name="tflag"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="varType"></param>
/// <param name="missingMask"></param>
/// <param name="param"></param>
/// <param name="shared"></param>
/// <param name="addLabels"></param>
/// <returns></returns>
#else
/// <summary>
/// Training constructor
/// </summary>
/// <param name="trainData">Training sample matrix.</param>
/// <param name="tflag">Layout of samples in <paramref name="trainData"/> (rows or columns).</param>
/// <param name="responses">Response values for the samples.</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="varType">Optional variable type mask (may be null).</param>
/// <param name="missingMask">Optional missing-measurement mask (may be null).</param>
/// <param name="param">Tree parameters; defaults are used when null.</param>
/// <param name="shared">Whether the data is shared between trees.</param>
/// <param name="addLabels">Whether class labels are added.</param>
/// <returns></returns>
#endif
public CvDTreeTrainData(
    CvMat trainData,
    DTreeDataLayout tflag,
    CvMat responses,
    CvMat varIdx = null,
    CvMat sampleIdx = null,
    CvMat varType = null,
    CvMat missingMask = null,
    CvDTreeParams param = null,
    bool shared = false,
    bool addLabels = false)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    // Fall back to the default parameter set when none is given.
    if (param == null)
        param = new CvDTreeParams();

    ptr = NativeMethods.ml_CvDTreeTrainData_new2(
        trainData.CvPtr, (int)tflag, responses.CvPtr,
        Cv2.ToPtr(varIdx), Cv2.ToPtr(sampleIdx),
        Cv2.ToPtr(varType), Cv2.ToPtr(missingMask),
        param.CvPtr,
        shared ? 1 : 0,
        addLabels ? 1 : 0);
}
/// <summary>
/// ブーストされた分類器の学習
/// </summary>
/// <param name="trainData"></param>
/// <param name="tflag"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="varType"></param>
/// <param name="missingMask"></param>
/// <param name="param"></param>
/// <param name="update"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains boosted tree classifier
/// </summary>
/// <param name="trainData">Training sample matrix.</param>
/// <param name="tflag">Layout of samples in <paramref name="trainData"/> (rows or columns).</param>
/// <param name="responses">Response values for the samples.</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="varType">Optional variable type mask (may be null).</param>
/// <param name="missingMask">Optional missing-measurement mask (may be null).</param>
/// <param name="param">Boosting parameters; defaults are used when null.</param>
/// <param name="update">Update the existing model rather than train from scratch.</param>
/// <returns>true if training succeeded.</returns>
#endif
public virtual bool Train(
    Mat trainData,
    DTreeDataLayout tflag,
    Mat responses,
    Mat varIdx = null,
    Mat sampleIdx = null,
    Mat varType = null,
    Mat missingMask = null,
    CvBoostParams param = null,
    bool update = false)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    // Fall back to the default parameter set when none is given.
    if (param == null)
        param = new CvBoostParams();

    int ret = NativeMethods.ml_CvBoost_train_Mat(
        ptr, trainData.CvPtr, (int)tflag, responses.CvPtr,
        Cv2.ToPtr(varIdx), Cv2.ToPtr(sampleIdx),
        Cv2.ToPtr(varType), Cv2.ToPtr(missingMask),
        param.CvPtr, update ? 1 : 0);
    return ret != 0;
}
/// <summary>
/// 入力サンプルに対する応答を予測する
/// </summary>
/// <param name="sample">入力サンプル</param>
/// <param name="missing">データ欠損マスク(オプション).データ欠損を扱うためには,弱い決定木が代理分岐を含まなければならない.</param>
/// <param name="slice">予測に用いられる弱い決定木シーケンスの連続的部分集合(スライス).デフォルトでは,全ての弱い分類器が用いられる.</param>
/// <param name="rawMode">falseは通常の入力を意味する.true の場合,このメソッドは離散入力変数の全ての値があらかじめ, 0..&lt;num_of_categories_i&gt;-1 の範囲に正規化されていることを仮定する(決定木は内部的にはこのような正規化された表現を用いている).これは決定木集合の高速な予測に役立つ.連続変数の入力変数に対しては,このフラグは利用されない.</param>
/// <param name="returnSum"></param>
/// <returns>重み付き投票に基づく出力クラスラベル</returns>
#else
/// <summary>
/// Predicts response for the input sample
/// </summary>
/// <param name="sample">The input sample. </param>
/// <param name="missing">The optional mask of missing measurements. To handle missing measurements, the weak classifiers must include surrogate splits. </param>
/// <param name="slice">The continuous subset of the sequence of weak classifiers to be used for prediction. By default, all the weak classifiers are used. </param>
/// <param name="rawMode">The last parameter is normally set to false that implies a regular input. If it is true, the method assumes that all the values of the discrete input variables have been already normalized to 0..&lt;num_of_categoriesi&gt;-1 ranges. (as the decision tree uses such normalized representation internally). It is useful for faster prediction with tree ensembles. For ordered input variables the flag is not used. </param>
/// <param name="returnSum">Return the raw sum of weak responses instead of the class label.</param>
/// <returns>the output class label based on the weighted voting. </returns>
#endif
public float Predict(
    Mat sample,
    Mat missing = null,
    Range? slice = null,
    bool rawMode = false,
    bool returnSum = false)
{
    if (sample == null)
        throw new ArgumentNullException(nameof(sample));
    sample.ThrowIfDisposed();

    // Default to the whole weak-classifier sequence when no slice is given.
    CvSlice slice0 = slice.GetValueOrDefault(CvSlice.WholeSeq);

    return NativeMethods.ml_CvBoost_predict_Mat(
        ptr, sample.CvPtr, Cv2.ToPtr(missing), slice0,
        rawMode ? 1 : 0, returnSum ? 1 : 0);
}
/// <summary>
/// keypoint を検出し,その SURF ディスクリプタを計算します.[useProvidedKeypoints = true]
/// </summary>
/// <param name="img"></param>
/// <param name="mask"></param>
/// <param name="keypoints"></param>
/// <param name="descriptors"></param>
/// <param name="useProvidedKeypoints"></param>
#else
/// <summary>
/// detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
/// </summary>
/// <param name="img">Input image.</param>
/// <param name="mask">Optional region-of-interest mask (may be null).</param>
/// <param name="keypoints">Detected (or refined) keypoints.</param>
/// <param name="descriptors">Output descriptor matrix.</param>
/// <param name="useProvidedKeypoints">When true, compute descriptors for caller-supplied keypoints.</param>
#endif
public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints,
    OutputArray descriptors, bool useProvidedKeypoints = false)
{
    ThrowIfDisposed();
    if (img == null)
        throw new ArgumentNullException(nameof(img));
    if (descriptors == null)
        throw new ArgumentNullException(nameof(descriptors));
    img.ThrowIfDisposed();
    descriptors.ThrowIfNotReady();

    using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.nonfree_SURF_run2_OutputArray(ptr, img.CvPtr, Cv2.ToPtr(mask),
            keypointsVec.CvPtr, descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
        keypoints = keypointsVec.ToArray();
    }
    // Propagate the native result back into the managed OutputArray wrapper
    // (was missing; the parallel BRISK overload already does this).
    descriptors.Fix();
}
/// <summary>
/// Compute the BRISK features and descriptors on an image
/// </summary>
/// <param name="image">Input image.</param>
/// <param name="mask">Optional region-of-interest mask (may be null).</param>
/// <param name="keyPoints">Detected (or refined) keypoints.</param>
/// <param name="descriptors">Output descriptor matrix.</param>
/// <param name="useProvidedKeypoints">When true, compute descriptors for caller-supplied keypoints.</param>
public void Run(InputArray image, InputArray mask, out KeyPoint[] keyPoints,
    OutputArray descriptors, bool useProvidedKeypoints = false)
{
    ThrowIfDisposed();
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (descriptors == null)
        throw new ArgumentNullException(nameof(descriptors));
    image.ThrowIfDisposed();
    descriptors.ThrowIfNotReady();

    using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_BRISK_run2(ptr, image.CvPtr, Cv2.ToPtr(mask),
            keyPointsVec.CvPtr, descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
        keyPoints = keyPointsVec.ToArray();
    }
    // Propagate the native result back into the managed OutputArray wrapper.
    descriptors.Fix();
}
/// <summary>
/// ランダムツリー(Extremely randomized trees)の学習
/// </summary>
/// <param name="trainData"></param>
/// <param name="tflag"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="varType"></param>
/// <param name="missingMask"></param>
/// <param name="param"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains the extremely randomized trees model.
/// </summary>
/// <param name="trainData">Training sample matrix.</param>
/// <param name="tflag">Layout of samples in <paramref name="trainData"/> (rows or columns).</param>
/// <param name="responses">Response values for the samples.</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="varType">Optional variable type mask (may be null).</param>
/// <param name="missingMask">Optional missing-measurement mask (may be null).</param>
/// <param name="param">Random-trees parameters; defaults are used when null.</param>
/// <returns>true if training succeeded.</returns>
#endif
public virtual bool Train(
    CvMat trainData,
    int tflag,
    CvMat responses,
    CvMat varIdx = null,
    CvMat sampleIdx = null,
    CvMat varType = null,
    CvMat missingMask = null,
    CvRTParams param = null)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    // Fall back to the default parameter set when none is given.
    if (param == null)
        param = new CvRTParams();

    return NativeMethods.ml_CvERTrees_train1(
        ptr, trainData.CvPtr, tflag, responses.CvPtr,
        Cv2.ToPtr(varIdx), Cv2.ToPtr(sampleIdx),
        Cv2.ToPtr(varType), Cv2.ToPtr(missingMask),
        param.CvPtr) != 0;
}
/// <summary>
/// 決定木を学習する
/// </summary>
/// <param name="trainData"></param>
/// <param name="tflag"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="varType"></param>
/// <param name="missingMask"></param>
/// <param name="param"></param>
/// <returns></returns>
#else
/// <summary>
/// Trains decision tree
/// </summary>
/// <param name="trainData">Training sample matrix.</param>
/// <param name="tflag">Layout of samples in <paramref name="trainData"/> (rows or columns).</param>
/// <param name="responses">Response values for the samples.</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="varType">Optional variable type mask (may be null).</param>
/// <param name="missingMask">Optional missing-measurement mask (may be null).</param>
/// <param name="param">Tree parameters; defaults are used when null.</param>
/// <returns>true if training succeeded.</returns>
#endif
public virtual bool Train(
    Mat trainData,
    DTreeDataLayout tflag,
    Mat responses,
    Mat varIdx,
    Mat sampleIdx,
    Mat varType,
    Mat missingMask,
    CvDTreeParams param)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    // Fall back to the default parameter set when none is given.
    var effectiveParam = param ?? new CvDTreeParams();

    int ret = NativeMethods.ml_CvDTree_train_Mat(
        ptr,
        trainData.CvPtr,
        (int)tflag,
        responses.CvPtr,
        Cv2.ToPtr(varIdx),
        Cv2.ToPtr(sampleIdx),
        Cv2.ToPtr(varType),
        Cv2.ToPtr(missingMask),
        effectiveParam.CvPtr);
    return ret != 0;
}
/// <summary>
/// 学習データを与えて初期化
/// </summary>
/// <param name="trainData"></param>
/// <param name="tflag"></param>
/// <param name="responses"></param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="varType"></param>
/// <param name="missingMask"></param>
/// <param name="param"></param>
#else
/// <summary>
/// Training constructor
/// </summary>
/// <param name="trainData">Training sample matrix.</param>
/// <param name="tflag">Layout of samples in <paramref name="trainData"/> (rows or columns).</param>
/// <param name="responses">Response values for the samples.</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="varType">Optional variable type mask (may be null).</param>
/// <param name="missingMask">Optional missing-measurement mask (may be null).</param>
/// <param name="param">Boosting parameters; defaults are used when null.</param>
#endif
public CvBoost(
    CvMat trainData,
    DTreeDataLayout tflag,
    CvMat responses,
    CvMat varIdx = null,
    CvMat sampleIdx = null,
    CvMat varType = null,
    CvMat missingMask = null,
    CvBoostParams param = null)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    // Fall back to the default parameter set when none is given.
    if (param == null)
        param = new CvBoostParams();

    ptr = NativeMethods.ml_CvBoost_new_CvMat(
        trainData.CvPtr, (int)tflag, responses.CvPtr,
        Cv2.ToPtr(varIdx), Cv2.ToPtr(sampleIdx),
        Cv2.ToPtr(varType), Cv2.ToPtr(missingMask),
        param.CvPtr);
}
/// <summary>
/// 入力サンプルに対する応答を予測する
/// </summary>
/// <param name="sample">入力サンプル</param>
/// <param name="missing">データ欠損マスク(オプション).データ欠損を扱うためには,弱い決定木が代理分岐を含まなければならない.</param>
/// <param name="weakResponses">個々の弱い決定木からの応答の出力パラメータ(オプション)で,これは浮動小数点型ベクトルである.ベクトルの要素数は,slice 長と等しくなければならない.</param>
/// <param name="slice">予測に用いられる弱い決定木シーケンスの連続的部分集合(スライス).デフォルトでは,全ての弱い分類器が用いられる.</param>
/// <param name="rawMode">falseは通常の入力を意味する.true の場合,このメソッドは離散入力変数の全ての値があらかじめ, 0..&lt;num_of_categories_i&gt;-1 の範囲に正規化されていることを仮定する(決定木は内部的にはこのような正規化された表現を用いている).これは決定木集合の高速な予測に役立つ.連続変数の入力変数に対しては,このフラグは利用されない.</param>
/// <param name="returnSum"></param>
/// <returns>重み付き投票に基づく出力クラスラベル</returns>
#else
/// <summary>
/// Predicts response for the input sample
/// </summary>
/// <param name="sample">The input sample. </param>
/// <param name="missing">The optional mask of missing measurements. To handle missing measurements, the weak classifiers must include surrogate splits. </param>
/// <param name="weakResponses">The optional output parameter, a floating-point vector, of responses from each individual weak classifier. The number of elements in the vector must be equal to the slice length. </param>
/// <param name="slice">The continuous subset of the sequence of weak classifiers to be used for prediction. By default, all the weak classifiers are used. </param>
/// <param name="rawMode">The last parameter is normally set to false that implies a regular input. If it is true, the method assumes that all the values of the discrete input variables have been already normalized to 0..&lt;num_of_categoriesi&gt;-1 ranges. (as the decision tree uses such normalized representation internally). It is useful for faster prediction with tree ensembles. For ordered input variables the flag is not used. </param>
/// <param name="returnSum">Return the raw sum of weak responses instead of the class label.</param>
/// <returns>the output class label based on the weighted voting. </returns>
#endif
public float Predict(
    CvMat sample,
    CvMat missing = null,
    CvMat weakResponses = null,
    CvSlice? slice = null,
    bool rawMode = false,
    bool returnSum = false)
{
    if (sample == null)
        throw new ArgumentNullException(nameof(sample));

    // Default to the whole weak-classifier sequence when no slice is given.
    CvSlice effectiveSlice = slice.GetValueOrDefault(CvSlice.WholeSeq);

    return NativeMethods.ml_Boost_predict_CvMat(
        ptr,
        sample.CvPtr,
        Cv2.ToPtr(missing),
        Cv2.ToPtr(weakResponses),
        effectiveSlice,
        rawMode ? 1 : 0,
        returnSum ? 1 : 0);
}
/// <summary>
/// モデルの学習
/// </summary>
/// <param name="trainData">既知のサンプル (m*n)</param>
/// <param name="responses">既知のサンプルのクラス (m*1)</param>
/// <param name="varIdx"></param>
/// <param name="sampleIdx"></param>
/// <param name="update">モデルを最初から学習する(false)か,新しい学習データを用いて更新する(true)か</param>
/// <returns></returns>
#else
/// <summary>
/// Trains the model
/// </summary>
/// <param name="trainData">Known samples (m*n)</param>
/// <param name="responses">Classes for known samples (m*1)</param>
/// <param name="varIdx">Optional subset of variables to use (may be null).</param>
/// <param name="sampleIdx">Optional subset of samples to use (may be null).</param>
/// <param name="update">Adds known samples to model(true) or makes a new one(false)</param>
/// <returns>true if training succeeded.</returns>
#endif
public virtual bool Train(
    Mat trainData, Mat responses, Mat varIdx, Mat sampleIdx, bool update = false)
{
    if (trainData == null)
        throw new ArgumentNullException(nameof(trainData));
    if (responses == null)
        throw new ArgumentNullException(nameof(responses));
    // Reject disposed inputs up front, as the training constructor does.
    trainData.ThrowIfDisposed();
    responses.ThrowIfDisposed();

    return NativeMethods.ml_CvNormalBayesClassifier_train_Mat(
        ptr, trainData.CvPtr, responses.CvPtr,
        Cv2.ToPtr(varIdx), Cv2.ToPtr(sampleIdx), update ? 1 : 0) != 0;
}
/// <summary>
/// 入力ベクトルの近傍を探す
/// </summary>
/// <param name="samples">既知のサンプル (l*n)</param>
/// <param name="k">探索する近傍の数の最大数</param>
/// <param name="results"></param>
/// <param name="neighbors"></param>
/// <param name="neighborResponses">それぞれのサンプルの近傍 (l*k)</param>
/// <param name="dist">サンプルから近傍までの距離</param>
/// <returns></returns>
#else
/// <summary>
/// Finds the K nearest neighbors of samples
/// </summary>
/// <param name="samples">Known samples (l*n)</param>
/// <param name="k">max neighbors to find</param>
/// <param name="results">Optional output of predicted responses (may be null).</param>
/// <param name="neighbors">Optional output buffers for neighbor pointers (may be null).</param>
/// <param name="neighborResponses">Neighbors for each samples (l*k)</param>
/// <param name="dist">Distance from each sample to neighbors</param>
/// <returns>The prediction result for a single sample, as returned by the native call.</returns>
#endif
public virtual float FindNearest(
    CvMat samples,
    int k,
    CvMat results = null,
    float[][] neighbors = null,
    CvMat neighborResponses = null,
    CvMat dist = null)
{
    if (samples == null)
        throw new ArgumentNullException(nameof(samples));

    // Without a neighbors buffer, pass null straight through to native code.
    if (neighbors == null)
    {
        return NativeMethods.ml_CvKNearest_find_nearest_CvMat(
            ptr, samples.CvPtr, k, Cv2.ToPtr(results), null,
            Cv2.ToPtr(neighborResponses), Cv2.ToPtr(dist));
    }

    // Pin the jagged managed array for the duration of the native call.
    using (var pinnedNeighbors = new ArrayAddress2<Single>(neighbors))
    {
        return NativeMethods.ml_CvKNearest_find_nearest_CvMat(
            ptr, samples.CvPtr, k, Cv2.ToPtr(results), pinnedNeighbors.Pointer,
            Cv2.ToPtr(neighborResponses), Cv2.ToPtr(dist));
    }
}
/// <summary>
/// サンプル集合からガウス混合パラメータを推定する
/// </summary>
/// <param name="samples"></param>
/// <param name="means0"></param>
/// <param name="covs0"></param>
/// <param name="weights0"></param>
/// <param name="logLikelihoods"></param>
/// <param name="labels"></param>
/// <param name="probs"></param>
#else
/// <summary>
/// Estimates Gaussian mixture parameters from the sample set
/// </summary>
/// <param name="samples">Input samples, one sample per row.</param>
/// <param name="means0">Initial means of the mixture components.</param>
/// <param name="covs0">Optional initial covariance matrices (may be null).</param>
/// <param name="weights0">Optional initial component weights (may be null).</param>
/// <param name="logLikelihoods">Optional output of per-sample log-likelihoods.</param>
/// <param name="labels">Optional output of per-sample component labels.</param>
/// <param name="probs">Optional output of per-sample posterior probabilities.</param>
#endif
public virtual bool TrainE(
    InputArray samples,
    InputArray means0,
    InputArray covs0 = null,
    InputArray weights0 = null,
    OutputArray logLikelihoods = null,
    OutputArray labels = null,
    OutputArray probs = null)
{
    if (disposed)
        throw new ObjectDisposedException("EM");
    if (samples == null)
        throw new ArgumentNullException(nameof(samples));
    if (means0 == null)
        throw new ArgumentNullException(nameof(means0));
    samples.ThrowIfDisposed();
    means0.ThrowIfDisposed();

    // Optional inputs must not be disposed; optional outputs must be writable.
    if (logLikelihoods != null)
        logLikelihoods.ThrowIfNotReady();
    if (covs0 != null)
        covs0.ThrowIfDisposed();
    if (weights0 != null)
        weights0.ThrowIfDisposed();
    if (labels != null)
        labels.ThrowIfNotReady();
    if (probs != null)
        probs.ThrowIfNotReady();

    int result = NativeMethods.ml_EM_trainE(
        ptr,
        samples.CvPtr,
        means0.CvPtr,
        Cv2.ToPtr(covs0),
        Cv2.ToPtr(weights0),
        Cv2.ToPtr(logLikelihoods),
        Cv2.ToPtr(labels),
        Cv2.ToPtr(probs));

    // Propagate the native results back into the managed OutputArray wrappers.
    if (logLikelihoods != null)
        logLikelihoods.Fix();
    if (labels != null)
        labels.Fix();
    if (probs != null)
        probs.Fix();

    return result != 0;
}
/// <summary>
/// Creates a FLANN-based descriptor matcher.
/// </summary>
/// <param name="indexParams">FLANN index parameters (null = library default).</param>
/// <param name="searchParams">FLANN search parameters (null = library default).</param>
public FlannBasedMatcher(IndexParams indexParams = null, SearchParams searchParams = null)
{
    // Null parameters map to IntPtr.Zero, letting the native side pick defaults.
    var indexPtr = Cv2.ToPtr(indexParams);
    var searchPtr = Cv2.ToPtr(searchParams);
    ptr = NativeMethods.features2d_FlannBasedMatcher_new(indexPtr, searchPtr);
}
/// <summary>
/// Computes an image descriptor using the set visual vocabulary.
/// </summary>
/// <param name="image">Image, for which the descriptor is computed.</param>
/// <param name="keypoints">Keypoints detected in the input image.</param>
/// <param name="imgDescriptor">Computed output image descriptor.</param>
/// <param name="pointIdxsOfClusters">pointIdxsOfClusters Indices of keypoints that belong to the cluster.
/// This means that pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster(word of vocabulary) returned if it is non-zero.</param>
/// <param name="descriptors">Descriptors of the image keypoints that are returned if they are non-zero.</param>
public void Compute(Mat image, out KeyPoint[] keypoints, Mat imgDescriptor,
    out int[][] pointIdxsOfClusters, Mat descriptors = null)
{
    if (IsDisposed)
        throw new ObjectDisposedException(GetType().Name);
    if (image == null)
        throw new ArgumentNullException(nameof(image));
    if (imgDescriptor == null)
        throw new ArgumentNullException(nameof(imgDescriptor));

    using (var keypointsOut = new VectorOfKeyPoint())
    using (var clusterIdxsOut = new VectorOfVectorInt())
    {
        NativeMethods.features2d_BOWImgDescriptorExtractor_compute1(
            ptr, image.CvPtr, keypointsOut.CvPtr, imgDescriptor.CvPtr,
            clusterIdxsOut.CvPtr, Cv2.ToPtr(descriptors));
        keypoints = keypointsOut.ToArray();
        pointIdxsOfClusters = clusterIdxsOut.ToArray();
    }
    // Keep the managed wrappers alive until the native call has finished with them.
    GC.KeepAlive(image);
    GC.KeepAlive(imgDescriptor);
    GC.KeepAlive(descriptors);
}
/// <summary>
/// Detect keypoints in an image.
/// </summary>
/// <param name="image">The image.</param>
/// <param name="mask">Mask specifying where to look for keypoints (optional).
/// Must be a char matrix with non-zero values in the region of interest.</param>
/// <returns>The detected keypoints.</returns>
public KeyPoint[] Detect(Mat image, Mat mask = null)
{
    if (image == null)
        throw new ArgumentNullException(nameof(image));

    using (var keypoints = new VectorOfKeyPoint())
    {
        NativeMethods.features2d_FeatureDetector_detect(
            ptr, image.CvPtr, keypoints.CvPtr, Cv2.ToPtr(mask));
        return keypoints.ToArray();
    }
}