예제 #1 (Example #1)
        /// <summary>
        /// Trains an ANN_MLP network on the grayscale images loaded by
        /// <c>LoadImageGray()</c>, then re-classifies the training set and
        /// prints per-sample results plus an overall recognition count.
        /// </summary>
        public void TrainANNModel()
        {
            LoadImageGray();

            // The output layer must know the number of classes.
            // NOTE(review): hard-coded to 1 — a single output neuron holding the
            // numeric class id; confirm this matches the labelling scheme.
            //var numberOfClasses = imageBindingModel.ConvertAll(x => x.Label).Distinct().Count();
            var numberOfClasses = 1;
            // One input neuron per pixel of the (grayscale) image.
            var numberInput     = resolutionImage.Width * resolutionImage.Height;

            int            trainSampleCount = imageBindingModel.Count;
            Matrix <float> trainData        = new Matrix <float>(trainSampleCount, numberInput);
            Matrix <float> trainClasses     = new Matrix <float>(trainSampleCount, numberOfClasses);

            for (int i = 0; i < trainSampleCount; i++)
            {
                // The target depends only on the sample, so assign it once per row.
                // (The original wrote it numberInput times inside the pixel loop.)
                trainClasses[i, 0] = (float)imageBindingModel[i].Id;

                for (int j = 0; j < numberInput; j++)
                {
                    trainData[i, j] = imageBindingModel[i].Image.Bytes[j];
                }
            }

            // Topology: input layer, one hidden layer (2x input size), one output neuron.
            Matrix <int> layerSize = new Matrix <int>(new int[] { numberInput, numberInput + numberInput, numberOfClasses });

            MCvANN_MLP_TrainParams parameters = new MCvANN_MLP_TrainParams();

            parameters.term_crit       = new MCvTermCriteria(100, 1.0e-8);
            parameters.train_method    = Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.BACKPROP;
            parameters.bp_dw_scale     = 0.1;
            parameters.bp_moment_scale = 0.1;

            using (ANN_MLP network = new ANN_MLP(layerSize, Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM, 1.0, 1.0))
            {
                network.Train(trainData, trainClasses, null, null, parameters, Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.DEFAULT);

                Matrix <float> sample     = new Matrix <float>(1, numberInput);
                Matrix <float> prediction = new Matrix <float>(1, numberOfClasses);

                int recog_true = 0;

                // Evaluate on the training set itself (resubstitution accuracy).
                for (int i = 0; i < imageBindingModel.Count; i++)
                {
                    for (int j = 0; j < numberInput; j++)
                    {
                        sample[0, j] = trainData[i, j];
                    }

                    network.Predict(sample, prediction);
                    var response = prediction.Data[0, 0];

                    // A response within 0.5 of the integer id counts as correct.
                    bool correct = Math.Abs(response - imageBindingModel[i].Id) < 0.5;
                    if (correct)
                    {
                        recog_true++;
                    }

                    Console.WriteLine($"recoge : {response} -- target: {imageBindingModel[i].Id} -- result: {correct}");
                }

                // Report the overall result (was computed but never used before).
                Console.WriteLine($"recognized {recog_true} of {imageBindingModel.Count} samples");
            }
        }
예제 #2 (Example #2)
 /// <summary>
 /// Native P/Invoke binding that trains the ANN_MLP model referenced by
 /// <paramref name="model"/>. The corresponding [DllImport] attribute is
 /// declared above this span.
 /// </summary>
 /// <param name="model">Pointer to the native ANN_MLP model.</param>
 /// <param name="trainData">Pointer to the training-sample matrix (one vector per row).</param>
 /// <param name="responses">Pointer to the matrix of target output vectors.</param>
 /// <param name="sampleWeights">Pointer to per-sample weights, or IntPtr.Zero when unused.</param>
 /// <param name="sampleIdx">Pointer to the sample-index mask, or IntPtr.Zero when unused.</param>
 /// <param name="parameters">Training parameters, passed by reference to native code.</param>
 /// <param name="flags">Training flags.</param>
 /// <returns>The number of iterations performed.</returns>
 public static extern int CvANN_MLPTrain(
     IntPtr model,
     IntPtr trainData,
     IntPtr responses,
     IntPtr sampleWeights,
     IntPtr sampleIdx,
     ref MCvANN_MLP_TrainParams parameters,
     MlEnum.AnnMlpTrainingFlag flags);
예제 #3 (Example #3)
 /// <summary>
 /// Train the ANN_MLP model with the specified parameters.
 /// </summary>
 /// <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row.</param>
 /// <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row.</param>
 /// <param name="sampleWeights">Only used by RPROP; may be null otherwise. Optional floating-point vector of per-sample weights, e.g. to balance hit-rate against false-alarm rate for certain classes.</param>
 /// <param name="parameters">The parameters for ANN_MLP.</param>
 /// <param name="flag">The training flag.</param>
 /// <returns>The number of iterations performed.</returns>
 public int Train(
     Matrix<float> trainData,
     Matrix<float> responses,
     Matrix<float> sampleWeights,
     MCvANN_MLP_TrainParams parameters,
     MlEnum.AnnMlpTrainingFlag flag)
 {
     IntPtr weightsPtr = sampleWeights == null ? IntPtr.Zero : sampleWeights.Ptr;

     // This overload supplies no sample-index mask, hence the null pointer.
     int iterations = MlInvoke.CvANN_MLPTrain(
         _ptr,
         trainData.Ptr,
         responses.Ptr,
         weightsPtr,
         IntPtr.Zero,
         ref parameters,
         flag);

     return iterations;
 }
예제 #4 (Example #4)
 /// <summary>
 /// Train the ANN_MLP model with the specified parameters.
 /// </summary>
 /// <param name="trainData">The training data. A 32-bit floating-point, single-channel matrix, one vector per row.</param>
 /// <param name="responses">A floating-point matrix of the corresponding output vectors, one vector per row.</param>
 /// <param name="sampleWeights">Only used by RPROP; may be null otherwise. Optional floating-point vector of per-sample weights, e.g. to balance hit-rate against false-alarm rate for certain classes.</param>
 /// <param name="sampleIdx">Can be null if not needed. When specified, identifies the samples of interest; an nx1 Matrix&lt;Byte&gt; mask.</param>
 /// <param name="parameters">The parameters for ANN_MLP.</param>
 /// <param name="flag">The training flag.</param>
 /// <returns>The number of iterations performed.</returns>
 public int Train(
     Matrix<float> trainData,
     Matrix<float> responses,
     Matrix<float> sampleWeights,
     Matrix<Byte> sampleIdx,
     MCvANN_MLP_TrainParams parameters,
     MlEnum.ANN_MLP_TRAINING_FLAG flag)
 {
     // Translate the optional managed matrices into native handles.
     IntPtr weightsPtr = sampleWeights == null ? IntPtr.Zero : sampleWeights.Ptr;
     IntPtr idxPtr     = sampleIdx == null ? IntPtr.Zero : sampleIdx.Ptr;

     int iterations = MlInvoke.CvANN_MLPTrain(
         _ptr,
         trainData.Ptr,
         responses.Ptr,
         weightsPtr,
         idxPtr,
         ref parameters,
         flag);

     return iterations;
 }
예제 #5 (Example #5)
    /// <summary>
    /// Demo: trains an ANN_MLP on two synthetic 2D Gaussian clusters (classes 1
    /// and 2), colors a 500x500 image by the predicted class of every pixel,
    /// overlays the training points, and shows the result in an image viewer.
    /// </summary>
    public void ANN()
    {
        int trainSampleCount = 100;

        // NOTE(review): the Matrix/Image instances below are IDisposable and are
        // never disposed; acceptable for a demo, but wrap in using for real code.
        #region Generate the traning data and classes
        Matrix <float> trainData    = new Matrix <float>(trainSampleCount, 2);
        Matrix <float> trainClasses = new Matrix <float>(trainSampleCount, 1);

        Image <Bgr, Byte> img = new Image <Bgr, byte>(500, 500);

        Matrix <float> sample     = new Matrix <float>(1, 2);
        Matrix <float> prediction = new Matrix <float>(1, 1);

        // First half of the rows: cluster centered at (200, 200), sigma 50.
        Matrix <float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
        trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
        // Second half: cluster centered at (300, 300), sigma 50.
        Matrix <float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
        trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

        Matrix <float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
        trainClasses1.SetValue(1);
        Matrix <float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
        trainClasses2.SetValue(2);
        #endregion

        // Topology: 2 inputs (x, y), 5 hidden neurons, 1 output (class value).
        Matrix <int> layerSize = new Matrix <int>(new int[] { 2, 5, 1 });

        MCvANN_MLP_TrainParams parameters = new MCvANN_MLP_TrainParams();
        parameters.term_crit       = new MCvTermCriteria(10, 1.0e-8);
        parameters.train_method    = Emgu.CV.ML.MlEnum.ANN_MLP_TRAIN_METHOD.BACKPROP;
        parameters.bp_dw_scale     = 0.1;
        parameters.bp_moment_scale = 0.1;

        using (ANN_MLP network = new ANN_MLP(layerSize, Emgu.CV.ML.MlEnum.ANN_MLP_ACTIVATION_FUNCTION.SIGMOID_SYM, 1.0, 1.0))
        {
            network.Train(trainData, trainClasses, null, null, parameters, Emgu.CV.ML.MlEnum.ANN_MLP_TRAINING_FLAG.DEFAULT);

            // Classify every pixel to visualize the decision boundary.
            for (int i = 0; i < img.Height; i++)
            {
                for (int j = 0; j < img.Width; j++)
                {
                    sample.Data[0, 0] = j;
                    sample.Data[0, 1] = i;
                    network.Predict(sample, prediction);

                    // estimates the response and get the neighbors' labels
                    float response = prediction.Data[0, 0];

                    // highlight the pixel depending on the accuracy (or confidence)
                    img[i, j] = response < 1.5 ? new Bgr(90, 0, 0) : new Bgr(0, 90, 0);
                }
            }
        }

        // display the original training samples
        for (int i = 0; i < (trainSampleCount >> 1); i++)
        {
            PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
            img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
            // BUGFIX: the original truncated p2's coordinates with an (int) cast,
            // plotting class-2 points slightly off; use float coords like p1.
            PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
            img.Draw(new CircleF(p2, 2), new Bgr(100, 255, 100), -1);
        }
        Emgu.CV.UI.ImageViewer.Show(img);
    }
예제 #6 (Example #6)
        /// <summary>
        /// Test: trains an ANN_MLP on two synthetic 2D Gaussian clusters
        /// (classes 1 and 2), round-trips the model through Save to a temp file,
        /// colors a 500x500 image by the predicted class of every pixel, and
        /// overlays the training points.
        /// </summary>
        public void TestANN_MLP()
        {
            int trainSampleCount = 100;

            #region Generate the traning data and classes
            Matrix <float> trainData    = new Matrix <float>(trainSampleCount, 2);
            Matrix <float> trainClasses = new Matrix <float>(trainSampleCount, 1);

            Image <Bgr, Byte> img = new Image <Bgr, byte>(500, 500);

            Matrix <float> sample     = new Matrix <float>(1, 2);
            Matrix <float> prediction = new Matrix <float>(1, 1);

            // First half of the rows: cluster centered at (200, 200), sigma 50.
            Matrix <float> trainData1 = trainData.GetRows(0, trainSampleCount >> 1, 1);
            trainData1.SetRandNormal(new MCvScalar(200), new MCvScalar(50));
            // Second half: cluster centered at (300, 300), sigma 50.
            Matrix <float> trainData2 = trainData.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
            trainData2.SetRandNormal(new MCvScalar(300), new MCvScalar(50));

            Matrix <float> trainClasses1 = trainClasses.GetRows(0, trainSampleCount >> 1, 1);
            trainClasses1.SetValue(1);
            Matrix <float> trainClasses2 = trainClasses.GetRows(trainSampleCount >> 1, trainSampleCount, 1);
            trainClasses2.SetValue(2);
            #endregion

            // Topology: 2 inputs (x, y), 5 hidden neurons, 1 output (class value).
            Matrix <int> layerSize = new Matrix <int>(new int[] { 2, 5, 1 });

            MCvANN_MLP_TrainParams parameters = new MCvANN_MLP_TrainParams();
            parameters.term_crit       = new MCvTermCriteria(10, 1.0e-8);
            parameters.train_method    = Emgu.CV.ML.MlEnum.AnnMlpTrainMethod.Backprop;
            parameters.bp_dw_scale     = 0.1;
            parameters.bp_moment_scale = 0.1;

            using (ANN_MLP network = new ANN_MLP(layerSize, Emgu.CV.ML.MlEnum.AnnMlpActivationFunction.SigmoidSym, 1.0, 1.0))
            {
                network.Train(trainData, trainClasses, null, parameters, Emgu.CV.ML.MlEnum.AnnMlpTrainingFlag.Default);

#if !NETFX_CORE
                // Exercise model serialization, then clean up the temp file.
                String fileName = Path.Combine(Path.GetTempPath(), "ann_mlp_model.xml");
                network.Save(fileName);
                if (File.Exists(fileName))
                {
                    File.Delete(fileName);
                }
#endif

                // Classify every pixel to visualize the decision boundary.
                for (int i = 0; i < img.Height; i++)
                {
                    for (int j = 0; j < img.Width; j++)
                    {
                        sample.Data[0, 0] = j;
                        sample.Data[0, 1] = i;
                        network.Predict(sample, prediction);

                        // estimates the response and get the neighbors' labels
                        float response = prediction.Data[0, 0];

                        // highlight the pixel depending on the accuracy (or confidence)
                        img[i, j] = response < 1.5 ? new Bgr(90, 0, 0) : new Bgr(0, 90, 0);
                    }
                }
            }

            // display the original training samples
            for (int i = 0; i < (trainSampleCount >> 1); i++)
            {
                PointF p1 = new PointF(trainData1[i, 0], trainData1[i, 1]);
                img.Draw(new CircleF(p1, 2), new Bgr(255, 100, 100), -1);
                // BUGFIX: the original truncated p2's coordinates with an (int)
                // cast, plotting class-2 points slightly off; use float coords.
                PointF p2 = new PointF(trainData2[i, 0], trainData2[i, 1]);
                img.Draw(new CircleF(p2, 2), new Bgr(100, 255, 100), -1);
            }
        }