Example #1
        static void Main(string[] args)
        {
            Image<Gray, Byte>[] trainingImages = new Image<Gray, Byte>[2];
            trainingImages[0] = new Image<Gray, byte>("C:\\Image\\Romy.jpg");
            trainingImages[1] = new Image<Gray, byte>("C:\\Image\\Stevie.jpg");

            int[] labels = new int[] { 0 , 1 };

            MCvTermCriteria termCrit = new MCvTermCriteria(16, 0.001);

            EigenFaceRecognizer recognizer = new EigenFaceRecognizer(0,0.2);
            Image<Gray, Byte> testImage = new Image<Gray, Byte>("C:\\Image\\Stevie.jpg");

            recognizer.Train(trainingImages,labels);
            EigenFaceRecognizer.PredictionResult result = recognizer.Predict(testImage);

            Console.WriteLine(result.Label);
            Console.WriteLine(result.Distance); // eigen distance of the predicted match
            Console.ReadKey();
        }
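Note that `termCrit` above is constructed but never handed to `EigenFaceRecognizer`; `MCvTermCriteria` is simply the termination-criteria struct used throughout the examples below. A minimal sketch of its three constructor forms (iteration count only, epsilon only, or both):

    // Stop after at most 16 iterations.
    MCvTermCriteria byIterations = new MCvTermCriteria(16);

    // Stop once the change between iterations drops below 0.001.
    MCvTermCriteria byEpsilon = new MCvTermCriteria(0.001);

    // Stop on whichever of the two conditions is met first.
    MCvTermCriteria both = new MCvTermCriteria(16, 0.001);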
Example #2
       public static List<List<Point>> Lines(List<Point> input, int lanes = 2)
        {
            float[,] samples = new float[input.Count, 2];
            int i = 0;
            foreach (var p in input)
            {
                samples[i, 0] = p.X;
                samples[i, 1] = p.Y;
                ++i;
            }

           MCvTermCriteria term = new MCvTermCriteria();

           Matrix<float> samplesMatrix = new Matrix<float>(samples);
           Matrix<Int32> labels = new Matrix<Int32>(input.Count, 1);
           CvInvoke.cvKMeans2(samplesMatrix, 2, labels, term, lanes, IntPtr.Zero, Emgu.CV.CvEnum.KMeansInitType.RandomCenters, IntPtr.Zero, IntPtr.Zero);

           List<Point> leftLane = new List<Point>(input.Count);
           List<Point> rightLane = new List<Point>(input.Count);
           for (i = 0; i < input.Count; ++i)
           {
               if (labels[i, 0] == 0)
                   leftLane.Add(input[i]);
               else
                   rightLane.Add(input[i]);
           }

           return new List<List<Point>> { leftLane, rightLane };
        }
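A minimal call site for the helper above; the point values are illustrative and assume `input` holds candidate lane points from an earlier detection step:

    List<Point> input = new List<Point>
    {
        new Point(100, 400), new Point(110, 380), new Point(120, 360),   // left-lane candidates
        new Point(500, 400), new Point(490, 380), new Point(480, 360)    // right-lane candidates
    };

    List<List<Point>> lanes = Lines(input);   // clusters the points into k = 2 groups
    List<Point> leftLane  = lanes[0];
    List<Point> rightLane = lanes[1];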
Example #3
        /// <summary>
        /// Trains SVM using Bio inspired features.
        /// </summary>
        /// <param name="trainData"> Feature vectors for training. </param>
        /// <param name="trainClasses"> Class vector of instances for training. </param>
        /// <param name="saveModelName"> saved Model name using  </param>
        /// <param name="svm_type"> Svm type. </param>
        /// <param name="C"> Cost. </param>
        /// <param name="coef0"> Coeff. </param>
        /// <param name="degree"> Degree. </param>
        /// <param name="eps"> Eps for termination criteria. </param>
        /// <param name="gamma"> Gamma. </param>
        /// <param name="kernel_type"> Kernel Type of svm. </param>
        /// <param name="nu"> Nu. </param>
        /// <param name="maxIter"> Maximun number of iterations. </param>
        /// <param name="termCritType"> Termiantion Criteria Type. </param>
        /// <returns></returns>
        public bool TrainSVM(Matrix <float> trainData, Matrix <int> trainClasses, string saveModelName, SVM.SvmType svm_type = SVM.SvmType.CSvc, int kFold = 10, double C = 1.0, double coef0 = 0.1, int degree = 3, double eps = 0.001, double gamma = 1.0, SVM.SvmKernelType kernel_type = SVM.SvmKernelType.Rbf, double nu = 0.5, int maxIter = 500, Emgu.CV.CvEnum.TermCritType termCritType = Emgu.CV.CvEnum.TermCritType.Eps)
        {
            var svmModel         = new SVM();
            var termCriteria     = new Emgu.CV.Structure.MCvTermCriteria();
            var trainSampleCount = trainData.Rows;

            svmModel.C            = C;
            svmModel.Coef0        = coef0;
            svmModel.Degree       = degree;
            svmModel.Gamma        = gamma;
            svmModel.Nu           = nu;
            svmModel.Type         = svm_type;
            termCriteria.Epsilon  = eps;
            termCriteria.MaxIter  = maxIter;
            termCriteria.Type     = termCritType;
            svmModel.TermCriteria = termCriteria;
            svmModel.P            = 1;
            svmModel.SetKernel(kernel_type);
            bool trained;

            try
            {
                using (svmModel)
                {
                    TrainData td = new TrainData(trainData, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, trainClasses);
                    trained = svmModel.TrainAuto(td, kFold);
                    svmModel.Save(saveModelName);
                }
            }
            catch (Exception)
            {
                throw; // rethrow without resetting the stack trace
            }
            return(trained);
        }
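A sketch of calling TrainSVM from inside the same class with a toy two-class training set; the feature values and model file name are made up for illustration:

    // Four samples with two features each, two per class.
    Matrix<float> trainData = new Matrix<float>(new float[,]
    {
        { 1.0f, 1.0f },
        { 1.2f, 0.9f },
        { 5.0f, 5.1f },
        { 4.8f, 5.3f }
    });
    Matrix<int> trainClasses = new Matrix<int>(new int[,] { { 0 }, { 0 }, { 1 }, { 1 } });

    // Defaults: C-SVC with an RBF kernel, 10-fold cross validation via TrainAuto.
    bool trained = TrainSVM(trainData, trainClasses, "bioSvm.xml");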
Example #4
        /// <summary>
        /// Running k-means algorithm with k-means++ initial algorithm for k = 2.
        /// Can be used to find right and left lane boundary.
        /// </summary>
        /// <param name="input">Input points</param>
        /// <param name="a">First lane</param>
        /// <param name="b">Second lane</param>
        public static void Two_Means_Clustering(List<Point> input, ref List<Point> first, ref List<Point> second, int attempts = 3)
        {
            if (input.Count < 7)
                return;

            // formatting input data
            float[,] samples = new float[input.Count, 2];
            int i = 0;
            foreach (var p in input)
            {
                samples[i, 0] = p.X;
                samples[i, 1] = p.Y;
                ++i;
            }

            MCvTermCriteria term = new MCvTermCriteria();

            Matrix<float> samplesMatrix = new Matrix<float>(samples);
            Matrix<Int32> labels = new Matrix<Int32>(input.Count, 1);

            CvInvoke.cvKMeans2(samplesMatrix, 2, labels, term, attempts, IntPtr.Zero, KMeansInitType.RandomCenters, IntPtr.Zero, IntPtr.Zero);

            first.Clear();
            second.Clear();
            for (i = 0; i < input.Count; ++i)
            {
                if (labels[i, 0] == 0)
                    first.Add(input[i]);
                else
                    second.Add(input[i]);
            }
        }
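A brief call-site sketch for the method above; `GetCandidatePoints` is a hypothetical stand-in for whatever produces the point cloud:

    List<Point> lanePoints = GetCandidatePoints();   // hypothetical detection step
    List<Point> first  = new List<Point>();
    List<Point> second = new List<Point>();

    // The method needs at least 7 points; with fewer it returns without touching the lists.
    Two_Means_Clustering(lanePoints, ref first, ref second);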
Example #5
 //private IntPtr[] _covsPtr;
 //private GCHandle _covsPtrHandle;
 /// <summary>
 /// Create EM parameters with default value
 /// </summary>
 public EMParams()
 {
      Nclusters = 10;
      CovMatType = Emgu.CV.ML.MlEnum.EM_COVARIAN_MATRIX_TYPE.COV_MAT_DIAGONAL;
      StartStep = Emgu.CV.ML.MlEnum.EM_INIT_STEP_TYPE.START_AUTO_STEP;
      _termCrit = new MCvTermCriteria(100, 1.0e-6);
 }
Example #6
 public Calibration(Camera mainCamera)
 {
     _mainCamera = mainCamera;
     _termCriteria = new MCvTermCriteria();
     _flags = CALIB_TYPE.CV_CALIB_USE_INTRINSIC_GUESS | CALIB_TYPE.CV_CALIB_FIX_K1 | CALIB_TYPE.CV_CALIB_FIX_K2 | CALIB_TYPE.CV_CALIB_FIX_K3 | CALIB_TYPE.CV_CALIB_FIX_K4 | CALIB_TYPE.CV_CALIB_FIX_K5 | CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST;
     _size = new Size(Screen.width, Screen.height);
 }
Example #7
        /// <summary>
        /// Create an object recognizer using the specific training data and parameters
        /// </summary>
        /// <param name="images">The images used for training, each of them should be the same size. It's recommended the images are histogram normalized</param>
        /// <param name="labels">The labels corresponding to the images</param>
        /// <param name="eigenDistanceThreshold">
        /// The eigen distance threshold, (0, ~1000].
        /// The smaller the number, the more likely an examined image will be treated as an unrecognized object. 
        /// If the threshold is &lt; 0, the recognizer will always treat the examined image as one of the known objects. 
        /// </param>
        /// <param name="termCrit">The criteria for recognizer training</param>
        public EigenObjectRecognizer(Image<Gray, Byte>[] images, Guid[] labels, int cacheSize, double eigenDistanceThreshold, ref MCvTermCriteria termCrit)
        {
            Debug.Assert(images.Length == labels.Length, "The number of images should equal the number of labels");
            Debug.Assert(eigenDistanceThreshold >= 0.0, "Eigen-distance threshold should always be >= 0.0");

               CalcEigenObjects(images, ref termCrit, out _eigenImages, out _avgImage);

               /*
               _avgImage.SerializationCompressionRatio = 9;

               foreach (Image<Gray, Single> img in _eigenImages)
               //Set the compression ration to best compression. The serialized object can therefore save spaces
               img.SerializationCompressionRatio = 9;
               */

               _eigenValues = Array.ConvertAll<Image<Gray, Byte>, Matrix<float>>(images,
               delegate(Image<Gray, Byte> img)
               {
                  return new Matrix<float>(ConstructEigenDecomposite(img, _eigenImages, _avgImage));
               });

               _labels = labels;
               _eigenDistanceThreshold = eigenDistanceThreshold;
            queueMaxCount = cacheSize;
        }
Example #8
      /*
      /// <summary>
      /// Create a LevMarqSparse solver
      /// </summary>
      public LevMarqSparse()
      {
         _ptr = CvInvoke.CvCreateLevMarqSparse();
      }*/

      /// <summary>
      /// Useful function to do simple bundle adjustment tasks
      /// </summary>
      /// <param name="points">Positions of points in global coordinate system (input and output), values will be modified by bundle adjustment</param>
      /// <param name="imagePoints">Projections of 3d points for every camera</param>
      /// <param name="visibility">Visibility of 3d points for every camera</param>
      /// <param name="cameraMatrix">Intrinsic matrices of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="R">rotation matrices of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="T">translation vector of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="distCoeffcients">distortion coefficients of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="termCrit">Termination criteria, a reasonable value will be (30, 1.0e-12) </param>
      public static void BundleAdjust(
         MCvPoint3D64f[] points, MCvPoint2D64f[][] imagePoints, int[][] visibility,
         Matrix<double>[] cameraMatrix, Matrix<double>[] R, Matrix<double>[] T, Matrix<double>[] distCoeffcients, MCvTermCriteria termCrit)
      {
         using (Matrix<double> imagePointsMat = CvToolbox.GetMatrixFromPoints(imagePoints))
         using (Matrix<int> visibilityMat = CvToolbox.GetMatrixFromArrays(visibility))
         using (VectorOfMat cameraMatVec = new VectorOfMat())
         using (VectorOfMat rMatVec = new VectorOfMat())
         using (VectorOfMat tMatVec = new VectorOfMat())
         using (VectorOfMat distorMatVec = new VectorOfMat())
         {
            cameraMatVec.Push(cameraMatrix);
            rMatVec.Push(R);
            tMatVec.Push(T);
            distorMatVec.Push(distCoeffcients);


            GCHandle handlePoints = GCHandle.Alloc(points, GCHandleType.Pinned);

            CvInvoke.CvLevMarqSparseAdjustBundle(
               cameraMatrix.Length,
               points.Length, handlePoints.AddrOfPinnedObject(),
               imagePointsMat, visibilityMat, cameraMatVec, rMatVec, tMatVec, distorMatVec, ref termCrit);

            handlePoints.Free();

         }
      }
Example #9
 public static extern void cvCalcOpticalFlowHS(
         IntPtr prev,
         IntPtr curr,
         int usePrevious,
         IntPtr velx,
         IntPtr vely,
         double lambda,
         MCvTermCriteria criteria);
Example #10
 /// <summary>
 /// Iterates to find the object center given its back projection and initial position of search window. The iterations are made until the search window center moves by less than the given value and/or until the function has done the maximum number of iterations. 
 /// </summary>
 /// <param name="probImage">Back projection of object histogram</param>
 /// <param name="window">Initial search window</param>
 /// <param name="criteria">Criteria applied to determine when the window search should be finished. </param>
 /// <returns>The number of iterations made</returns>
 public static int MeanShift(
    IInputArray probImage,
    ref Rectangle window,
    MCvTermCriteria criteria)
 {
    using (InputArray iaProbImage = probImage.GetInputArray())
       return cveMeanShift(iaProbImage, ref window, ref criteria);
 }
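A sketch of driving the wrapper above once per frame, assuming `backProjection` is the object's histogram back projection (e.g. produced from a hue histogram) and the initial window comes from a detection step; both are assumptions, not part of the snippet:

    Rectangle trackWindow = new Rectangle(100, 100, 80, 80);    // seeded by a detector (illustrative)
    MCvTermCriteria criteria = new MCvTermCriteria(10, 1.0);    // at most 10 iterations, or a shift below 1 px

    int iterations = CvInvoke.MeanShift(backProjection, ref trackWindow, criteria);
    // trackWindow has been moved in place and can be fed to the next frame.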
Example #11
        //-------------------------------------------------------------------------------------//
        //<<<<<----------------FUNCTIONS USED TO TRAIN RECOGNIZER ON TRAINING SET----------->>>>
        //-------------------------------------------------------------------------------------//
        /// <summary>
        /// Trains recognizer on fetched face-label pairs and saves the trained data to recognition variables
        /// </summary>
        public void TrainRecognizer()
        {
            MCvTermCriteria termCrit = new MCvTermCriteria(iMaxItereations, dEPS);
            ImageInDatabase dbTrainingSet = new ImageInDatabase();

            // This will fill the training images array AND labels array
            dbTrainingSet.LoadCompleteTrainingSet();
            recognizer = new EigenObjectRecognizer(dbTrainingSet.m_trainingImages, dbTrainingSet.m_TrainingLabels, dDistTreshHold, ref termCrit);
        }
Example #12
        //public static KDetector kdetector;
        public string Apply(int k, string fn, string outImagePath)
        {
            Image<Bgr, float> src = new Image<Bgr, float>(fn);
            Matrix<float> samples = new Matrix<float>(src.Rows * src.Cols, 1, 3);
            Matrix<int> finalClusters = new Matrix<int>(src.Rows * src.Cols, 1);

            //Convert the image into a sample matrix whose rows equal width*height of the image
            //and whose single column holds the 3 channel values (B/G/R) for each pixel
            for (int y = 0; y < src.Rows; y++)
            {
                for (int x = 0; x < src.Cols; x++)
                {
                    samples.Data[y + x * src.Rows, 0] = (float)src[y, x].Blue;
                    samples.Data[y + x * src.Rows, 1] = (float)src[y, x].Green;
                    samples.Data[y + x * src.Rows, 2] = (float)src[y, x].Red;
                }
            }

            MCvTermCriteria term = new MCvTermCriteria(10000, 0.0001);
            term.type = TERMCRIT.CV_TERMCRIT_ITER | TERMCRIT.CV_TERMCRIT_EPS;

            int clusterCount = k;
            int attempts = 10;

            //after the k-means call, the centers matrix holds the cluster centers
            Matrix<float> centers;
            centers = new Matrix<float>(clusterCount, 3);

            int mm = CvInvoke.cvKMeans2(samples, clusterCount, finalClusters, term, attempts, IntPtr.Zero, KMeansInitType.PPCenters, centers, IntPtr.Zero);

            Image<Bgr, float> new_image = new Image<Bgr, float>(src.Size);

            //map each cluster index to its center color
            Bgr[] clusterColors = new Bgr[clusterCount];
            for (int i = 0; i < clusterCount; i++)
            {
                Bgr b = new Bgr(centers[i, 0], centers[i, 1], centers[i, 2]);

                clusterColors[i] = b;
            }

            //Draw an image where every pixel is painted with its cluster's center color
            for (int y = 0; y < src.Rows; y++)
            {
                for (int x = 0; x < src.Cols; x++)
                {
                    PointF p = new PointF(x, y);
                    new_image.Draw(new CircleF(p, 1.0f), clusterColors[finalClusters[y + x * src.Rows, 0]], 1);
                }
            }

            new_image.Save(outImagePath);

            return outImagePath;
        }
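A call-site sketch for the segmentation above, written as if from inside the same class; the paths and cluster count are illustrative:

    // Cluster the image into 4 colors and write the result next to the input.
    string outPath = Apply(4, "C:\\Image\\input.jpg", "C:\\Image\\clustered.jpg");
    // outPath is simply the outImagePath that was passed in, now containing the clustered image.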
Example #13
 public static extern void cvSnakeImage(
    IntPtr image,
    IntPtr points,
    int length,
    [MarshalAs(UnmanagedType.LPArray)] float[] alpha,
    [MarshalAs(UnmanagedType.LPArray)] float[] beta,
    [MarshalAs(UnmanagedType.LPArray)] float[] gamma,
    int coeffUsage,
    Size win,
    MCvTermCriteria criteria,
    int calcGradient);
Example #14
    private void InitEigenObjectRecognizer() {
      if (trainedImages.Count <= 0) { return; }
      initEigen = true;

      // TermCriteria for face recognition; the number of trained images is used as the max iteration count
      termCrit = new MCvTermCriteria(trainedImages.Count, 0.001);

      // Eigen face recognizer
      recognizer = new EigenObjectRecognizer(trainedImages.ToArray(), trainedLabels.ToArray(), 5000, ref termCrit);

      initEigen = false;
    }
Example #15
 /// <summary>
 /// Implements CAMSHIFT object tracking algorithm ([Bradski98]). First, it finds an object center using cvMeanShift and, after that, calculates the object size and orientation. 
 /// </summary>
 /// <param name="probImage">Back projection of object histogram </param>
 /// <param name="window">Initial search window</param>
 /// <param name="criteria">Criteria applied to determine when the window search should be finished</param>
 /// <returns>Circumscribed box for the object, contains object size and orientation</returns>
 public static RotatedRect CamShift(
    IInputArray probImage,
    ref Rectangle window,
    MCvTermCriteria criteria)
 {
    RotatedRect box = new RotatedRect();
    using (InputArray iaProbImage = probImage.GetInputArray())
    {
       cveCamShift(iaProbImage, ref window, ref criteria, ref box);
    }
    return box;
 }
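A per-frame tracking sketch using the wrapper above; `backProjection` and the initial `searchWindow` are assumed to come from a histogram back projection and a detection step respectively:

    Rectangle searchWindow = new Rectangle(100, 100, 80, 80);   // illustrative initial window
    MCvTermCriteria criteria = new MCvTermCriteria(10, 1.0);

    RotatedRect objectBox = CvInvoke.CamShift(backProjection, ref searchWindow, criteria);
    // objectBox.Size and objectBox.Angle give the estimated object size and orientation;
    // searchWindow is updated in place and reused on the next frame.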
Example #16
 /// <summary>
 /// Calculates optical flow for a sparse feature set using iterative Lucas-Kanade method in pyramids
 /// </summary>
 /// <param name="prev">First frame, at time t</param>
 /// <param name="curr">Second frame, at time t + dt </param>
 /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
 /// <param name="winSize">Size of the search window of each pyramid level</param>
 /// <param name="level">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc</param>
 /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
 /// <param name="currFeatures">Array of 2D points containing calculated new positions of input features in the second image</param>
 /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
 /// <param name="trackError">Array of double numbers containing difference between patches around the original and moved points</param>
 public static void PyrLK(
    Image<Gray, Byte> prev,
    Image<Gray, Byte> curr,
    PointF[] prevFeatures,
    Size winSize,
    int level,
    MCvTermCriteria criteria,
    out PointF[] currFeatures,
    out Byte[] status,
    out float[] trackError)
 {
    PyrLK(prev, curr, null, null, prevFeatures, winSize, level, criteria, Emgu.CV.CvEnum.LKFLOW_TYPE.DEFAULT, out currFeatures, out status, out trackError);
 }
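A sketch of calling this overload on two consecutive grayscale frames; `prevFrame`/`currFrame` are assumed to exist, and the feature points are taken from the Emgu 2.x GoodFeaturesToTrack helper (an assumption about the surrounding code):

    PointF[] prevFeatures = prevFrame.GoodFeaturesToTrack(100, 0.01, 10, 3)[0];
    PointF[] currFeatures;
    Byte[] status;
    float[] trackError;

    OpticalFlow.PyrLK(prevFrame, currFrame, prevFeatures,
                      new Size(15, 15),                // search window per pyramid level
                      3,                               // pyramid levels 0..3
                      new MCvTermCriteria(20, 0.03),   // stop after 20 iterations or eps 0.03
                      out currFeatures, out status, out trackError);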
Example #17
        public EigenCardDetector(string foldername)
        {
            List<FileInfo> files = new List<FileInfo>(new DirectoryInfo(foldername).GetFiles());
            List<Image<Gray, byte>> images = new List<Image<Gray, byte>>(files.Count);
            foreach (FileInfo info in files)
            {
                Bitmap bit = new Bitmap(info.FullName);
                images.Add(new Image<Gray, byte>(bit));
            }

            MCvTermCriteria crit = new MCvTermCriteria(0.05);
            recog = new EigenObjectRecognizer(images.ToArray(), ref crit);
        }
Example #18
        public EigenObjectRecognizer(Image<Gray, Byte>[] images, String[] labels, double eigenDistanceThreshold, ref MCvTermCriteria termCrit)
        {
            Debug.Assert(images.Length == labels.Length, "The number of images should equal the number of labels");
             Debug.Assert(eigenDistanceThreshold >= 0.0, "Eigen-distance threshold should always be >= 0.0");

             CalcEigenObjects(images, ref termCrit, out _eigenImages, out _avgImage);

             _eigenValues = Array.ConvertAll<Image<Gray, Byte>, Matrix<float>>(images,
             delegate(Image<Gray, Byte> img)
             {
                return new Matrix<float>(EigenDecomposite(img, _eigenImages, _avgImage));
             });

             _labels = labels;

             _eigenDistanceThreshold = eigenDistanceThreshold;
        }
Example #19
 /// <summary>
 /// Calculates orthonormal eigen basis and the averaged object for a group of the input objects.
 /// </summary>
 /// <param name="input">Pointer to the array of IplImage input objects </param>
 /// <param name="calcLimit">Criteria that determine when to stop calculation of eigen objects. Depending on the parameter calcLimit, calculations are finished either after first calcLimit.max_iter dominating eigen objects are retrieved or if the ratio of the current eigenvalue to the largest eigenvalue comes down to calcLimit.epsilon threshold. The value calcLimit -> type must be CV_TERMCRIT_NUMB, CV_TERMCRIT_EPS, or CV_TERMCRIT_NUMB | CV_TERMCRIT_EPS . The function returns the real values calcLimit->max_iter and calcLimit->epsilon</param>
 /// <param name="avg">Averaged object</param>
 /// <param name="eigVals">Pointer to the eigenvalues array in the descending order; may be NULL</param>
 /// <param name="eigVecs">Pointer either to the array of eigen objects</param>
 /// <returns>Pointer either to the array of eigen objects or to the write callback function</returns>
 public static void cvCalcEigenObjects(
  IntPtr[] input,
  ref MCvTermCriteria calcLimit,
  IntPtr[] eigVecs,
  float[] eigVals,
  IntPtr avg)
 {
     cvCalcEigenObjects(
      input.Length,
      input,
      eigVecs,
      CvEnum.EIGOBJ_TYPE.CV_EIGOBJ_NO_CALLBACK,
      0,
      IntPtr.Zero,
      ref calcLimit,
      avg,
      eigVals);
 }
Example #20
        public FacialInfo()
        {
            StreamReader SR = new StreamReader("CVConfig.txt");
            gFacedetection = new HaarCascade(@"haarcascade_frontalface_alt.xml");
            gHanddetection = new HaarCascade(@"haarcascade_hand.xml");

            gCompareBoxes = new List<Image<Gray, byte>>();
            gRecognitionBoxes = new List<Image<Gray, byte>>();


            Image<Gray, byte> Blank = new Image<Gray, byte>(128, 120, new Gray(0.5));
            for (int x = 0; x < 6; x++)
            {
                gCompareBoxes.Add(Blank);
                gRecognitionBoxes.Add(Blank);
            }

            try
            {
                ImageWidth = int.Parse(SR.ReadLine().Split(':')[1]);
                ImageHeight = int.Parse(SR.ReadLine().Split(':')[1]);
                Threshold = int.Parse(SR.ReadLine().Split(':')[1]);
                termcrit = double.Parse(SR.ReadLine().Split(':')[1]);
            }
            catch 
            {
                //default values
                ImageWidth = 128;
                ImageHeight = 120;
                termcrit = 0.001;
                Threshold = 4500;
            }
            SR.Close();

            gUP = new UserProfile();

            MCvTermCriteria termCrit = new MCvTermCriteria(gUP.LoadedUsers.Count, termcrit);
            gEOR = new EigenObjectRecognizer(
               gUP.getImages(),
               gUP.getNumbers(),
                10000,//4500
               ref termCrit);
        }
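The try block above implies that CVConfig.txt holds one `name:value` pair per line, read in a fixed order, with only the text after the ':' actually used. A plausible file matching the defaults; this layout is inferred from the parsing code, not documented anywhere:

    // Writes a sample config; the names before ':' are ignored by the parser, only the line order matters.
    System.IO.File.WriteAllLines("CVConfig.txt", new[]
    {
        "ImageWidth:128",
        "ImageHeight:120",
        "Threshold:4500",
        "TermCrit:0.001"
    });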
Example #21
      /// <summary>
      /// Calculates optical flow for a sparse feature set using iterative Lucas-Kanade method in pyramids
      /// </summary>
      /// <param name="prev">First frame, at time t</param>
      /// <param name="curr">Second frame, at time t + dt </param>
      /// <param name="prevPyrBuffer">Buffer for the pyramid for the first frame. If it is not NULL, the buffer must have a sufficient size to store the pyramid from level 1 to level #level ; the total size of (image_width+8)*image_height/3 bytes is sufficient</param>
      /// <param name="currPyrBuffer">Similar to prev_pyr, used for the second frame</param>
      /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
      /// <param name="winSize">Size of the search window of each pyramid level</param>
      /// <param name="level">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc</param>
      /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
      /// <param name="flags">Flags</param>
      /// <param name="currFeatures">Array of 2D points containing calculated new positions of input features in the second image</param>
      /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
      /// <param name="trackError">Array of double numbers containing difference between patches around the original and moved points</param>
      public static void PyrLK(
         Image<Gray, Byte> prev,
         Image<Gray, Byte> curr,
         Image<Gray, Byte> prevPyrBuffer,
         Image<Gray, Byte> currPyrBuffer,
         System.Drawing.PointF[] prevFeatures,
         System.Drawing.Size winSize,
         int level,
         MCvTermCriteria criteria,
         Emgu.CV.CvEnum.LKFLOW_TYPE flags,
         out System.Drawing.PointF[] currFeatures,
         out Byte[] status,
         out float[] trackError)
      {
         if (prevPyrBuffer == null)
         {
            prevPyrBuffer = new Image<Gray, byte>(prev.Width + 8, prev.Height / 3);
         }
         if (currPyrBuffer == null)
         {
            currPyrBuffer = prevPyrBuffer.CopyBlank();
         }

         status = new Byte[prevFeatures.Length];
         trackError = new float[prevFeatures.Length];

         currFeatures = new System.Drawing.PointF[prevFeatures.Length];

         CvInvoke.cvCalcOpticalFlowPyrLK(
            prev,
            curr,
            prevPyrBuffer,
            currPyrBuffer,
            prevFeatures,
            currFeatures,
            prevFeatures.Length,
            winSize,
            level,
            status,
            trackError,
            criteria,
            flags);
      }
Example #22
      /// <summary>
      /// Calculates optical flow for a sparse feature set using iterative Lucas-Kanade method in pyramids
      /// </summary>
      /// <param name="prev">First frame, at time t</param>
      /// <param name="curr">Second frame, at time t + dt </param>
      /// <param name="prevPyrBuffer">Buffer for the pyramid for the first frame. If it is not NULL, the buffer must have a sufficient size to store the pyramid from level 1 to level #level ; the total size of (image_width+8)*image_height/3 bytes is sufficient</param>
      /// <param name="currPyrBuffer">Similar to prev_pyr, used for the second frame</param>
      /// <param name="prevFeatures">Array of points for which the flow needs to be found</param>
      /// <param name="winSize">Size of the search window of each pyramid level</param>
      /// <param name="level">Maximal pyramid level number. If 0 , pyramids are not used (single level), if 1 , two levels are used, etc</param>
      /// <param name="criteria">Specifies when the iteration process of finding the flow for each point on each pyramid level should be stopped</param>
      /// <param name="flags">Flags</param>
      /// <param name="currFeatures">Array of 2D points containing calculated new positions of input features in the second image</param>
      /// <param name="status">Array. Every element of the array is set to 1 if the flow for the corresponding feature has been found, 0 otherwise</param>
      /// <param name="trackError">Array of double numbers containing difference between patches around the original and moved points</param>
      public static void PyrLK(
         Image<Gray, Byte> prev,
         Image<Gray, Byte> curr,
         Image<Gray, Byte> prevPyrBuffer,
         Image<Gray, Byte> currPyrBuffer,
         PointF[] prevFeatures,
         Size winSize,
         int level,
         MCvTermCriteria criteria,
         Emgu.CV.CvEnum.LKFLOW_TYPE flags,
         out PointF[] currFeatures,
         out Byte[] status,
         out float[] trackError)
      {
         Image<Gray, Byte> prevPyrBufferParam = prevPyrBuffer ?? new Image<Gray, byte>(prev.Width + 8, prev.Height / 3);
         Image<Gray, Byte> currPyrBufferParam = currPyrBuffer ?? prevPyrBufferParam.CopyBlank();

         status = new Byte[prevFeatures.Length];
         trackError = new float[prevFeatures.Length];

         currFeatures = new PointF[prevFeatures.Length];

         CvInvoke.cvCalcOpticalFlowPyrLK(
            prev,
            curr,
            prevPyrBufferParam,
            currPyrBufferParam,
            prevFeatures,
            currFeatures,
            prevFeatures.Length,
            winSize,
            level,
            status,
            trackError,
            criteria,
            flags);

          #region Release buffer images if they are created within this function call
         if (!object.ReferenceEquals(prevPyrBufferParam, prevPyrBuffer)) prevPyrBufferParam.Dispose();
         if (!object.ReferenceEquals(currPyrBufferParam, currPyrBuffer)) currPyrBufferParam.Dispose();
         #endregion
      }
Example #23
        private void btnDetect_Click(object sender, EventArgs e)
        {
            current = new Image<Bgr, byte>(filePath).Resize(300, 250, INTER.CV_INTER_CUBIC);
            Image<Gray, byte> grayScale = current.Convert<Gray, byte>();

                MCvAvgComp[][] detected = grayScale.DetectHaarCascade(face, scale, minNeighbors, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
                foreach (MCvAvgComp d in detected[0])
                {
                    current.Draw(d.rect, new Bgr(Color.LawnGreen), 2);
                    if (trainingImgs.Count > 0)
                    {
                        Image<Gray, byte> dFace = current.Copy(d.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                        MCvTermCriteria criteria = new MCvTermCriteria(trainingImgs.Count, epsilon);  //count, epsilon value
                        EigenObjectRecognizer recognize = new EigenObjectRecognizer(trainingImgs.ToArray(), trainingNames.ToArray(), 0, ref criteria);
                        MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 1, 1);
                        string name = recognize.Recognize(dFace);
                        current.Draw(name, ref font, new Point(d.rect.X - 2, d.rect.Y - 20), new Bgr(Color.Red));
                    }
                }
            picWebCam.Image = current.ToBitmap();
        }
Example #24
        public FaceDetection(bool type)
        {
            this._image = null;
            this._faces = new HaarCascade(@currentDirectory + @"\Resources\haarcascade_frontalface_alt2.xml");
            this._eyes = new HaarCascade(@currentDirectory + @"\Resources\haarcascade_eye.xml");

            // Testing recognition            
            recognitionImages = new Image<Gray, byte>[9];
            recognitionImages[0] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\lena.jpg");
            recognitionImages[1] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\jorge1.jpg");
            recognitionImages[2] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\david.jpg");
            recognitionImages[3] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\foof.jpg");
            recognitionImages[4] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\irfan.jpg");
            recognitionImages[5] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\raul.jpg");
            recognitionImages[6] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\ernesto.jpg");
            recognitionImages[7] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\raul1.jpg");
            recognitionImages[8] = new Image<Gray, Byte>(@currentDirectory + @"\Resources\RecognitionDB\raul2.jpg");
            

            recognitionImagesLabels = new String[9];
            recognitionImagesLabels[0] = "Lena";
            recognitionImagesLabels[1] = "Jorge1";
            recognitionImagesLabels[2] = "David";
            recognitionImagesLabels[3] = "Foof";
            recognitionImagesLabels[4] = "Irfan";
            recognitionImagesLabels[5] = "Raul1";
            recognitionImagesLabels[6] = "Ernesto";
            recognitionImagesLabels[7] = "Raul2";
            recognitionImagesLabels[8] = "Raul3";


            termCrit = new MCvTermCriteria(2000, 0.001);
            imgRecognizer = new EigenObjectRecognizer(
                recognitionImages,
                recognitionImagesLabels,
                1500,
                ref termCrit);   
         
            // 5000 -> 1000 -> 500 (from most to least strict)
        }
Example #25
        public static EigenObjectRecognizer CreateRecognizerFromFotosInFolder(String folder,  String pattern, double accuracy, int eigenDistanceThreshold)
        {
            List<Image<Gray, byte>> trainedImages = new List<Image<Gray, byte>>();
            List<String> labels = new List<string>();

             string[] subdirEntries = Directory.GetDirectories(folder);
             foreach (var directory in subdirEntries)
             {
                 string[] fileEntries = Directory.GetFiles(directory);
                 var label = directory.Remove(0, directory.LastIndexOf("\\")+1);
                 foreach (var file in fileEntries.Where(x=>x.Contains(pattern)))
                 {
                     Image<Gray, byte> image = new Image<Gray, byte>(file);
                     trainedImages.Add(image);
                     labels.Add(label);
                 }
             }

             MCvTermCriteria termCrit = new MCvTermCriteria(40, accuracy);

             EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                 trainedImages.ToArray(),
                 labels.ToArray(),
                 eigenDistanceThreshold,
                 ref termCrit);

             //int i = 1;

             //Image<Gray, float>[] eigenfaces;
             //Image<Gray, float> avg;
             //EigenObjectRecognizer.CalcEigenObjects(trainedImages.ToArray(),ref termCrit,out eigenfaces, out avg);

            //foreach(var eigenface in eigenfaces)
            // {
            //     eigenface.Bitmap.Save(@"e:\data\phototest\eigen" + i + ".bmp");
            //     i++;
            // }
             return recognizer;
        }
Example #26
        /// <summary>
        /// The only constructor. Reads the faces from the database and initializes the internal object responsible for face recognition.
        /// </summary>
        public Recognizer()
        {
            readFiles();//read the faces from the database

            //set the labels and the accuracy with which recognition is to be performed
            MCvTermCriteria criteria = new MCvTermCriteria(labels.Length, 0.001);

            //create a new face recognition object;
            //it computes eigenvectors for every face in the database
            //and for every face being checked, then compares the values computed
            //for the checked face with the values computed for the
            //faces from the database
            eor = new EigenObjectRecognizer(
                faces,//array of faces
                labels,//labels corresponding to the faces
                3000,//thresholding level between the individual eigenvectors
                ref criteria//criterion
                );

            //create the pattern (cascade) used for face detection
            haar = new HaarCascade("haarcascade_frontalface_default.xml");
        }
Example #27
        public override int FaceRecognition()
        {
            FaceDetect(this.userImage);

            foreach (CImage tempImage in lDatabaseImages)
            {
                FaceDetect(tempImage);
            }

            int ContTrain = lDatabaseImages.Count;  // Number of images used for training.
            //Image<Gray, byte> result;
            foreach (MCvAvgComp f in aDetectedFaces[0])
            {
                if (this.lDatabaseImages.ToArray().Length != 0)
                {
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.0001);
                    FisherFaceRecognizer modelRecognition = new FisherFaceRecognizer(0, 3500);

                    Image<Gray, Byte>[] im = this.lDatabaseImages.Select(w => w.grayImage).ToArray();

                    modelRecognition.Train(this.lDatabaseImages.Select(w => w.grayImage).ToArray(), lIntDatabaseImagesLabels.ToArray());
                    FaceRecognizer.PredictionResult resultRecognition = new FaceRecognizer.PredictionResult();
                    resultRecognition = modelRecognition.Predict(userImage.grayImage);
                    // The similarity degree as a function of the image difference behaves approximately as
                    // 10 -> 99%; 100 -> 90%; 750 -> 50%, 10000 -> 1%
                    // so a scale (for example, a linear one) has to be introduced:
                    // Algorithm for computing the similarity degree.
                    float threshold = 750;              // threshold corresponding to 50% image similarity (determined experimentally)
                    float thresholdMismatch = 10000;    // image mismatch threshold (equal to 1% similarity, determined experimentally)
                    if (resultRecognition.Distance < threshold)
                        similarityDegree = (100 - (resultRecognition.Distance * 50.0 / threshold)).ToString();
                    else
                        similarityDegree = (50 - (resultRecognition.Distance * 50 / thresholdMismatch)).ToString();
                }
            }

            return 0;
        }
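To make the scale in the comments above concrete: a Fisher distance of 375 maps to 100 - 375*50/750 = 75% similarity, while a distance of 5000 falls into the second branch and maps to 50 - 5000*50/10000 = 25%. The same computation in isolation, as a small sketch:

    static double SimilarityPercent(float distance)
    {
        const float threshold = 750;            // ~50% similarity (from the snippet above)
        const float thresholdMismatch = 10000;  // ~1% similarity (from the snippet above)
        return distance < threshold
            ? 100 - (distance * 50.0 / threshold)
            : 50 - (distance * 50.0 / thresholdMismatch);
    }

    // SimilarityPercent(375)  -> 75
    // SimilarityPercent(5000) -> 25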
Example #28
        public CommandService()
        {
            userInfoManager = new UserInfoManager();
            userInfoManager.connect();

            foreach(var user in userInfoManager.ListUsers())
            {
                foreach (var photo in userInfoManager.ListUserPhotoData(user))
                {
                    labels.Add(user.Name);
                    images.Add(photo.Image);

                    Console.WriteLine("Added image");
                }
            }

            MCvTermCriteria crit = new MCvTermCriteria(1.0);

            this.recognizer = new EigenObjectRecognizer(images.ToArray(),
                                                        labels.ToArray(),
                                                        2000,
                                                        ref crit);
        }
Example #29
        /// <summary>
        /// Interpolates the path using the iterative Lucas-Kanade method for each point in the path as a feature
        /// </summary>
        /// <param name="previousPath">The path of the previous frame</param>
        /// <param name="nextPath">The empty path of the to be interpolated frame</param>
        /// <param name="previousImage">The previous image</param>
        /// <param name="nextImage">The next image to interpolate to</param>
        /// <returns>A value between 0 and 1 indicating the accuracy of the interpolation</returns>
        protected override double Interpolate(Model.IPathContainer previousPath, Model.IPathContainer nextPath, Image <Bgr, Byte> previousImage, Image <Bgr, Byte> nextImage)
        {
            List <PointF> featureList = new List <PointF>();
            List <Path>   activePaths = new List <Path>();

            for (int i = 0; i < previousPath.LayerIndices[previousPath.ActivePathsLayer].Count; i++)
            {
                activePaths.Add(previousPath.Paths[previousPath.LayerIndices[previousPath.ActivePathsLayer][i]]);
            }

            IEnumerator <Path> pathEnumerator = activePaths.GetEnumerator();

            while (pathEnumerator.MoveNext())
            {
                LinkedList <BezierPoint> .Enumerator pointEnumerator = pathEnumerator.Current.GetEnumerator();
                while (pointEnumerator.MoveNext())
                {
                    featureList.Add(GetImagePoint(pointEnumerator.Current));
                }
            }

            Image <Gray, Byte> prevGray = previousImage.Convert <Gray, Byte>();
            Image <Gray, Byte> nextGray = nextImage.Convert <Gray, Byte>();

            PointF[] featureArray = featureList.ToArray();
            PointF[] newFeatures;
            byte[]   errors;
            float[]  trackErrors;
            Emgu.CV.Structure.MCvTermCriteria criteria = new Emgu.CV.Structure.MCvTermCriteria(10);

            OpticalFlow.PyrLK(prevGray, nextGray, featureArray, new Size(10, 10), 5, criteria, out newFeatures, out errors, out trackErrors);

            IPathContainer     tempPathContainer  = previousPath.Clone();
            IEnumerator <Path> tempPathEnumerator = tempPathContainer.GetPathsEnumerator();
            int index = 0;

            for (int j = 0; j < previousPath.Count; j++)
            {
                if (previousPath.LayerIndices[previousPath.ActivePathsLayer].Contains(j))
                {
                    LinkedList <BezierPoint> .Enumerator pointEnumerator = tempPathContainer.Paths[j].GetEnumerator();
                    while (pointEnumerator.MoveNext())
                    {
                        Translate(pointEnumerator.Current, featureArray[index], newFeatures[index++]);
                    }
                }
            }

            IEnumerator <Path> nextPathEnumerator = tempPathContainer.GetPathsEnumerator();

            while (nextPathEnumerator.MoveNext())
            {
                nextPath.AddPath(nextPathEnumerator.Current);
            }

            nextPath.ActivePathsLayer = tempPathContainer.ActivePathsLayer;
            nextPath.LayerIndices     = tempPathContainer.LayerIndices;

            double error = DetermineError(previousPath, nextPath, errors);

            return(error);
        }
Example #30
 /// <summary>
 /// Updates snake in order to minimize its total energy that is a sum of internal energy that depends on contour shape (the smoother contour is, the smaller internal energy is) and external energy that depends on the energy field and reaches minimum at the local energy extremums that correspond to the image edges in case of image gradient.
 /// </summary>
 /// <param name="image">The source image or external energy field</param>
 /// <param name="points">Seq points (snake). </param>
 /// <param name="length">Number of points in the contour</param>
 /// <param name="alpha">Weight[s] of continuity energy, single float or array of length floats, one per each contour point</param>
 /// <param name="beta">Weight[s] of curvature energy, similar to alpha</param>
 /// <param name="gamma">Weight[s] of image energy, similar to alpha</param>
 /// <param name="coeffUsage">Variant of usage of the previous three parameters: 
 /// CV_VALUE indicates that each of alpha, beta, gamma is a pointer to a single value to be used for all points; 
 /// CV_ARRAY indicates that each of alpha, beta, gamma is a pointer to an array of coefficients different for all the points of the snake. All the arrays must have the size equal to the contour size.
 /// </param>
 /// <param name="win">Size of neighborhood of every point used to search the minimum, both win.width and win.height must be odd</param>
 /// <param name="criteria">Termination criteria</param>
 /// <param name="calcGradient">
 /// Gradient flag. If true, the function calculates gradient magnitude for every image pixel and considers it as the energy field, 
 /// otherwise the input image itself is considered
 /// </param>
 public static void cvSnakeImage(
    IntPtr image,
    IntPtr points,
    int length,
    float[] alpha,
    float[] beta,
    float[] gamma,
    int coeffUsage,
    Size win,
    MCvTermCriteria criteria,
    bool calcGradient)
 {
    cvSnakeImage(
       image,
       points,
       length,
       alpha,
       beta,
       gamma,
       coeffUsage,
       win,
       criteria,
       calcGradient ? 1 : 0);
 }
Example #31
 private static extern void cvCalcEigenObjects(
    int nObjects,
    IntPtr[] input,
    IntPtr[] output,
    CvEnum.EIGOBJ_TYPE ioFlags,
    int ioBufSize,
    IntPtr userData,
    ref MCvTermCriteria calcLimit,
    IntPtr avg,
    float[] eigVals);
Example #32
        static void Main(string[] args)
        {
            MCvPoint3D32f objectp_1 = new MCvPoint3D32f(1f, 1f, 1f);
            MCvPoint3D32f objectp_2 = new MCvPoint3D32f(1f, 1f, 1f);
            MCvPoint3D32f objectp_3 = new MCvPoint3D32f(1f, 1f, 1f);

            //List<MCvPoint3D32f> objectPoints = new List<MCvPoint3D32f> { objectp_1, objectp_2, objectp_3 };
            MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[][] { new MCvPoint3D32f[]
                                                                     { objectp_1, objectp_1, objectp_1, objectp_1 } };



            PointF imagep_1 = new PointF(1f, 1f);
            PointF imagep_2 = new PointF(1f, 1f);
            PointF imagep_3 = new PointF(1f, 1f);

            //List<PointF> imagePoints = new List<PointF> { imagep_1, imagep_2, imagep_3 };
            PointF[][] imagePoints = new PointF[][] { new PointF[] { imagep_1, imagep_1, imagep_1, imagep_1 } };

            Size imageSize = new Size(500, 500);

            Mat cameraMat = new Mat(new Size(3, 3), DepthType.Cv32F, 1);

            cameraMat.SetValue(0, 0, 302);
            cameraMat.SetValue(0, 1, 0);
            cameraMat.SetValue(0, 2, 101);
            cameraMat.SetValue(1, 0, 0);
            cameraMat.SetValue(1, 1, 411);
            cameraMat.SetValue(1, 2, 106);
            cameraMat.SetValue(2, 0, 0);
            cameraMat.SetValue(2, 1, 0);
            cameraMat.SetValue(2, 2, 1);

            Matrix <double> cameraMatrix = new Matrix <double>(new double[, ] {
                { 302, 0, 101 }, { 0, 411, 106 }, { 0, 0, 1 }
            });

            cameraMat.ToImage <Gray, byte>().Save("test.jpg");
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 0, 0, 302);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 0, 1, 0);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 0, 2, 101);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 1, 0, 0);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 1, 1, 411);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 1, 2, 106);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 2, 0, 0);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 2, 1, 0);
            //CvInvoke.cvSetReal2D(cameraMat.DataPointer, 2, 2, 1);

            Emgu.CV.CvEnum.CalibType calibrationType = Emgu.CV.CvEnum.CalibType.UseIntrinsicGuess;

            Emgu.CV.Structure.MCvTermCriteria termCriteria = new Emgu.CV.Structure.MCvTermCriteria(50);

            Mat _distortionCoeffs = new Mat(new Size(1, 5), DepthType.Cv32F, 1);


            Emgu.CV.ExtrinsicCameraParameters[] extrinsicParams;

            Mat[] rotation;    // = new Mat(new Size(3, 3), DepthType.Cv32F, 1);
            Mat[] translation; //= new Mat(new Size(3, 3), DepthType.Cv32F, 1);

            var    result = CvInvoke.CalibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix, _distortionCoeffs, calibrationType, termCriteria, out rotation, out translation);
            double t      = rotation[0].GetValue(0, 0);
            double t2     = rotation[0].GetValue(2, 0);
        }
Example #33
        /// <summary>
        /// Estimates transformation between the 2 cameras making a stereo pair. If we have a stereo camera, where the relative position and orientation of the 2 cameras is fixed, and if we computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (that can be done with cvFindExtrinsicCameraParams2), obviously, those poses will relate to each other, i.e. given (R1, T1) it should be possible to compute (R2, T2) - we only need to know the position and orientation of the 2nd camera relative to the 1st camera. That's what the described function does. It computes (R, T) such that:
        /// R2=R*R1,
        /// T2=R*T1 + T
        /// </summary>
        /// <param name="objectPoints">The 3D location of the object points. The first index is the index of image, second index is the index of the point</param>
        /// <param name="imagePoints1">The 2D image location of the points for camera 1. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="imagePoints2">The 2D image location of the points for camera 2. The first index is the index of the image, second index is the index of the point</param>
        /// <param name="intrinsicParam1">The intrisinc parameters for camera 1, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="intrinsicParam2">The intrisinc parameters for camera 2, might contains some initial values. The values will be modified by this function.</param>
        /// <param name="imageSize">Size of the image, used only to initialize intrinsic camera matrix</param>
        /// <param name="flags">Different flags</param>
        /// <param name="extrinsicParams">The extrinsic parameters which contains:
        /// R - The rotation matrix between the 1st and the 2nd cameras' coordinate systems; 
        /// T - The translation vector between the cameras' coordinate systems. </param>
        /// <param name="essentialMatrix">essential matrix</param>
        /// <param name="termCrit"> Termination criteria for the iterative optimiziation algorithm </param>
        /// <param name="foundamentalMatrix">fundamental matrix</param>
        public static void StereoCalibrate(
            MCvPoint3D32f[][] objectPoints,
            PointF[][] imagePoints1,
            PointF[][] imagePoints2,
            IntrinsicCameraParameters intrinsicParam1,
            IntrinsicCameraParameters intrinsicParam2,
            Size imageSize,
            CvEnum.CALIB_TYPE flags,
            MCvTermCriteria termCrit,
            out ExtrinsicCameraParameters extrinsicParams,
            out Matrix<double> foundamentalMatrix,
            out Matrix<double> essentialMatrix)
        {
            Debug.Assert(objectPoints.Length == imagePoints1.Length && objectPoints.Length == imagePoints2.Length, "The number of images for object points should be equal to the number of images for image points");

             #region get the matrix that represent the point counts
             int[,] pointCounts = new int[objectPoints.Length, 1];
             for (int i = 0; i < objectPoints.Length; i++)
             {
            Debug.Assert(objectPoints[i].Length == imagePoints1[i].Length && objectPoints[i].Length == imagePoints2[i].Length, String.Format("Number of 3D points and image points should be equal in the {0}th image", i));
            pointCounts[i, 0] = objectPoints[i].Length;
             }
             #endregion

             using (Matrix<float> objectPointMatrix = ToMatrix(objectPoints))
             using (Matrix<float> imagePointMatrix1 = ToMatrix(imagePoints1))
             using (Matrix<float> imagePointMatrix2 = ToMatrix(imagePoints2))
             using (Matrix<int> pointCountsMatrix = new Matrix<int>(pointCounts))
             {
            extrinsicParams = new ExtrinsicCameraParameters();
            essentialMatrix = new Matrix<double>(3, 3);
            foundamentalMatrix = new Matrix<double>(3, 3);

            CvInvoke.cvStereoCalibrate(
               objectPointMatrix.Ptr,
               imagePointMatrix1.Ptr,
               imagePointMatrix2.Ptr,
               pointCountsMatrix.Ptr,
               intrinsicParam1.IntrinsicMatrix,
               intrinsicParam1.DistortionCoeffs,
               intrinsicParam2.IntrinsicMatrix,
               intrinsicParam2.DistortionCoeffs,
               imageSize,
               extrinsicParams.RotationVector,
               extrinsicParams.TranslationVector,
               essentialMatrix.Ptr,
               foundamentalMatrix.Ptr,
               termCrit,
               flags);
             }
        }
Example #34
    // Original method http://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
    //// An example of the code working can be found in VVVV
    //// vvvv git https://github.com/elliotwoods/VVVV.Nodes.EmguCV/blob/master/src/CameraCalibration/CalibrateCamera.cs
    //// to see it working in vvvv, check "{vvvv}\packs\{vvvv.imagepack}\nodes\modules\Image\OpenCV\CalibrateProjector (CV.Transform).v4p"
    //// The general idea is to take the camera calibration and use it for a projector, by adding near/far planes
    //// The code might be outdated, since they changed the EmguCV interface

    // Returns a CameraCalibrationResult with the calibration parameters if calibration was possible, null otherwise
    public static CameraCalibrationResult ComputeCameraCalibration(
        Vector3[] inObjectPoints,                      // N Points in space(x,y,z)
                                                       // * when computing intrinsics, use the checkerboard as reference frame (that means the corners won't move)
                                                       // * when computing extrinsics, you can use the checkerboard corners in world coordinates for global position estimation
        Vector2[] inImagePoints,                       // N*S points on the image plane(u,v) matching the N points in space, where
                                                       // * for intrinsic computation, S = number of samples
                                                       // * for extrinsic computation, S = 1
        Size sensorSize,                               // Size of the image, used only to initialize intrinsic camera matrix
        Matrix <double> IntrinsicMatrix,               // The output camera matrix(A)[fx 0 cx; 0 fy cy; 0 0 1]. If CV_CALIB_USE_INTRINSIC_GUESS and / or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized
        out string status,                             // OK if everything went well, verbose error description otherwise
        bool intrinsicGuess             = true,        // If intrinsicGuess is true, the intrinsic matrix will be initialized with default values
        bool normalizedImageCoordinates = true,        // if true, the image coordinates are normalized between 0-1
        CalibType flags = CalibType.UseIntrinsicGuess) // Different flags:
                                                       // * If Emgu.CV.CvEnum.CalibType == CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be initialized before calling the function
                                                       // * if you use FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before the CalibrateCamera function is called. Otherwise 0 values are used as default.
                                                       //            flags |= CalibType.UseIntrinsicGuess;   // uses the intrinsicMatrix as initial estimation, or generates an initial estimation using imageSize
                                                       //            flags |= CalibType.FixFocalLength;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {fx,fy} are constant
                                                       //            flags |= CalibType.FixAspectRatio;      // if (CV_CALIB_USE_INTRINSIC_GUESS) then: fy is a free variable, fx/fy stays constant
                                                       //            flags |= CalibType.FixPrincipalPoint;   // if (CV_CALIB_USE_INTRINSIC_GUESS) then: {cx,cy} are constant
                                                       //            flags |= (CalibType.FixK1               //  Given CalibType.FixK{i}: if (CV_CALIB_USE_INTRINSIC_GUESS) then: K{i} = distortionCoefficients[i], else: K{i} = 0
                                                       //                    | CalibType.FixK2
                                                       //                    | CalibType.FixK3
                                                       //                    | CalibType.FixK4
                                                       //                    | CalibType.FixK5
                                                       //                    | CalibType.FixK6);
                                                       //            flags |= CalibType.ZeroTangentDist;     // tangential distortion is zero: {P1,P2} = {0,0}
                                                       //            flags |= CalibType.RationalModel;       // enable K4,k5,k6, disabled by default
    {
        int nPointsPerImage = inObjectPoints.Length;

        if (nPointsPerImage == 0)
        {
            status = "Insufficient points";
            return(null);
        }
        int nImages = inImagePoints.Length / nPointsPerImage;

        Debug.Log("point/images" + nPointsPerImage + "/" + nImages);


        //Intrinsics: an in/out intrinsic matrix and, depending on the calibrationType, the distortion coefficients
        if (intrinsicGuess)
        {
            IntrinsicMatrix = new Matrix <double>(3, 3);
            // NOTE: A possible cause of failure is that this matrix might be transposed (given how openCV handles indexes)
            IntrinsicMatrix[0, 0] = sensorSize.Width;
            IntrinsicMatrix[1, 1] = sensorSize.Height;
            IntrinsicMatrix[0, 2] = sensorSize.Width / 2.0d;
            IntrinsicMatrix[1, 2] = sensorSize.Height / 2.0d;
            IntrinsicMatrix[2, 2] = 1;
        }
        Emgu.CV.IInputOutputArray distortionCoeffs = new Matrix <double>(1, 8); // The output vector of distortion coefficients (k1, k2, p1, p2[, k3[, k4, k5, k6]])

        // Matching world points (3D) to image points (2D), with the accompanying size of the image in pixels
        MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[nImages][];  //The joint matrix of object points, 3xN or Nx3, where N is the total number of points in all views
        PointF[][]        imagePoints  = new PointF[nImages][];         //The joint matrix of corresponding image points, 2xN or Nx2, where N is the total number of points in all views

        for (int i = 0; i < nImages; i++)
        {
            objectPoints[i] = new MCvPoint3D32f[nPointsPerImage];
            imagePoints[i]  = new PointF[nPointsPerImage];

            for (int j = 0; j < nPointsPerImage; j++)
            {
                objectPoints[i][j].X = inObjectPoints[j].x;
                objectPoints[i][j].Y = inObjectPoints[j].y;
                objectPoints[i][j].Z = inObjectPoints[j].z;

                if (normalizedImageCoordinates)
                {
                    imagePoints[i][j].X = inImagePoints[i * nPointsPerImage + j].x * (sensorSize.Width - 1);
                    imagePoints[i][j].Y = (1 - inImagePoints[i * nPointsPerImage + j].y) * (sensorSize.Height - 1);
                }
                else
                {
                    imagePoints[i][j].X = inImagePoints[i * nPointsPerImage + j].x;
                    imagePoints[i][j].Y = (1 - inImagePoints[i * nPointsPerImage + j].y);
                }
            }
        }

        //Extrinsics: they are decomposed in position and orientation
        Mat[] rotationVectors;    //The output 3xM or Mx3 array of rotation vectors(compact representation of rotation matrices, see cvRodrigues2).
        Mat[] translationVectors; //The output 3xM or Mx3 array of translation vectors

        // When to end: 10 iterations
        Emgu.CV.Structure.MCvTermCriteria termCriteria = new Emgu.CV.Structure.MCvTermCriteria(10); //The termination criteria

        try
        {
            // To make this method work it was necessary to patch it (see below)
            double reprojectionError = CalibrateCamera(
                objectPoints,
                imagePoints,
                sensorSize,
                IntrinsicMatrix,
                distortionCoeffs,
                flags,
                termCriteria,
                out rotationVectors,
                out translationVectors);


            var rotation = new Matrix <double>(rotationVectors[0].Rows, rotationVectors[0].Cols, rotationVectors[0].DataPointer);

            CameraCalibrationResult calibration = new CameraCalibrationResult(
                sensorSize.Width, sensorSize.Height,
                new CameraCalibrationResult.Extrinsics(MatToVector3(translationVectors[0]), MatToVector3(rotationVectors[0])),
                new CameraCalibrationResult.Intrinsics(IntrinsicMatrix, sensorSize),
                new CameraCalibrationResult.Distortion(distortionCoeffs),
                reprojectionError
                );
            DebugMatrix(IntrinsicMatrix);
            status = "OK! " + reprojectionError;


            return(calibration);
        }
        catch (Exception e)
        {   // Error
            status = e.Message;
            return(null);
        }
    }