Example #1
 /// <summary>
 /// Create an Expectation Maximization model
 /// </summary>
 /// <param name="nclusters">The number of mixture components in the Gaussian mixture model. Use 5 for default.</param>
 /// <param name="covMatType">Constraint on covariance matrices which defines type of matrices</param>
 /// <param name="termcrit">The termination criteria of the EM algorithm. The EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default maximum number of iterations is 100</param>
 public EM(int nclusters, MlEnum.EmCovarianMatrixType covMatType, MCvTermCriteria termcrit)
 {
     _ptr = MlInvoke.CvEMDefaultCreate(nclusters, covMatType, ref termcrit);
 }
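A minimal, hedged usage sketch for this constructor; the Diagonal covariance value is an assumption about this Emgu version's MlEnum.EmCovarianMatrixType:

 // Stop EM after at most 100 M-steps, or once the relative log-likelihood change drops below 1e-6.
 MCvTermCriteria termCrit = new MCvTermCriteria(100, 1e-6);
 EM em = new EM(5, MlEnum.EmCovarianMatrixType.Diagonal, termCrit); // enum value assumed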
Example #2
 internal static extern void cveSVMSetTermCriteria(IntPtr obj, ref MCvTermCriteria val);
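In Emgu's public API this native setter typically backs the SVM.TermCriteria property; a hedged sketch, assuming the Emgu.CV.ML.SVM class of Emgu 3.x:

 using (SVM svm = new SVM())
 {
     // stop training after 1000 iterations or once improvement falls below 1e-6
     svm.TermCriteria = new MCvTermCriteria(1000, 1e-6);
 }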
Example #3
 internal static extern IntPtr CvBOWKMeansTrainerCreate(int clusterCount, MCvTermCriteria termcrit, int attempts, CvEnum.KMeansInitType flags);
Example #4
 public Classifier_Train(string Training_Folder)
 {
     termCrit   = new MCvTermCriteria(ContTrain, 0.001);
     _IsTrained = LoadTrainingData(Training_Folder);
 }
        /// <param name="eigenDistanceThreshold">
        /// eigen distance threshold, (0, ~1000].
        /// </param>
        /// <param name="termCrit">The criteria for recognizer training</param>
        public EigenObjectRecognize(Image <Gray, Byte>[] images, String[] labels, double eigenDistanceThreshold, ref MCvTermCriteria termCrit)
        {
            Debug.Assert(images.Length == labels.Length, "=================================", "Masalah!!!");
            Debug.Assert(eigenDistanceThreshold >= nilaiEigen, "===================================", "Ambang harus diatas " + nilaiEigen);

            CalcEigenObjects(images, ref termCrit, out _eigenImages, out _avgImage);
            _eigenValues = Array.ConvertAll <Image <Gray, Byte>, Matrix <float> >(images,
                                                                                  delegate(Image <Gray, Byte> img)
            {
                return(new Matrix <float>(EigenDecomposite(img, _eigenImages, _avgImage)));
            });

            _labels = labels;
            _eigenDistanceThreshold = eigenDistanceThreshold;
        }
Example #6
 private static extern int cveMeanShift(
     IntPtr probImage,
     ref Rectangle window,
     ref MCvTermCriteria criteria);
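The corresponding public wrapper, CvInvoke.MeanShift, is typically called as below (hedged sketch; backProjection is a placeholder for a histogram back-projection image):

 Rectangle searchWindow = new Rectangle(100, 100, 50, 50);
 // shifts the window toward the densest region; returns the number of iterations used
 int iterations = CvInvoke.MeanShift(backProjection, ref searchWindow, new MCvTermCriteria(10, 1));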
Example #7
        public EigenObjectRecognizer(Image <Gray, Byte>[] images, String[] labels, double eigenDistanceThreshold, ref MCvTermCriteria termCrit)
        {
            Debug.Assert(images.Length == labels.Length, "The number of images should equal the number of labels");
            Debug.Assert(eigenDistanceThreshold >= 0.0, "Eigen-distance threshold should always be >= 0.0");

            CalcEigenObjects(images, ref termCrit, out _eigenImages, out _avgImage);



            _eigenValues = Array.ConvertAll <Image <Gray, Byte>, Matrix <float> >(images,
                                                                                  delegate(Image <Gray, Byte> img)
            {
                return(new Matrix <float>(EigenDecomposite(img, _eigenImages, _avgImage)));
            });

            _labels = labels;

            _eigenDistanceThreshold = eigenDistanceThreshold;
        }
Example #8
 /// <summary>
 /// Create a recognizer from the stored image data and parameters; the recognizer returns the most similar (matching) image.
 /// </summary>
 /// <param name="images">The images to be matched; they must all be the same size.</param>
 /// <param name="termCrit">The termination criteria used when training on the images</param>
 public EigenObjectRecognize(Image <Gray, Byte>[] images, ref MCvTermCriteria termCrit)
     : this(images, GenerateLabels(images.Length), ref termCrit)
 {
 }
Example #9
 internal static extern double cveArucoCalibrateCameraAruco(
     IntPtr corners, IntPtr ids, IntPtr counter, IntPtr board,
     ref Size imageSize, IntPtr cameraMatrix, IntPtr distCoeffs,
     IntPtr rvecs, IntPtr tvecs, CalibType flags,
     ref MCvTermCriteria criteria);
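A heavily hedged sketch of the assumed public wrapper, ArucoInvoke.CalibrateCameraAruco; allCorners, allIds, markerCounterPerFrame, board and imageSize are placeholders gathered from earlier marker detection:

 Mat cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
 Mat distCoeffs = new Mat(1, 5, DepthType.Cv64F, 1);
 using (VectorOfMat rvecs = new VectorOfMat())
 using (VectorOfMat tvecs = new VectorOfMat())
 {
     // wrapper name and parameter order assumed from the extern declaration above
     double reprojErr = ArucoInvoke.CalibrateCameraAruco(
         allCorners, allIds, markerCounterPerFrame, board, imageSize,
         cameraMatrix, distCoeffs, rvecs, tvecs, CalibType.Default,
         new MCvTermCriteria(30, 1e-6));
 }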
Example #10
        private void FrameGrabber(object sender, EventArgs e)
        {
            lblCantidad.Text = "0";
            NamePersons.Add("");
            try
            {
                currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //Convert to grayscale
                gray = currentFrame.Convert <Gray, Byte>();

                //Face detector
                MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.5, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));
                //1.2
                //Action for each detected element
                foreach (MCvAvgComp f in facesDetected[0])
                {
                    t      = t + 1;
                    result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, INTER.CV_INTER_CUBIC);
                    //Draw the rectangle around the face
                    currentFrame.Draw(f.rect, new Bgr(Color.Blue), 1);

                    if (trainingImages.ToArray().Length != 0)
                    {
                        //Recognition criteria based on the number of training images
                        MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.88);
                        //0.001
                        //Eigen class for face recognition
                        EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainingImages.ToArray(), labels.ToArray(), ref termCrit);
                        var fa = new Image <Gray, byte> [trainingImages.Count];

                        name = recognizer.Recognize(result);

                        //Draw the name for each detected and recognized face
                        //currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.Blue));
                    }


                    string   phrase = name;
                    string[] words  = phrase.Split('#');
                    NamePersons[t - 1] = name;
                    NamePersons.Add("");
                    //Set the number of detected faces
                    lblCantidad.Text = facesDetected[0].Length.ToString();
                    lblNombre.Text   = words[1];
                    lbApellido.Text  = words[2];
                    lbCedula.Text    = words[3];
                    lbEdad.Text      = words[4];
                    lb.Text          = words[5];
                }
                t = 0;

                //Concatenated names of all recognized faces
                for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
                {
                    names = names + NamePersons[nnn] + ", ";
                }

                //Show the processed and recognized faces
                imageBox1.Image = currentFrame;
                //lblNombre.Text = names;
                name = "";
                //Clear the list of names
                NamePersons.Clear();
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #11
        void FrameGrabber(object sender, EventArgs e)
        {
            label3.Text = "0";
            //label4.Text = "";
            NamePersons.Add("");


            //Get the current frame from the capture device
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Convert it to Grayscale
            gray = currentFrame.Convert <Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t      = t + 1;
                result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //Draw a red rectangle around the detected face
                currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);


                if (trainingImages.ToArray().Length != 0)
                {
                    //TermCriteria for face recognition; the number of trained images (ContTrain) acts as maxIteration
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    //Eigen face recognizer
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        labels.ToArray(),
                        3000,
                        ref termCrit);

                    name  = recognizer.Recognize(result);
                    name2 = name;

                    //Draw the label for each face detected and recognized
                    currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
                }

                NamePersons[t - 1] = name;
                NamePersons.Add("");


                //Set the number of faces detected on the scene
                label3.Text = facesDetected[0].Length.ToString();

                /*
                 * //Set the region of interest on the faces
                 *
                 * gray.ROI = f.rect;
                 * MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
                 * eye,
                 * 1.1,
                 * 10,
                 * Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                 * new Size(20, 20));
                 * gray.ROI = Rectangle.Empty;
                 *
                 * foreach (MCvAvgComp ey in eyesDetected[0])
                 * {
                 *  Rectangle eyeRect = ey.rect;
                 *  eyeRect.Offset(f.rect.X, f.rect.Y);
                 *  currentFrame.Draw(eyeRect, new Bgr(Color.Blue), 2);
                 * }
                 */
            }
            t = 0;

            //Concatenate the names of the recognized persons
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn] + ", ";
            }
            //Show the faces processed and recognized
            imageBoxFrameGrabber.Image = currentFrame;
            label4.Text = names;
            names       = "";
            //Clear the list(vector) of names
            NamePersons.Clear();


            DataTable      dtDatos;
            DataSet        ds;
            SqlDataAdapter datos, datosUpdate;
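            // NOTE: concatenating user-derived text into SQL is injection-prone; a parameterized query would be safer.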
            string         cadenaSelect = "SELECT socID, socDNI, socDireccion, socTelefono FROM socio WHERE socDNI LIKE '" + name2 + "'";

            SqlConnection conexion = new SqlConnection("Data Source=DESKTOP-47369CL\\SEMINARIO;Initial Catalog=master;Integrated Security=True");

            conexion.Open();
            SqlCommand comando = new SqlCommand(cadenaSelect, conexion);

            lectorSelect = comando.ExecuteReader();
            if (lectorSelect.Read())
            {
                //label4.Text = lector["socNombre"].ToString();
                lbldni.Text       = lector["socDNI"].ToString();
                lblDireccion.Text = lector["socDireccion"].ToString();
                lblTelefono.Text  = lector["socTelefono"].ToString();
            }
            //else
            //{
            //    lbldni.Text = "";
            //    lblDireccion.Text = "";
            //    lblTelefono.Text = "";
            //}
            lectorSelect.Close();
            conexion.Close();
        }
Example #12
 private static extern double cveFindTransformECC(IntPtr templateImage, IntPtr inputImage,
                                                  IntPtr warpMatrix, CvEnum.MotionType motionType,
                                                  ref MCvTermCriteria criteria,
                                                  IntPtr inputMask);
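Example #15 below drives this function through the public wrapper in a full pyramid loop; a minimal hedged sketch:

 // Estimate a Euclidean warp aligning inputMat to templateMat (placeholder Mats).
 Mat warpMatrix = Mat.Eye(2, 3, DepthType.Cv32F, 1);
 double ecc = CvInvoke.FindTransformECC(templateMat, inputMat, warpMatrix,
     MotionType.Euclidean, new MCvTermCriteria(50, 0.001));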
Example #13
        void FrameGrabber(object sender, EventArgs e)
        {
            label3.Text = "0";

            NamePersons.Add("");
            adjunto.Add("");
            Fecha.Add("");

            //Get the frame from the capture device in use
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Convert to grayscale
            gray = currentFrame.Convert <Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t      = t + 1;
                result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

                currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);


                if (trainingImages.ToArray().Length != 0)
                {
                    //Maximum iterations to use when recognizing the face
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    //Eigen face recognizer
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        labels.ToArray(),
                        3000,
                        ref termCrit);

                    name = recognizer.Recognize(result);
                    EigenObjectRecognizer dates = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        fecha.ToArray(),
                        3000,
                        ref termCrit);
                    epoca = dates.Recognize(result);

                    EigenObjectRecognizer adjunto = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        links.ToArray(),
                        3000,
                        ref termCrit);
                    link = adjunto.Recognize(result);

                    //Draw the label with the name of the recognized face
                    currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
                }
                adjunto[t - 1] = link;
                adjunto.Add("");
                NamePersons[t - 1] = name;
                NamePersons.Add("");
                Fecha[t - 1] = epoca;
                Fecha.Add("");

                //Show the number of detected faces
                label3.Text = facesDetected[0].Length.ToString();
            }
            t = 0;

            //Concatenate the names of the recognized persons
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names    = names + NamePersons[nnn];
                adjuntar = adjuntar + adjunto[nnn];
                genial   = genial + Fecha[nnn];
            }
            //Show the faces processed and recognized
            imageBoxFrameGrabber.Image = currentFrame;
            label4.Text = names;

            names    = "";
            adjuntar = "";
            genial   = "";
            //Clear the list(vector) of names
            NamePersons.Clear();
            adjunto.Clear();
            Fecha.Clear();
            if (label4.Text != "")
            {
                DateTime t2 = new DateTime(DateTime.Now.Year, DateTime.Now.Month, DateTime.Now.Day, 1, 0, 0);
                if (DateTime.Compare(DateTime.Now, t2) > 0)
                {
                    File.AppendAllText(Application.StartupPath + "/TrainedFaces/Personasretardo.txt", label4.Text + "%");
                    System.Threading.Thread.Sleep(5000);
                }
            }
        }
Example #14
        public void MainStuff()
        {
            SRC_Img       = new Image <Gray, byte>(@"C:\Users\Админ\Downloads\image63341262,2002.png");
            Corrected_Img = SRC_Img.Clone();

            //CvInvoke.CLAHE(SRC_Img, 40, new Size(8, 8), Corrected_Img);
            //CvInvoke.FindChessboardCorners(SRC_Img, new Size(8,8), vec);
            #region
            PointF[] corners = new PointF[] { new PointF(100, 196), new PointF(261, 190), new PointF(417, 192), new PointF(584, 201),
                                              new PointF(111, 277), new PointF(284, 287), new PointF(458, 291), new PointF(580, 284),
                                              new PointF(130, 368), new PointF(276, 395), new PointF(429, 391), new PointF(563, 365) };
            #endregion
            VectorOfPointF vec = new VectorOfPointF();
            vec.Push(corners);
            // X: 0 - 480 / 3 ||0 159 329 479
            // Y: 0 - 210 / 2 || 0 104 209

            MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] { new MCvPoint3D32f(0, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, 0, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, 0, 0.0f),
                                                               new MCvPoint3D32f(0, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height / 2 - 1, 0.0f),
                                                               new MCvPoint3D32f(0, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(2 * SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f), new MCvPoint3D32f(SRC_Img.Width - 1, SRC_Img.Height - 1, 0.0f) };

            /*
             * for (int i = 0; i < objCorners.Length; i++)
             * {
             * objCorners[i].X += SRC_Img.Width / 2;
             * objCorners[i].Y += SRC_Img.Height / 2;
             * }*/
            //VectorOfPointF objvec = new VectorOfPointF();
            //objvec.Push(objCorners);


            //Corrected_Img = FindTable(SRC_Img);
            Matrix <double> CameraMatrix = new Matrix <double>(3, 3, 1);
            CameraMatrix[0, 0] = 1;
            CameraMatrix[1, 1] = 1;
            CameraMatrix[2, 2] = 1;
            CameraMatrix[0, 2] = 349.417;
            CameraMatrix[1, 2] = 286.417;

            Mat newCameraMatrix = CvInvoke.GetDefaultNewCameraMatrix(CameraMatrix);
            //CvInvoke.Undistort(SRC_Img, Corrected_Img,
            //CvInvoke.FindChessboardCorners(SRC_Img, new System.Drawing.Size(5,5),

            Mat             distCoeffs    = new Mat(1, 5, DepthType.Cv32F, 1);
            Mat             rotCoeffs     = new Mat();
            Mat             translVectors = new Mat();
            MCvTermCriteria TermCriteria  = new MCvTermCriteria(30, 0.1);
            Corrected_Img = SRC_Img.Clone();
            CvInvoke.DrawChessboardCorners(Corrected_Img, new System.Drawing.Size(4, 3), vec, true);
            //CvInvoke.CornerSubPix(SRC_Img, vec, new Size(2, 2), new Size(-1, -1), TermCriteria);
            //CvInvoke.DrawChessboardCorners(SRC_Img, new System.Drawing.Size(4, 3), objvec, true);

            /*
             * try
             * {
             * CvInvoke.Remap(SRC_Img, Corrected_Img, vec, objvec, Inter.Nearest, BorderType.Constant);
             * } catch (Exception ex) { string s = ex.Message; }
             */
            VectorOfPoint3D32F obj3dvec = new VectorOfPoint3D32F();
            obj3dvec.Push(objCorners);

            try
            {
                MCvPoint3D32f[][] corners_object_list = new MCvPoint3D32f[1][];
                PointF[][]        corners_points_list = new PointF[1][];
                corners_object_list[0] = objCorners;
                corners_points_list[0] = corners;
                double r = CvInvoke.CalibrateCamera(obj3dvec,
                                                    vec,
                                                    SRC_Img.Size,
                                                    CameraMatrix,
                                                    distCoeffs,
                                                    rotCoeffs,
                                                    translVectors,
                                                    CalibType.Default,
                                                    TermCriteria);

                //double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, out EX_Param);
                r += 0;
                //Matrix<float> dist = new Matrix<float>( new float[] {

                //CvInvoke.Undistort(SRC_Img, Corrected_Img, cameraMatrix, );
            } catch (Exception ex) { }

            IntrinsicCameraParameters IC = new IntrinsicCameraParameters(8);
            Matrix <float>            Map1, Map2;
            IC.InitUndistortMap(SRC_Img.Width, SRC_Img.Height, out Map1, out Map2);
            Image <Gray, Byte> stuff = Undistort(SRC_Img);

            imageBox1.Image = SRC_Img.Resize(imageBox1.Width, imageBox1.Height, Inter.Linear);
            imageBox2.Image = Corrected_Img.Resize(imageBox1.Width, imageBox1.Height, Inter.Linear);
        }
Example #15
        public AlignedResult CreateAlignedSecondImageEcc(SKBitmap firstImage, SKBitmap secondImage, bool discardTransX, AlignmentSettings settings)
        {
#if __NO_EMGU__
            return(null);
#endif
            var topDownsizeFactor = settings.EccDownsizePercentage / 100f;

            var eccs = new List <double>();
            using var mat1       = new Mat();
            using var mat2       = new Mat();
            using var warpMatrix = Mat.Eye(2, 3, DepthType.Cv32F, 1);
            var termCriteria = new MCvTermCriteria(settings.EccIterations, Math.Pow(10, -settings.EccEpsilonLevel));
            for (var ii = settings.EccPyramidLayers - 1; ii >= 0; ii--)
            {
                var downsize = topDownsizeFactor / Math.Pow(2, ii);
                CvInvoke.Imdecode(GetBytes(firstImage, downsize), ImreadModes.Grayscale, mat1);
                CvInvoke.Imdecode(GetBytes(secondImage, downsize), ImreadModes.Grayscale, mat2);

                try
                {
                    var ecc = CvInvoke.FindTransformECC(mat2, mat1, warpMatrix, MotionType.Euclidean, termCriteria);
                    eccs.Add(ecc);
                }
                catch (CvException e)
                {
                    if (e.Status == (int)ErrorCodes.StsNoConv)
                    {
                        return(null);
                    }
                    throw;
                }

                if (warpMatrix.IsEmpty)
                {
                    return(null);
                }

                unsafe
                {
                    var ptr = (float *)warpMatrix.DataPointer.ToPointer(); //ScaleX
                    ptr++;                                                 //SkewX
                    ptr++;                                                 //TransX
                    *ptr *= 2;                                             //scale up the shifting
                    ptr++;                                                 //SkewY
                    ptr++;                                                 //ScaleY
                    ptr++;                                                 //TransY
                    *ptr *= 2;                                             //scale up the shifting
                }
            }

            var lastUpscaleFactor = 1 / (2 * topDownsizeFactor);
            ScaleUpCvMatOfFloats(warpMatrix, lastUpscaleFactor);

            if (eccs.Last() * 100 < settings.EccThresholdPercentage)
            {
                return(null);
            }

            var skMatrix = ConvertCvMatOfFloatsToSkMatrix(warpMatrix, discardTransX);

            var result = new AlignedResult
            {
                TransformMatrix2 = skMatrix
            };

            using var alignedMat             = new Mat();
            using var fullSizeColorSecondMat = new Mat();
            CvInvoke.Imdecode(GetBytes(secondImage, 1), ImreadModes.Color, fullSizeColorSecondMat);
            CvInvoke.WarpAffine(fullSizeColorSecondMat, alignedMat, warpMatrix,
                                fullSizeColorSecondMat.Size);

#if __IOS__
            result.AlignedBitmap2 = alignedMat.ToCGImage().ToSKBitmap();
#elif __ANDROID__
            result.AlignedBitmap2 = alignedMat.ToBitmap().ToSKBitmap();
#endif
            return(result);
        }
Example #16
 internal extern static IntPtr CvBOWKMeansTrainerCreate(int clusterCount, ref MCvTermCriteria termcrit, int attempts, CvEnum.KMeansInitType flags);
Example #17
        void FrameGrabber(object sender, EventArgs e)
        {
            //label3.Text = "0";
            //label4.Text = "";
            NamePersons.Add("");


            //Get the current frame from the capture device
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            imageBoxFrameGrabber.Image = currentFrame;
            //Convert it to Grayscale
            gray = currentFrame.Convert <Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t      = t + 1;
                result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //Draw a red rectangle around the detected face
                currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);


                if (trainingImages.ToArray().Length != 0)
                {
                    //TermCriteria for face recognition; the number of trained images (ContTrain) acts as maxIteration
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    //Eigen face recognizer
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        labels.ToArray(),
                        1850,
                        ref termCrit);

                    name = recognizer.Recognize(result);

                    //Draw the label for each face detected and recognized
                    currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
                }

                NamePersons[t - 1] = name;
                NamePersons.Add("");

                numDetected = facesDetected[0].Length;

                //Set the number of faces detected on the scene
                if (numDetected > 0)
                {
                    label1.Text = "You are not recognized";
                }


                /*
                 * //Set the region of interest on the faces
                 *
                 * gray.ROI = f.rect;
                 * MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
                 * eye,
                 * 1.1,
                 * 10,
                 * Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                 * new Size(20, 20));
                 * gray.ROI = Rectangle.Empty;
                 *
                 * foreach (MCvAvgComp ey in eyesDetected[0])
                 * {
                 *  Rectangle eyeRect = ey.rect;
                 *  eyeRect.Offset(f.rect.X, f.rect.Y);
                 *  currentFrame.Draw(eyeRect, new Bgr(Color.Blue), 2);
                 * }
                 */
            }
            t = 0;

            //Concatenate the names of the recognized persons
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn] + ", ";
            }
            //Show the faces processed and recognized
            //imageBoxFrameGrabber.Image = currentFrame;
            //label4.Text = names;
            string xnames = "";

            if (numDetected > 0)
            {
                names = names.Trim();
                if (names.Length > 0)
                {
                    xnames = names.Substring(0, names.Length - 1);
                }

                if ((xnames == "") && (numDetected == 1))
                {
                    label4.Text    = "Please register first";
                    label4.Visible = true;
                }
                else if ((xnames != "") && (numDetected == 1))
                {
                    isLogin = true;

                    names          = names.Substring(0, names.Length - 1);
                    whosLogin      = names;
                    label1.Text    = "Welcome " + names;
                    label5.Visible = true;
                    label2.Visible = true;
                    label3.Text    = "It is not you?";
                    label2.Text    = "Login Here";
                    label3.Visible = true;
                    label4.Visible = false;
                    //label4.Text = "Welcome,"+names;
                    // label4.Visible = true;
                    Application.Idle -= new EventHandler(FrameGrabber);
                }
            }

            // label4.Text = names;
            names = "";


            //Clear the list(vector) of names
            NamePersons.Clear();
        }
Example #18
 public EigenObjectRecognize(Image <Gray, Byte>[] images, String[] labels, ref MCvTermCriteria termCrit)
     : this(images, labels, 0, ref termCrit)
 {
 }
Example #19
 /// <summary>
 /// Performs mean-shift filtering for each point of the source image. It maps each point of the source
 /// image into another point, and as the result we have new color and new position of each point.
 /// </summary>
 /// <param name="src">Source CudaImage. Only CV 8UC4 images are supported for now.</param>
 /// <param name="dst">Destination CudaImage, containing color of mapped points. Will have the same size and type as src.</param>
 /// <param name="sp">Spatial window radius.</param>
 /// <param name="sr">Color window radius.</param>
 /// <param name="criteria">Termination criteria.</param>
 /// <param name="stream">Use a Stream to call the function asynchronously (non-blocking) or null to call the function synchronously (blocking).</param>
 public static void MeanShiftFiltering(IInputArray src, IOutputArray dst, int sp, int sr, MCvTermCriteria criteria,
                                       Stream stream = null)
 {
     using (InputArray iaSrc = src.GetInputArray())
         using (OutputArray oaDst = dst.GetOutputArray())
             cudaMeanShiftFiltering(iaSrc, oaDst, sp, sr, ref criteria, stream);
 }
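A hedged usage sketch; the source must be 8UC4 (e.g. Bgra) per the summary above, and the CudaImage constructors used here are assumptions about this Emgu version:

 using (CudaImage<Bgra, byte> gpuSrc = new CudaImage<Bgra, byte>(bgrImage.Convert<Bgra, byte>()))
 using (CudaImage<Bgra, byte> gpuDst = new CudaImage<Bgra, byte>(gpuSrc.Size))
 {
     // spatial radius 10, color radius 20; stop after 5 iterations or epsilon 1
     MeanShiftFiltering(gpuSrc, gpuDst, 10, 20, new MCvTermCriteria(5, 1), null);
 }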
Example #20
 private static extern void cveCamShift(
     IntPtr probImage,
     ref Rectangle window,
     ref MCvTermCriteria criteria,
     ref RotatedRect box);
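The public wrapper, CvInvoke.CamShift, refines the window and returns the tracked object's rotated bounding box (hedged sketch; backProjection is a placeholder):

 Rectangle trackWindow = new Rectangle(100, 100, 60, 60);
 RotatedRect trackBox = CvInvoke.CamShift(backProjection, ref trackWindow, new MCvTermCriteria(10, 1));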
Example #21
 private static extern void cudaMeanShiftProc(IntPtr src, IntPtr dstr, IntPtr dstsp, int sp, int sr, ref MCvTermCriteria criteria, IntPtr stream);
Example #22
        private void FrameGrabber(object sender, EventArgs e)
        {
            lblNumeroDetect.Text = "0";
            NamePersons.Add("");
            try
            {
                currentFrame = grabber.QueryFrame().Resize(400, 300, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                currentFrame._Flip(FLIP.HORIZONTAL);
                //Convert to grayscale
                gray = currentFrame.Convert <Gray, Byte>();

                //Face detector
                MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(face, 1.2, 10, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new Size(20, 20));

                //Action for each detected element
                foreach (MCvAvgComp f in facesDetected[0])
                {
                    t      = t + 1;
                    result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, INTER.CV_INTER_CUBIC);
                    //Draw the rectangle around the face
                    currentFrame.Draw(f.rect, new Bgr(Color.FromArgb(0, 122, 204)), 1);

                    if (trainingImages.ToArray().Length != 0)
                    {
                        //Recognition criteria based on the number of training images
                        MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                        //Eigen class for face recognition
                        EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainingImages.ToArray(), labels.ToArray(), ref termCrit);
                        var fa = new Image <Gray, byte> [trainingImages.Count]; //currentFrame.Convert<Bitmap>();

                        name = recognizer.Recognize(result);
                        //Draw the name for each detected and recognized face
                        currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.Red));
                    }

                    NamePersons[t - 1] = name;
                    NamePersons.Add("");
                    //Set the number of detected faces
                    lblNumeroDetect.Text = facesDetected[0].Length.ToString();
                    lblNadie.Text        = name;
                }
                t = 0;

                //Concatenated names of all recognized faces
                for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
                {
                    names = names + NamePersons[nnn] + ", ";
                }

                //Show the processed and recognized faces
                imageBoxFrameGrabber.Image = currentFrame;
                name = "";
                //Clear the list of names
                NamePersons.Clear();
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #23
 private static extern void cudaMeanShiftSegmentation(IntPtr src, IntPtr dst, int sp, int sr, int minsize, ref MCvTermCriteria criteria, IntPtr stream);
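A hedged sketch of the assumed public wrapper, CudaInvoke.MeanShiftSegmentation; parameter names follow the extern above (sp = spatial radius, sr = color radius, minsize = minimum segment size):

 Mat segmented = new Mat(); // segmentation result, written back to host memory
 CudaInvoke.MeanShiftSegmentation(gpuBgraImage, segmented, 10, 20, 50, new MCvTermCriteria(5, 1), null);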
Example #24
        void FrameGrabber(object sender, EventArgs e)
        {
            label3.Text = "0";
            //label4.Text = "";
            NamePersons.Add("");


            //Get the current frame from the capture device
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Convert it to Grayscale
            gray = currentFrame.Convert <Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t      = t + 1;
                result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //Draw a red rectangle around the detected face
                currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);


                if (trainingImages.ToArray().Length != 0)
                {
                    //TermCriteria for face recognition; the number of trained images (ContTrain) acts as maxIteration
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    //Eigen face recognizer
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        labels.ToArray(),
                        3000,
                        ref termCrit);

                    //A stricter eigen-distance threshold makes the recognizer reject more faces:

                    /*
                     * EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                     * trainingImages.ToArray(),
                     * labels.ToArray(),
                     * 5000,
                     * ref termCrit);
                     */

                    name = recognizer.Recognize(result);

                    //Draw the label for each face detected and recognized
                    currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
                }

                NamePersons[t - 1] = name;
                NamePersons.Add("");


                //Set the number of faces detected on the scene
                label3.Text = facesDetected[0].Length.ToString();

                /*
                 * //Set the region of interest on the faces
                 *
                 * gray.ROI = f.rect;
                 * MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
                 * eye,
                 * 1.1,
                 * 10,
                 * Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                 * new Size(20, 20));
                 * gray.ROI = Rectangle.Empty;
                 *
                 * foreach (MCvAvgComp ey in eyesDetected[0])
                 * {
                 *  Rectangle eyeRect = ey.rect;
                 *  eyeRect.Offset(f.rect.X, f.rect.Y);
                 *  currentFrame.Draw(eyeRect, new Bgr(Color.Blue), 2);
                 * }
                 */
            }
            t = 0;

            //Concatenate the names of the recognized persons
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn] + ", ";
            }
            //Show the faces processed and recognized
            imageBoxFrameGrabber.Image = currentFrame;
            label4.Text = names;
            names       = "";
            //Clear the list(vector) of names
            NamePersons.Clear();
        }
Example #25
        // EMGU's calibrate camera method has a bug.
        // Refer this case: https://stackoverflow.com/questions/33127581/how-do-i-access-the-rotation-and-translation-vectors-after-camera-calibration-in
        public static double CalibrateCamera(MCvPoint3D32f[][] objectPoints, PointF[][] imagePoints, Size imageSize, IInputOutputArray cameraMatrix, IInputOutputArray distortionCoeffs, CalibType calibrationType, MCvTermCriteria termCriteria, out Mat[] rotationVectors, out Mat[] translationVectors)
        {
            System.Diagnostics.Debug.Assert(objectPoints.Length == imagePoints.Length, "The number of images for objects points should be equal to the number of images for image points");
            int imageCount = objectPoints.Length;

            using (VectorOfVectorOfPoint3D32F vvObjPts = new VectorOfVectorOfPoint3D32F(objectPoints))
                using (VectorOfVectorOfPointF vvImgPts = new VectorOfVectorOfPointF(imagePoints)) {
                    double reprojectionError;
                    using (VectorOfMat rVecs = new VectorOfMat())
                        using (VectorOfMat tVecs = new VectorOfMat()) {
                            reprojectionError  = CvInvoke.CalibrateCamera(vvObjPts, vvImgPts, imageSize, cameraMatrix, distortionCoeffs, rVecs, tVecs, calibrationType, termCriteria);
                            rotationVectors    = new Mat[imageCount];
                            translationVectors = new Mat[imageCount];
                            for (int i = 0; i < imageCount; i++)
                            {
                                rotationVectors[i] = new Mat();
                                using (Mat matR = rVecs[i])
                                    matR.CopyTo(rotationVectors[i]);
                                translationVectors[i] = new Mat();
                                using (Mat matT = tVecs[i])
                                    matT.CopyTo(translationVectors[i]);
                            }
                        }
                    return(reprojectionError);
                }
        }
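A hedged usage sketch for the helper above; objectPoints, imagePoints and imageSize are placeholders from chessboard (or similar) detection:

 Mat cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
 Mat distCoeffs = new Mat(1, 5, DepthType.Cv64F, 1);
 Mat[] rotationVectors, translationVectors;
 double reprojectionError = CalibrateCamera(objectPoints, imagePoints, imageSize,
     cameraMatrix, distCoeffs, CalibType.Default, new MCvTermCriteria(30, 0.001),
     out rotationVectors, out translationVectors);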
Example #26
        void FrameGrabber(object sender, EventArgs e)
        {
            label3.Text = "0";
            //label4.Text = "";
            NamePersons.Add("");


            //Get the current frame from the capture device
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Convert it to Grayscale
            gray = currentFrame.Convert <Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                face,
                1.2,
                10,
                Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                new Size(20, 20));

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t      = t + 1;
                result = currentFrame.Copy(f.rect).Convert <Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //Draw a red rectangle around the detected face
                currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);


                if (trainingImages.ToArray().Length != 0)
                {
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                        trainingImages.ToArray(),
                        labels.ToArray(),
                        3000,
                        ref termCrit);

                    name = recognizer.Recognize(result);

                    currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
                }

                NamePersons[t - 1] = name;
                NamePersons.Add("");


                label3.Text = facesDetected[0].Length.ToString();
            }
            t = 0;

            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn] + ", ";
                if (panel2.Visible == false)
                {
                    string          connectionString = "Data Source = localhost; User ID = root; Password = toor123; Database=attendance; pooling = false; port = 3306; Allow User Variables = true; SslMode = none";
                    MySqlConnection Conn             = new MySqlConnection(connectionString);
                    Conn.Open();
                    if (Conn.State == ConnectionState.Open)
                    {
                        MySqlDataReader Reader2;
                        //konek.Close();
                        DateTime     date            = DateTime.Now;
                        string       mark_attendance = "UPDATE `students` SET `" + date.ToString("M/d/yy") + "` = 'P' WHERE `students`.`Name` = '" + NamePersons[nnn] + "';";
                        MySqlCommand cmd2            = Conn.CreateCommand();
                        cmd2.CommandText = mark_attendance;
                        Reader2          = cmd2.ExecuteReader();
                        Reader2.Close();
                        Reader2.Dispose();
                    }
                    Conn.Close();
                }
            }
            imageBoxFrameGrabber.Image = currentFrame;
            label4.Text = names;
            names       = "";
            NamePersons.Clear();
        }
Example #27
        /// <summary>
        /// Create an object recognizer using the specified training data and parameters
        /// </summary>
        /// <param name="images">The images used for training, each of them should be the same size. It's recommended the images are histogram normalized</param>
        /// <param name="labels">The labels corresponding to the images</param>
        /// <param name="eigenDistanceThreshold">
        /// The eigen distance threshold, (0, ~1000].
        /// The smaller the number, the more likely an examined image will be treated as an unrecognized object.
        /// If the threshold is &lt; 0, the recognizer will always treat the examined image as one of the known objects.
        /// </param>
        /// <param name="termCrit">The criteria for recognizer training</param>
        public EigenObjectRecognizer(Image <Gray, Byte>[] images, String[] labels, double eigenDistanceThreshold, ref MCvTermCriteria termCrit)
        {
            Debug.Assert(images.Length == labels.Length, "The number of images should equal the number of labels");
            Debug.Assert(eigenDistanceThreshold >= 0.0, "Eigen-distance threshold should always be >= 0.0");

            CalcEigenObjects(images, ref termCrit, out _eigenImages, out _avgImage);

            /*
             * _avgImage.SerializationCompressionRatio = 9;
             *
             * foreach (Image<Gray, Single> img in _eigenImages)
             *  //Set the compression ration to best compression. The serialized object can therefore save spaces
             *  img.SerializationCompressionRatio = 9;
             */

            _eigenValues = Array.ConvertAll <Image <Gray, Byte>, Matrix <float> >(images,
                                                                                  delegate(Image <Gray, Byte> img)
            {
                return(new Matrix <float>(EigenDecomposite(img, _eigenImages, _avgImage)));
            });

            _labels = labels;

            _eigenDistanceThreshold = eigenDistanceThreshold;
        }
Example #28
        private Image <Bgr, byte> findFaces(Image <Bgr, byte> img)
        {
            name = "";
            //Convert it to Grayscale
            Image <Gray, byte> gray = img.Convert <Gray, Byte>();

            //Equalization step
            gray._EqualizeHist();
            try
            {
                //Face Detector
                MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
                    face,
                    scaleFactor, minNeighbors,
                    //Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.FIND_BIGGEST_OBJECT,
                    HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                    new Size(minsize, minsize));

                Image <Gray, byte> result;

                foreach (Control ct in flowLayoutPanel1.Controls)
                {
                    (ct as PictureBox).Image = null;
                    ct.Dispose();
                }
                flowLayoutPanel1.Controls.Clear();

                int ContTrain = 0;
                //Action for each element detected
                foreach (MCvAvgComp f in facesDetected[0])
                {
                    //result = gray.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                    result = gray.Convert <Gray, byte>().Resize(500, 500, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);


                    //Draw a red rectangle around the detected face
                    img.Draw(f.rect, new Bgr(Color.Red), 2);

                    if (trainedFaces.Count != 0 && !skipname.Checked)
                    {
                        ContTrain = trainedFaces.Count;
                        //TermCriteria for face recognition; the number of trained images acts as maxIteration
                        MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain * 2, 0.001);
                        try
                        {
                            //Eigen face recognizer
                            EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainedFaces.OrderBy(x => x.path).Select(x => x.img).ToArray(), trainedFaces.OrderBy(x => x.path).Select(x => x.name).ToList <string>().ToArray(), 4000, ref termCrit);

                            name = recognizer.Recognize(result);

                            //Draw the label for each face detected and recognized
                            img.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
                        }
                        catch (Exception ex) { errorLabel.Text = ex.Message + "\n" + ex.StackTrace; }
                    }
                    addToFlow(img, f, name);
                }
            }
            catch (Exception ex) { errorLabel.Text = ex.Message + "\n" + ex.StackTrace; }
            GC.Collect();
            return(img);
        }
Example #29
 /// <summary>
 /// Create a new BOWKmeans trainer
 /// </summary>
 /// <param name="clusterCount">Number of clusters to split the set by.</param>
 /// <param name="termcrit">Specifies maximum number of iterations and/or accuracy (distance the centers move by between the subsequent iterations). Use empty termcrit for default.</param>
 /// <param name="attempts">The number of attemps. Use 3 for default</param>
 /// <param name="flags">Kmeans initialization flag. Use PPCenters for default.</param>
 public BOWKMeansTrainer(int clusterCount, MCvTermCriteria termcrit, int attempts, CvEnum.KMeansInitType flags)
 {
     _ptr = CvBOWKMeansTrainerCreate(clusterCount, ref termcrit, attempts, flags);
 }
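A hedged usage sketch; descriptors is a placeholder Mat of float feature descriptors, one row per feature:

 MCvTermCriteria tc = new MCvTermCriteria(10, 0.001);
 using (BOWKMeansTrainer trainer = new BOWKMeansTrainer(100, tc, 3, CvEnum.KMeansInitType.PPCenters))
 {
     trainer.Add(descriptors);           // accumulate descriptors from all training images
     Mat vocabulary = trainer.Cluster(); // 100 cluster centers form the visual vocabulary
 }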
Example #30
 internal static extern IntPtr CvEMDefaultCreate(int nclusters, MlEnum.EmCovarianMatrixType covMatType, ref MCvTermCriteria termcrit);
Example #31
        public void SuperR()
        {
            SRC_Img       = new Image <Gray, byte>(@"C:\Users\Админ\Downloads\image63341262,2002.png");
            Corrected_Img = SRC_Img.Clone();

            PointF[] corners = new PointF[] { new PointF(100, 196), new PointF(261, 190), new PointF(417, 192), new PointF(584, 201),
                                              new PointF(111, 277), new PointF(284, 287), new PointF(458, 291), new PointF(580, 284),
                                              new PointF(130, 368), new PointF(276, 395), new PointF(429, 391), new PointF(563, 365) };

            /*MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] { new MCvPoint3D32f( 0, 0, 0.0f),    new MCvPoint3D32f(SRC_Img.Width / 3 - 1, 0, 0.0f),       new MCvPoint3D32f( 2 * SRC_Img.Width / 3 - 1, 0, 0.0f),    new MCvPoint3D32f( SRC_Img.Width - 1, 0, 0.0f),
             *                                  new MCvPoint3D32f( 0, SRC_Img.Height / 2 - 1, 0.0f),  new MCvPoint3D32f(SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f),     new MCvPoint3D32f( 2 * SRC_Img.Width / 3 - 1, SRC_Img.Height / 2 - 1, 0.0f),  new MCvPoint3D32f( SRC_Img.Width - 1, SRC_Img.Height / 2 - 1, 0.0f),
             *                                  new MCvPoint3D32f( 0, SRC_Img.Height - 1, 0.0f),  new MCvPoint3D32f( SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f),    new MCvPoint3D32f( 2 * SRC_Img.Width / 3 - 1, SRC_Img.Height - 1, 0.0f),  new MCvPoint3D32f( SRC_Img.Width - 1, SRC_Img.Height - 1, 0.0f)
             *                            };
             */
            // X: 0 - 480 / 3 ||0 159 329 479
            // Y: 0 - 210 / 2 || 0 104 209

            MCvPoint3D32f[] objCorners = new MCvPoint3D32f[] { new MCvPoint3D32f(0, 0, 0.0f), new MCvPoint3D32f(159, 0, 0.0f), new MCvPoint3D32f(329, 0, 0.0f), new MCvPoint3D32f(479, 0, 0.0f),
                                                               new MCvPoint3D32f(0, 104, 0.0f), new MCvPoint3D32f(159, 104, 0.0f), new MCvPoint3D32f(329, 104, 0.0f), new MCvPoint3D32f(479, 104, 0.0f),
                                                               new MCvPoint3D32f(0, 209, 0.0f), new MCvPoint3D32f(159, 209, 0.0f), new MCvPoint3D32f(329, 209, 0.0f), new MCvPoint3D32f(479, 209, 0.0f) };

            VectorOfPointF veccorners = new VectorOfPointF();

            veccorners.Push(corners);
            VectorOfPoint3D32F vecobjcorners = new VectorOfPoint3D32F();

            vecobjcorners.Push(objCorners);

            MCvTermCriteria TermCriteria = new MCvTermCriteria(30, 0.1);

            CvInvoke.CornerSubPix(SRC_Img, veccorners, new Size(2, 2), new Size(-1, -1), TermCriteria);

            IntrinsicCameraParameters intrisic = new IntrinsicCameraParameters();

            ExtrinsicCameraParameters[] extrinsic;
            intrisic.IntrinsicMatrix = new Matrix <double>(new double[, ] {
                { 1, 0, 349.417 }, { 0, 1, 286.417 }, { 0, 0, 1 }
            });
            try
            {
                Matrix <float> distortCoeffs   = new Matrix <float>(1, 4);
                Mat            rotationVectors = new Mat();
                //rotationVectors[0] = new Mat(3,1, DepthType.Cv32F, 1);
                Mat translationVectors = new Mat();
                //translationVectors[0] = new Mat(1, 3, DepthType.Cv32F, 1);

                /*
                 * double error = CvInvoke.CalibrateCamera(new MCvPoint3D32f[][] { objCorners }, new PointF[][] { veccorners.ToArray() },
                 *   SRC_Img.Size, intrisic.IntrinsicMatrix, distortCoeffs, CalibType.UserIntrinsicGuess, new MCvTermCriteria(30, 0.01), out rotationVectors, out translationVectors);
                 */
                /*
                 *
                 * Fisheye.Calibrate(vecobjcorners, veccorners, SRC_Img.Size, intrisic.IntrinsicMatrix, distortCoeffs, rotationVectors, translationVectors,
                 * Fisheye.CalibrationFlag.UseIntrinsicGuess, TermCriteria);
                 * */

                Matrix <float> matrix = new Matrix <float>(new float[, ] {
                    { 1, 0, 349 }, { 0, 1, 286 }, { 0, 0, 1 }
                });
                Fisheye.UndistorImage(SRC_Img, Corrected_Img, matrix, new VectorOfFloat(new float[] { 3500, 3500, 0, 0 }));
                Image <Gray, Byte> Res_Img = new Image <Gray, byte>(2 * SRC_Img.Width, SRC_Img.Height);
                CvInvoke.HConcat(SRC_Img, Corrected_Img, Res_Img);
                int error = 0;
                error++;
                //error += 0;
                //Array aa = rotationVectors[0].Data;
                //error += 0;
                //float q = rotationVectors.ElementAt<float>(0);
            }
            catch (Exception) { }
        }