Пример #1
0
        /// <summary>
        /// Opens the chessboard images and extracts their corner points.
        /// </summary>
        /// <param name="fileList">List of chessboard image file names.</param>
        /// <param name="boardSize">Number of inner chessboard corners (x-1, y-1).</param>
        /// <returns>Number of images in which the full chessboard was found.</returns>
        private int AddChessboardPoints(List <string> fileList, Size boardSize)
        {
            // 3D scene points: inner chessboard corners at (x, y, z) = (i, j, 0).
            MCvPoint3D32f[] objectCorners = new MCvPoint3D32f[boardSize.Height * boardSize.Width];
            for (int i = 0; i < boardSize.Height; i++)
            {
                for (int j = 0; j < boardSize.Width; j++)
                {
                    objectCorners[i * boardSize.Width + j] = new MCvPoint3D32f(i, j, 0.0f);
                }
            }

            int successes = 0; // number of images where the chessboard was found

            // Process every input image (viewing angle).
            for (int i = 0; i < fileList.Count; i++)
            {
                // FIX: dispose the image and the corner vector (both leaked before).
                using (var cornerPoints = new VectorOfPointF())
                using (var image = new Image <Gray, Byte>(fileList[i]))
                {
                    // BUG FIX: the old "cornerPoints == null" check could never fire,
                    // because the vector is always allocated; the boolean result of
                    // FindChessboardCorners is the real success flag.
                    bool found = CvInvoke.FindChessboardCorners(image, boardSize, cornerPoints, CalibCbType.Default);
                    if (!found || cornerPoints.Size == 0)
                    {
                        continue; // no chessboard in this image, take the next one
                    }

                    // Refine the corner locations to sub-pixel accuracy.
                    CvInvoke.CornerSubPix(image, cornerPoints, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    // Accept the image only when the full corner grid was detected.
                    if (cornerPoints.Size == boardSize.Height * boardSize.Width)
                    {
                        AddPoints(cornerPoints.ToArray(), objectCorners);
                        successes++;
                    }
                }
            }
            return(successes);
        }
        /// <summary>
        /// Extracts facial landmarks from every image in a folder and serializes the
        /// (optionally face-localised and normalised) points to the output file.
        /// Images where no face or no landmarks are found are skipped.
        /// </summary>
        /// <param name="imagesFolder">Folder whose files are processed.</param>
        /// <param name="outputFilepath">Output file (overwritten if it exists).</param>
        /// <param name="faceDetector">Cascade classifier used to localise the face.</param>
        /// <param name="facemark">Facemark model used to place the landmarks.</param>
        static private void RunWithImagesFolder(string imagesFolder, string outputFilepath, CascadeClassifier faceDetector, FacemarkLBF facemark)
        {
            using (StreamWriter writer = new StreamWriter(outputFilepath, false))
            {
                foreach (string filename in Directory.EnumerateFiles(imagesFolder))
                {
                    // FIX: dispose the intermediate Mat and the grayscale image
                    // (both leaked on every iteration before).
                    using (Mat loaded = CvInvoke.Imread(filename, ImreadModes.Grayscale))
                    using (Image <Gray, byte> image = new Image <Gray, byte>(loaded.Bitmap))
                    {
                        // Default to the whole image; narrow to the detected face if requested.
                        Rectangle face = image.ROI;
                        if (localiseFace)
                        {
                            Rectangle?detectionResult = DetectFace(faceDetector, image);
                            if (!detectionResult.HasValue)
                            {
                                continue;
                            }
                            face = detectionResult.Value;
                        }

                        VectorOfPointF landmarks = MarkFacialPoints(facemark, image, face, out bool isSuccess);
                        if (!isSuccess)
                        {
                            continue;
                        }

                        PointF[] facepoints = landmarks.ToArray();
                        if (normalise)
                        {
                            NormalizeFacepoints(facepoints);
                        }

                        SerializeFacepoints(writer, filename, ref facepoints);
                    }
                }
            }
        }
Пример #3
0
        /// <summary>
        /// Draws the stored outline onto a copy of the source image.
        /// </summary>
        /// <param name="color">Outline colour.</param>
        /// <param name="thickness">Outline thickness in pixels.</param>
        /// <returns>Bitmap of the image with the outline drawn on top.</returns>
        public Bitmap GetOverlayedImage(Color color, int thickness)
        {
            try
            {
                // Convert the float outline to integer points (truncating) and wrap
                // it as a single-contour collection for DrawContours.
                Point[] outlinePoints = _outline.ToArray()
                                        .Select(x => new Point((int)x.X, (int)x.Y))
                                        .ToArray();
                var contours = new VectorOfVectorOfPoint(new VectorOfPoint(outlinePoints));

                var overlay = _img.Clone();
                CvInvoke.DrawContours(overlay, contours, 0, new MCvScalar(color.B, color.G, color.R), thickness);

                return(overlay.Bitmap);
            }
            catch (Exception ex)
            {
                throw new Exception($"CV Exception: {ex.Message}", ex);
            }
        }
Пример #4
0
        /// <summary>
        /// Converts a vector of floating-point points to a vector of integer points,
        /// truncating (not rounding) each coordinate.
        /// </summary>
        /// <param name="input">Source vector of PointF values.</param>
        /// <returns>New vector containing the truncated integer points.</returns>
        private static VectorOfPoint VPointFToVPoint(VectorOfPointF input)
        {
            Point[] truncated = Array.ConvertAll(input.ToArray(),
                                                 p => new Point((int)p.X, (int)p.Y));
            return(new VectorOfPoint(truncated));
        }
Пример #5
0
        /// <summary>
        /// Finds the inner chessboard corners in the given image.
        /// </summary>
        /// <param name="image">Image to search for the chessboard pattern.</param>
        /// <param name="ChessboardSize">Number of inner corners per row and column.</param>
        /// <returns>The detected corner points.</returns>
        /// <exception cref="Exception">Thrown when no chessboard is found.</exception>
        public static PointF[] FindChessboardCorners(IInputArray image, Size ChessboardSize)
        {
            // FIX: dispose the native vector (it leaked on both the success and the
            // failure path before).
            using (VectorOfPointF corners = new VectorOfPointF())
            {
                if (CVI.FindChessboardCorners(image, ChessboardSize, corners))
                {
                    return(corners.ToArray());
                }
                throw new Exception("No Corners Found");
            }
        }
Пример #6
0
 /// <summary>
 /// Given the input frame, create input blob, run net.
 /// </summary>
 /// <param name="frame">The input image.</param>
 /// <param name="thresh">minimum confidence threshold to select a keypoint</param>
 /// <returns>A vector holding the x and y coordinates of each detected keypoint</returns>
 public PointF[] Estimate(IInputArray frame, float thresh = 0.5f)
 {
     using (InputArray inputFrame = frame.GetInputArray())
         using (VectorOfPointF keypoints = new VectorOfPointF())
         {
             // Run the keypoints model; detected points are written into 'keypoints'.
             DnnInvoke.cveDnnKeypointsModelEstimate(
                 _ptr,
                 inputFrame,
                 keypoints,
                 thresh);
             return(keypoints.ToArray());
         }
 }
Пример #7
0
        /// <summary>
        /// Applies the inverse of the given affine transform to the bounding-box
        /// anchor points and point P, storing the results in the Transformed* members.
        /// </summary>
        /// <param name="transformationMat">Forward affine transformation matrix.</param>
        public void CalculateNewPPositionReverseAffine(Mat transformationMat)
        {
            // FIX: dispose the native Mat and vectors (all three leaked per call before).
            using (var inverseTransformationMat = new Mat())
            using (var ogPoints = new VectorOfPointF())
            using (var newPoints = new VectorOfPointF())
            {
                CvInvoke.InvertAffineTransform(transformationMat, inverseTransformationMat);

                ogPoints.Push(new [] { this.PointBBoxA, this.PointBBoxB, this.PointP });
                CvInvoke.Transform(ogPoints, newPoints, inverseTransformationMat);
                var newPointsArray = newPoints.ToArray();

                this.TransformedPointBBoxA = newPointsArray[0];
                this.TransformedPointBBoxB = newPointsArray[1];
                this.TransformedPointP     = newPointsArray[2];
            }
        }
Пример #8
0
        //**********************************************************************************************************************************************************************************************

        /// <summary>
        /// Get a subset of the given vector, treating the vector as circular:
        /// when endIndex lies before startIndex the subset wraps past the end.
        /// </summary>
        /// <param name="vector">Vector to create the subset from</param>
        /// <param name="startIndex">Index of the first element that is part of the subset</param>
        /// <param name="endIndex">Index of the last element that is part of the subset</param>
        /// <returns>Subset of the vector</returns>
        public static VectorOfPointF GetSubsetOfVector(this VectorOfPointF vector, int startIndex, int endIndex)
        {
            PointF[] elements = vector.ToArray();
            int count = vector.Size;

            if (endIndex < startIndex)
            {
                endIndex += count;
            }

            // Pick the elements with wrap-around (modular) indexing instead of
            // building a tripled copy of the source array.
            PointF[] subset = new PointF[endIndex - startIndex + 1];
            for (int offset = 0; offset < subset.Length; offset++)
            {
                int sourceIndex = ((startIndex + offset) % count + count) % count;
                subset[offset] = elements[sourceIndex];
            }
            return(new VectorOfPointF(subset));
        }
Пример #9
0
        /// <summary>
        /// Processes one camera frame: optionally scales it down, locates (and caches)
        /// the lock target, then warps the frame to the target's perspective.
        /// </summary>
        /// <param name="input">Raw camera frame.</param>
        /// <returns>The perspective-warped frame, or null when no target is known.</returns>
        private Mat CamFrame(Mat input)
        {
            Mat output = null;
            var scaled = new Mat();

            // Check to see if we actually have to scale down and do it
            // (tolerance comparison because _scaleFactor is a float).
            if (Math.Abs(_scaleFactor - 1.0f) > .0001)
            {
                CvInvoke.Resize(input, scaled, _scaleSize);
            }
            else
            {
                // NOTE(review): this overwrites the 'new Mat()' allocated above,
                // leaking it — consider allocating lazily instead.
                scaled = input.Clone();
            }

            // If we don't have a target, find one
            if (_lockTarget == null)
            {
                _lockTarget = FindTarget(scaled);
                if (_lockTarget != null)
                {
                    // Persist the located target so it survives restarts.
                    LogUtil.Write("Target hit.");
                    DataUtil.SetItem <PointF[]>("LockTarget", _lockTarget.ToArray());
                }
                else
                {
                    LogUtil.Write("No target.");
                }
            }

            // If we do or we found one...crop it out
            if (_lockTarget != null)
            {
                // Warp the scaled frame so the target quadrilateral fills the output.
                var dPoints = _lockTarget.ToArray();
                var warpMat = CvInvoke.GetPerspectiveTransform(dPoints, _vectors);
                output = new Mat();
                CvInvoke.WarpPerspective(scaled, output, warpMat, _scaleSize);
                warpMat.Dispose();
            }

            scaled.Dispose();
            // Once we have a warped frame, we need to do a check every N seconds for letterboxing...
            return(output);
        }
Пример #10
0
        /// <summary>
        /// Builds the Delaunay triangulations for the current and next facial point
        /// sets. Runs only when both face arrays are available; results are stored in
        /// delaunayTrianglesCurr / delaunayTrianglesNext.
        /// </summary>
        private void CreateDelaunay()
        {
            // Delaunay
            if (facesArrNext != null && facesArrCurr != null)
            {
                // NOTE(review): these using blocks dispose the ffpCurr/ffpNext member
                // vectors when this method finishes — confirm they are re-created
                // before this method is invoked again.
                using (VectorOfPointF vpfCurr = ffpCurr)
                    using (VectorOfPointF vpfNext = ffpNext)
                    {
                        ptsCurr = vpfCurr.ToArray();
                        ptsNext = vpfNext.ToArray();

                        using (Subdiv2D subdivisionLeft = new Subdiv2D(ptsCurr))
                            using (Subdiv2D subdivisionRight = new Subdiv2D(ptsNext))
                            {
                                //Obtain the delaunay's triangulation from the set of points;
                                delaunayTrianglesCurr = subdivisionLeft.GetDelaunayTriangles();
                                delaunayTrianglesNext = subdivisionRight.GetDelaunayTriangles();
                            }
                    }
            }
        }
    /// <summary>
    /// Draws the detected facial landmarks (with their indexes) and the centre-of-mass
    /// anchor point onto the colour image for debugging. The image is returned
    /// unchanged when no face or no landmarks are found.
    /// </summary>
    /// <param name="grayImage">Grayscale image used for detection.</param>
    /// <param name="image">Colour image the debug markers are drawn on.</param>
    /// <returns>The colour image, possibly with markers drawn on it.</returns>
    public Image <Bgr, byte> DebugMarkFacialPoints(Image <Gray, byte> grayImage, Image <Bgr, byte> image)
    {
        var faceDetection = DetectFace(grayImage);

        if (!faceDetection.HasValue)
        {
            return(image);
        }

        VectorOfPointF landmarks = MarkFacialPoints(grayImage, faceDetection.Value, out bool foundLandmarks);

        if (!foundLandmarks)
        {
            return(image);
        }

        PointF[] points     = landmarks.ToArray();
        Bgr      pointColor = new Bgr(0, 255, 255);
        Bgr      labelColor = new Bgr(255, 255, 255);

        // Draw every landmark as a small circle with its index next to it.
        for (int index = 0; index < points.Length; index++)
        {
            PointF point = points[index];
            image.Draw(new CircleF(point, 1), pointColor, 1);
            image.Draw(index.ToString(),
                       new Point((int)point.X, (int)point.Y),
                       Emgu.CV.CvEnum.FontFace.HersheyPlain, 1.0,
                       labelColor, 1);
        }

        // Mark the centre of mass of all landmarks in red.
        PointF anchor = GetCenterOfMassPoint(points);
        image.Draw(new CircleF(anchor, 2), new Bgr(0, 0, 255), 1);

        return(image);
    }
    /// <summary>
    /// Detects a face in the grayscale image, extracts its landmarks and returns
    /// them normalised. Returns null when no face or no landmarks are found.
    /// </summary>
    /// <param name="grayImage">Grayscale image used for detection.</param>
    /// <returns>Normalised facial points, or null on failure.</returns>
    public PointF[] GetNormalisedFacepoints(Image <Gray, byte> grayImage)
    {
        var faceDetection = DetectFace(grayImage);

        if (!faceDetection.HasValue)
        {
            return(null);
        }

        VectorOfPointF landmarks = MarkFacialPoints(grayImage, faceDetection.Value, out bool foundLandmarks);

        if (!foundLandmarks)
        {
            return(null);
        }

        // Normalise in place and hand the array back to the caller.
        PointF[] points = landmarks.ToArray();
        NormalizeFacepoints(points);
        return(points);
    }
Пример #13
0
        /// <summary>
        /// Runs blob detection on the current search image, optionally draws the
        /// detected keypoints, optionally undistorts them back into pixel coordinates
        /// using the camera intrinsics, then raises OnBlobDetected with the points.
        /// Does nothing when no blobs are found.
        /// </summary>
        /// <param name="draw">Draw the detected keypoints onto the search image.</param>
        /// <param name="undistort">Undistort the keypoints with the camera model.</param>
        public void FindBlobs(bool draw, bool undistort)
        {
            _mKeyPoints = _blobDetector.Detect(_searchMat);

            if (_mKeyPoints.Length == 0)
            {
                return; // nothing detected — no event is raised
            }

            // FIX: dispose the native keypoint vector (it leaked on every call before).
            using (VectorOfKeyPoint keyPointVector = new VectorOfKeyPoint(_mKeyPoints))
            {
                if (draw)
                {
                    Features2DToolbox.DrawKeypoints(_searchMat, keyPointVector, _searchMat, _dColor);
                }

                _points = new PointF[keyPointVector.Size];
                for (int i = 0; i < keyPointVector.Size; i++)
                {
                    _points[i] = keyPointVector[i].Point;
                }
            }

            if (undistort)
            {
                // FIX: dispose the native point vectors (both leaked before).
                using (VectorOfPointF distorted = new VectorOfPointF(_points))
                using (VectorOfPointF undistorted = new VectorOfPointF())
                {
                    CvInvoke.UndistortPoints(distorted, undistorted, _cameraMatrix, _distCoeffs);
                    PointF[] pu = undistorted.ToArray();

                    // Map normalised coordinates back into pixels via the intrinsics.
                    for (int i = 0; i < pu.Length; i++)
                    {
                        _points[i].X = pu[i].X * (float)_fx + (float)_cx;
                        _points[i].Y = pu[i].Y * (float)_fy + (float)_cy;
                    }
                }
            }

            OnBlobDetected?.Invoke(new BlobDetectorEventArgs(_points, _deviceNum));
        }
Пример #14
0
        /// <summary>
        /// Obtains the list of Voronoi Facets
        /// </summary>
        /// <param name="idx">Indexes of the points whose facets are requested;
        /// null (the default) requests the facets for all points.</param>
        /// <returns>The list of Voronoi Facets</returns>
        public VoronoiFacet[] GetVoronoiFacets(int[] idx = null)
        {
            using (VectorOfInt vi = new VectorOfInt())
                using (VectorOfVectorOfPointF facetVec = new VectorOfVectorOfPointF())
                    using (VectorOfPointF centerVec = new VectorOfPointF())
                    {
                        if (idx != null)
                        {
                            vi.Push(idx);
                        }

                        // The native call fills facetVec with each facet's vertex
                        // polygon and centerVec with the matching facet centers.
                        CvInvoke.cveSubdiv2DGetVoronoiFacetList(_ptr, vi, facetVec, centerVec);
                        PointF[][] vertices = facetVec.ToArrayOfArray();
                        PointF[]   centers  = centerVec.ToArray();

                        // Pair each center with its vertex polygon (same order).
                        VoronoiFacet[] facets = new VoronoiFacet[centers.Length];
                        for (int i = 0; i < facets.Length; i++)
                        {
                            facets[i] = new VoronoiFacet(centers[i], vertices[i]);
                        }
                        return(facets);
                    }
        }
Пример #15
0
 /// <summary>
 /// Converts a vector of floating-point points to a vector of integer points,
 /// rounding each coordinate to the nearest integer.
 /// </summary>
 /// <param name="vectorOfPointF">Source vector of PointF values.</param>
 /// <returns>New vector of rounded integer points.</returns>
 public static VectorOfPoint VectorOfPointF2VectorOfPoint(VectorOfPointF vectorOfPointF)
 {
     PointF[] source  = vectorOfPointF.ToArray();
     Point[]  rounded = new Point[source.Length];
     for (int i = 0; i < source.Length; i++)
     {
         rounded[i] = Point.Round(source[i]);
     }
     return(new VectorOfPoint(rounded));
 }
Пример #16
0
        /// <summary>
        /// Grabs the current stereo camera frames, detects the calibration chessboard
        /// in each eye and, when found, solves the camera pose and projects the cube
        /// and axis overlay points. The left/right logic was previously duplicated
        /// verbatim; it is now shared via ProcessEye.
        /// </summary>
        private void ProcessFrame()
        {
            if (Ovrvision.imageDataLeft_Mat.Cols == 0 || Ovrvision.imageDataRight_Mat.Cols == 0)
            {
                // Camera views are not available yet.
                return;
            }
            _frame_L = Ovrvision.imageDataLeft_Mat;
            _frame_R = Ovrvision.imageDataRight_Mat;

            CvInvoke.CvtColor(_frame_L, _grayFrame_L, ColorConversion.Bgr2Gray);
            CvInvoke.CvtColor(_frame_R, _grayFrame_R, ColorConversion.Bgr2Gray);

            ProcessEye(_grayFrame_L, 0, ref imagePoints_L, ref imagePoints_axis_L, ref foundMarker_L);
            ProcessEye(_grayFrame_R, 1, ref imagePoints_R, ref imagePoints_axis_R, ref foundMarker_R);
        }

        /// <summary>
        /// Detects the chessboard in one eye's grayscale frame, refines the corners,
        /// estimates the camera pose and projects the overlay points. Clears the
        /// previous projections and the found flag when the board is not found.
        /// </summary>
        /// <param name="grayFrame">Grayscale frame of the eye to process.</param>
        /// <param name="eyeIndex">0 = left eye, 1 = right eye (forwarded to BuildViewMatrix).</param>
        /// <param name="imagePoints">Receives the projected cube points.</param>
        /// <param name="imagePointsAxis">Receives the projected axis points.</param>
        /// <param name="foundMarker">Set to true when the chessboard was found.</param>
        private void ProcessEye(IInputOutputArray grayFrame, int eyeIndex,
                                ref PointF[] imagePoints, ref PointF[] imagePointsAxis, ref bool foundMarker)
        {
            _find = CvInvoke.FindChessboardCorners(grayFrame, _patternSize, _corners, CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);

            if (_find) // chessboard found
            {
                // Refine corner positions for a more accurate pose estimate.
                CvInvoke.CornerSubPix(grayFrame, _corners, new Size(11, 11), new Size(-1, -1),
                                      new MCvTermCriteria(20, 0.001));
                CvInvoke.SolvePnP(objectList.ToArray(), _corners.ToArray(), _cameraMatrix_new, _distCoeffs_new, _rvecAR, _tvecAR);

                // Project the cube and axis points into the image. (The old code
                // allocated arrays that were immediately overwritten — removed.)
                imagePoints     = CvInvoke.ProjectPoints(cubePoints.ToArray(), _rvecAR, _tvecAR, _cameraMatrix_new, _distCoeffs_new);
                imagePointsAxis = CvInvoke.ProjectPoints(axisPoints.ToArray(), _rvecAR, _tvecAR, _cameraMatrix_new, _distCoeffs_new);

                foundMarker = true;

                // Recompute the view matrix for this eye.
                BuildViewMatrix(eyeIndex);
            }
            else
            {
                if (imagePoints != null)
                {
                    Array.Clear(imagePoints, 0, imagePoints.Length);
                }
                if (imagePointsAxis != null)
                {
                    Array.Clear(imagePointsAxis, 0, imagePointsAxis.Length);
                }

                foundMarker = false;
            }
        }
Пример #17
0
        /// <summary>
        /// Generates the camera-calibration matrix files: scans the selected folder
        /// for chessboard images, extracts the corner points, runs camera calibration
        /// and writes cameraMatrix.txt / distCoeffs.txt to the chosen output path.
        /// </summary>
        private void button1_Click(object sender, EventArgs e)
        {
            // Read the calibration parameters from the UI.
            board_size.Width  = Convert.ToInt16(textBox4.Text);
            board_size.Height = Convert.ToInt16(textBox5.Text);
            pic_n             = Convert.ToInt16(textBox3.Text);  // number of pictures
            pic_h             = Convert.ToInt16(textBox7.Text);  // square height
            pic_w             = Convert.ToInt16(textBox6.Text);  // square width
            board_N           = board_size.Width * board_size.Height;
            count_time        = Convert.ToInt16(textBox8.Text);  // max iterations
            accuracy          = Convert.ToDouble(textBox9.Text); // termination accuracy

            // Let the user pick the folder containing the calibration pictures.
            FolderBrowserDialog dialog = new FolderBrowserDialog();

            dialog.Description = "请选择文件路径";
            if (dialog.ShowDialog() == DialogResult.OK)
            {
                textBox2.Text = dialog.SelectedPath;
            }
            BDPicture_path = textBox2.Text.ToString();
            DirectoryInfo folder = new DirectoryInfo(BDPicture_path);

            image_count = 0;                                                 // number of processed pictures

            Mat[]                rotateMat     = new Mat[pic_n];             // rotation matrices
            Mat[]                transMat      = new Mat[pic_n];             // translation matrices
            MCvPoint3D32f[][]    object_points = new MCvPoint3D32f[pic_n][]; // object (world) coordinates
            List <MCvPoint3D32f> object_list   = new List <MCvPoint3D32f>();

            PointF[][] corner_count = new PointF[pic_n][];  // detected corner points per picture
            Mat        view_gray    = new Mat();

            // Scan and process the calibration-board pictures, oldest first.
            foreach (FileInfo file in folder.GetFiles("*.jpg").OrderBy(p => p.CreationTime))
            {
                try
                {
                    image_count++;
                    Image <Bgr, byte> imageInput = new Image <Bgr, byte>(new Bitmap(Image.FromFile(file.FullName)));
                    if (image_count == 1)
                    {
                        // Take the image size from the first picture.
                        image_size.Width  = imageInput.Cols;
                        image_size.Height = imageInput.Rows;
                        textBox1.AppendText(image_size.Width.ToString() + "\r\n" + image_size.Height.ToString() + "\r\n");
                    }
                    CvInvoke.CvtColor(imageInput, view_gray, ColorConversion.Rgb2Gray);
                    CvInvoke.FindChessboardCorners(view_gray, board_size, Npointsl, CalibCbType.AdaptiveThresh);

                    // PERF FIX: marshal the corner vector to an array once per image;
                    // the old code called Npointsl.ToArray() once per corner.
                    PointF[] detectedCorners = Npointsl.ToArray();
                    corner_count[image_count - 1] = new PointF[board_N];
                    for (int S = 0; S < board_N; S++)
                    {
                        corner_count[image_count - 1][S] = detectedCorners[S];
                    }
                    imageInput.Dispose();
                }
                catch
                {
                    // The picture could not be used (bad quality / no board found).
                    textBox1.AppendText("图片质量不佳:" + file.Name + "\r\n");
                }
                textBox1.AppendText(image_count.ToString() + "\r\n");
            }


            // Build the world coordinates of the board corners for every picture.
            for (T = 0; T < pic_n; T++)
            {
                object_list.Clear();
                for (i = 0; i < board_size.Height; i++)
                {
                    for (j = 0; j < board_size.Width; j++)
                    {
                        temple_points.X = j * pic_w;
                        temple_points.Y = i * pic_h;
                        temple_points.Z = 0;
                        object_list.Add(temple_points);
                    }
                }
                object_points[T] = object_list.ToArray();
            }

            CvInvoke.CalibrateCamera(object_points, corner_count, image_size, cameraMatrix, distCoeffs,
                                     CalibType.RationalModel, new MCvTermCriteria(count_time, accuracy), out rotateMat, out transMat);
            if (!ready)
            {
                MessageBox.Show("请选择矩阵输出路径!");
            }
            else
            {
                try
                {
                    // NOTE: the output path must not contain non-ASCII characters.
                    FileStorage storage_came = new FileStorage(@matrix_path + "//" + "cameraMatrix.txt", FileStorage.Mode.Write);
                    storage_came.Write(cameraMatrix);
                    storage_came.ReleaseAndGetString();
                    // distCoeffs: distortion coefficients (4, 5, 8, 12 or 14 elements;
                    // an empty vector means zero distortion).
                    FileStorage storage_dist = new FileStorage(@matrix_path + "//" + "distCoeffs.txt", FileStorage.Mode.Write);
                    storage_dist.Write(distCoeffs);
                    storage_dist.ReleaseAndGetString();
                    textBox1.AppendText("矩阵输出:" + matrix_path + "\r\n" + "-----------------------------------------------------" + "\r\n");
                    MessageBox.Show("标定成功!");
                    storage_came.Dispose();
                    storage_dist.Dispose();
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.Message);
                }
            }
            // FIX: dispose the grayscale working Mat; GC.Collect() removed
            // (forcing a collection in production code is an anti-pattern).
            view_gray.Dispose();
        }
Пример #18
0
        /// <summary>
        /// Builds a binary mask selecting the target-coloured pixels by intersecting
        /// RGB, HSV and chroma in-range masks, cleaning the result morphologically
        /// and filling the detected contours.
        /// </summary>
        /// <param name="Image">Input BGR image.</param>
        /// <returns>Binary mask; filled contour regions are 255.</returns>
        public Image <Gray, byte> Mask(Image <Bgr, byte> Image)
        {
            // Empirical channel thresholds, scaled from [0..1] fractions to byte
            // (255) or hue-degree (180) units.
            var lowR   = 0.0228 * 255;
            var highR  = 0.8876 * 255;
            var lowG   = 0.0515 * 255;
            var highG  = 0.9167 * 255;
            var lowB   = 0 * 255;
            var highB  = 0.3030 * 255;
            var lowH   = 0.0228 * 180;
            var highH  = 0.8876 * 180;
            var lowS   = 0.0515 * 255;
            var highS  = 0.9167 * 255;
            var lowV   = 0 * 255;
            var highV  = 0.3030 * 255;
            var lowRn  = 0.2088 * 255;
            var highRn = 0.5097 * 255;
            var lowGn  = 0.3726 * 255;
            var highGn = 0.6000 * 255;
            var lowBn  = 0 * 255;
            var highBn = 0.3468 * 255;


            #region Color_mask

            var hsvImage = Image.Convert <Hsv, byte>();
            var hsvMask  = hsvImage.InRange(new Hsv(lowH, lowS, lowV), new Hsv(highH, highS, highV));

            var cromaImage = ImageHelper.Bgr2Croma(Image);
            var cromaMask  = cromaImage.InRange(new Bgr(lowBn, lowGn, lowRn), new Bgr(highBn, highGn, highRn));

            var rgbMask = Image.InRange(new Bgr(lowB, lowG, lowR), new Bgr(highB, highG, highR));

            #endregion Color_mask

            // Intersect the three binary masks (multiplication keeps only pixels
            // that passed every colour test), then close small gaps morphologically.
            var combinedMasks = rgbMask.CopyBlank();
            CvInvoke.Multiply(rgbMask, hsvMask, combinedMasks);
            CvInvoke.Multiply(cromaMask, combinedMasks, combinedMasks);
            CvInvoke.Dilate(combinedMasks, combinedMasks, CvInvoke.GetStructuringElement(ElementShape.Ellipse, new Size(7, 7), new Point(-1, -1)), new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));
            CvInvoke.Erode(combinedMasks, combinedMasks, CvInvoke.GetStructuringElement(ElementShape.Ellipse, new Size(7, 7), new Point(-1, -1)), new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));

            // Fill every detected contour. (The previous per-contour convex-hull
            // computation was dead code — its result was never used because the
            // FillConvexPoly call was commented out — and has been removed.)
            using (Mat hierarchy = new Mat())
                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                {
                    CvInvoke.FindContours(combinedMasks, contours, hierarchy,
                                          RetrType.Ccomp, ChainApproxMethod.ChainApproxSimple);

                    for (int i = 0; i < contours.Size; i++)
                    {
                        CvInvoke.DrawContours(combinedMasks, contours, i, new MCvScalar(255), -1);
                    }
                }

            hsvImage.Dispose();
            hsvMask.Dispose();
            cromaImage.Dispose();
            cromaMask.Dispose();
            rgbMask.Dispose();
            return(combinedMasks);
        }
Пример #19
0
        /// <summary>
        /// Inserts the given points into the subdivision, computes the Delaunay
        /// triangulation and maps every triangle's vertex coordinates back to the
        /// indexes of the original points (pushed as one index triple per triangle
        /// into triangleIndexes, in the same order as GetDelaunayTriangles).
        /// </summary>
        /// <param name="img">Image the triangulation belongs to (size and animation).</param>
        /// <param name="subdiv">Subdivision that receives the points.</param>
        /// <param name="points">Input points to triangulate.</param>
        /// <param name="drawAnimated">Show the triangulation after each inserted point.</param>
        /// <param name="triangleIndexes">Receives one index triple per triangle.</param>
        private void CreateDelaunay(ref Mat img, ref Subdiv2D subdiv, ref VectorOfPointF points,
                                    bool drawAnimated, ref VectorOfVectorOfInt triangleIndexes)
        {
            PointF[] pointsArr = points.ToArray();
            foreach (PointF p in pointsArr)
            {
                subdiv.Insert(p);
                if (drawAnimated)
                {
                    // FIX: dispose the per-step clone (it leaked on every insert before).
                    Mat imgCopy = img.Clone();
                    DrawDelaunay(ref imgCopy, ref subdiv, new MCvScalar(255, 255, 255));
                    CvInvoke.Imshow("Delaunay Triangulation", imgCopy);
                    imgCopy.Dispose();
                }
            }

            // Unfortunately we don't get the triangles by their original point indexes,
            // only by vertex coordinates, so map each vertex back to its point index.
            Size      size = img.Size;
            Rectangle rect = new Rectangle(0, 0, size.Width, size.Height);

            Triangle2DF[] triangleList = subdiv.GetDelaunayTriangles();
            for (int i = 0; i < triangleList.Length; i++)
            {
                Triangle2DF t = triangleList[i];

                // BUG FIX: allocate a fresh index triple per triangle. The old code
                // reused one array, so triangles outside 'rect' pushed the PREVIOUS
                // triangle's stale indexes. A triple is still pushed for every
                // triangle so the output stays aligned with triangleList.
                int[] indArr = new int[3];

                PointF[] vertices = { t.V0, t.V1, t.V2 };

                bool insideImage = rect.Contains(new Point((int)vertices[0].X, (int)vertices[0].Y)) &&
                                   rect.Contains(new Point((int)vertices[1].X, (int)vertices[1].Y)) &&
                                   rect.Contains(new Point((int)vertices[2].X, (int)vertices[2].Y));
                if (insideImage)
                {
                    for (int j = 0; j < 3; j++)
                    {
                        for (int k = 0; k < points.Size; k++)
                        {
                            // Match a vertex to its source point with a 1px tolerance.
                            if (Math.Abs(vertices[j].X - points[k].X) < 1.0 &&
                                Math.Abs(vertices[j].Y - points[k].Y) < 1)
                            {
                                indArr[j] = k;
                            }
                        }
                    }
                }

                // FIX: dispose the native vector after Push copies its contents
                // (the old per-iteration vectors leaked).
                using (VectorOfInt ind = new VectorOfInt(indArr))
                {
                    triangleIndexes.Push(ind);
                }
            }
        }
Пример #20
0
        /// <summary>
        /// Frame-processing loop: while the capture handle is valid, pulls a frame,
        /// detects ArUco markers on a working copy, draws the detections, optionally
        /// estimates and draws each marker's pose (only when the camera matrix and
        /// distortion coefficients are available), accumulates marker corner sums into
        /// markersX/markersY, and displays the undistorted frame in an "out" window.
        /// </summary>
        private void ProcessFrame()
        {
            // Run until the capture disappears or its native handle becomes invalid.
            while (_capture != null && _capture.Ptr != IntPtr.Zero)
            {
                // NOTE(review): Retrieve is called without a preceding Grab — presumably
                // the capture delivers frames this way here; confirm against the caller.
                _capture.Retrieve(_frame, 0);
                _frame.CopyTo(_frameCopy);

                // Detection outputs are native vectors; dispose them every iteration.
                using (VectorOfInt ids = new VectorOfInt())
                    using (VectorOfVectorOfPointF corners = new VectorOfVectorOfPointF())
                        using (VectorOfVectorOfPointF rejected = new VectorOfVectorOfPointF())
                        {
                            //DetectorParameters p = DetectorParameters.GetDefault();
                            // Detect markers of the configured dictionary in the frame copy.
                            ArucoInvoke.DetectMarkers(_frameCopy, ArucoDictionary, corners, ids, _detectorParameters, rejected);

                            if (ids.Size > 0)
                            {
                                //ArucoInvoke.RefineDetectedMarkers(_frameCopy, ArucoBoard, corners, ids, rejected, null, null, 10, 3, true, null, _detectorParameters);
                                //cameraButton.Text = "Calibrate camera";
                                // Outline every detected marker in green.
                                ArucoInvoke.DrawDetectedMarkers(_frameCopy, corners, ids, new MCvScalar(0, 255, 0));
                                // Pose estimation needs a calibrated camera.
                                if (!_cameraMatrix.IsEmpty && !_distCoeffs.IsEmpty)
                                {
                                    ArucoInvoke.EstimatePoseSingleMarkers(corners, markersLength, _cameraMatrix, _distCoeffs, rvecs, tvecs);
                                    for (int i = 0; i < ids.Size; i++)
                                    {
                                        // Row i of rvecs/tvecs holds the pose of marker i;
                                        // copy it into plain vectors for DrawAxis.
                                        using (Mat rvecMat = rvecs.Row(i))
                                            using (Mat tvecMat = tvecs.Row(i))
                                                using (VectorOfDouble rvec = new VectorOfDouble())
                                                    using (VectorOfDouble tvec = new VectorOfDouble())
                                                    {
                                                        double[] values = new double[3];
                                                        rvecMat.CopyTo(values);
                                                        rvec.Push(values);
                                                        tvecMat.CopyTo(values);
                                                        tvec.Push(values);
                                                        // Axis length is half the marker edge length.
                                                        ArucoInvoke.DrawAxis(_frameCopy, _cameraMatrix, _distCoeffs, rvec, tvec,
                                                                             markersLength * 0.5f);
                                                    }
                                    }
                                }
                                // NOTE(review): counterX/counterY accumulate over ALL markers
                                // without being reset per marker, yet are divided by 4 (corners
                                // per marker). With more than one marker markersX/markersY are
                                // the summed centroid, not a per-marker one — confirm intent.
                                float counterX = 0, counterY = 0;
                                int   count = corners.Size;
                                for (int i = 0; i < count; ++i)
                                {
                                    using (VectorOfPointF corner = corners[i])
                                    {
                                        PointF[] cor = corner.ToArray();
                                        for (int j = 0; j < cor.Length; j++)
                                        {
                                            //Console.WriteLine(cor[j].X);
                                            counterX += cor[j].X;
                                            counterY += cor[j].Y;
                                        }
                                        markersX = counterX / 4;
                                        markersY = counterY / 4;
                                    }
                                }
                            }
                        }
                // Show the undistorted annotated frame; WaitKey pumps the HighGui loop.
                CvInvoke.Undistort(_frameCopy, _output, _cameraMatrix, _distCoeffs);
                CvInvoke.Imshow("out", _output);
                CvInvoke.WaitKey(10);
                //Console.WriteLine("markersX is " + markersX);
                // Console.WriteLine("markersY is " + markersY);
            }
            //else
            //{
            // Reached once the loop condition fails (capture missing/invalid).
            Console.WriteLine("VideoCapture was not created");
            //}
        }
Пример #21
0
        /// <summary>
        /// Calibrates the camera from a single chessboard image: detects the 9x6 inner
        /// corners, refines them to sub-pixel accuracy, runs CalibrateCamera, then shows
        /// the image with drawn corners alongside the undistorted result.
        /// </summary>
        /// <param name="filename">Path of the chessboard photograph.</param>
        static void CameraCalibrate(string filename)
        {
            const int   N            = 1;     // number of views used for calibration
            const int   Nx           = 9;     // inner corners per row
            const int   Ny           = 6;     // inner corners per column
            const float square_size  = 20.0f; // chessboard square edge length (world units)
            const int   Ncorners     = Nx * Ny;
            var         pattern_size = new Size(Nx, Ny);

            var color_image = new Image <Bgr, byte>(filename);
            var gray_image  = color_image.Convert <Gray, byte>();

            // Corner coordinates in the world (object) coordinate system, z = 0 plane.
            var object_corners = new MCvPoint3D32f[N][];

            object_corners[0] = new MCvPoint3D32f[Ncorners];
            var k = 0;

            for (int r = 0; r < Ny; ++r)
            {
                for (int c = 0; c < Nx; ++c)
                {
                    object_corners[0][k++] =
                        new MCvPoint3D32f(
                            10.0f + square_size * (c + 1),
                            5.0f + square_size * (r + 1),
                            0.0f);
                }
            }

            // Corner coordinates in the image coordinate system.
            var image_corners    = new PointF[N][];
            var detected_corners = new VectorOfPointF();

            // BUG FIX: the original ignored the detection result; an undetected or
            // partially detected board would feed an empty/short corner list into
            // FindCornerSubPix and CalibrateCamera and crash.
            bool found = CvInvoke.FindChessboardCorners(gray_image, pattern_size, detected_corners);
            if (!found || detected_corners.Size != Ncorners)
            {
                Console.WriteLine("Chessboard pattern not found in " + filename);
                return;
            }

            image_corners[0] = detected_corners.ToArray();
            // Refine the detected corners to sub-pixel accuracy.
            gray_image.FindCornerSubPix(image_corners, new Size(5, 5), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

            var cameraMatrix     = new Mat();
            var distortionCoeffs = new Mat();

            // CalibrateCamera allocates the rotation/translation arrays itself, so the
            // original's pre-allocation of both out arrays was dead work.
            CvInvoke.CalibrateCamera(
                object_corners,
                image_corners,
                gray_image.Size,
                cameraMatrix,
                distortionCoeffs,
                CalibType.RationalModel,
                new MCvTermCriteria(30, 0.1),
                out Mat[] rotationVectors,
                out Mat[] translationVectors);

            var calibrated_image = new Image <Bgr, byte>(color_image.Size);

            // Show the detection and the undistorted result side by side.
            CvInvoke.Undistort(color_image, calibrated_image, cameraMatrix, distortionCoeffs);
            CvInvoke.DrawChessboardCorners(color_image, pattern_size, detected_corners, true);
            CvInvoke.Imshow("chessboard", color_image);
            CvInvoke.Imshow("calibrated", calibrated_image);
            CvInvoke.WaitKey();
        }
Пример #22
0
        /// <summary>
        /// Calibrates the camera from all *.jpg frames in /home/dietpi/: finds 8x6
        /// chessboard corners in each frame, refines them, runs CalibrateCamera, and
        /// persists the serialized camera matrix ("K") and distortion coefficients ("D").
        /// </summary>
        public static void ProcessFrames()
        {
            var   cornersObjectList = new List <MCvPoint3D32f[]>();
            var   cornersPointsList = new List <PointF[]>();
            var   width             = 8;                       // inner corners per row (squares in width - 1)
            var   height            = 6;                       // inner corners per column (squares in height - 1)
            // NOTE(review): width*height as a "square size" looks like a typo for a
            // physical edge length; a uniform scale does not change the intrinsics,
            // so the value is kept to preserve behavior.
            float squareSize        = width * height;
            var   patternSize       = new Size(width, height); // chessboard pattern to detect

            Mat[] _rvecs, _tvecs;

            var frameArrayBuffer = new List <Mat>();

            var cameraMatrix = new Mat(3, 3, DepthType.Cv64F, 1);
            var distCoeffs   = new Mat(8, 1, DepthType.Cv64F, 1);

            // Glob our frames from the static dir.
            string[] filePaths = Directory.GetFiles(@"/home/dietpi/", "*.jpg");
            var      frames    = filePaths.Select(path => CvInvoke.Imread(path)).ToList();

            LogUtil.Write("We have " + frames.Count + " frames.");
            // BUG FIX: an empty directory crashed later at frames[0].Size.
            if (frames.Count == 0)
            {
                LogUtil.Write("No frames found; aborting calibration.");
                return;
            }

            var fc = 0;
            foreach (var frame in frames)
            {
                var grayFrame = new Mat();

                // Convert to grayscale for detection.
                CvInvoke.CvtColor(frame, grayFrame, ColorConversion.Bgr2Gray);

                // BUG FIX: the original allocated a fresh VectorOfPointF every loop
                // iteration and leaked the previous native vector; dispose it instead.
                using (var corners = new VectorOfPointF())
                {
                    var boardFound = CvInvoke.FindChessboardCorners(grayFrame, patternSize, corners,
                                                                    CalibCbType.AdaptiveThresh | CalibCbType.FastCheck | CalibCbType.NormalizeImage);
                    if (boardFound)
                    {
                        LogUtil.Write("Found board in frame " + fc);
                        // Refine corner positions to sub-pixel accuracy.
                        CvInvoke.CornerSubPix(grayFrame, corners, new Size(11, 11), new Size(-1, -1),
                                              new MCvTermCriteria(30, 0.1));
                        frameArrayBuffer.Add(grayFrame);
                    }
                    else
                    {
                        // BUG FIX: release gray frames that won't be used for mapping.
                        grayFrame.Dispose();
                    }
                }
                fc++;
            }

            LogUtil.Write("We have " + frameArrayBuffer.Count + " frames to use for mapping.");

            // Real-world (object-space) chessboard corner coordinates: identical for
            // every frame, so build them once (the original rebuilt them per frame).
            var objectList = new List <MCvPoint3D32f>();
            for (int i = 0; i < height; i++)
            {
                for (int j = 0; j < width; j++)
                {
                    objectList.Add(new MCvPoint3D32f(j * squareSize, i * squareSize, 0.0F));
                }
            }
            var objectCorners = objectList.ToArray();

            // Re-detect on the frames where a board was found to collect image points.
            foreach (var frame in frameArrayBuffer)
            {
                var frameVect = new VectorOfPointF();
                var found = CvInvoke.FindChessboardCorners(frame, patternSize, frameVect,
                                                           CalibCbType.AdaptiveThresh
                                                           | CalibCbType.FastCheck | CalibCbType.NormalizeImage);
                // BUG FIX: the original ignored the result; a failed re-detection would
                // feed an empty point set into CornerSubPix/CalibrateCamera.
                if (!found)
                {
                    frameVect.Dispose();
                    continue;
                }
                // For accuracy.
                CvInvoke.CornerSubPix(frame, frameVect, new Size(11, 11), new Size(-1, -1),
                                      new MCvTermCriteria(30, 0.1));

                cornersObjectList.Add(objectCorners);
                cornersPointsList.Add(frameVect.ToArray());
                frameVect.Dispose();
            }

            // BUG FIX: guard against having no usable detections before calibrating.
            if (cornersPointsList.Count == 0)
            {
                LogUtil.Write("No chessboard detections; skipping calibration.");
                return;
            }

            // The reprojection error should be as close to 0 as possible.
            double error = CvInvoke.CalibrateCamera(cornersObjectList.ToArray(), cornersPointsList.ToArray(),
                                                    frames[0].Size,
                                                    cameraMatrix, distCoeffs, CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out _rvecs,
                                                    out _tvecs);

            LogUtil.Write("Correction error: " + error);
            var sk = JsonConvert.SerializeObject(cameraMatrix);
            var sd = JsonConvert.SerializeObject(distCoeffs);

            LogUtil.Write("Camera matrix: " + sk);
            LogUtil.Write("Dist coefficient: " + sd);
            DataUtil.SetItem("K", sk);
            DataUtil.SetItem("D", sd);
        }
Пример #23
0
        /// <summary>
        /// Stereo-calibration capture step: detects the chessboard in both camera frames,
        /// refines the corners, optionally buffers the corner arrays (when acquisition
        /// has been started), draws the detections, and updates the preview picture boxes.
        /// NOTE(review): the method is declared async but contains no await, so it runs
        /// synchronously and completes immediately — confirm whether that is intended.
        /// </summary>
        /// <param name="frame_S1">Current frame from the first (left) camera.</param>
        /// <param name="frame_S2">Current frame from the second (right) camera.</param>
        private async Task SaveImageForCalibration(Mat frame_S1, Mat frame_S2)
        {
            Image <Bgr, byte>  frameImage_S1;
            Image <Gray, Byte> Gray_frame_S1;
            Image <Bgr, byte>  frameImage_S2;
            Image <Gray, Byte> Gray_frame_S2;

            // Wrap both frames as color images and derive grayscale copies for detection.
            frameImage_S1 = new Image <Bgr, byte>(frame_S1.Bitmap);
            Gray_frame_S1 = frameImage_S1.Convert <Gray, Byte>();
            frameImage_S2 = new Image <Bgr, byte>(frame_S2.Bitmap);
            Gray_frame_S2 = frameImage_S2.Convert <Gray, Byte>();

            VectorOfPointF cornerLeft  = new VectorOfPointF();
            VectorOfPointF cornerRight = new VectorOfPointF();

            // Find the chessboard in both images.
            CvInvoke.FindChessboardCorners(Gray_frame_S1, patternModel.patternSize, cornerLeft, CalibCbType.AdaptiveThresh);
            CvInvoke.FindChessboardCorners(Gray_frame_S2, patternModel.patternSize, cornerRight, CalibCbType.AdaptiveThresh);

            // Proceed only when corners were detected in both frames.
            if (cornerLeft.Size > 0 && cornerRight.Size > 0) //chess board found in one of the frames?
            {
                PointF[] corners_Left;
                PointF[] corners_Right;

                corners_Left  = cornerLeft.ToArray();
                corners_Right = cornerRight.ToArray();

                // Make measurements more accurate with sub-pixel corner refinement.
                Gray_frame_S1.FindCornerSubPix(new PointF[1][] { corners_Left }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.01));
                Gray_frame_S2.FindCornerSubPix(new PointF[1][] { corners_Right }, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.01));

                // If the go button has been pressed, start acquiring frames;
                // otherwise we just display the points.
                if (patternModel.start_Flag)
                {
                    // Save the calculated points into the acquisition buffers.
                    corners_points_Left[buffer_savepoint]  = corners_Left;
                    corners_points_Right[buffer_savepoint] = corners_Right;
                    buffer_savepoint++; // advance the buffer position

                    // When the buffer is full, switch to the intrinsics-computation mode.
                    if (buffer_savepoint == buffer_length)
                    {
                        currentMode = ECalibrationMode.Caluculating_Stereo_Intrinsics;                                    //buffer full
                    }
                    // Show the state of the buffer in the window title.
                    _winForm.UpdateTitle("Form1: Buffer " + buffer_savepoint.ToString() + " of " + buffer_length.ToString());
                }

                // Draw the results: first corner as a circle, then connected segments.
                frameImage_S1.Draw(new CircleF(corners_Left[0], 3), new Bgr(Color.Yellow), 10);
                frameImage_S2.Draw(new CircleF(corners_Right[0], 3), new Bgr(Color.Yellow), 10);
                for (int i = 1; i < corners_Left.Length; i++)
                {
                    // Left camera overlay.
                    frameImage_S1.Draw(new LineSegment2DF(corners_Left[i - 1], corners_Left[i]), patternModel.line_colour_array[i], 10);
                    frameImage_S1.Draw(new CircleF(corners_Left[i], 3), new Bgr(Color.Yellow), 10);
                    // Right camera overlay.
                    frameImage_S2.Draw(new LineSegment2DF(corners_Right[i - 1], corners_Right[i]), patternModel.line_colour_array[i], 10);
                    frameImage_S2.Draw(new CircleF(corners_Right[i], 3), new Bgr(Color.Yellow), 10);
                }

                // Push the annotated frames to the UI.
                _winForm.pictureBox1.Image = frameImage_S1.Bitmap;
                _winForm.pictureBox2.Image = frameImage_S2.Bitmap;
            }
        }
        /// <summary>
        /// Streams FER2013 rows from a CSV file, decodes each pixel string into a gray
        /// image, optionally localises the face and normalises the fitted landmarks,
        /// and writes the resulting facepoints to the output CSV. Rows where face
        /// detection or landmark fitting fails are skipped.
        /// </summary>
        static private void RunWithCsv(CascadeClassifier faceDetector, FacemarkLBF facemark, string inputFilepath, string outputFilepath, Size imageSize)
        {
            using (var csvreader = new CsvReader(new StreamReader(inputFilepath)))
            using (var csvwriter = new CsvWriter(new StreamWriter(outputFilepath, false)))
            {
                // Emit the header for the output schema before any data rows.
                csvwriter.WriteHeader <CsvFer2013ModRow>();
                csvwriter.NextRecord();

                // EnumerateRecords reuses this single instance for every row.
                var template  = new CsvFer2013Row();
                int rowNumber = 0;

                foreach (var row in csvreader.EnumerateRecords(template))
                {
                    rowNumber++;

                    Image <Gray, byte> image = StringToImage(row.pixels, imageSize);

                    // Default to the whole frame; optionally narrow to a detected face.
                    Rectangle face = image.ROI;
                    if (localiseFace)
                    {
                        Rectangle?detection = DetectFace(faceDetector, image);
                        if (!detection.HasValue)
                        {
                            continue; // no face in this row -> skip it
                        }
                        face = detection.Value;
                    }

                    VectorOfPointF landmarks = MarkFacialPoints(facemark, image, face, out bool fitted);
                    if (!fitted)
                    {
                        continue; // landmark fitting failed -> skip the row
                    }

                    PointF[] facepoints = landmarks.ToArray();
                    if (normalise)
                    {
                        NormalizeFacepoints(facepoints);
                    }

                    SerializeFacepointsWithCsv(csvwriter, row, rowNumber, ref facepoints);

                    if (verbose)
                    {
                        Console.Write("\rRecord No: {0}", rowNumber);
                    }
                }

                if (verbose)
                {
                    Console.WriteLine();
                }
            }
        }
Пример #25
0
        /// <summary>
        /// Camera calibration event handler: reads calibration image paths from
        /// "calibdata1.txt", extracts and sub-pixel-refines the 4x6 chessboard corners
        /// of every image, runs CalibrateCamera to recover the intrinsic matrix and
        /// distortion coefficients, then shows each image beside its undistorted version.
        /// </summary>
        private void CalibraCamera(object sender, EventArgs e)
        {
            // Phase 1: extract corner points from every listed calibration image.
            Console.WriteLine("开始提取角点");
            int            image_count = 0;          // number of images read so far
            Size           image_size  = new Size(); // frame size, taken from the first image
            int            width       = 4;
            int            height      = 6;
            Size           board_size  = new Size(4, 6);                       // inner corners per row / column
            int            CornerNum   = board_size.Width * board_size.Height; // corners per image (4 * 6 = 24)
            int            nImage      = 14;                                   // expected number of calibration images
            VectorOfPointF Npointsl    = new VectorOfPointF();

            string filename;
            int    count = 0; // index of the last processed image

            Console.WriteLine("count = " + count);
            MCvPoint3D32f[][] object_points = new MCvPoint3D32f[nImage][]; // 3-D board corners per image
            PointF[][]        corner_count  = new PointF[nImage][];        // detected 2-D corners per image

            // BUG FIX: dispose the StreamReader deterministically (the original leaked it).
            using (StreamReader sin = new StreamReader("calibdata1.txt"))
            {
                while ((filename = sin.ReadLine()) != null)
                {
                    image_count++;
                    Console.WriteLine("image_count = " + image_count);
                    Image <Bgr, byte> imageInput = new Image <Bgr, byte>(new Bitmap(Image.FromFile(filename)));
                    pictureBox1.Image = imageInput.ToBitmap();

                    if (image_count == 1) // the first image defines the frame size
                    {
                        Console.WriteLine("<---成功读取第一张图片--->");
                        image_size.Width  = imageInput.Cols;
                        image_size.Height = imageInput.Rows;
                        Console.WriteLine("image_size.Width  = " + image_size.Width);
                        Console.WriteLine("image_size.Hright = " + image_size.Height);
                    }

                    // Detect the inner chessboard corners on a grayscale copy.
                    Mat view_gray = new Mat();
                    CvInvoke.CvtColor(imageInput, view_gray, ColorConversion.Rgb2Gray);
                    CvInvoke.FindChessboardCorners(view_gray, board_size, Npointsl, CalibCbType.AdaptiveThresh);

                    // BUG FIX: call ToArray() once per image instead of once per corner
                    // (was O(n^2)), and use CornerNum instead of the magic number 24.
                    PointF[] detected = Npointsl.ToArray();
                    corner_count[image_count - 1] = new PointF[CornerNum];
                    for (int i = 0; i < CornerNum; i++)
                    {
                        corner_count[image_count - 1][i] = detected[i];
                    }

                    // Refine the corner estimates to sub-pixel accuracy to reduce
                    // calibration error.
                    view_gray.ToImage <Gray, byte>().FindCornerSubPix(corner_count, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));
                    Console.WriteLine("第" + image_count + "个图片已经被标记角点");

                    // Draw the detected corners — for visual verification only.
                    CvInvoke.DrawChessboardCorners(view_gray, board_size, Npointsl, true);
                    pictureBox2.Image = view_gray.ToImage <Bgr, byte>().ToBitmap();
                    count             = image_count;
                    CvInvoke.WaitKey(500); // pause 0.5 s so each frame is visible
                }
            }
            Console.WriteLine("角点提取完成!!!");

            // Phase 2: camera calibration.
            Console.WriteLine("开始标定");
            // Intrinsic camera matrix.
            Mat cameraMatrix = new Mat(3, 3, DepthType.Cv32F, 1);
            // Distortion coefficients: k1, k2, p1, p2, k3.
            Mat distCoeffs = new Mat(1, 5, DepthType.Cv32F, 1);

            // Initialize the 3-D board corner coordinates (z = 0 plane) for every image.
            List <MCvPoint3D32f> object_list = new List <MCvPoint3D32f>();

            for (int k = 0; k < nImage; k++)
            {
                object_list.Clear();
                for (int i = 0; i < height; i++)
                {
                    for (int j = 0; j < width; j++)
                    {
                        object_list.Add(new MCvPoint3D32f(j, i, 0f));
                    }
                }
                object_points[k] = object_list.ToArray();
            }

            // CalibrateCamera fills cameraMatrix/distCoeffs and produces one rotation
            // and one translation vector per image. BUG FIX: the per-image out arrays
            // are allocated by the call itself, so the original pre-allocation loops
            // were dead work and have been removed.
            CvInvoke.CalibrateCamera(object_points, corner_count, image_size, cameraMatrix,
                                     distCoeffs, CalibType.RationalModel, new MCvTermCriteria(30, 0.1), out Mat[] rotateMat, out Mat[] transMat);
            Console.WriteLine("标定完成");

            // Phase 3: use the calibration result to undistort every input image.
            Console.WriteLine("保存矫正图像");

            // BUG FIX: dispose this reader as well.
            using (StreamReader sin_test = new StreamReader("calibdata1.txt"))
            {
                string filename_test;

                for (int i = 0; i < nImage; i++)
                {
                    if ((filename_test = sin_test.ReadLine()) != null)
                    {
                        Image <Bgr, byte> imageSource = new Image <Bgr, byte>(new Bitmap(Image.FromFile(filename_test)));
                        Image <Bgr, byte> newimage    = imageSource.Clone();
                        CvInvoke.Undistort(imageSource, newimage, cameraMatrix, distCoeffs);
                        // Show original (left) and undistorted (right) side by side.
                        pictureBox3.Image = imageSource.ToBitmap();
                        pictureBox4.Image = newimage.ToBitmap();
                        CvInvoke.WaitKey(500);
                    }
                }
            }
            Console.WriteLine("标定结束!");
        }