Example #1
 /// <summary>
 /// Evaluate the camera intrinsic calibration results
 /// </summary>
 private void btnEst_Click(object sender, EventArgs e)
 {
     try
     {
         for (int i = 0; i < left_corners_set.Size; i++)         //compute the left camera's calibration error
         {
             objtemp = objectpoints[i];
             CvInvoke.ProjectPoints(objtemp, leftRvecs[i], leftTvecs[i],
                                    leftCamMatrix, leftDistCoeffs, projecttemp);  //back-project the 3D points into the image
             imgtemp = left_corners_set[i];
             //convert to matrix storage so CvInvoke.Norm can consume the points; needs rethinking!!!
             Image<Bgr, float> prot = new Image<Bgr, float>(projecttemp.Size, 1);
             Image<Bgr, float> imgt = new Image<Bgr, float>(imgtemp.Size, 1);
             for (int j = 0; j < projecttemp.Size; j++)
             {
                 prot.Data[0, j, 0] = projecttemp[j].X;
                 prot.Data[0, j, 1] = projecttemp[j].Y;
                 prot.Data[0, j, 2] = 0;
             }
             for (int j = 0; j < imgtemp.Size; j++)
             {
                 imgt.Data[0, j, 0] = imgtemp[j].X;
                 imgt.Data[0, j, 1] = imgtemp[j].Y;
                 imgt.Data[0, j, 2] = 0;
             }
             err        = CvInvoke.Norm(prot, imgt, NormType.L2); //compute the reprojection error
             total_err += (err /= (patternSize.Width * patternSize.Height));
         }
         for (int i = 0; i < right_corners_set.Size; i++)                 //compute the right camera's calibration error
         {
             objtemp = objectpoints[i];
             CvInvoke.ProjectPoints(objtemp, rightRvecs[i], rightTvecs[i],
                                    rightCamMatrix, rightDistCoeffs, projecttemp);  //back-project the 3D points into the image
             imgtemp = right_corners_set[i];
             //convert to matrix storage so CvInvoke.Norm can consume the points; needs rethinking!!!
             Image<Bgr, float> prot = new Image<Bgr, float>(projecttemp.Size, 1);
             Image<Bgr, float> imgt = new Image<Bgr, float>(imgtemp.Size, 1);
             for (int j = 0; j < projecttemp.Size; j++)
             {
                 prot.Data[0, j, 0] = projecttemp[j].X;
                 prot.Data[0, j, 1] = projecttemp[j].Y;
                 prot.Data[0, j, 2] = 0;
             }
             for (int j = 0; j < imgtemp.Size; j++)
             {
                 imgt.Data[0, j, 0] = imgtemp[j].X;
                 imgt.Data[0, j, 1] = imgtemp[j].Y;
                 imgt.Data[0, j, 2] = 0;
             }
             err        = CvInvoke.Norm(prot, imgt, NormType.L2); //compute the reprojection error
             total_err += (err /= (patternSize.Width * patternSize.Height));
         }
         total_err     /= (left_corners_set.Size + right_corners_set.Size);  //average over all views
         Data.LogString = "Average intrinsic calibration error: " + total_err.ToString() + "\nPlease save the data~";
     }
     catch (Exception exc)
     {
         Data.LogString = exc.Message;
     }
 }
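The Image<Bgr, float> detour above exists only to feed CvInvoke.Norm. A minimal alternative sketch, reusing the example's own field names (which are assumptions here), computes the per-view RMS reprojection error directly from the point arrays:

 double totalErr = 0;
 for (int i = 0; i < left_corners_set.Size; i++)
 {
     using (var projected = new VectorOfPointF())
     {
         CvInvoke.ProjectPoints(objectpoints[i], leftRvecs[i], leftTvecs[i],
                                leftCamMatrix, leftDistCoeffs, projected);
         PointF[] proj = projected.ToArray();
         PointF[] meas = left_corners_set[i].ToArray();
         double sumSq = 0;
         for (int j = 0; j < proj.Length; j++)
         {
             double dx = proj[j].X - meas[j].X;
             double dy = proj[j].Y - meas[j].Y;
             sumSq += dx * dx + dy * dy;
         }
         totalErr += Math.Sqrt(sumSq / proj.Length);  //RMS error for this view
     }
 }
 double meanErr = totalErr / left_corners_set.Size;   //average over all views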
Example #2
        /// <summary>
        /// Opens the chessboard images and extracts the corner points
        /// </summary>
        /// <param name="fileList">list of file names of the chessboard images</param>
        /// <param name="boardSize">number of inner chessboard corners (x-1, y-1)</param>
        /// <returns></returns>
        private int AddChessboardPoints(List <string> fileList, Size boardSize)
        {
            //PointF[][] imageCorners = new PointF[Frame_array_buffer.Length][];
            //points on the chessboard
            //PointF[] imageCorners;
            //Emgu.CV.IOutputArray imageCorners;

            //positions of the chessboard corners in 3D space
            MCvPoint3D32f[] objectCorners = new MCvPoint3D32f[boardSize.Height * boardSize.Width];

            //3D Scene Points:
            //initialize the inner chessboard corners in 3D space: (x,y,z) = (i,j,0)
            for (int i = 0; i < boardSize.Height; i++)
            {
                for (int j = 0; j < boardSize.Width; j++)
                {
                    objectCorners[i * boardSize.Width + j] = new MCvPoint3D32f(i, j, 0.0f);
                }
            }

            //2D body obrázka:
            Image<Gray, Byte> image;  //image used to load each chessboard picture
            int successes = 0;        //number of images in which the chessboard was found

            //List<VectorOfPointF> corners = new List<VectorOfPointF>();
            GC.Collect();
            //for every input image - viewing angle
            for (int i = 0; i < fileList.Count; i++)
            {
                var cornerPoints = new VectorOfPointF();                                             //vector of the chessboard corner points
                image = new Image<Gray, Byte>(fileList[i]);                                          //load the image from the list
                //imageCorners = null; //CameraCalibration.FindChessboardCorners(image, boardSize, CALIB_CB_TYPE.DEFAULT);
                bool found = CvInvoke.FindChessboardCorners(image, boardSize, cornerPoints, CalibCbType.Default); //detect the chessboard corners

                if (!found) //FindChessboardCorners returns a bool; the output vector itself is never null
                {
                    continue;                       //no corners found in the current image, take the next one    //imageCorners
                }
                //corners.Add(cornerPoints);

                //image.FindCornerSubPix( imageCorners, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                //refine the corner points to sub-pixel accuracy
                CvInvoke.CornerSubPix(image, cornerPoints, new Size(11, 11), new Size(-1, -1), new MCvTermCriteria(30, 0.1));

                //CvInvoke.cvFindCornerSubPix(image, imageCorners,
                //    boardSize.Height * boardSize.Width,
                //    new Size(5, 5), new Size(-1, -1),
                //    new MCvTermCriteria(30, 0.1));

                //if enough points were found on the image (9*6), add them to the lists
                if (cornerPoints.Size == boardSize.Height * boardSize.Width)  //imageCorners.Length
                {
                    //call the method that appends the points to the lists
                    AddPoints(cornerPoints.ToArray(), objectCorners);
                    successes++;
                }
            }
            return(successes);
        }
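A hypothetical call site (the folder path and the 9x6 inner-corner board size are illustrative, not from the original):

            List<string> files = Directory.EnumerateFiles("calib_images", "*.png").ToList();
            int found = AddChessboardPoints(files, new Size(9, 6));
            Console.WriteLine($"Chessboard detected in {found} of {files.Count} images");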
Example #3
        public void _findOutline()
        {
            using (var im = _mat.Clone())
            {
                VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
                CvInvoke.FindContours(im, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);

                // take the convex hull of every contour...
                var cts = contours.ToArrayOfArray()
                          .Select(x => CvInvoke.ConvexHull(x.Select(y => new PointF(y.X, y.Y)).ToArray())).ToArray();

                // ...and inspect the hulls from largest to smallest area
                var cnt = cts.OrderByDescending(z => CvInvoke.ContourArea(new VectorOfPointF(z))).ToList();

                var result = new VectorOfPointF();

                foreach (var c in cnt)
                {
                    var vop  = new VectorOfPointF(c);
                    var peri = CvInvoke.ArcLength(vop, true);

                    var approx = new VectorOfPointF();
                    CvInvoke.ApproxPolyDP(vop, approx, _approxE * peri, true);

                    if (approx.Size == 4) // first hull whose approximation is a quadrilateral wins
                    {
                        result = approx;
                        break;
                    }
                }

                _outline = result;
            }
        }
Example #4
        private void TrackFeatures(Image <Gray, byte> grayImage)
        {
            if (framesProcessed == 1)
            {
                keyPoints   = featureDetector.Detect(grayImage);
                kpVector    = new VectorOfPointF((keyPoints.Select(p => p.Point).ToArray()));
                nextVector  = new VectorOfPointF(kpVector.Size);
                statusArray = new VectorOfByte(kpVector.Size);
                errArray    = new VectorOfFloat(kpVector.Size);
            }
            else if (framesProcessed > 2)
            {
                kpVector = nextVector;
            }

            if (framesProcessed % 50 == 0)
            {
                kpVector = CreateGrid(currentImage);
            }

            if (framesProcessed >= 2)
            {
                CvInvoke.CalcOpticalFlowPyrLK(lastGray, grayImage, kpVector, nextVector, statusArray, errArray, new Size(trackBar1.Value * 2 + 2, trackBar1.Value * 2 + 2), trackBar4.Value, new MCvTermCriteria(trackBar2.Value, trackBar3.Value / 100.0));
                DrawPoints(nextVector, Color.Blue);
            }
        }
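The tracker never prunes points that CalcOpticalFlowPyrLK failed to find. A small follow-up sketch (same fields assumed; whether the original does this elsewhere is unknown) keeps only the entries whose status byte is 1, so lost features do not accumulate across frames:

            byte[]   status = statusArray.ToArray();
            PointF[] pts    = nextVector.ToArray();
            PointF[] alive  = pts.Where((p, i) => status[i] == 1).ToArray(); // status 1 = point was found
            nextVector = new VectorOfPointF(alive);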
Example #5
        /// <summary>
        /// Compute pattern pose using PnP algorithm
        /// </summary>
        /// <param name="points"></param>
        /// <param name="calibration"></param>
        /// <param name="points3D"></param>
        /// <param name="raux"></param>
        /// <param name="taux"></param>
        public Transformation ComputePose(VectorOfPoint3D32F points3D, VectorOfPointF points, CameraCalibrationInfo calibration, out VectorOfFloat raux, out VectorOfFloat taux)
        {
            var pose3D = new Transformation();

            var rotationVector32F    = new VectorOfFloat();
            var translationVector32F = new VectorOfFloat();
            var rotationVector       = new Mat();
            var translationVector    = new Mat();

            CvInvoke.SolvePnP(points3D, points, calibration.Intrinsic, calibration.Distortion, rotationVector, translationVector);

            rotationVector.ConvertTo(rotationVector32F, DepthType.Cv32F);
            translationVector.ConvertTo(translationVector32F, DepthType.Cv32F);

            raux = rotationVector32F;
            taux = translationVector32F;

            var rotationMat = new Mat();

            CvInvoke.Rodrigues(rotationVector32F, rotationMat);
            // Rodrigues preserves the input depth, so the 3x3 output here is 32-bit float;
            // wrapping it as Matrix<double> would misread the underlying bytes
            var rotationMatrix = new Matrix<float>(rotationMat.Rows, rotationMat.Cols, rotationMat.DataPointer);

            // Copy to transformation matrix
            for (int col = 0; col < 3; col++)
            {
                for (int row = 0; row < 3; row++)
                {
                    pose3D.SetRotationMatrixValue(row, col, rotationMatrix[row, col]); // Copy rotation component
                }
                pose3D.SetTranslationVectorValue(col, translationVector32F[col]);             // Copy translation component
            }

            // solvePnP finds the camera pose w.r.t. the marker; invert it to get the marker pose w.r.t. the camera.
            return(pose3D.GetInverted());
        }
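A hypothetical call site (marker3dCorners, detectedCorners and calibration are illustrative names, not from the original):

            VectorOfFloat raux, taux;
            Transformation markerPose = ComputePose(marker3dCorners, detectedCorners, calibration, out raux, out taux);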
Example #6
        public VideoGenerator(ImageDetails imgdet1, ImageDetails imgdet2, VectorOfPointF points1, VectorOfPointF points2, int fpsUser, float alphaUser, string path)
        {
            this.destinationPath = path;
            sizeOfVid            = GetSizeOfImages(imgdet1, imgdet2);
            float      alpha = 0.0f;
            MorphImage m;

            try
            {
                videoWriter = new VideoWriter(fileName: destinationPath, fps: fpsUser, size: sizeOfVid, isColor: true);
                while (alpha < 1.0f)
                {
                    m = new MorphImage(imgdet1, imgdet2, points1, points2, alpha);
                    Image <Bgr, byte> morphedImage = m.GetMorphedImageI();
                    videoWriter.Write(morphedImage.Mat);
                    alpha += alphaUser;
                    morphedImage.Dispose();
                }
                if (videoWriter.IsOpened)
                {
                    videoWriter.Dispose();
                }
                MessageBox.Show("Completed");
            }
            catch (Exception)
            {
                MessageBox.Show("The program has run out of memory. Try to use fewer images, a larger alpha value (0.05 - 0.1) or a lower FPS count (25)");
            }
        }
Example #7
        private static void RunWithImagesFolder(string imagesFolder, string outputFilepath, CascadeClassifier faceDetector, FacemarkLBF facemark)
        {
            using (StreamWriter writer = new StreamWriter(outputFilepath, false))
                foreach (string filename in Directory.EnumerateFiles(imagesFolder))
                {
                    // load directly as grayscale; the original round-tripped through a
                    // System.Drawing.Bitmap, which copies the pixels and is Windows-only
                    Image<Gray, byte> image = CvInvoke.Imread(filename, ImreadModes.Grayscale).ToImage<Gray, byte>();

                    Rectangle face = image.ROI;
                    if (localiseFace)
                    {
                        Rectangle? detectionResult = DetectFace(faceDetector, image);
                        if (!detectionResult.HasValue)
                        {
                            continue;
                        }
                        face = detectionResult.Value;
                    }

                    VectorOfPointF landmarks = MarkFacialPoints(facemark, image, face, out bool isSuccess);
                    if (!isSuccess)
                    {
                        continue;
                    }

                    PointF[] facepoints = landmarks.ToArray();
                    if (normalise)
                    {
                        NormalizeFacepoints(facepoints);
                    }

                    SerializeFacepoints(writer, filename, ref facepoints);
                }
        }
Example #8
        private static double Validate(VectorOfVectorOfPoint3D32F processedObjectPoints, VectorOfVectorOfPointF processedImagePoints, Mat cameraMatrix, Mat distCoeffs, VectorOfPoint3D32F rvecs, VectorOfPoint3D32F tvecs, bool fisheye)
        {
            double error       = 0;
            int    totalpoints = 0;

            if (fisheye)
            {
                for (int i = 0; i < processedObjectPoints.Size; i++)
                {
                    VectorOfPoint3D32F objectFramePoints = processedObjectPoints[i];
                    VectorOfPointF     imageFramePoints  = processedImagePoints[i];
                    // RotationVector3D is just a 3x1 double Mat, so it doubles as a container for the translation vector here
                    RotationVector3D tvec = new RotationVector3D(new double[] { tvecs[i].X, tvecs[i].Y, tvecs[i].Z });
                    RotationVector3D rvec = new RotationVector3D(new double[] { rvecs[i].X, rvecs[i].Y, rvecs[i].Z });

                    VectorOfPointF newImageFramePoints = new VectorOfPointF();

                    Fisheye.ProjectPoints(objectFramePoints, newImageFramePoints, rvec, tvec, cameraMatrix, distCoeffs);

                    for (int j = 0; j < newImageFramePoints.Size; j++)
                    {
                        PointF x1 = newImageFramePoints[j];
                        PointF x2 = imageFramePoints[j];
                        totalpoints++;
                        error += Math.Pow(x1.X - x2.X, 2) + Math.Pow(x1.Y - x2.Y, 2);
                    }
                }
            }
            return(totalpoints == 0 ? 0 : Math.Sqrt(error / totalpoints)); // guard: with fisheye == false nothing is accumulated
        }
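Validate only implements the fisheye branch. A hedged sketch of a matching pinhole branch (same names as above, with CvInvoke.ProjectPoints in place of Fisheye.ProjectPoints) could read:

            else
            {
                for (int i = 0; i < processedObjectPoints.Size; i++)
                {
                    RotationVector3D rvec = new RotationVector3D(new double[] { rvecs[i].X, rvecs[i].Y, rvecs[i].Z });
                    RotationVector3D tvec = new RotationVector3D(new double[] { tvecs[i].X, tvecs[i].Y, tvecs[i].Z });
                    VectorOfPointF newImageFramePoints = new VectorOfPointF();
                    CvInvoke.ProjectPoints(processedObjectPoints[i], rvec, tvec, cameraMatrix, distCoeffs, newImageFramePoints);
                    for (int j = 0; j < newImageFramePoints.Size; j++)
                    {
                        PointF x1 = newImageFramePoints[j];
                        PointF x2 = processedImagePoints[i][j];
                        totalpoints++;
                        error += Math.Pow(x1.X - x2.X, 2) + Math.Pow(x1.Y - x2.Y, 2);
                    }
                }
            }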
Example #9
 public static VectorOfPointF VectorOfPointF2RoundedVectorOfPointF(VectorOfPointF vectorOfPointF)
 {
     return(new VectorOfPointF(
                (from p in vectorOfPointF.ToArray()
                 select new PointF((float)Math.Round(p.X), (float)Math.Round(p.Y))).ToArray()
                ));
 }
Example #10
        // Draws the Delaunay triangulation into an image using the triangle indexes
        private void DrawDelaunay(ref Mat img, ref VectorOfPointF points, VectorOfVectorOfInt triangleIndexes, MCvScalar delaunayColor)
        {
            Size      size = img.Size;
            Rectangle rect = new Rectangle(0, 0, size.Width, size.Height);

            for (int i = 0; i < triangleIndexes.Size; i++)
            {
                VectorOfPoint tri = new VectorOfPoint();
                PointF        pp0 = points[triangleIndexes[i][0]];
                PointF        pp1 = points[triangleIndexes[i][1]];
                PointF        pp2 = points[triangleIndexes[i][2]];
                Point[]       p0  = { new Point((int)pp0.X, (int)pp0.Y) };
                Point[]       p1  = { new Point((int)pp1.X, (int)pp1.Y) };
                Point[]       p2  = { new Point((int)pp2.X, (int)pp2.Y) };
                tri.Push(p0);
                tri.Push(p1);
                tri.Push(p2);

                if (rect.Contains(tri[0]) && rect.Contains(tri[1]) && rect.Contains(tri[2]))
                {
                    CvInvoke.Line(img, tri[0], tri[1], delaunayColor, 2, Emgu.CV.CvEnum.LineType.AntiAlias, 0);
                    CvInvoke.Line(img, tri[1], tri[2], delaunayColor, 2, Emgu.CV.CvEnum.LineType.AntiAlias, 0);
                    CvInvoke.Line(img, tri[2], tri[0], delaunayColor, 2, Emgu.CV.CvEnum.LineType.AntiAlias, 0);
                }
            }
        }
Example #11
 private void DrawPoints(VectorOfPointF kpVector, Color color)
 {
     for (int i = 0; i < kpVector.Size; i++)
     {
         currentImage.Draw(new Cross2DF(kpVector[i], 5, 5), new Bgr(color), 1);
     }
 }
Example #12
    static Vector2[] FindCheckerBoardCorners(Color32[] data, int width, int height, Size boardSize)
    {
        /*
        byte[] bytes = null;
        if (bytes == null || bytes.Length != data.Length * 3)
        {
            bytes = new byte[data.Length * 3];
        }
        */
        Vector2[] corners = null;
        // pin the managed pixel buffer so a Mat can wrap it without copying
        GCHandle handle = GCHandle.Alloc(data, GCHandleType.Pinned);
        //GCHandle resultHandle = GCHandle.Alloc(bytes, GCHandleType.Pinned);
        try
        {
            using (Mat bgra = new Mat(new Size(width, height), DepthType.Cv8U, 4, handle.AddrOfPinnedObject(), width * 4))
            //using (Mat bgr = new Mat(height, width, DepthType.Cv8U, 3, resultHandle.AddrOfPinnedObject(), width * 3))
            {
                // CvInvoke.CvtColor(bgra, bgr, ColorConversion.Bgra2Bgr);
                IInputArray image = bgra;
                VectorOfPointF vec = new VectorOfPointF();
                CvInvoke.FindChessboardCorners(image, boardSize, vec);
                PointF[] pCorners = vec.ToArray();

                // normalize the corner coordinates to [0,1] of the image size
                corners = new Vector2[pCorners.Length];
                for (int p = 0; p < corners.Length; ++p)
                {
                    corners[p].x = pCorners[p].X / width;
                    corners[p].y = pCorners[p].Y / height;
                }
            }
        }
        finally
        {
            handle.Free();   // unpin even if corner detection throws
            //resultHandle.Free();
        }
        return corners;
    }
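A hypothetical Unity call site (the webCamTexture variable is illustrative); the returned corners are normalized to [0,1] of the frame size:

        // sketch only: assumes a running UnityEngine.WebCamTexture named webCamTexture
        Color32[] pixels  = webCamTexture.GetPixels32();
        Vector2[] corners = FindCheckerBoardCorners(pixels, webCamTexture.width,
                                                    webCamTexture.height, new Size(9, 6));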
Example #13
        public void SaveImgWithContours(Point point)
        {
            var imgToSave = Image;

            foreach (var contour in Contours.ToArrayOfArray())
            {
                var tempVector = new VectorOfPoint(contour);
                // DrawContours expects a vector of contours, so wrap the single contour
                using (var single = new VectorOfVectorOfPoint(new[] { contour }))
                    CvInvoke.DrawContours(imgToSave, single, 0, new MCvScalar(0, 255, 0), 2);
                var tempRect = CvInvoke.MinAreaRect(tempVector);
                var box      = CvInvoke.BoxPoints(tempRect);
                var boxVec   = new VectorOfPointF(box);
                using (var boxContour = new VectorOfVectorOfPoint(new[] { Array.ConvertAll(box, Point.Round) }))
                    CvInvoke.DrawContours(imgToSave, boxContour, 0, new MCvScalar(0, 0, 255), 2);
                if (CvInvoke.PointPolygonTest(tempVector, point, true) >= 0)
                {
                    // note: the label is drawn on ContourImage, not on the image being saved
                    CvInvoke.PutText(ContourImage, tempRect.Size.Height.ToString(CultureInfo.InvariantCulture),
                                     new Point((int)(10 + boxVec[0].X), (int)(10 + boxVec[0].Y)),
                                     FontFace.HersheySimplex, 0.65, new MCvScalar(255, 100, 100, 255), 2);
                }
            }

            var processedDir = Path.Combine(FolderName, "processed");

            if (!Directory.Exists(processedDir))
            {
                Directory.CreateDirectory(processedDir);
            }

            CvInvoke.Imwrite(Path.Combine(processedDir, OpenedImgNumber + ".jpg"), imgToSave);
        }
Example #14
        public void GridDetection()
        {
            // work on a copy of the (already grayscale) source image
            Mat image = originalImage.Mat.Clone();

            // blur the image
            CvInvoke.GaussianBlur(image, image, new Size(11, 11), 0);

            // threshold the image
            CvInvoke.AdaptiveThreshold(image, image, 255, AdaptiveThresholdType.MeanC, ThresholdType.Binary, 5, 2);
            CvInvoke.BitwiseNot(image, image);
            Mat kernel = new Mat(new Size(3, 3), DepthType.Cv8U, 1);

            // cross-shaped 3x3 structuring element
            Marshal.Copy(new byte[] { 0, 1, 0, 1, 1, 1, 0, 1, 0 }, 0, kernel.DataPointer, 9);
            CvInvoke.Dilate(image, image, kernel, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));
            FindOuterGridByFloorFill(image);
            CvInvoke.Erode(image, image, kernel, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));
            ImageShowCase.ShowImage(image, "biggest blob");
            VectorOfPointF lines = new VectorOfPointF();

            // each output entry packs one line as (rho, theta) in the polar parameterization
            CvInvoke.HoughLines(image, lines, 1, Math.PI / 180, 200);


            // merging lines
            PointF[] linesArray = lines.ToArray();
            //MergeLines(linesArray, image);
            lines = RemoveUnusedLine(linesArray);

            Mat harrisResponse = new Mat(image.Size, DepthType.Cv8U, 1);

            CvInvoke.CornerHarris(image, harrisResponse, 5);

            DrawLines(lines.ToArray(), image);
            ImageShowCase.ShowImage(image, "corners");
        }
Example #15
 /// <summary>
 /// Create a new sinusoidal patterns
 /// </summary>
 /// <param name="width">Projector's width.</param>
 /// <param name="height">Projector's height.</param>
 /// <param name="nbrOfPeriods">Number of period along the patterns direction.</param>
 /// <param name="shiftValue">Phase shift between two consecutive patterns.</param>
 /// <param name="methodId">Allow to choose between FTP, PSP and FAPS.</param>
 /// <param name="nbrOfPixelsBetweenMarkers">Number of pixels between two consecutive markers on the same row.</param>
 /// <param name="horizontal">Horizontal</param>
 /// <param name="setMarkers">Allow to set markers on the patterns.</param>
 /// <param name="markersLocation">Vector used to store markers location on the patterns.</param>
 public SinusoidalPattern(
     int width        = 800,
     int height       = 600,
     int nbrOfPeriods = 20,
     float shiftValue = (float)(2 * Math.PI / 3),
     Method methodId  = Method.FAPS,
     int nbrOfPixelsBetweenMarkers = 56,
     bool horizontal = false,
     bool setMarkers = false,
     VectorOfPointF markersLocation = null
     )
 {
     _ptr = StructuredLightInvoke.cveSinusoidalPatternCreate(
         width,
         height,
         nbrOfPeriods,
         shiftValue,
         methodId,
         nbrOfPixelsBetweenMarkers,
         horizontal,
         setMarkers,
         markersLocation,
         ref _sharedPtr,
         ref _structuredLightPatternPtr,
         ref _algorithmPtr);
 }
Example #16
        public void FindBoundPolygon()
        {
            // find the convex hull
            PointF[] ps = new PointF[this.slicePoints2d.Count];
            for (int i = 0; i < this.slicePoints2d.Count; i++)
            {
                PointF p = new PointF((float)this.slicePoints2d[i].x, (float)this.slicePoints2d[i].y);
                ps[i] = p;
            }
            PointF[] hull = CvInvoke.ConvexHull(ps);

            // find boundary polygon
            VectorOfPointF hull2 = new VectorOfPointF();

            hull2.Push(hull);
            VectorOfPointF poly = new VectorOfPointF();

            // when inferring the number of polygon edges, the 3rd param (epsilon) can be varied over [0.0005, 0.0015], then the best value chosen (how to define "best"??)
            CvInvoke.ApproxPolyDP(hull2, poly, 0.0003, true);
            for (int i = 0; i < poly.Size; i++)
            {
                this.cornerPoints2d.Add(new MyVector2(poly[i].X, poly[i].Y));
            }

            // unproject to 3d
            foreach (MyVector2 corner2d in this.cornerPoints2d)
            {
                MyVector3 corner3d = frame.GetPointSpaceCoord(new MyVector3(corner2d, 0.0));
                this.cornerPoints3d.Add(corner3d);
            }
        }
Example #17
        private void btStep3_Click(object sender, EventArgs e)
        {
            Mat paper = new Mat();

            Image <Gray, Byte> input = inputMat.ToImage <Gray, Byte>();

            cropped = new Mat();
            VectorOfPointF coners = new VectorOfPointF();

            //List<Point> contourPoints;

            ////find bounding contour
            //contourPoints = docContours.ToArrayOfArray()
            //    .Where(group => group.Length == docContours.ToArrayOfArray().Max(points => points.Length))
            //    .SingleOrDefault().ToList();



            Rectangle rect = CvInvoke.BoundingRectangle(docContours[0]);
            Mat       quad = new Mat();

            quad.Create(answerSheetRealSize.Height, answerSheetRealSize.Width, DepthType.Cv8U, 0);
            VectorOfPointF quadPts = new VectorOfPointF();

            quadPts.Push(new PointF[] { new PointF(0, 0), new PointF(quad.Cols, 0), new PointF(quad.Cols, quad.Rows), new PointF(0, quad.Rows) });

            Mat transmat = CvInvoke.GetPerspectiveTransform(docConers, quadPts);

            CvInvoke.WarpPerspective(grayInput, cropped, transmat, quad.Size);
            imageResult.Image = cropped;
            //input.DrawPolyline(contourPoints.ToArray<Point>(), true, new Gray(0), 10);
            //imageResult.Image = input;
        }
Example #18
        /// <summary>
        /// Hough line transform
        /// </summary>
        private void DrawHoughLines(HoughsArgs e)
        {
            VectorOfPointF lines = new VectorOfPointF();

            CvInvoke.HoughLines(mTempImage, lines, e.Rho, Math.PI / 180, e.Threshold);

            // convert back to a color image so the lines can be drawn in color
            CvInvoke.CvtColor(mTempImage, mTempImage, ColorConversion.Gray2Bgr);

            for (int i = 0; i < lines.Size; i++)
            {
                // each entry packs one line in Hesse normal form: x*cos(theta) + y*sin(theta) = rho
                double rho   = lines[i].X;
                double theta = lines[i].Y;

                double a = Math.Cos(theta);
                double b = Math.Sin(theta);

                // (x0, y0) is the foot of the perpendicular from the origin onto the line
                double x0 = a * rho;
                double y0 = b * rho;

                // step +/-1000 px along the line direction (-sin(theta), cos(theta)) to get two endpoints
                int x1 = (int)(x0 + 1000 * (-b));
                int y1 = (int)(y0 + 1000 * (a));
                int x2 = (int)(x0 - 1000 * (-b));
                int y2 = (int)(y0 - 1000 * (a));

                CvInvoke.Line(mTempImage, new Point(x1, y1), new Point(x2, y2), e.Color, e.Thickness);
            }
        }
Example #19
 //Draw points into the image (start and end coincide, so each "line" is a one-pixel dot).
 //For debugging purposes.
 private void DrawLines(VectorOfPointF lines, int xoff = 0, int yoff = 0)
 {
     for (var i = 0; i < lines.Size; i++)
     {
         CvInvoke.Line(_img, new Point((int)(lines[i].X + xoff), (int)(lines[i].Y + yoff)),
                       new Point((int)(lines[i].X + xoff), (int)(lines[i].Y + yoff)), new MCvScalar(255, 0, 0), 1);
     }
 }
Example #20
        private List<VectorOfVectorOfPointF> findCorners(float squareEdge, Size patternSize, string[] imagesLeft, string[] imagesRight)
        {
            VectorOfVectorOfPointF allCornersLeft  = new VectorOfVectorOfPointF();
            VectorOfVectorOfPointF allCornersRight = new VectorOfVectorOfPointF();
            VectorOfPointF         cornersLeft     = new VectorOfPointF();
            VectorOfPointF         cornersRight    = new VectorOfPointF();

            Image <Gray, Byte> imageLeft;
            Image <Gray, Byte> imageRight;
            bool findLeft, findRight;

            for (int i = 0; i < imagesLeft.Length; i++)
            {
                imageLeft  = new Image <Gray, Byte>(imagesLeft[i]);
                imageRight = new Image <Gray, Byte>(imagesRight[i]);

                findLeft = CvInvoke.FindChessboardCorners(
                    imageLeft,
                    patternSize,
                    cornersLeft);

                findRight = CvInvoke.FindChessboardCorners(
                    imageRight,
                    patternSize,
                    cornersRight);

                if (!findLeft || !findRight)
                {
                    continue;
                }

                CvInvoke.CornerSubPix(
                    imageLeft,
                    cornersLeft,
                    new Size(11, 11),
                    new Size(-1, -1),
                    new MCvTermCriteria(30, 0.1));

                CvInvoke.CornerSubPix(
                    imageRight,
                    cornersRight,
                    new Size(11, 11),
                    new Size(-1, -1),
                    new MCvTermCriteria(30, 0.1));

                allCornersLeft.Push(cornersLeft);
                allCornersRight.Push(cornersRight);

                imageLeft.Dispose();
                imageRight.Dispose();
                GC.Collect();
            }

            return(new List <VectorOfVectorOfPointF>()
            {
                allCornersLeft, allCornersRight
            });
        }
Example #21
        /// <summary>
        /// Calculates the matrix of an affine transform such that:
        /// (x'_i,y'_i)^T=map_matrix (x_i,y_i,1)^T
        /// where dst(i)=(x'_i,y'_i), src(i)=(x_i,y_i), i=0..2.
        /// </summary>
        /// <param name="src">Coordinates of 3 triangle vertices in the source image. If the array contains more than 3 points, only the first 3 will be used</param>
        /// <param name="dest">Coordinates of the 3 corresponding triangle vertices in the destination image. If the array contains more than 3 points, only the first 3 will be used</param>
        /// <returns>The 2x3 matrix that defines the affine transform</returns>
        public static Mat GetAffineTransform(PointF[] src, PointF[] dest)
        {
            Debug.Assert(src.Length >= 3, "The source should contain at least 3 points");
            Debug.Assert(dest.Length >= 3, "The destination should contain at least 3 points");

            using (VectorOfPointF ptSrc = new VectorOfPointF(src))
                using (VectorOfPointF ptDest = new VectorOfPointF(dest))
                    return(CvInvoke.GetAffineTransform(ptSrc, ptDest));
        }
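A quick worked check of the wrapper (values chosen by hand): mapping the unit triangle onto one scaled by 20 and shifted by (10, 10) should yield the matrix [[20, 0, 10], [0, 20, 10]]:

            PointF[] src = { new PointF(0, 0), new PointF(1, 0), new PointF(0, 1) };
            PointF[] dst = { new PointF(10, 10), new PointF(30, 10), new PointF(10, 30) };
            using (Mat affine = GetAffineTransform(src, dst))
            {
                // affine is a 2x3 double matrix: [[20, 0, 10], [0, 20, 10]]
            }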
Example #22
      /*
      /// <summary>
      /// Use the specific method to find perspective transformation H=||h_ij|| between the source and the destination planes 
      /// </summary>
      /// <param name="srcPoints">Point coordinates in the original plane, 2xN, Nx2, 3xN or Nx3 array (the latter two are for representation in homogeneous coordinates), where N is the number of points</param>
      /// <param name="dstPoints">Point coordinates in the destination plane, 2xN, Nx2, 3xN or Nx3 array (the latter two are for representation in homogeneous coordinates) </param>
      /// <param name="method">FindHomography method</param>
      /// <param name="ransacReprojThreshold">The maximum allowed reprojection error to treat a point pair as an inlier. The parameter is only used in RANSAC-based homography estimation. E.g. if dst_points coordinates are measured in pixels with pixel-accurate precision, it makes sense to set this parameter somewhere in the range ~1..3</param>
      /// <returns>The 3x3 homography matrix if found. Null if not found.</returns>
      public static HomographyMatrix FindHomography(
         Matrix<float> srcPoints,
         Matrix<float> dstPoints,
         CvEnum.HomographyMethod method,
         double ransacReprojThreshold = 3,
         )
      {
         HomographyMatrix homography = new HomographyMatrix();
         Mat h = CvInvoke.FindHomography(srcPoints.Ptr, dstPoints.Ptr, method, ransacReprojThreshold);
         if ( !)
         {
            homography.Dispose();
            return null;
         }
         return homography;
      }*/




      /// <summary>
      /// Calculates the matrix of an affine transform such that:
      /// (x'_i,y'_i)^T=map_matrix (x_i,y_i,1)^T
      /// where dst(i)=(x'_i,y'_i), src(i)=(x_i,y_i), i=0..2.
      /// </summary>
      /// <param name="src">Coordinates of 3 triangle vertices in the source image. If the array contains more than 3 points, only the first 3 will be used</param>
      /// <param name="dest">Coordinates of the 3 corresponding triangle vertices in the destination image. If the array contains more than 3 points, only the first 3 will be used</param>
       /// <returns>The 2x3 matrix that defines the affine transform</returns>
      public static Mat GetAffineTransform(PointF[] src, PointF[] dest)
      {
         Debug.Assert(src.Length >= 3, "The source should contain at least 3 points");
         Debug.Assert(dest.Length >= 3, "The destination should contain at least 3 points");

         
         using (VectorOfPointF ptSrc = src.Length == 3 ? new VectorOfPointF(src) : new VectorOfPointF(new PointF[] {src[0], src[1], src[2]}))
         using (VectorOfPointF ptDest = dest.Length == 3 ? new VectorOfPointF(dest) : new VectorOfPointF(new PointF[]{dest[0], dest[1], dest[2]}))
            return CvInvoke.GetAffineTransform(ptSrc, ptDest);
      }
Example #23
        /// <summary>
        /// Return a contour that is translated.
        /// </summary>
        /// <param name="contourIn">Contour that should be translated</param>
        /// <param name="offset_x">X translation</param>
        /// <param name="offset_y">Y translation</param>
        /// <returns>Translated contour</returns>
        public static VectorOfPointF TranslateContour(VectorOfPointF contourIn, int offset_x, int offset_y)
        {
            VectorOfPointF ret_contour = new VectorOfPointF();

            for (int i = 0; i < contourIn.Size; i++)
            {
                ret_contour.Push(new PointF((float)(contourIn[i].X + offset_x + 0.5), (float)(contourIn[i].Y + offset_y + 0.5)));
            }
            return(ret_contour);
        }
Example #24
        public static PointF[] FindChessboardCorners(IInputArray image, Size ChessboardSize)
        {
            //PointF[] corners;
            VectorOfPointF corners = new VectorOfPointF();

            if (CVI.FindChessboardCorners(image, ChessboardSize, corners))
            {
                return(corners.ToArray());
            }
            throw new Exception("No Corners Found");
        }
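A hypothetical caller (the file name is illustrative); note the wrapper throws rather than returning null when no board is detected:

            using (Mat board = CvInvoke.Imread("board.png", ImreadModes.Grayscale))
            {
                PointF[] corners = FindChessboardCorners(board, new Size(9, 6));
                Console.WriteLine($"{corners.Length} corners found");
            }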
Example #25
        /// <summary>
        /// Hough circle transform
        /// </summary>
        private void DrawHoughCircles(HoughsArgs e)
        {
            VectorOfPointF vectorOfPointF = new VectorOfPointF();

            CvInvoke.HoughCircles(mTempImage, vectorOfPointF, HoughType.Gradient, 1, e.Threshold);
            //ToDo: the Hough circle transform has no effect; reason unknown.
            //Likely cause: HoughCircles emits (x, y, radius) triplets, so a two-channel
            //VectorOfPointF is the wrong output container; the CvInvoke.HoughCircles
            //overload that returns CircleF[] sidesteps this.
            for (int i = 0; i < vectorOfPointF.Size; i++)
            {
                //var a = "fs";
            }
        }
Example #26
        // Orders four quadrilateral corners as top-left (min x+y), top-right (min y-x),
        // bottom-right (max x+y), bottom-left (max y-x).
        public static VectorOfPointF OrderPoints(VectorOfPointF points)
        {
            List <float> ptsSums  = new List <float>();
            List <float> ptsDiffs = new List <float>();

            for (int i = 0; i < points.Size; ++i)
            {
                var pt = points[i];
                ptsSums.Add(pt.X + pt.Y);
                ptsDiffs.Add(pt.Y - pt.X);
            }

            float minDiff  = ptsDiffs[0];
            int   minDiffI = 0;
            float maxDiff  = ptsDiffs[0];
            int   maxDiffI = 0;

            float minSum  = ptsSums[0];
            int   minSumI = 0;
            float maxSum  = ptsSums[0];
            int   maxSumI = 0;

            for (int i = 0; i < points.Size; ++i)
            {
                if (ptsDiffs[i] < minDiff)
                {
                    minDiff  = ptsDiffs[i];
                    minDiffI = i;
                }
                if (ptsDiffs[i] > maxDiff)
                {
                    maxDiff  = ptsDiffs[i];
                    maxDiffI = i;
                }
                if (ptsSums[i] < minSum)
                {
                    minSum  = ptsSums[i];
                    minSumI = i;
                }
                if (ptsSums[i] > maxSum)
                {
                    maxSum  = ptsSums[i];
                    maxSumI = i;
                }
            }

            return(new VectorOfPointF(new PointF[]
            {
                points[minSumI],
                points[minDiffI],
                points[maxSumI],
                points[maxDiffI]
            }));
        }
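A worked example of the sum/difference trick (points chosen by hand):

            var quad = new VectorOfPointF(new[] {
                new PointF(90, 10), new PointF(100, 95), new PointF(5, 90), new PointF(10, 5) });
            VectorOfPointF ordered = OrderPoints(quad);
            // ordered: (10,5) top-left, (90,10) top-right, (100,95) bottom-right, (5,90) bottom-left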
Example #27
        private static VectorOfPoint VPointFToVPoint(VectorOfPointF input)
        {
            var ta  = input.ToArray();
            var pIn = new Point[input.Size];

            for (int i = 0; i < ta.Length; i++)
            {
                pIn[i] = new Point((int)ta[i].X, (int)ta[i].Y);
            }
            return(new VectorOfPoint(pIn));
        }
Example #28
        public static List <PointF> ToList(this VectorOfPointF v, int take = -1)
        {
            int           size = take > 0 ? Math.Min(take, v.Size) : v.Size;
            List <PointF> r    = new List <PointF>(size);

            for (int i = 0; i < size; ++i)
            {
                r.Add(v[i]);
            }
            return(r);
        }
Example #29
        public static Matrix <double> GetPointsMatrix(VectorOfPointF points)
        {
            var matrix = new Matrix <double>(points.Size, 2);

            for (int i = 0; i < points.Size; i++)
            {
                matrix[i, 0] = points[i].X;
                matrix[i, 1] = points[i].Y;
            }

            return(matrix);
        }
Example #30
 /// <summary>
 /// Estimates the extrinsic camera parameters for a view from known intrinsic parameters. The coordinates of the 3D object points and their corresponding 2D projections must be specified. This function also minimizes the back-projection error.
 /// </summary>
 /// <param name="objectPoints">The array of object points</param>
 /// <param name="imagePoints">The array of corresponding image points</param>
 /// <param name="intrin">The intrinsic parameters</param>
 /// <param name="method">Method for solving a PnP problem</param>
 /// <returns>The extrinsic parameters</returns>
 public static ExtrinsicCameraParameters SolvePnP(
    MCvPoint3D32f[] objectPoints,
    PointF[] imagePoints,
    IntrinsicCameraParameters intrin, 
    CvEnum.SolvePnpMethod method = CvEnum.SolvePnpMethod.Iterative)
 {
    ExtrinsicCameraParameters p = new ExtrinsicCameraParameters();
    using (VectorOfPoint3D32F objPtVec = new VectorOfPoint3D32F(objectPoints))
    using (VectorOfPointF imgPtVec = new VectorOfPointF(imagePoints))
       CvInvoke.SolvePnP(objPtVec, imgPtVec, intrin.IntrinsicMatrix, intrin.DistortionCoeffs, p.RotationVector, p.TranslationVector, false, method);
    return p;
 }