Example #1
#if LANG_JP
        /// <summary>
        /// ラインイテレータを初期化する
        /// </summary>
        /// <param name="image">対象画像</param>
        /// <param name="pt1">線分の一つ目の端点</param>
        /// <param name="pt2">線分のニつ目の端点</param>
        /// <param name="connectivity">走査した線分の接続性.4または8</param>
        /// <param name="left_to_right">pt1とpt2とは無関係に線分をいつも左から右に走査する(true)か, pt1からpt2への決まった方向で走査するか(false)を指定するフラグ. </param>
        /// <returns></returns>
#else
        /// <summary>
        /// Initializes line iterator
        /// </summary>
        /// <param name="image">Image to sample the line from.  </param>
        /// <param name="pt1">First ending point of the line segment. </param>
        /// <param name="pt2">Second ending point of the line segment. </param>
        /// <param name="connectivity">The scanned line connectivity, 4 or 8. </param>
        /// <param name="left_to_right">The flag, indicating whether the line should be always scanned from the left-most point to the right-most out of pt1 and pt2 (left_to_right=true), or it is scanned in the specified order, from pt1 to pt2 (left_to_right=false). </param>
        /// <returns>The function cvInitLineIterator initializes the line iterator and returns the number of pixels between the two end points. Both points must be inside the image. After the iterator has been initialized, all the points on the raster line that connects the two ending points may be retrieved by successive calls of NextLinePoint. The points on the line are calculated one by one using a 4-connected or 8-connected Bresenham algorithm.</returns>
#endif
        public CvLineIterator(CvArr image, CvPoint pt1, CvPoint pt2, PixelConnectivity connectivity, bool left_to_right)
            : this()
        {
            if (image == null)
            {
                throw new ArgumentNullException("image");
            }
            Initialize(image, pt1, pt2, connectivity, left_to_right);
        }
 /// <summary>
 /// Initializes line iterator
 /// </summary>
 /// <param name="image">Image to sample the line from.  </param>
 /// <param name="pt1">First ending point of the line segment. </param>
 /// <param name="pt2">Second ending point of the line segment. </param>
 /// <param name="connectivity">The scanned line connectivity, 4 or 8. </param>
 /// <param name="leftToRight">The flag, indicating whether the line should be always scanned from the left-most point to the right-most out of pt1 and pt2 (leftToRight=true), or it is scanned in the specified order, from pt1 to pt2 (leftToRight=false). </param>
 /// <returns>The function cvInitLineIterator initializes the line iterator and returns the number of pixels between the two end points. Both points must be inside the image. After the iterator has been initialized, all the points on the raster line that connects the two ending points may be retrieved by successive calls of NextLinePoint. The points on the line are calculated one by one using a 4-connected or 8-connected Bresenham algorithm.</returns>
 private void Initialize(CvArr image, CvPoint pt1, CvPoint pt2, PixelConnectivity connectivity, bool leftToRight)
 {
     this.image        = image;
     this.pt1          = pt1;
     this.pt2          = pt2;
     this.connectivity = connectivity;
     this.leftToRight  = leftToRight;
     this.count        = NativeMethods.cvInitLineIterator(image.CvPtr, pt1, pt2, this.CvPtr, connectivity, leftToRight);
 }
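A minimal usage sketch (not from the library documentation) for the constructor above. The PixelConnectivity member name and the Count property are assumptions; check the class for the exact names.

using (IplImage img = new IplImage("lena.png", LoadMode.GrayScale))   // file name is illustrative
{
    // Connectivity_8 and Count are assumed member names
    CvLineIterator it = new CvLineIterator(img, new CvPoint(10, 10), new CvPoint(200, 150),
                                           PixelConnectivity.Connectivity_8, false);
    Console.WriteLine("Pixels between the two end points: {0}", it.Count);
}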
Example #3
#if LANG_JP
        /// <summary>
        /// 2つの線分が交差しているかどうかを返す
        /// </summary>
        /// <param name="seg1"></param>
        /// <param name="seg2"></param>
        /// <returns></returns>
#else
        /// <summary>
        /// Returns a boolean value indicating whether the specified two segments intersect.
        /// </summary>
        /// <param name="seg1"></param>
        /// <param name="seg2"></param>
        /// <returns></returns>
#endif
        public static bool IntersectedSegments(CvLineSegmentPoint seg1, CvLineSegmentPoint seg2)
        {
            CvPoint p1 = seg1.P1;
            CvPoint p2 = seg1.P2;
            CvPoint p3 = seg2.P1;
            CvPoint p4 = seg2.P2;

            checked
            {
                if (p1.X >= p2.X)
                {
                    if ((p1.X < p3.X && p1.X < p4.X) || (p2.X > p3.X && p2.X > p4.X))
                    {
                        return(false);
                    }
                }
                else
                {
                    if ((p2.X < p3.X && p2.X < p4.X) || (p1.X > p3.X && p1.X > p4.X))
                    {
                        return(false);
                    }
                }
                if (p1.Y >= p2.Y)
                {
                    if ((p1.Y < p3.Y && p1.Y < p4.Y) || (p2.Y > p3.Y && p2.Y > p4.Y))
                    {
                        return(false);
                    }
                }
                else
                {
                    if ((p2.Y < p3.Y && p2.Y < p4.Y) || (p1.Y > p3.Y && p1.Y > p4.Y))
                    {
                        return(false);
                    }
                }

                if (((long)(p1.X - p2.X) * (p3.Y - p1.Y) + (long)(p1.Y - p2.Y) * (p1.X - p3.X)) *
                    ((long)(p1.X - p2.X) * (p4.Y - p1.Y) + (long)(p1.Y - p2.Y) * (p1.X - p4.X)) > 0)
                {
                    return(false);
                }
                if (((long)(p3.X - p4.X) * (p1.Y - p3.Y) + (long)(p3.Y - p4.Y) * (p3.X - p1.X)) *
                    ((long)(p3.X - p4.X) * (p2.Y - p3.Y) + (long)(p3.Y - p4.Y) * (p3.X - p2.X)) > 0)
                {
                    return(false);
                }
            }
            return(true);
        }
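A minimal usage sketch, assuming the static method above is declared on CvLineSegmentPoint (the type whose constructor appears in Example #12 below).

CvLineSegmentPoint a = new CvLineSegmentPoint(new CvPoint(0, 0), new CvPoint(10, 10));
CvLineSegmentPoint b = new CvLineSegmentPoint(new CvPoint(0, 10), new CvPoint(10, 0));
CvLineSegmentPoint c = new CvLineSegmentPoint(new CvPoint(20, 20), new CvPoint(30, 20));
bool crossing = CvLineSegmentPoint.IntersectedSegments(a, b);   // true: the segments cross at (5, 5)
bool disjoint = CvLineSegmentPoint.IntersectedSegments(a, c);   // false: rejected by the bounding-box test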
Example #4
#if LANG_JP
        /// <summary>
        /// 直線と線分が交差しているかを調べる
        /// </summary>
        /// <param name="line">直線</param>
        /// <param name="seg">線分</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Returns a boolean value indicating whether a line and a segment intersect.
        /// </summary>
        /// <param name="line">Line</param>
        /// <param name="seg">Segment</param>
        /// <returns></returns>
#endif
        public static bool IntersectedLineAndSegment(CvLineSegmentPoint line, CvLineSegmentPoint seg)
        {
            CvPoint p1 = line.P1;
            CvPoint p2 = line.P2;
            CvPoint p3 = seg.P1;
            CvPoint p4 = seg.P2;

            if (((long)(p1.X - p2.X) * (p3.Y - p1.Y) + (long)(p1.Y - p2.Y) * (p1.X - p3.X)) *
                ((long)(p1.X - p2.X) * (p4.Y - p1.Y) + (long)(p1.Y - p2.Y) * (p1.X - p4.X)) > 0)
            {
                return(false);
            }
            return(true);
        }
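A minimal sketch of how this differs from IntersectedSegments: the first argument is treated as an infinite line through its two points, so only the segment's endpoints are tested against that line (same CvLineSegmentPoint assumption as above).

CvLineSegmentPoint line = new CvLineSegmentPoint(new CvPoint(0, 5), new CvPoint(1, 5));      // the line y = 5
CvLineSegmentPoint seg  = new CvLineSegmentPoint(new CvPoint(100, 0), new CvPoint(100, 10)); // crosses y = 5 far beyond the line's two defining points
bool hit = CvLineSegmentPoint.IntersectedLineAndSegment(line, seg);   // true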
Example #5
#if LANG_JP
        /// <summary>
        /// 指定したサイズに直線を合わせて、その端点を返す (描画用途)
        /// </summary>
        /// <param name="width">合わせこむサイズの幅</param>
        /// <param name="height">合わせこむサイズの高さ</param>
        /// <param name="pt1">端点1つ目</param>
        /// <param name="pt2">端点2つ目</param>
#else
        /// <summary>
        /// Fits this line to the specified size (for drawing)
        /// </summary>
        /// <param name="width">Width of fit size</param>
        /// <param name="height">Height of fit size</param>
        /// <param name="pt1">First end point of the fitted line</param>
        /// <param name="pt2">Second end point of the fitted line</param>
#endif
        public void FitSize(int width, int height, out CvPoint pt1, out CvPoint pt2)
        {
            double t = (width + height);

            pt1 = new CvPoint
            {
                X = Cv.Round(X1 - Vx * t),
                Y = Cv.Round(Y1 - Vy * t)
            };
            pt2 = new CvPoint
            {
                X = Cv.Round(X1 + Vx * t),
                Y = Cv.Round(Y1 + Vy * t)
            };
        }
        /// <summary>
        /// Converts this line to a CvLineSegmentPoint
        /// </summary>
        /// <param name="scale"></param>
        /// <returns></returns>
        public CvLineSegmentPoint ToSegmentPoint(double scale)
        {
            double  cos = Math.Cos(Theta);
            double  sin = Math.Sin(Theta);
            double  x0  = cos * Rho;
            double  y0  = sin * Rho;
            CvPoint p1  = new CvPoint {
                X = (int)Math.Round(x0 + scale * -sin), Y = (int)Math.Round(y0 + scale * cos)
            };
            CvPoint p2 = new CvPoint {
                X = (int)Math.Round(x0 - scale * -sin), Y = (int)Math.Round(y0 - scale * cos)
            };

            return(new CvLineSegmentPoint(p1, p2));
        }
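A hedged usage sketch: it assumes ToSegmentPoint is declared on CvLineSegmentPolar (the rho/theta line produced by the standard Hough transform) and that the type has a (rho, theta) constructor. The 1000-pixel half-length is an arbitrary drawing extent.

using (IplImage canvas = new IplImage(400, 400, BitDepth.U8, 3))
{
    // rho = 100 px, theta = 45 degrees; the constructor arguments are an assumption
    CvLineSegmentPolar polar = new CvLineSegmentPolar(100f, (float)(Math.PI / 4));
    CvLineSegmentPoint seg = polar.ToSegmentPoint(1000);
    Cv.Line(canvas, seg.P1, seg.P2, CvColor.Yellow, 1, LineType.AntiAlias);
}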
Example #7
#if LANG_JP
        /// <summary>
        /// 画像上にテキストを描画します.
        /// </summary>
        /// <param name="img">文字列描画の対象となる画像.</param>
        /// <param name="text">画像上に描画されるテキスト.</param>
        /// <param name="location">画像上のテキストの開始位置 Point(x,y).</param>
        /// <param name="font">テキストを描画するのに利用されるフォント.</param>
#else
        /// <summary>
        /// Draws the specified text on the image
        /// </summary>
        /// <param name="img">Image where the text should be drawn</param>
        /// <param name="text">Text to write on the image</param>
        /// <param name="location">Point(x,y) where the text should start on the image</param>
        /// <param name="font">Font to use to draw the text</param>
#endif
        public static void AddText(CvArr img, string text, CvPoint location, CvFont font)
        {
            if (img == null)
            {
                throw new ArgumentNullException("img");
            }
            if (text == null)
            {
                throw new ArgumentNullException("text");
            }
            if (font == null)
            {
                throw new ArgumentNullException("font");
            }

            NativeMethods.cvAddText(img.CvPtr, text, location, font.CvPtr);
        }
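A hedged usage sketch, assuming the static AddText above lives on the Cv facade like the other wrappers. Note that cvAddText belongs to OpenCV's Qt-based highgui, so it requires a Qt-enabled build; the CvFont settings are illustrative only.

using (IplImage img = new IplImage(640, 480, BitDepth.U8, 3))
{
    CvFont font = new CvFont(FontFace.HersheySimplex, 1.0, 1.0);
    Cv.AddText(img, "Hello OpenCvSharp", new CvPoint(20, 60), font);
}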
Example #8
        /// <summary>
        /// a rough implementation for object location
        /// </summary>
        /// <param name="objectKeypoints"></param>
        /// <param name="objectDescriptors"></param>
        /// <param name="imageKeypoints"></param>
        /// <param name="imageDescriptors"></param>
        /// <param name="srcCorners"></param>
        /// <returns></returns>
        private static CvPoint[] LocatePlanarObject(CvSeq <CvSURFPoint> objectKeypoints, CvSeq <float> objectDescriptors,
                                                    CvSeq <CvSURFPoint> imageKeypoints, CvSeq <float> imageDescriptors,
                                                    CvPoint[] srcCorners)
        {
            CvMat h = new CvMat(3, 3, MatrixType.F64C1);

            int[] ptpairs = FindPairs(objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors);
            int   n       = ptpairs.Length / 2;

            if (n < 4)
            {
                return(null);
            }

            CvPoint2D32f[] pt1 = new CvPoint2D32f[n];
            CvPoint2D32f[] pt2 = new CvPoint2D32f[n];
            for (int i = 0; i < n; i++)
            {
                pt1[i] = (Cv.GetSeqElem <CvSURFPoint>(objectKeypoints, ptpairs[i * 2])).Value.Pt;
                pt2[i] = (Cv.GetSeqElem <CvSURFPoint>(imageKeypoints, ptpairs[i * 2 + 1])).Value.Pt;
            }

            CvMat pt1Mat = new CvMat(1, n, MatrixType.F32C2, pt1);
            CvMat pt2Mat = new CvMat(1, n, MatrixType.F32C2, pt2);

            if (Cv.FindHomography(pt1Mat, pt2Mat, h, HomographyMethod.Ransac, 5) == 0)
            {
                return(null);
            }

            CvPoint[] dstCorners = new CvPoint[4];
            for (int i = 0; i < 4; i++)
            {
                double x = srcCorners[i].X;
                double y = srcCorners[i].Y;
                double Z = 1.0 / (h[6] * x + h[7] * y + h[8]);
                double X = (h[0] * x + h[1] * y + h[2]) * Z;
                double Y = (h[3] * x + h[4] * y + h[5]) * Z;
                dstCorners[i] = new CvPoint(Cv.Round(X), Cv.Round(Y));
            }

            return(dstCorners);
        }
Example #9
#if LANG_JP
        /// <summary>
        /// 画像上にテキストを描画します.
        /// </summary>
        /// <param name="img">文字列描画の対象となる画像.</param>
        /// <param name="text">画像上に描画されるテキスト.</param>
        /// <param name="location">画像上のテキストの開始位置 Point(x,y).</param>
        /// <param name="font">テキストを描画するのに利用されるフォント.</param>
#else
        /// <summary>
        /// Draws the specified text on the image
        /// </summary>
        /// <param name="img">Image where the text should be drawn</param>
        /// <param name="text">Text to write on the image</param>
        /// <param name="location">Point(x,y) where the text should start on the image</param>
        /// <param name="font">Font to use to draw the text</param>
#endif
        public static void AddText(CvArr img, string text, CvPoint location, CvFont font)
        {
            if (img == null)
            {
                throw new ArgumentNullException(nameof(img));
            }
            if (text == null)
            {
                throw new ArgumentNullException(nameof(text));
            }
            if (font == null)
            {
                throw new ArgumentNullException(nameof(font));
            }

            NativeMethods.cvAddText(img.CvPtr, text, location, font.CvPtr);
            GC.KeepAlive(img);
            GC.KeepAlive(font);
        }
        /// <summary>
        /// Converts this line to a segment whose end points lie at the specified y coordinates
        /// </summary>
        /// <param name="y1"></param>
        /// <param name="y2"></param>
        /// <returns></returns>
        public CvLineSegmentPoint ToSegmentPointY(int y1, int y2)
        {
            if (y1 > y2)
            {
                throw new ArgumentOutOfRangeException();
            }

            int? x1 = XPosOfLine(y1);
            int? x2 = XPosOfLine(y2);

            if (!x1.HasValue || !x2.HasValue)
            {
                throw new Exception();
            }

            CvPoint p1 = new CvPoint(x1.Value, y1);
            CvPoint p2 = new CvPoint(x2.Value, y2);

            return(new CvLineSegmentPoint(p1, p2));
        }
        /// <summary>
        /// Converts this line to a segment whose end points lie at the specified x coordinates
        /// </summary>
        /// <param name="x1"></param>
        /// <param name="x2"></param>
        /// <returns></returns>
        public CvLineSegmentPoint ToSegmentPointX(int x1, int x2)
        {
            if (x1 > x2)
            {
                throw new ArgumentOutOfRangeException();
            }

            int? y1 = YPosOfLine(x1);
            int? y2 = YPosOfLine(x2);

            if (!y1.HasValue || !y2.HasValue)
            {
                throw new Exception();
            }

            CvPoint p1 = new CvPoint(x1, y1.Value);
            CvPoint p2 = new CvPoint(x2, y2.Value);

            return(new CvLineSegmentPoint(p1, p2));
        }
Example #12
#if LANG_JP
        /// <summary>
        /// 初期化
        /// </summary>
        /// <param name="p1">1つ目の点</param>
        /// <param name="p2">2つ目の点</param>
#else
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="p1">1st Point</param>
        /// <param name="p2">2nd Point</param>
#endif
        public CvLineSegmentPoint(CvPoint p1, CvPoint p2)
        {
            this.P1 = p1;
            this.P2 = p2;
        }
Example #13
 /// <summary>
 /// Returns whether the two segments intersect (cross-product sign test)
 /// </summary>
 /// <param name="seg1"></param>
 /// <param name="seg2"></param>
 /// <returns></returns>
 private static bool _IntersectedSegments_(CvLineSegmentPoint seg1, CvLineSegmentPoint seg2)
 {
     return
         (CvPoint.CrossProduct(seg1.P2 - seg1.P1, seg2.P1 - seg1.P1) * CvPoint.CrossProduct(seg1.P2 - seg1.P1, seg2.P2 - seg1.P1) < double.Epsilon &&
          CvPoint.CrossProduct(seg2.P2 - seg2.P1, seg1.P1 - seg2.P1) * CvPoint.CrossProduct(seg2.P2 - seg2.P1, seg1.P2 - seg2.P1) < double.Epsilon);
 }
Example #14
#if LANG_JP
        /// <summary>
        /// この CvLineSegmentPoint を指定の量だけ平行移動する
        /// </summary>
        /// <param name="p">オフセットに使用する CvPoint</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Translates this CvLineSegmentPoint by the specified amount.
        /// </summary>
        /// <param name="p">The CvPoint used to offset this CvLineSegmentPoint.</param>
        /// <returns></returns>
#endif
        public void Offset(CvPoint p)
        {
            Offset(p.X, p.Y);
        }
Example #15
#if LANG_JP
        /// <summary>
        /// 初期化
        /// </summary>
        /// <param name="pt"></param>
        /// <param name="size"></param>
        /// <param name="response"></param>
#else
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="pt"></param>
        /// <param name="size"></param>
        /// <param name="response"></param>
#endif
        public CvStarKeypoint(CvPoint pt, int size, float response)
        {
            Pt       = pt;
            Size     = size;
            Response = response;
        }
 /// <summary>
 ///
 /// </summary>
 /// <param name="imgGray"></param>
 /// <param name="pRects"></param>
 /// <param name="ptRotate"></param>
 /// <param name="dbAngleRotate"></param>
 /// <returns></returns>
 public bool TrackFace(IplImage imgGray, CvRect[] pRects, out CvPoint ptRotate, out double dbAngleRotate)
 {
     return(Cv.TrackFace(this, imgGray, pRects, out ptRotate, out dbAngleRotate));
 }
Example #17
#if LANG_JP
        /// <summary>
        /// 画像の外側輪郭線,または内側輪郭線を描画する
        /// </summary>
        /// <param name="img">輪郭を描画する元画像.輪郭はROIで切り取られる.</param>
        /// <param name="contour">最初の輪郭へのポインタ</param>
        /// <param name="externalColor">外側輪郭線の色</param>
        /// <param name="holeColor">内側輪郭線(穴)の色</param>
        /// <param name="maxLevel">描画される輪郭の最大レベル. 0にした場合,contourのみが描画される. 1にした場合,先頭の輪郭と,同レベルのすべての輪郭が描画される. 2にした場合,先頭の輪郭と同レベルのすべての輪郭と,先頭の輪郭の一つ下のレベルのすべての輪郭が描画される.以下同様.</param>
        /// <param name="thickness">描画される輪郭線の太さ. 負(例えば=Cv.FILLED)にした場合には,内部を塗りつぶす.</param>
        /// <param name="lineType">線の種類</param>
        /// <param name="offset">全ての座標を指定した値だけシフトする</param>
#else
        /// <summary>
        /// Draws contour outlines or interiors in the image
        /// </summary>
        /// <param name="img">Image where the contours are to be drawn. Like in any other drawing function, the contours are clipped with the ROI. </param>
        /// <param name="contour">Reference to the first contour. </param>
        /// <param name="externalColor">Color of the external contours. </param>
        /// <param name="holeColor">Color of internal contours (holes). </param>
        /// <param name="maxLevel">Maximal level for drawn contours. If 0, only contour is drawn. If 1, the contour and all contours after it on the same level are drawn. If 2, all contours after and all contours one level below the contours are drawn, etc. If the value is negative, the function does not draw the contours following after contour but draws child contours of contour up to abs(max_level)-1 level. </param>
        /// <param name="thickness">Thickness of lines the contours are drawn with. If it is negative (e.g. =CV_FILLED), the contour interiors are drawn. </param>
        /// <param name="lineType">Type of the contour segments.</param>
        /// <param name="offset">Shift all the point coordinates by the specified value. It is useful in case if the contours retrieved in some image ROI and then the ROI offset needs to be taken into account during the rendering. </param>
#endif
        public static void DrawContours(CvArr img, CvSeq <CvPoint> contour, CvScalar externalColor, CvScalar holeColor, int maxLevel, int thickness, LineType lineType, CvPoint offset)
        {
            if (img == null)
            {
                throw new ArgumentNullException(nameof(img));
            }
            if (contour == null)
            {
                throw new ArgumentNullException(nameof(contour));
            }
            NativeMethods.cvDrawContours(img.CvPtr, contour.CvPtr, externalColor, holeColor, maxLevel, thickness, lineType, offset);
            KeepAlive(img, contour);
        }
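A hedged end-to-end sketch: binarize an image, extract contours, then draw them. The FindContours overload and the parameter choices are assumptions, not taken from the snippet above; consult the library for the exact signatures.

using (IplImage src = new IplImage("shapes.png", LoadMode.GrayScale))   // file name is illustrative
using (IplImage bin = new IplImage(src.Size, BitDepth.U8, 1))
using (IplImage dst = new IplImage(src.Size, BitDepth.U8, 3))
using (CvMemStorage storage = new CvMemStorage())
{
    Cv.Threshold(src, bin, 128, 255, ThresholdType.Binary);
    Cv.CvtColor(src, dst, ColorConversion.GrayToBgr);
    CvSeq<CvPoint> contours;
    Cv.FindContours(bin, storage, out contours);   // assumed basic overload
    if (contours != null)
    {
        Cv.DrawContours(dst, contours, CvColor.Red, CvColor.Blue, 2, 2, LineType.AntiAlias, new CvPoint(0, 0));
    }
}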
Example #18
        public IplImage Test(string fileName2)
        {
            IplImage correspond = null;

            using (CvMemStorage storage = Cv.CreateMemStorage(0))

                using (IplImage image = Cv.LoadImage(fileName2, LoadMode.GrayScale))
                {
                    correspond = Cv.CreateImage(new CvSize(image.Width, _Obj.Height + image.Height), BitDepth.U8, 1);


                    Cv.SetImageROI(correspond, new CvRect(0, 0, _Obj.Width, _Obj.Height));
                    Cv.Copy(_Obj, correspond);
                    Cv.SetImageROI(correspond, new CvRect(0, _Obj.Height, correspond.Width, correspond.Height));
                    Cv.Copy(image, correspond);
                    Cv.ResetImageROI(correspond);

                    // SURF feature extraction
                    CvSeq <CvSURFPoint> imageKeypoints;
                    CvSeq <float>       imageDescriptors;
                    Stopwatch           watch = Stopwatch.StartNew();
                    {
                        CvSURFParams param = new CvSURFParams(500, true);


                        Cv.ExtractSURF(image, null, out imageKeypoints, out imageDescriptors, storage, param);
                        Console.WriteLine("Image Descriptors: {0}", imageDescriptors.Total);
                    }
                    watch.Stop();
                    Console.WriteLine("Extraction time = {0}ms", watch.ElapsedMilliseconds);
                    watch.Reset();
                    watch.Start();

                    // Outline the region of the object image found in the scene image
                    CvPoint[] srcCorners = new CvPoint[4] {
                        new CvPoint(0, 0), new CvPoint(_Obj.Width, 0), new CvPoint(_Obj.Width, _Obj.Height), new CvPoint(0, _Obj.Height)
                    };
                    CvPoint[] dstCorners = LocatePlanarObject(_ObjectKeypoints, _ObjectDescriptors, imageKeypoints, imageDescriptors, srcCorners);
                    if (dstCorners != null)
                    {
                        for (int i = 0; i < 4; i++)
                        {
                            CvPoint r1 = dstCorners[i % 4];
                            CvPoint r2 = dstCorners[(i + 1) % 4];
                            Cv.Line(correspond, new CvPoint(r1.X, r1.Y + _Obj.Height), new CvPoint(r2.X, r2.Y + _Obj.Height), CvColor.White);
                        }
                    }

                    // Draw lines between corresponding point pairs
                    int[] ptpairs = FindPairs(_ObjectKeypoints, _ObjectDescriptors, imageKeypoints, imageDescriptors);
                    for (int i = 0; i < ptpairs.Length; i += 2)
                    {
                        CvSURFPoint r1 = Cv.GetSeqElem <CvSURFPoint>(_ObjectKeypoints, ptpairs[i]).Value;
                        CvSURFPoint r2 = Cv.GetSeqElem <CvSURFPoint>(imageKeypoints, ptpairs[i + 1]).Value;
                        Cv.Line(correspond, r1.Pt, new CvPoint(Cv.Round(r2.Pt.X), Cv.Round(r2.Pt.Y + _Obj.Height)), CvColor.White);
                    }

                    // Draw circles at the object keypoint locations
                    for (int i = 0; i < _ObjectKeypoints.Total; i++)
                    {
                        CvSURFPoint r      = Cv.GetSeqElem <CvSURFPoint>(_ObjectKeypoints, i).Value;
                        CvPoint     center = new CvPoint(Cv.Round(r.Pt.X), Cv.Round(r.Pt.Y));
                        int         radius = Cv.Round(r.Size * (1.2 / 9.0) * 2);
                        Cv.Circle(_ObjColor, center, radius, CvColor.Red, 1, LineType.AntiAlias, 0);
                    }
                    watch.Stop();
                    Console.WriteLine("Drawing time = {0}ms", watch.ElapsedMilliseconds);
                }


            return(correspond);
        }
Example #19
        /// <summary>
        ///
        /// </summary>
        /// <param name="faceTracker"></param>
        /// <param name="imgGray"></param>
        /// <param name="rects"></param>
        /// <param name="ptRotate"></param>
        /// <param name="angleRotate"></param>
        /// <returns></returns>
        public static bool TrackFace(CvFaceTracker faceTracker, IplImage imgGray, CvRect[] rects, out CvPoint ptRotate, out double angleRotate)
        {
            if (faceTracker == null)
            {
                throw new ArgumentNullException("faceTracker");
            }
            if (imgGray == null)
            {
                throw new ArgumentNullException("imgGray");
            }
            if (rects == null)
            {
                throw new ArgumentNullException("rects");
            }
            if (rects.Length < 3)
            {
                throw new ArgumentException("rects.Length >= 3");
            }

            var ret = NativeMethods.cvTrackFace(faceTracker.CvPtr, imgGray.CvPtr, rects, rects.Length, out ptRotate, out angleRotate);

            GC.KeepAlive(faceTracker);
            GC.KeepAlive(imgGray);
            return(ret);
        }
Example #20
        public void DrawMarker(int x, int y, CvScalar color, MarkerStyle style, int size, LineType lineType, int thickness)
        {
            int r = size / 2;

            switch (style)
            {
            case MarkerStyle.CircleLine:
                Circle(x, y, r, color, thickness, lineType);
                break;

            case MarkerStyle.CircleFilled:
                Circle(x, y, r, color, -1, lineType);
                break;

            case MarkerStyle.Cross:
                Line(x, y - r, x, y + r, color, thickness, lineType);
                Line(x - r, y, x + r, y, color, thickness, lineType);
                break;

            case MarkerStyle.TiltedCross:
                Line(x - r, y - r, x + r, y + r, color, thickness, lineType);
                Line(x + r, y - r, x - r, y + r, color, thickness, lineType);
                break;

            case MarkerStyle.CircleAndCross:
                Circle(x, y, r, color, thickness, lineType);
                Line(x, y - r, x, y + r, color, thickness, lineType);
                Line(x - r, y, x + r, y, color, thickness, lineType);
                break;

            case MarkerStyle.CircleAndTiltedCross:
                Circle(x, y, r, color, thickness, lineType);
                Line(x - r, y - r, x + r, y + r, color, thickness, lineType);
                Line(x + r, y - r, x - r, y + r, color, thickness, lineType);
                break;

            case MarkerStyle.DiamondLine:
            case MarkerStyle.DiamondFilled:
            {
                int       r2  = (int)(size * Math.Sqrt(2) / 2.0);
                CvPoint[] pts = new CvPoint[]
                {
                    new CvPoint(x, y - r2),
                    new CvPoint(x + r2, y),
                    new CvPoint(x, y + r2),
                    new CvPoint(x - r2, y),
                };
                switch (style)
                {
                case MarkerStyle.DiamondLine:
                    PolyLine(new CvPoint[][] { pts }, true, color, thickness, lineType); break;

                case MarkerStyle.DiamondFilled:
                    FillConvexPoly(pts, color, lineType); break;
                }
            }
            break;

            case MarkerStyle.SquareLine:
            case MarkerStyle.SquareFilled:
            {
                CvPoint[] pts = new CvPoint[]
                {
                    new CvPoint(x - r, y - r),
                    new CvPoint(x + r, y - r),
                    new CvPoint(x + r, y + r),
                    new CvPoint(x - r, y + r),
                };
                switch (style)
                {
                case MarkerStyle.SquareLine:
                    PolyLine(new CvPoint[][] { pts }, true, color, thickness, lineType); break;

                case MarkerStyle.SquareFilled:
                    FillConvexPoly(pts, color, lineType); break;
                }
            }
            break;

            default:
                throw new NotImplementedException();
            }
        }
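A hedged usage sketch. DrawMarker is an instance method; it is assumed below to be available on IplImage (the receiver-less Circle/Line calls in the body are the CvArr drawing helpers), and the enum members are the ones used in the switch above.

using (IplImage canvas = new IplImage(320, 240, BitDepth.U8, 3))
{
    canvas.DrawMarker(80, 120, CvColor.Red, MarkerStyle.CircleAndCross, 20, LineType.Link8, 1);
    canvas.DrawMarker(200, 60, CvColor.Green, MarkerStyle.DiamondFilled, 16, LineType.Link8, 1);
}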
Example #21
#if LANG_JP
        /// <summary>
        /// ブーストされた分類器のカスケードを,与えられた画像位置で実行する
        /// </summary>
        /// <param name="pt">解析する領域の左上の角</param>
        /// <param name="startStage">0から始まるインデックスで,カスケードステージをどこ から開始するかを決定する</param>
        /// <returns>分析対象の領域が全ての分類器ステージを通過した場合(これは候補の一つになる)はtrue,そうでなければfalse.</returns>
#else
        /// <summary>
        /// Runs cascade of boosted classifier at given image location
        /// </summary>
        /// <param name="pt">Top-left corner of the analyzed region. Size of the region is the original window size scaled by the currently set scale. The current window size may be retrieved using the cvGetHaarClassifierCascadeWindowSize function. </param>
        /// <param name="startStage">Initial zero-based index of the cascade stage to start from. The function assumes that all the previous stages are passed. This feature is used internally by  cvHaarDetectObjects for better processor cache utilization. </param>
        /// <returns>true if the analyzed rectangle passed all the classifier stages (i.e. it is a candidate); otherwise, false.</returns>
#endif
        public bool Run(CvPoint pt, bool startStage)
        {
            return(Cv.RunHaarClassifierCascade(this, pt, startStage));
        }
Example #22
#if LANG_JP
        /// <summary>
        /// 指定した点と直線の距離を返す
        /// </summary>
        /// <param name="point"></param>
#else
        /// <summary>
        /// Returns the distance between this line and the specified point
        /// </summary>
        /// <param name="point"></param>
#endif
        public double Distance(CvPoint point)
        {
            return(Distance(point.X, point.Y));
        }
Example #23
#if LANG_JP
        /// <summary>
        /// 指定した点がこの矩形に含まれているかどうかを判断する
        /// </summary>
        /// <param name="pt">点</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Determines if the specified point is contained within the rectangular region defined by this Rectangle.
        /// </summary>
        /// <param name="pt">point</param>
        /// <returns></returns>
#endif
        public bool Contains(CvPoint pt)
        {
            return(Contains(pt.X, pt.Y));
        }
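A minimal usage sketch for the hit test above, assuming the method is declared on CvRect (x, y, width, height), as its summary indicates.

CvRect roi = new CvRect(10, 20, 100, 50);
bool inside  = roi.Contains(new CvPoint(30, 40));    // true: the point lies inside the region
bool outside = roi.Contains(new CvPoint(200, 40));   // false: x exceeds the rectangle's right edge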
#if LANG_JP
        /// <summary>
        /// ラインイテレータを初期化する
        /// </summary>
        /// <param name="image">対象画像</param>
        /// <param name="pt1">線分の一つ目の端点</param>
        /// <param name="pt2">線分のニつ目の端点</param>
        /// <param name="connectivity">走査した線分の接続性.4または8</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Initializes line iterator
        /// </summary>
        /// <param name="image">Image to sample the line from.  </param>
        /// <param name="pt1">First ending point of the line segment. </param>
        /// <param name="pt2">Second ending point of the line segment. </param>
        /// <param name="connectivity">The scanned line connectivity, 4 or 8. </param>
        /// <returns>The function cvInitLineIterator initializes the line iterator and returns the number of pixels between the two end points. Both points must be inside the image. After the iterator has been initialized, all the points on the raster line that connects the two ending points may be retrieved by successive calls of NextLinePoint. The points on the line are calculated one by one using a 4-connected or 8-connected Bresenham algorithm.</returns>
#endif
        public CvLineIterator(CvArr image, CvPoint pt1, CvPoint pt2, PixelConnectivity connectivity)
            : this(image, pt1, pt2, connectivity, false)
        {
        }
Example #25
#if LANG_JP
        /// <summary>
        /// ブーストされた分類器のカスケードを,与えられた画像位置で実行する
        /// </summary>
        /// <param name="pt">解析する領域の左上の角</param>
        /// <returns>分析対象の領域が全ての分類器ステージを通過した場合(これは候補の一つになる)はtrue,そうでなければfalse.</returns>
#else
        /// <summary>
        /// Runs cascade of boosted classifier at given image location
        /// </summary>
        /// <param name="pt">Top-left corner of the analyzed region. Size of the region is the original window size scaled by the currently set scale. The current window size may be retrieved using the cvGetHaarClassifierCascadeWindowSize function. </param>
        /// <returns>true if the analyzed rectangle passed all the classifier stages (i.e. it is a candidate); otherwise, false.</returns>
#endif
        public bool Run(CvPoint pt)
        {
            return(Cv.RunHaarClassifierCascade(this, pt));
        }
Example #26
        /// <summary>
        ///
        /// </summary>
        /// <param name="pFaceTracker"></param>
        /// <param name="imgGray"></param>
        /// <param name="pRects"></param>
        /// <param name="ptRotate"></param>
        /// <param name="dbAngleRotate"></param>
        /// <returns></returns>
        public static bool TrackFace(CvFaceTracker pFaceTracker, IplImage imgGray, CvRect[] pRects, out CvPoint ptRotate, out double dbAngleRotate)
        {
            if (pFaceTracker == null)
            {
                throw new ArgumentNullException("pFaceTracker");
            }
            if (imgGray == null)
            {
                throw new ArgumentNullException("imgGray");
            }
            if (pRects == null)
            {
                throw new ArgumentNullException("pRects");
            }
            if (pRects.Length < 3)
            {
                throw new ArgumentException("pRects.Length >= 3");
            }

            return(NativeMethods.cvTrackFace(pFaceTracker.CvPtr, imgGray.CvPtr, pRects, pRects.Length, out ptRotate, out dbAngleRotate));
        }