Example No. 1
        public FaceDetect()
        {
            CheckMemoryLeak();

            // CvHaarClassifierCascade, cvHaarDetectObjects

            CvColor[] colors = new CvColor[]{
                new CvColor(0,0,255),
                new CvColor(0,128,255),
                new CvColor(0,255,255),
                new CvColor(0,255,0),
                new CvColor(255,128,0),
                new CvColor(255,255,0),
                new CvColor(255,0,0),
                new CvColor(255,0,255),
            };

            const double Scale = 1.14;
            const double ScaleFactor = 1.0850;
            const int MinNeighbors = 2;

            using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
            {
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }

                using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))  
                using (var storage = new CvMemStorage())
                {
                    storage.Clear();

                    // Detect faces
                    Stopwatch watch = Stopwatch.StartNew();
                    CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                    watch.Stop();
                    Console.WriteLine("detection time = {0}ms\n", watch.ElapsedMilliseconds);

                    // Mark each detected face with a circle
                    for (int i = 0; i < faces.Total; i++)
                    {
                        CvRect r = faces[i].Value.Rect;
                        CvPoint center = new CvPoint
                        {
                            X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                            Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                        };
                        int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                        img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                    }
                }

                // Show in a window
                CvWindow.ShowImages(img);
            }
        }
Example No. 2
 public Snake()
 {
     using (IplImage src = new IplImage(Const.ImageCake, LoadMode.GrayScale))
     using (IplImage dst = new IplImage(src.Size, BitDepth.U8, 3))
     {
         CvPoint[] contour = new CvPoint[100];
         CvPoint center = new CvPoint(src.Width / 2, src.Height / 2);
         for (int i = 0; i < contour.Length; i++)
         {
             contour[i].X = (int)(center.X * Math.Cos(2 * Math.PI * i / contour.Length) + center.X);
             contour[i].Y = (int)(center.Y * Math.Sin(2 * Math.PI * i / contour.Length) + center.Y);
         }
         Console.WriteLine("Press any key to snake\nEsc - quit");
         using (CvWindow w = new CvWindow())
         {
             while (true)
             {
                 src.SnakeImage(contour, 0.45f, 0.35f, 0.2f, new CvSize(15, 15), new CvTermCriteria(1), true);
                 src.CvtColor(dst, ColorConversion.GrayToRgb);
                 for (int i = 0; i < contour.Length - 1; i++)
                 {
                     dst.Line(contour[i], contour[i + 1], new CvColor(255, 0, 0), 2);
                 }
                 dst.Line(contour[contour.Length - 1], contour[0], new CvColor(255, 0, 0), 2);
                 w.Image = dst;
                 int key = CvWindow.WaitKey();
                 if (key == 27)
                 {
                     break;
                 }
             }
         }
     }
 }
Example No. 3
        public 描画画面()
        {
            InitializeComponent();

            dis_height = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Height;
            dis_width = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Width;
            pos_max = Tobii.pos_max;
            while (Tobii.眼球位置_L[0] == 0 || Tobii.眼球位置_R[0] == 100) { } // busy-wait here until both eyes are tracked
            diff_in = Tobii.眼球位置_R[0] - Tobii.眼球位置_L[0];
            posY_in = (Tobii.眼球位置_L[1] + Tobii.眼球位置_R[1]) / 2;

            pictureBoxIpl1.Width = dis_width;
            pictureBoxIpl1.Height = dis_height;
            frame = Cv.CreateImage(new CvSize(dis_width, dis_height), BitDepth.U8, 3);
            // Share the background image prepared by メイン画面 (allocating another image here would only leak it)
            background = メイン画面.background;
            pictureBoxIpl1.ImageIpl = background;
            window_size = new CvSize(メイン画面.window[0], メイン画面.window[1]);
            point_old = new CvPoint(window_size.Width / 2, window_size.Height / 2);
            許容半径 = メイン画面.radius;

            PC = new System.Diagnostics.PerformanceCounter[3];

            タイマー開始();
        }
Example No. 4
        /// <summary>
        /// Constructor: scatters random colored points and repartitions them whenever the threshold trackbar (OnTrack) changes.
        /// </summary>
        public SeqPartition()
        {
            CvMemStorage storage = new CvMemStorage(0);
            pointSeq = new CvSeq<CvPoint>(SeqType.EltypeS32C2, CvSeq.SizeOf, storage);
            Random rand = new Random();
            canvas = new IplImage(Width, Height, BitDepth.U8, 3);

            colors = new CvScalar[Count];
            for (int i = 0; i < Count; i++)
            {
                CvPoint pt = new CvPoint
                {
                    X = rand.Next(Width),
                    Y = rand.Next(Height)
                };
                pointSeq.Push(pt);
                int icolor = rand.Next() | 0x00404040;
                colors[i] = Cv.RGB(icolor & 255, (icolor >> 8) & 255, (icolor >> 16) & 255);
            }

            using (window = new CvWindowEx() { Text = "points" })
            {
                window.CreateTrackbar("threshold", 10, 50, OnTrack);
                OnTrack(10);
                CvWindowEx.WaitKey();
            }
        }
Example No. 5
        public Contour()
        {
            // cvContourArea, cvArcLength
            // Compute the area of the region bounded by a contour, and the contour's arc length
            
            const int SIZE = 500;

            // (1) Allocate and initialize the image
            using (CvMemStorage storage = new CvMemStorage())
            using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
            {
                img.Zero();
                // (2) Generate the point sequence
                CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.PolyLine, storage);
                CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);
                double scale = rng.RandReal() + 0.5;
                CvPoint pt0 = new CvPoint
                {
                    X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
                    Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
                };
                img.Circle(pt0, 2, CvColor.Green);
                points.Push(pt0);
                for (int i = 1; i < 20; i++)
                {
                    scale = rng.RandReal() + 0.5;
                    CvPoint pt1 = new CvPoint
                    {
                        X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
                        Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
                    };
                    img.Line(pt0, pt1, CvColor.Green, 2);
                    pt0.X = pt1.X;
                    pt0.Y = pt1.Y;
                    img.Circle(pt0, 3, CvColor.Green, Cv.FILLED);
                    points.Push(pt0);
                }
                img.Line(pt0, points.GetSeqElem(0).Value, CvColor.Green, 2);
                // (3) Compute the bounding rectangle, area, and arc length
                CvRect rect = points.BoundingRect(false);
                double area = points.ContourArea();
                double length = points.ArcLength(CvSlice.WholeSeq, 1);
                // (4) Draw the results on the image
                img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), CvColor.Red, 2);
                string text_area = string.Format("Area:   rect={0}, contour={1}", rect.Width * rect.Height, area);
                string text_length = string.Format("Length: rect={0}, contour={1}", 2 * (rect.Width + rect.Height), length);
                using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
                {
                    img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
                    img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
                }
                // (5) Show the image and exit when a key is pressed
                using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
                {
                    window.Image = img;
                    CvWindow.WaitKey(0);
                }
            }
        }
Example No. 6
 /// <summary>
 /// Helper function:
 /// finds the cosine of the angle between the vectors
 /// pt0->pt1 and pt0->pt2
 /// </summary>
 /// <param name="pt1"></param>
 /// <param name="pt2"></param>
 /// <param name="pt0"></param>
 /// <returns></returns>
 static double Angle(CvPoint pt1, CvPoint pt2, CvPoint pt0)
 {
     double dx1 = pt1.X - pt0.X;
     double dy1 = pt1.Y - pt0.Y;
     double dx2 = pt2.X - pt0.X;
     double dy2 = pt2.Y - pt0.Y;
     return (dx1 * dx2 + dy1 * dy2) / Math.Sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
 }
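
A minimal usage sketch (hypothetical call site; the return value is a cosine, so it is near 0 for perpendicular edges and near ±1 for collinear ones, which is the usual rectangle-detection test):

     // Check whether the corner at pt0 is approximately a right angle.
     CvPoint pt0 = new CvPoint(0, 0);
     CvPoint pt1 = new CvPoint(10, 0);
     CvPoint pt2 = new CvPoint(0, 10);
     bool isRightAngle = Math.Abs(Angle(pt1, pt2, pt0)) < 0.1;  // cosine close to 0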
Example No. 7
        public Watershed()
        {
            // cvWatershed
            // Use the mouse to specify the centers of circular markers (seed regions), placing several of them.
            // The markers are grown along the image gradient, and regions split at the boundaries that form where the gradient is high.
            // The image is segmented into as many regions as markers were placed.

            // (2) Load the image, initialize the marker image, and allocate the image used to display results
            using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
            using (IplImage dstImg = srcImg.Clone())
            using (IplImage dspImg = srcImg.Clone())
            using (IplImage markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
            {
                markers.Zero();

                // (3) Show the input image and register a mouse callback for specifying the seed components
                using (CvWindow wImage = new CvWindow("image", WindowMode.AutoSize))
                {
                    wImage.Image = srcImg;
                    // Click to set a center and place a circular seed region
                    int seedNum = 0;
                    wImage.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
                    {
                        if (ev == MouseEvent.LButtonDown)
                        {
                            seedNum++;
                            CvPoint pt = new CvPoint(x, y);
                            markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
                            dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
                            wImage.Image = dspImg;
                        }
                    };
                    CvWindow.WaitKey();
                }

                // (4) Run the watershed segmentation
                Cv.Watershed(srcImg, markers);

                // (5) Mark the watershed boundaries (pixel value = -1) on the result image
                for (int i = 0; i < markers.Height; i++)
                {
                    for (int j = 0; j < markers.Width; j++)
                    {
                        int idx = (int)(markers.Get2D(i, j).Val0);
                        if (idx == -1)
                        {
                            dstImg.Set2D(i, j, CvColor.Red);
                        }
                    }
                }
                using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
                {
                    wDst.Image = dstImg;
                    CvWindow.WaitKey();
                }
            }

        }
Example No. 8
        public IplImage InpaintImage(IplImage src)
        {
            inpaint = new IplImage(src.Size, BitDepth.U8, 3);
            IplImage paint = src.Clone();
            IplImage mask  = new IplImage(src.Size, BitDepth.U8, 1);

            CvWindow win_Paint = new CvWindow("Paint", WindowMode.AutoSize, paint);

            CvPoint prevPt = new CvPoint(-1, -1);

            win_Paint.OnMouseCallback += delegate(MouseEvent eve, int x, int y, MouseEvent flag)
            {
                if (eve == MouseEvent.LButtonDown)
                {
                    prevPt = new CvPoint(x, y);
                }
                else if (eve == MouseEvent.LButtonUp || (flag & MouseEvent.FlagLButton) == 0)
                {
                    prevPt = new CvPoint(-1, -1);
                }
                else if (eve == MouseEvent.MouseMove && (flag & MouseEvent.FlagLButton) != 0)
                {
                    CvPoint pt = new CvPoint(x, y);

                    Cv.DrawLine(mask, prevPt, pt, CvColor.White, 5, LineType.AntiAlias, 0);
                    Cv.DrawLine(paint, prevPt, pt, CvColor.White, 5, LineType.AntiAlias, 0);
                    prevPt = pt;
                    win_Paint.ShowImage(paint);
                }
            };

            bool repeat = true;

            while (repeat)
            {
                switch (CvWindow.WaitKey(0))
                {
                case 'r':   // 'r' resets the mask and restores the original image
                    mask.SetZero();
                    Cv.Copy(src, paint);
                    win_Paint.ShowImage(paint);
                    break;

                case '\r':  // Enter runs the inpainting
                    CvWindow win_Inpaint = new CvWindow("Inpainted", WindowMode.AutoSize);
                    Cv.Inpaint(paint, mask, inpaint, 3, InpaintMethod.NS);
                    win_Inpaint.ShowImage(inpaint);
                    break;

                case (char)27:  // Esc quits
                    CvWindow.DestroyAllWindows();
                    repeat = false;
                    break;
                }
            }
            return(inpaint);
        }
Example No. 11
        private void 얼굴검출ToolStripMenuItem_Click(object sender, EventArgs e)
        {
            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };

            const double scale        = 1.04;
            const double scaleFactor  = 1.139;
            const int    minNeighbors = 2;

            using (IplImage img = src.Clone())
                using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
                {
                    // Create the grayscale image used for face detection
                    using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                    {
                        Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                        Cv.Resize(gray, smallImg, Interpolation.Linear);
                        Cv.EqualizeHist(smallImg, smallImg);
                    }

                    //using (CvHaarClassifierCascade cascade = Cv.Load<CvHaarClassifierCascade>(Const.XmlHaarcascade))  // any cascade will do

                    using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(Application.StartupPath + "\\" + "haarcascade_frontalface_alt.xml"))
                        using (CvMemStorage storage = new CvMemStorage())
                        {
                            storage.Clear();

                            // Detect faces

                            CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(30, 30), new CvSize(180, 180));

                            // Draw a circle around each detected face
                            for (int i = 0; i < faces.Total; i++)
                            {
                                CvRect  r      = faces[i].Value.Rect;
                                CvPoint center = new CvPoint
                                {
                                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                                };
                                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                                img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                            }
                        }
                    dst = img.Clone();
                    pictureBoxIpl2.ImageIpl = dst;
                }
        }
Example No. 12
    // Update is called once per frame
    void Update()
    {
        IplImage frame = Cv.QueryFrame(capture);

        using (IplImage img = Cv.CloneImage(frame))
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
            {
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }

                using (CvMemStorage storage = new CvMemStorage())
                {
                    storage.Clear();

                    CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(64, 64));

                    for (int i = 0; i < faces.Total; i++)
                    {
                        CvRect  r      = faces[i].Value.Rect;
                        CvPoint center = new CvPoint
                        {
                            X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                            Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                        };
                        int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                        img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                    }

                    if (faces.Total > 0)
                    {
                        CvRect r = faces[0].Value.Rect;
                        facepos = new Vector2((r.X + r.Width / 2.0f) / CAPTURE_WIDTH, (r.Y + r.Height / 2.0f) / CAPTURE_HEIGHT);
                    }
                    else
                    {
                        facepos = Vector2.zero;
                    }

                    if (facepos.x >= 0.2 && facepos.x <= 0.7 && facepos.y >= 0.2 && facepos.y <= 0.7)
                    {
                        isFaceInCapture = true;
                    }
                    else
                    {
                        isFaceInCapture = false;
                    }
                }

                Cv.ShowImage("FaceDetect", img);
            }
    }
Example No. 13
 // Convert _outBox.BoxPoints (type CvPoint2D32f) into CvPoint[][] for use
 // in DrawPolyLine
 CvPoint[][] rectangleBoxPoint(CvPoint2D32f[] _box)
 {
     CvPoint[] pts = new CvPoint[_box.Length];
     for (int i = 0; i < _box.Length; i++)
     {
         pts[i] = _box[i];  // Get the box coordinates (CvPoint)
     }
     // Now we've got the 4 corners of the tracking box returned by CamShift
     // in a format that DrawPolyLine can use
     return(new CvPoint[][] { pts });
 }
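
A hedged usage sketch (img is a hypothetical IplImage; DrawPolyLine is assumed to be OpenCvSharp's cvPolyLine wrapper, whose exact overload may vary by version):

     // Draw the rotated tracking box returned by CamShift onto the frame.
     CvPoint[][] poly = rectangleBoxPoint(_outBox.BoxPoints);
     img.DrawPolyLine(poly, true, CvColor.Red);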
Example No. 14
        // Copy an abstract array of "CvPoint"s into an array of any class whose constructor takes 2 int parameters
        // (such a class should probably represent a 2D point).
        // This ain't C++ so it is a tad slow, but much better than having cluttered code all over the place.
        // =>	source = generic "CvPoint" container
        //		dest = array to copy "CvPoint"s into. WARNING : dest size >= source size !!!
        static public void CopyCvPointsToGenericPointsArray <ClassDest>(IEnumerable <CvPoint> source, ClassDest[] dest)
        {
            IEnumerator <CvPoint> it = source.GetEnumerator();
            int index = 0;

            while (it.MoveNext())
            {
                CvPoint current = it.Current;
                dest[index++] = ((ClassDest)Activator.CreateInstance(typeof(ClassDest), new object[] { current.X, current.Y }));
            }
        }
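
A minimal usage sketch, assuming System.Drawing.Point as the destination type (it has the required (int, int) constructor):

        CvPoint[] source = { new CvPoint(1, 2), new CvPoint(3, 4) };
        var dest = new System.Drawing.Point[source.Length];  // dest must be at least as long as source
        CopyCvPointsToGenericPointsArray(source, dest);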
Example No. 15
        /// <summary>
        /// Draws the convex hull
        /// </summary>
        /// <param name="contours"></param>
        /// <param name="hull"></param>
        /// <param name="img"></param>
        private void DrawConvexHull(CvSeq <CvPoint> contours, int[] hull, IplImage img)
        {
            CvPoint pt0 = contours[hull.Last()].Value;

            foreach (int idx in hull)
            {
                CvPoint pt = contours[idx].Value;
                Cv.Line(img, pt0, pt, new CvColor(255, 255, 255));
                pt0 = pt;
            }
        }
Example No. 16
        /*
         * public IplImage HoughLines_Point(IplImage src, int canny1, int canny2, int thresh, int sideData)
         * {
         *  // cvHoughLines2
         *  // Detect line segments using the probabilistic Hough transform
         *
         *  // (1) Read the image
         *  IplImage srcImgStd = src.Clone();
         *  IplImage srcImgGray = new IplImage(src.Size, BitDepth.U8, 1);
         *
         *  CvMemStorage storage = new CvMemStorage();
         *  CvSeq houghLines;
         *  Cv.CvtColor(srcImgStd, srcImgGray, ColorConversion.BgrToGray);
         *  Cv.Canny(srcImgGray, srcImgGray, canny1, canny2, ApertureSize.Size3);
         *  houghLines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI/180, thresh, 5, 0);
         *
         *
         *  LinePoints.Clear();
         *  int limit = Math.Min(houghLines.Total, 6);
         *  for (int i = 0; i < limit; i++)
         *  {
         *      CvLineSegmentPolar elem = houghLines.GetSeqElem<CvLineSegmentPolar>(i).Value;
         *      CvPoint pt1 = houghLines.GetSeqElem<CvLineSegmentPoint>(i).Value.P1;
         *      CvPoint pt2 = houghLines.GetSeqElem<CvLineSegmentPoint>(i).Value.P2;
         *
         *      //Trace.WriteLine(pt1.X.ToString("000.00000  ") + pt1.Y.ToString("000.00000  ") + pt2.X.ToString("000.00000  ")+ pt2.Y.ToString("000.00000"));
         *
         *      srcImgStd.Line(pt1, pt2, CvColor.Red, 1, LineType.AntiAlias, 0);
         *
         *      LinePoints.Add(pt1);
         *      LinePoints.Add(pt2);
         *  }
         *  srcImgStd.Dispose();
         *  srcImgGray.Dispose();
         *  houghLines.Dispose();
         *  storage.Dispose();
         *  return srcImgStd;
         * }
         */

        public IplImage HoughLines_Point08(IplImage src, int canny1, int canny2, int thresh, int sideData)
        {
            List <CvPoint> LinePoints    = new List <CvPoint>();
            int            lineMinLength = 0;

            if (sideData == 0 || sideData == 2)
            {
                lineMinLength = src.Width / 2;
            }
            else
            {
                lineMinLength = src.Height / 2;
            }

            // cvHoughLines2
            // Detect line segments using the probabilistic Hough transform

            // (1) Read the image
            using (IplImage srcImgStd = src.Clone())
                using (IplImage srcImgGray = new IplImage(src.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(srcImgStd, srcImgGray, ColorConversion.BgrToGray);

                    // (2) Canny edge detection ahead of the Hough transform
                    //Cv.Canny(srcImgGray, srcImgGray, 50, 200, ApertureSize.Size3);
                    Cv.Canny(srcImgGray, srcImgGray, canny1, canny2, ApertureSize.Size3);

                    houghLine = srcImgGray.Clone();

                    using (CvMemStorage storage = new CvMemStorage())
                    {
                        LinePoints.Clear();
                        // (3) Detect lines with the probabilistic Hough transform and draw the detected lines
                        CvSeq lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, thresh, 5, 0);
                        int   limit = Math.Min(lines.Total, 6);
                        for (int i = 0; i < limit; i++)
                        {
                            CvPoint pt1 = lines.GetSeqElem <CvLineSegmentPoint>(i).Value.P1;
                            CvPoint pt2 = lines.GetSeqElem <CvLineSegmentPoint>(i).Value.P2;

                            //Trace.WriteLine(pt1.X.ToString("000.00000  ") + pt1.Y.ToString("000.00000  ") + pt2.X.ToString("000.00000  ")+ pt2.Y.ToString("000.00000"));

                            srcImgStd.Line(pt1, pt2, CvColor.Red, 1, LineType.AntiAlias, 0);

                            LinePoints.Add(pt1);
                            LinePoints.Add(pt2);

                            houghLine = srcImgStd.Clone();
                        }
                    }
                }
            return(houghLine);
        }
Example No. 17
        // !!!!!!!!!!!!!!!! Needs debugging - the function appears to work incorrectly!
        // Finds the angle of the line defined by the two points P1 and P2
        public static double GetAngle(CvPoint P1, CvPoint P2)
        {
            // treat the higher of the two points as the central one,
            CvPoint aCurr, aPrev, aNext;
            aCurr = (P1.Y < P2.Y) ? P1 : P2;
            aPrev = (P1.Y < P2.Y) ? P2 : P1;

            aNext.X = aCurr.X;
            aNext.Y = 1000;
            if (aCurr.X > aPrev.X) return -GetAngle(aCurr, aPrev, aNext);
            else return GetAngle(aCurr, aPrev, aNext);
        }
Example No. 18
        static public CvPoint[] DistillContours(
            CvMat inputMat_grey, int maxContourPoints, CvPoint offset,
            ContourRetrieval retr = ContourRetrieval.External, ContourChain chain = ContourChain.ApproxSimple)
        {
            // maxContourPoints (original name "maxContours"); 5 or 100??!!!

            // TODO : The CV docs specifically state that the image should be in binary format. Check if it is.
            //std::vector<std::vector<cv::Point>> updateContours;
            //std::vector<cv::Vec4i> m_hierarchy; // TODO : Not used currently, but determine if it's gonna be needed for points-tracking
            // see usage here: http://stackoverflow.com/questions/35418714/opencvsharps-findcontour-returns-wrong-data
            ContourData contoursData = Filters.FindContours(inputMat_grey, retr, chain, offset);

            CvPoint[][] contoursFound = contoursData.contours;             // original name: "updateContours"
            if (contoursFound.Length == 0)
            {
                // TODO : cannot process frame, no contours found. Maybe it's time to rethink that strategy and not simply return empty handed!!!
                return(null);
            }

            double[] contourAreas = contoursData.areas;
            Point[]  newPtV;
            // find index of max-area contour
            int    index   = 0;
            double maxArea = 0;

            for (int i = contourAreas.Length - 1; i >= 0; --i)
            {
                double area = contourAreas[i];
                if (area > maxArea)
                {
                    index   = i;
                    maxArea = area;
                }
            }

            // approximate contour down to 4 points
            // TODO : This idea sounds weird. Why not check all contours and find one that is best approximated by 4 points???
            CvPoint[] biggestContour = contoursFound[index];
            newPtV = new Point[biggestContour.Length];
            PointOps.CopyCvPointsToPoints(biggestContour, newPtV);               //PointOps.CopyCvPointsToGenericPointsArray( biggestContour, newPtV );
            double epsilon = 1;

            while (newPtV.Length > maxContourPoints)
            {
                newPtV = Cv2.ApproxPolyDP(newPtV, epsilon++, true);
                // TODO : Is incrementing epsilon by 1 a bit stupid? Maybe increment exponentially?
            }

            // finally
            CvPoint[] cvPoints = new CvPoint[newPtV.Length];
            PointOps.CopyPointsToCvPoints(newPtV, cvPoints);
            return(cvPoints);
        }
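
On the TODO about the epsilon increment: growing the tolerance geometrically reaches the target point count in far fewer ApproxPolyDP passes on large contours. A minimal variant of the loop above (the 1.5 growth factor is an arbitrary assumption):

            double epsilon = 1;
            while (newPtV.Length > maxContourPoints)
            {
                newPtV = Cv2.ApproxPolyDP(newPtV, epsilon, true);
                epsilon *= 1.5;  // grow the tolerance geometrically instead of by +1
            }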
Example No. 19
        public CvPoint ImageToBox(IplImage fromImage, PictureBoxIpl targetBox, CvPoint fromPoint)
        {
            double imgToBox_X = (double)targetBox.Width / (double)fromImage.Width;
            double imgToBox_Y = (double)targetBox.Height / (double)fromImage.Height;


            double  tmpX        = (double)fromPoint.X * imgToBox_X;
            double  tmpY        = (double)fromPoint.Y * imgToBox_Y;
            CvPoint targetPoint = new CvPoint((int)tmpX, (int)tmpY);

            return(targetPoint);
        }
Example No. 20
        public static CvCircleSegment Approximate(CvPoint[] points)
        {
            CvPoint2D32f[] points2D32f = new CvPoint2D32f[points.Length];

            for (int i = 0; i < points.Length; i++)
            {
                points2D32f[i].X = (float)points[i].X;
                points2D32f[i].Y = (float)points[i].Y;
            }

            return Approximate(points2D32f);
        }
Example No. 21
        private void OnClick_csv出力(object sender, EventArgs e)
        {
            if (合成画像 != null)
            {
                string 結果 = "";
                int x,y;
                int roi_w = 9;
                int roi_h = 9;

                CvSize roiSize = new CvSize(roi_w, roi_h);
                CvPoint roiPoint;
                for (x = 0; x < 合成画像.Width - roi_w; x++)
                {
                    System.Diagnostics.Debug.WriteLine(x + "\n" + 結果);

                    for (y = 0; y < 合成画像.Height - roi_h; y++)
                    {
                        string buff = "";
                        string type = 検査領域か判断(x,y,roi_w,roi_h);

                        if (type != "") // minor speed-up
                        {
                            roiPoint = new CvPoint(x, y);
                            Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
                            if (type == "1") buff = csvフォーマットを取得(検査対象, roiSize, "1");
                            else if (type == "0") buff = csvフォーマットを取得(検査対象, roiSize, "0");
                        }
                        //if (checkBox_all.Checked)
                        //{
                        //    roiPoint = new CvPoint(x, y);
                        //    Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
                        //    Cv.SetImageROI(マスク画像, new CvRect(roiPoint, roiSize));
                        //    if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
                        //    else if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
                        //}
                        //else if (checkBox_black.Checked)
                        //{
                        //    if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
                        //}
                        //else
                        //{
                        //    if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
                        //}

                        if (buff != "") 結果 += buff + "\n";

                        Cv.ResetImageROI(マスク画像);
                        Cv.ResetImageROI(検査対象);
                    }
                }
                stringをcsv出力(結果,DateTime.Now.ToString("yy-MM-dd_")+this.Text);
            }
        }
Example No. 22
        ///////////////////////
        public static IplImage FaceDetect(IplImage src)
        {
            IplImage FindFace;

            // CvHaarClassifierCascade, cvHaarDetectObjects
            // Use a cascade of Haar classifiers to detect faces
            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };
            const double scale        = 1;
            const double scaleFactor  = 1.139;
            const int    minNeighbors = 2;
            IplImage     img          = src.Clone();
            IplImage     smallImg     = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1); // single channel, so the resized grayscale fits
            {
                // Create the grayscale image used for face detection
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }
                using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("C:\\haarcascade_frontalface_default.xml"))
                    using (CvMemStorage storage = new CvMemStorage())
                    {
                        storage.Clear();
                        // Detect faces
                        CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(24, 24));
                        // Draw a circle around each detected face
                        for (int i = 0; i < faces.Total; i++)
                        {
                            CvRect  r      = faces[i].Value.Rect;
                            CvPoint center = new CvPoint
                            {
                                X = Cv.Round((r.X + r.Width * 0.5) * scale),
                                Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                            };
                            int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                            img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                        }
                    }
                FindFace = img.Clone();
                return(FindFace);
            }
        }
Example No. 23
        private IplImage ExtractSubImage(IplImage imgOrig, out CvRect pSubImageRect)
        {
            IplImage mainSubImage = null;

            CvSize  subImageSize = new CvSize((Int32)((imgOrig.Size.Width / 9) * 7), (Int32)((imgOrig.Size.Height / 9) * 4));
            CvPoint point        = new CvPoint(0, (Int32)((imgOrig.Size.Height / 9) * 3));
            CvRect  subImageRect = new CvRect(point, subImageSize);

            pSubImageRect = subImageRect;
            mainSubImage  = GetSubImage(imgOrig, subImageRect);

            return(mainSubImage);
        }
Example No. 24
        public ConvexHull()
        {
            using (IplImage img = Cv.CreateImage(new CvSize(500, 500), BitDepth.U8, 3))
                using (CvWindow window = new CvWindow("hull"))
                {
                    Random rand = new Random();

                    for (; ;)
                    {
                        int count = rand.Next() % 100 + 1;

                        // create sequence of random points
                        CvPoint[] ptseq = new CvPoint[count];
                        for (int i = 0; i < ptseq.Length; i++)
                        {
                            ptseq[i] = new CvPoint
                            {
                                X = rand.Next() % (img.Width / 2) + img.Width / 4,
                                Y = rand.Next() % (img.Height / 2) + img.Height / 4
                            };
                        }

                        // draw points
                        Cv.Zero(img);
                        foreach (CvPoint pt in ptseq)
                        {
                            Cv.Circle(img, pt, 2, new CvColor(255, 0, 0), -1);
                        }

                        // find hull
                        CvPoint[] hull;
                        Cv.ConvexHull2(ptseq, out hull, ConvexHullOrientation.Clockwise);

                        // draw hull
                        CvPoint pt0 = hull.Last();
                        foreach (CvPoint pt in hull)
                        {
                            Cv.Line(img, pt0, pt, CvColor.Green);
                            pt0 = pt;
                        }


                        window.ShowImage(img);

                        if (Cv.WaitKey(0) == 27) // 'ESC'
                        {
                            break;
                        }
                    }
                }
        }
Example No. 25
    public IplImage effectMouthOrange(IplImage srcImage, IplImage copyImage)
    {
        IplImage tmpImage = Cv.CloneImage(srcImage);

        CvPoint M1;

        M1.X = picture.facePoints.find_label_point_X("M1");
        M1.Y = picture.facePoints.find_label_point_Y("M1");
        CvPoint M2;

        M2.X = picture.facePoints.find_label_point_X("M2");
        M2.Y = picture.facePoints.find_label_point_Y("M2");
        CvPoint M3;

        M3.X = picture.facePoints.find_label_point_X("M3");
        M3.Y = picture.facePoints.find_label_point_Y("M3");
        CvPoint M4;

        M4.X = picture.facePoints.find_label_point_X("M4");
        M4.Y = picture.facePoints.find_label_point_Y("M4");
        CvPoint M5;

        M5.X = picture.facePoints.find_label_point_X("M5");
        M5.Y = picture.facePoints.find_label_point_Y("M5");
        CvPoint M6;

        M6.X = picture.facePoints.find_label_point_X("M6");
        M6.Y = picture.facePoints.find_label_point_Y("M6");
        CvPoint M7;

        M7.X = picture.facePoints.find_label_point_X("M7");
        M7.Y = picture.facePoints.find_label_point_Y("M7");
        CvPoint M8;

        M8.X = picture.facePoints.find_label_point_X("M8");
        M8.Y = picture.facePoints.find_label_point_Y("M8");

        CvPoint[][] points = new CvPoint[][] {
            new CvPoint[] { M1, M2, M3, M4, M5, M6, M7, M8 },
        };
        // Orange in BGR channel order
        CvScalar orange = new CvScalar(0, 140, 255);
        Cv.FillPoly(copyImage, points, orange);
        Cv.AddWeighted(copyImage, 0.2, tmpImage, 0.8, 0, tmpImage);
        return(tmpImage);
    }
Example No. 26
        public Watershed()
        {
            using (var srcImg = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
            using (var dstImg = srcImg.Clone())
            using (var dspImg = srcImg.Clone())
            using (var markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
            {
                markers.Zero();

                using (var window = new CvWindow("image", WindowMode.AutoSize))
                {
                    window.Image = srcImg;
                    // Mouse event  
                    int seedNum = 0;
                    window.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
                    {
                        if (ev == MouseEvent.LButtonDown)
                        {
                            seedNum++;
                            CvPoint pt = new CvPoint(x, y);
                            markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
                            dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
                            window.Image = dspImg;
                        }
                    };
                    CvWindow.WaitKey();
                }

                Cv.Watershed(srcImg, markers);

                // draws watershed
                for (int i = 0; i < markers.Height; i++)
                {
                    for (int j = 0; j < markers.Width; j++)
                    {
                        int idx = (int)(markers.Get2D(i, j).Val0);
                        if (idx == -1)
                        {
                            dstImg.Set2D(i, j, CvColor.Red);
                        }
                    }
                }
                using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
                {
                    wDst.Image = dstImg;
                    CvWindow.WaitKey();
                }
            }

        }
Example No. 29
        public List <CvPoint> ImageToBox(IplImage fromImage, PictureBoxIpl targetBox, List <CvPoint> fromPoint)
        {
            List <CvPoint> targetPoint = new List <CvPoint>();
            double         imgToBox_X  = (double)targetBox.Width / (double)fromImage.Width;
            double         imgToBox_Y  = (double)targetBox.Height / (double)fromImage.Height;

            for (int i = 0; i < fromPoint.Count(); i++)
            {
                double  tmpX    = (double)fromPoint[i].X * imgToBox_X;
                double  tmpY    = (double)fromPoint[i].Y * imgToBox_Y;
                CvPoint tmPoint = new CvPoint((int)tmpX, (int)tmpY);
                targetPoint.Add(tmPoint);
            }
            return(targetPoint);
        }
Example No. 30
        /// <summary>
        /// Calculate sum of line pixels (wrapper style)
        /// </summary>
        /// <param name="image"></param>
        /// <param name="pt1"></param>
        /// <param name="pt2"></param>
        /// <returns></returns>
        private CvScalar SumLinePixelsManaged(IplImage image, CvPoint pt1, CvPoint pt2)
        {
            double blueSum = 0, greenSum = 0, redSum = 0;
            var    iterator = new CvLineIterator(image, pt1, pt2, PixelConnectivity.Connectivity8, false);

            foreach (CvScalar pixel in iterator)
            {
                blueSum  += pixel.Val0;  //blue_sum += iterator.ptr[0];
                greenSum += pixel.Val1;  //green_sum += iterator.ptr[1];
                redSum   += pixel.Val2;  //red_sum += iterator.ptr[2];

                PrintCoordinate(image, iterator);
            }
            return(new CvScalar(blueSum, greenSum, redSum));
        }
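
A minimal usage sketch (image and the endpoints are hypothetical):

        // Sum the B, G and R components of every pixel along the image diagonal.
        CvScalar sums = SumLinePixelsManaged(image, new CvPoint(0, 0),
                                             new CvPoint(image.Width - 1, image.Height - 1));
        Console.WriteLine("B={0} G={1} R={2}", sums.Val0, sums.Val1, sums.Val2);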
Example No. 32
        // Get area of similarly-colored pixels, starting from a point
        // Actually uses the FloodFill method but since this took me some time to figure out how it works, I have "libraried" the code
        // Note : uses 8-way filling, one may want to alter this...
        // =>	mask is input/output CvMat and has several restrictions; if you intend to use it on successive calls, pass it as null on first call
        //		and the function will create it for you appropriately.
        static public CvConnectedComp GetAreaOfSimilarPixels(CvMat input, CvPoint startPoint, CvScalar lower, CvScalar upper, ref CvMat mask, byte maskCol = 255)
        {
            CvConnectedComp filledAreaData;

            if (mask == null)
            {
                mask = new CvMat(input.Rows + 2, input.Cols + 2, MatrixType.U8C1, new CvScalar(0, 0, 0, 0));
            }

            input.FloodFill(
                startPoint, 0, lower, upper, out filledAreaData,
                (FloodFillFlag.Link8 | FloodFillFlag.MaskOnly | FloodFillFlag.FixedRange) + (maskCol << 8), mask);

            return(filledAreaData);
        }
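
A minimal usage sketch of the mask contract described above (inputMat and the seed point are hypothetical):

        // First call: pass mask as null and the helper allocates the (rows+2) x (cols+2) mask itself.
        CvMat mask = null;
        CvConnectedComp region = GetAreaOfSimilarPixels(
            inputMat, new CvPoint(10, 10),
            new CvScalar(20, 20, 20, 0),   // lower per-channel tolerance
            new CvScalar(20, 20, 20, 0),   // upper per-channel tolerance
            ref mask);
        Console.WriteLine("filled area: {0}", region.Area);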
Example No. 33
        public BoundingRect()
        {
            // cvBoundingRect
            // Compute the rectangle that bounds a set of points

            // (1) Allocate and initialize the image and the memory storage
            // (the memory storage is unnecessary if CvSeq is not used)
            using (IplImage img = new IplImage(640, 480, BitDepth.U8, 3))
                using (CvMemStorage storage = new CvMemStorage(0))
                {
                    img.Zero();
                    CvRNG rng = new CvRNG(DateTime.Now);
                    // (2) Generate the points
                    ///*
                    // the easy way (use a plain array)
                    CvPoint[] points = new CvPoint[50];
                    for (int i = 0; i < 50; i++)
                    {
                        points[i] = new CvPoint()
                        {
                            X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4),
                            Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4)
                        };
                        img.Circle(points[i], 3, new CvColor(0, 255, 0), Cv.FILLED);
                    }
                    //*/

                    /*
                     * // the way the original sample does it (use CvSeq)
                     * CvSeq points = new CvSeq(SeqType.EltypePoint, CvSeq.SizeOf, CvPoint.SizeOf, storage);
                     * for (int i = 0; i < 50; i++) {
                     *  CvPoint pt = new CvPoint();
                     *  pt.X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4);
                     *  pt.Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4);
                     *  points.Push(pt);
                     *  img.Circle(pt, 3, new CvColor(0, 255, 0), Cv.FILLED);
                     * }
                     * //*/
                    // (3) Compute and draw the rectangle bounding the points
                    CvRect rect = Cv.BoundingRect(points);
                    img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), new CvColor(255, 0, 0), 2);
                    // (4) Show the image and exit when a key is pressed
                    using (CvWindow w = new CvWindow("BoundingRect", WindowMode.AutoSize, img))
                    {
                        CvWindow.WaitKey(0);
                    }
                }
        }
Example No. 34
        public TreeNodeIterator()
        {
            using (CvMemStorage storage = new CvMemStorage(0))
                using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.Color))
                    using (IplImage srcImgGray = new IplImage(srcImg.Size, BitDepth.U8, 1))
                        using (IplImage tmpImg = new IplImage(srcImg.Size, BitDepth.U8, 1))
                        {
                            Cv.CvtColor(srcImg, srcImgGray, ColorConversion.BgrToGray);

                            // (1) Binarize the image and detect the contours
                            Cv.Threshold(srcImgGray, tmpImg, 120, 255, ThresholdType.Binary);
                            CvSeq <CvPoint> contours;
                            Cv.FindContours(tmpImg, storage, out contours, CvContour.SizeOf, ContourRetrieval.Tree, ContourChain.ApproxSimple);
                            /* Get the coordinates from the contour sequences */
                            using (CvFileStorage fs = new CvFileStorage("contours.yaml", null, FileStorageMode.Write))
                            {
                                // (2) Initialize the tree node iterator
                                CvTreeNodeIterator <CvSeq <CvPoint> > it = new CvTreeNodeIterator <CvSeq <CvPoint> >(contours, 1);
                                // (3) Visit each node (contour)
                                //CvSeq<CvPoint> contour;
                                //while ((contour = it.NextTreeNode()) != null)
                                foreach (CvSeq <CvPoint> contour in it)
                                {
                                    fs.StartWriteStruct("contour", NodeType.Seq);
                                    // (4) Get the vertex coordinates that make up the contour
                                    CvPoint tmp = contour[-1].Value;
                                    for (int i = 0; i < contour.Total; i++)
                                    {
                                        CvPoint point = contour[i].Value;
                                        srcImg.Line(tmp, point, CvColor.Blue, 2);
                                        fs.StartWriteStruct(null, NodeType.Map | NodeType.Flow);
                                        fs.WriteInt("x", point.X);
                                        fs.WriteInt("y", point.Y);
                                        fs.EndWriteStruct();
                                        tmp = point;
                                    }
                                    fs.EndWriteStruct();
                                }
                            }

                            Console.WriteLine(File.ReadAllText("contours.yaml"));

                            using (new CvWindow("Contours", srcImg))
                            {
                                Cv.WaitKey(0);
                            }
                        }
        }
Exemplo n.º 35
0
        /// <summary>
        /// Draws the convexity defects
        /// </summary>
        /// <param name="img"></param>
        /// <param name="defect"></param>
        private void DrawDefects(IplImage img, CvSeq <CvConvexityDefect> defect)
        {
            int count = 0;

            foreach (CvConvexityDefect item in defect)
            {
                CvPoint      p1 = item.Start, p2 = item.End;
                double       dist = GetDistance(p1, p2);
                CvPoint2D64f mid  = GetMidpoint(p1, p2);
                img.DrawLine(p1, p2, CvColor.White, 3);
                img.DrawCircle(item.DepthPoint, 10, CvColor.Green, -1);
                img.DrawLine(mid, item.DepthPoint, CvColor.White, 1);
                Console.WriteLine("No:{0} Depth:{1} Dist:{2}", count, item.Depth, dist);
                count++;
            }
        }
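
        // GetDistance and GetMidpoint are not included in this excerpt. Below are
        // minimal sketches consistent with how they are called above; they are
        // assumptions, not the original helpers.
        private static double GetDistance(CvPoint p1, CvPoint p2)
        {
            // Euclidean distance between the defect's start and end points
            double dx = p2.X - p1.X;
            double dy = p2.Y - p1.Y;
            return Math.Sqrt(dx * dx + dy * dy);
        }

        private static CvPoint2D64f GetMidpoint(CvPoint p1, CvPoint p2)
        {
            return new CvPoint2D64f((p1.X + p2.X) / 2.0, (p1.Y + p2.Y) / 2.0);
        }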
Exemplo n.º 36
0
            public override void FloodFill(CvPoint pt)
            {
                PrepareForFloodFill();

                unsafe
                {
                    scan0 = (byte *)bitmap.DataByte;
                    int   x = pt.X; int y = pt.Y;
                    int   loc      = CoordsToIndex(ref x, ref y);
                    byte *colorPtr = ((byte *)(scan0 + loc));
                    startColor = new byte[] { colorPtr[0], colorPtr[1], colorPtr[2] };
                    LinearFloodFill4(ref x, ref y);

                    bool[] pixelsChecked = this.pixelsChecked;

                    while (ranges.Count > 0)
                    {
                        FloodFillRange range = ranges.Dequeue();

                        //START THE LOOP UPWARDS AND DOWNWARDS
                        int   upY       = range.Y - 1;                //so we can pass the y coord by ref
                        int   downY     = range.Y + 1;
                        byte *upPtr     = (byte *)(scan0 + CoordsToIndex(ref range.StartX, ref upY));
                        byte *downPtr   = (byte *)(scan0 + CoordsToIndex(ref range.StartX, ref downY));
                        int   downPxIdx = (bitmapWidth * (range.Y + 1)) + range.StartX;                    //CoordsToPixelIndex(range.StartX,range.Y+1);
                        int   upPxIdx   = (bitmapWidth * (range.Y - 1)) + range.StartX;                    //CoordsToPixelIndex(range.StartX, range.Y - 1);
                        for (int i = range.StartX; i <= range.EndX; i++)
                        {
                            //START LOOP UPWARDS
                            //if we're not above the top of the bitmap and the pixel above this one is within the color tolerance
                            if (range.Y > 0 && CheckPixel(ref upPtr) && (!(pixelsChecked[upPxIdx])))
                            {
                                LinearFloodFill4(ref i, ref upY);
                            }
                            //START LOOP DOWNWARDS
                            if (range.Y < (bitmapHeight - 1) && CheckPixel(ref downPtr) && (!(pixelsChecked[downPxIdx])))
                            {
                                LinearFloodFill4(ref i, ref downY);
                            }
                            upPtr   += bitmapPixelFormatSize;
                            downPtr += bitmapPixelFormatSize;
                            downPxIdx++;
                            upPxIdx++;
                        }
                    }
                }
            }
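
            // CoordsToIndex, CheckPixel and LinearFloodFill4 are not part of this
            // excerpt. Minimal sketches of the first two, consistent with how they
            // are called above; bitmapStride and tolerance are assumed fields, not
            // original names, and LinearFloodFill4 (the scanline fill itself) is omitted.
            private int CoordsToIndex(ref int x, ref int y)
            {
                // Byte offset of pixel (x, y) into the raw bitmap buffer
                return (y * bitmapStride) + (x * bitmapPixelFormatSize);
            }

            private unsafe bool CheckPixel(ref byte* ptr)
            {
                // Is this pixel within tolerance of the colour sampled at the fill origin?
                return Math.Abs(ptr[0] - startColor[0]) <= tolerance
                    && Math.Abs(ptr[1] - startColor[1]) <= tolerance
                    && Math.Abs(ptr[2] - startColor[2]) <= tolerance;
            }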
Exemplo n.º 37
0
        public BoundingRect()
        {
            // cvBoundingRect 
            // Computes the bounding rectangle of a point set

            // (1) Allocate and initialize the image and memory storage
            //     (the memory storage is only needed for the CvSeq variant below)
            using (IplImage img = new IplImage(640, 480, BitDepth.U8, 3))
            using (CvMemStorage storage = new CvMemStorage(0))
            {
                img.Zero();
                CvRNG rng = new CvRNG(DateTime.Now);
                // (2) Generate the point set
                ///*
                // The easy way (using a plain array)
                CvPoint[] points = new CvPoint[50];
                for (int i = 0; i < 50; i++)
                {
                    points[i] = new CvPoint()
                    {
                        X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4),
                        Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4)
                    };
                    img.Circle(points[i], 3, new CvColor(0, 255, 0), Cv.FILLED);
                }
                //*/
                /*
                // The method following the original sample (using CvSeq)
                CvSeq points = new CvSeq(SeqType.EltypePoint, CvSeq.SizeOf, CvPoint.SizeOf, storage);
                for (int i = 0; i < 50; i++) {
                    CvPoint pt = new CvPoint();
                    pt.X = (int)(rng.RandInt() % (img.Width / 2) + img.Width / 4);
                    pt.Y = (int)(rng.RandInt() % (img.Height / 2) + img.Height / 4);
                    points.Push(pt);
                    img.Circle(pt, 3, new CvColor(0, 255, 0), Cv.FILLED);
                }
                //*/
                // (3) Compute and draw the bounding rectangle of the point set
                CvRect rect = Cv.BoundingRect(points);
                img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), new CvColor(255, 0, 0), 2);
                // (4) Show the image and exit when a key is pressed
                using (CvWindow w = new CvWindow("BoundingRect", WindowMode.AutoSize, img))
                {
                    CvWindow.WaitKey(0);
                }
            }
        }
Exemplo n.º 38
0
        private void extendLines(CvLineSegmentPoint[] lines, double ext)
        {
            // TODO: this is a clumsy way to extend a line; it does 2 sqrts and is hard to follow.
            // Better to make it parametric, like x = x0 + t * (x1 - x0), y = y0 + t * (y1 - y0),
            // so that t = 0 gives P1 and t = 1 gives P2, and the segment can be grown by a
            // percentage, e.g. t = -0.5 and t = 1.5 for +50% on each end.

            // Convert each 2-point line segment to CvLine2D form (unit direction vector plus a point)
            // TODO: this should probably be done manually instead of with OpenCV's ultra-generic
            // (and slow) fitting function; with only 2 points the unit direction is just (P2 - P1) / |P2 - P1|.
            List <CvLine2D> fitLinesV = new List <CvLine2D>(lines.Length);

            CvPoint[] forFitline = new CvPoint[2];
            for (int i = 0; i < lines.Length; ++i)
            {
                forFitline[0] = lines[i].P1;
                forFitline[1] = lines[i].P2;
                CvLine2D fitLinef = Cv.FitLine2D(forFitline, DistanceType.L2, 0, 0.01, 0.01);
                fitLinesV.Add(fitLinef);
            }

            CvPoint p1, p2, p3, p4;

            for (int i = 0; i < lines.Length; i++)
            {
                CvLineSegmentPoint lineSegm = lines[i];
                CvLine2D           fitLine  = fitLinesV[i];
                int fitLineVx = (int)(fitLine.Vx * ext);
                int fitLineVy = (int)(fitLine.Vy * ext);
                p1 = new CvPoint(lineSegm.P1.X + fitLineVx, lineSegm.P1.Y + fitLineVy);
                p2 = new CvPoint(lineSegm.P2.X - fitLineVx, lineSegm.P2.Y - fitLineVy);
                p3 = new CvPoint(lineSegm.P1.X - fitLineVx, lineSegm.P1.Y - fitLineVy);
                p4 = new CvPoint(lineSegm.P2.X + fitLineVx, lineSegm.P2.Y + fitLineVy);
                if (p1.DistanceTo(p2) > p3.DistanceTo(p4))
                {
                    lineSegm.P1.X = p1.X;
                    lineSegm.P1.Y = p1.Y;
                    lineSegm.P2.X = p2.X;
                    lineSegm.P2.Y = p2.Y;
                }
                else
                {
                    lineSegm.P1.X = p3.X;
                    lineSegm.P1.Y = p3.Y;
                    lineSegm.P2.X = p4.X;
                    lineSegm.P2.Y = p4.Y;
                }
            }
        }
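
        // A minimal sketch of the parametric approach the TODO above suggests: grow the
        // segment by `fraction` of its own length on each end, with no square roots at all.
        // ExtendLineParametric is illustrative, not original code, and assumes
        // CvLineSegmentPoint is a reference type (as the loop above, which mutates
        // lines[i] through lineSegm, already implies).
        private static void ExtendLineParametric(CvLineSegmentPoint line, double fraction)
        {
            double dx = line.P2.X - line.P1.X;   // direction P1 -> P2, not normalized
            double dy = line.P2.Y - line.P1.Y;
            // t = -fraction lands before P1; t = 1 + fraction lands past P2
            line.P1 = new CvPoint((int)Math.Round(line.P1.X - fraction * dx),
                                  (int)Math.Round(line.P1.Y - fraction * dy));
            line.P2 = new CvPoint((int)Math.Round(line.P2.X + fraction * dx),
                                  (int)Math.Round(line.P2.Y + fraction * dy));
        }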
Exemplo n.º 39
0
        /// <summary>
        /// Draws one edge of a planar subdivision as a line segment
        /// </summary>
        /// <param name="img">Image to draw on</param>
        /// <param name="edge">Subdivision edge whose origin and destination are connected</param>
        /// <param name="color">Line color</param>
        private void DrawSubdivEdge(IplImage img, CvSubdiv2DEdge edge, CvScalar color)
        {
            CvSubdiv2DPoint org_pt = edge.Org();
            CvSubdiv2DPoint dst_pt = edge.Dst();

            if (org_pt != null && dst_pt != null)
            {
                CvPoint2D32f org = org_pt.Pt;
                CvPoint2D32f dst = dst_pt.Pt;

                CvPoint iorg = new CvPoint(Cv.Round(org.X), Cv.Round(org.Y));
                CvPoint idst = new CvPoint(Cv.Round(dst.X), Cv.Round(dst.Y));

                //Console.WriteLine("{0} / {1}", iorg, idst);
                img.Line(iorg, idst, color, 1, LineType.AntiAlias, 0);
            }
        }
Exemplo n.º 40
0
        //public LineSegment() { }

        public LineSegment(CvLineSegmentPoint cvline)
        {
            this.cvline = cvline;
            CvPoint p1 = cvline.P1;
            CvPoint p2 = cvline.P2;

            // Coefficients of the implicit line equation A*x - B*y + C = 0
            A = p2.Y - p1.Y;
            B = p2.X - p1.X;
            C = p2.X * p1.Y - p2.Y * p1.X;
            // cache reciprocal terms for later distance/projection queries
            A_GT_B      = Math.Abs(A) > Math.Abs(B);
            INV_A2_P_B2 = 1 / (A * A + B * B);
            A_DIV       = A * INV_A2_P_B2;
            B_DIV       = B * INV_A2_P_B2;
            AC_DIV      = C * A_DIV;
            BC_DIV      = C * B_DIV;
        }
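
        // A minimal sketch (not part of the original class) of how the cached values can
        // be used: with the implicit form A*x - B*y + C = 0 established above, point-to-line
        // distance reuses the cached 1/(A^2+B^2). This assumes the fields are doubles; if
        // they were ints, the expression 1 / (A * A + B * B) above would truncate to zero.
        public double DistanceTo(CvPoint p)
        {
            double signedNumerator = A * p.X - B * p.Y + C;
            return Math.Abs(signedNumerator) * Math.Sqrt(INV_A2_P_B2);
        }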
Exemplo n.º 41
0
        /// <summary>
        /// sample of new C++ style wrapper
        /// </summary>
        private void SampleCpp()
        {
            // (1) Load the image
            using (Mat imgGray = new Mat(FilePath.Goryokaku, LoadMode.GrayScale))
                using (Mat imgStd = new Mat(FilePath.Goryokaku, LoadMode.Color))
                    using (Mat imgProb = imgStd.Clone())
                    {
                        // (2) Preprocess: edge detection with Canny
                        Cv2.Canny(imgGray, imgGray, 50, 200, 3, false);

                        // (3) Run Standard Hough Transform
                        CvLineSegmentPolar[] segStd = Cv2.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
                        int limit = Math.Min(segStd.Length, 10);
                        for (int i = 0; i < limit; i++)
                        {
                            // Draws result lines
                            float   rho   = segStd[i].Rho;
                            float   theta = segStd[i].Theta;
                            double  a     = Math.Cos(theta);
                            double  b     = Math.Sin(theta);
                            double  x0    = a * rho;
                            double  y0    = b * rho;
                            CvPoint pt1   = new CvPoint {
                                X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a))
                            };
                            CvPoint pt2 = new CvPoint {
                                X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a))
                            };
                            imgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
                        }

                        // (4) Run Probabilistic Hough Transform
                        CvLineSegmentPoint[] segProb = Cv2.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
                        foreach (CvLineSegmentPoint s in segProb)
                        {
                            imgProb.Line(s.P1, s.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
                        }

                        // (5) Show results
                        using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, imgStd.ToIplImage()))
                            using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, imgProb.ToIplImage()))
                            {
                                CvWindow.WaitKey(0);
                            }
                    }
        }
Exemplo n.º 42
0
        /// <summary>
        /// a rough implementation for object location
        /// </summary>
        /// <param name="objectKeypoints"></param>
        /// <param name="objectDescriptors"></param>
        /// <param name="imageKeypoints"></param>
        /// <param name="imageDescriptors"></param>
        /// <param name="srcCorners"></param>
        /// <returns></returns>
        private static CvPoint[] LocatePlanarObject(CvSURFPoint[] objectKeypoints,
                                                    float[][] objectDescriptors,
                                                    CvSURFPoint[] imageKeypoints,
                                                    float[][] imageDescriptors,
                                                    CvPoint[] srcCorners)
        {
            CvMat h = new CvMat(3, 3, MatrixType.F64C1);

            int[] ptpairs = FindPairs(objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors);
            int   n       = ptpairs.Length / 2;

            if (n < 4)
            {
                return(null);
            }

            CvPoint2D32f[] pt1 = new CvPoint2D32f[n];
            CvPoint2D32f[] pt2 = new CvPoint2D32f[n];
            for (int i = 0; i < n; i++)
            {
                pt1[i] = objectKeypoints[ptpairs[i * 2]].Pt;
                pt2[i] = imageKeypoints[ptpairs[i * 2 + 1]].Pt;
            }

            CvMat pt1Mat = new CvMat(1, n, MatrixType.F32C2, pt1);
            CvMat pt2Mat = new CvMat(1, n, MatrixType.F32C2, pt2);

            if (Cv.FindHomography(pt1Mat, pt2Mat, h, HomographyMethod.Ransac, 5) == 0)
            {
                return(null);
            }

            CvPoint[] dstCorners = new CvPoint[4];
            for (int i = 0; i < 4; i++)
            {
                double x = srcCorners[i].X;
                double y = srcCorners[i].Y;
                double Z = 1.0 / (h[6] * x + h[7] * y + h[8]);
                double X = (h[0] * x + h[1] * y + h[2]) * Z;
                double Y = (h[3] * x + h[4] * y + h[5]) * Z;
                dstCorners[i] = new CvPoint(Cv.Round(X), Cv.Round(Y));
            }

            return(dstCorners);
        }
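
        // A hypothetical call site (illustrative, not original code): the keypoint and
        // descriptor arrays are assumed to come from earlier SURF extraction in this
        // sample (e.g. Cv.ExtractSURF), and sceneImg is the image being searched.
        private static void DrawLocatedObject(IplImage objectImg, IplImage sceneImg,
                                              CvSURFPoint[] objectKeypoints, float[][] objectDescriptors,
                                              CvSURFPoint[] imageKeypoints, float[][] imageDescriptors)
        {
            CvPoint[] srcCorners =
            {
                new CvPoint(0, 0),
                new CvPoint(objectImg.Width, 0),
                new CvPoint(objectImg.Width, objectImg.Height),
                new CvPoint(0, objectImg.Height)
            };
            CvPoint[] dstCorners = LocatePlanarObject(objectKeypoints, objectDescriptors,
                                                      imageKeypoints, imageDescriptors, srcCorners);
            if (dstCorners != null)
            {
                // Draw the located quadrilateral on the scene image
                for (int i = 0; i < 4; i++)
                {
                    sceneImg.Line(dstCorners[i], dstCorners[(i + 1) % 4], CvColor.Yellow, 2);
                }
            }
        }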
Exemplo n.º 43
0
        /// <summary>
        /// Calculate sum of line pixels (native style)
        /// </summary>
        /// <param name="image"></param>
        /// <param name="pt1"></param>
        /// <param name="pt2"></param>
        /// <returns></returns>
        private CvScalar SumLinePixelsNative(IplImage image, CvPoint pt1, CvPoint pt2)
        {
            CvLineIterator iterator;
            int            blue_sum = 0, green_sum = 0, red_sum = 0;
            int            count = Cv.InitLineIterator(image, pt1, pt2, out iterator, PixelConnectivity.Connectivity8, false);

            for (int i = 0; i < count; i++)
            {
                blue_sum  += Marshal.ReadByte(iterator.Ptr, 0); //blue_sum += iterator.ptr[0];
                green_sum += Marshal.ReadByte(iterator.Ptr, 1); //green_sum += iterator.ptr[1];
                red_sum   += Marshal.ReadByte(iterator.Ptr, 2); //red_sum += iterator.ptr[2];

                Cv.NEXT_LINE_POINT(iterator);

                PrintCoordinate(image, iterator);
            }
            return(new CvScalar(blue_sum, green_sum, red_sum));
        }
Exemplo n.º 44
0
        /// <summary>
        /// Calculate sum of line pixels (native style)
        /// </summary>
        /// <param name="image"></param>
        /// <param name="pt1"></param>
        /// <param name="pt2"></param>
        /// <returns></returns>
        private CvScalar SumLinePixelsNative(IplImage image, CvPoint pt1, CvPoint pt2)
        {
            CvLineIterator iterator;
            int blue_sum = 0, green_sum = 0, red_sum = 0;
            int count = Cv.InitLineIterator(image, pt1, pt2, out iterator, PixelConnectivity.Connectivity_8, false);

            for (int i = 0; i < count; i++)
            {
                blue_sum += Marshal.ReadByte(iterator.Ptr, 0);  //blue_sum += iterator.ptr[0];
                green_sum += Marshal.ReadByte(iterator.Ptr, 1); //green_sum += iterator.ptr[1];
                red_sum += Marshal.ReadByte(iterator.Ptr, 2);   //red_sum += iterator.ptr[2];

                Cv.NEXT_LINE_POINT(iterator);

                PrintCoordinate(image, iterator);
            }
            return new CvScalar(blue_sum, green_sum, red_sum);
        }
Exemplo n.º 45
0
    public IplImage effectCheek2(IplImage srcImage, IplImage copyImage)
    {
        IplImage tmpImage = Cv.CloneImage(srcImage);

        CvPoint leftCenter, rightCenter = new CvPoint();

        leftCenter.X  = picture.facePoints.find_label_point_X("N4") + picture.facePoints.find_label_point_X("F8");
        leftCenter.Y  = picture.facePoints.find_label_point_Y("EL5") + picture.facePoints.find_label_point_Y("N4") + picture.facePoints.find_label_point_Y("F8");
        rightCenter.X = picture.facePoints.find_label_point_X("N2") + picture.facePoints.find_label_point_X("F4");
        rightCenter.Y = picture.facePoints.find_label_point_Y("ER5") + picture.facePoints.find_label_point_Y("N2") + picture.facePoints.find_label_point_Y("F4");

        double leftLength  = Math.Abs(picture.facePoints.find_label_point_X("N4") - picture.facePoints.find_label_point_X("F8")) * 0.6;
        double rightLength = Math.Abs(picture.facePoints.find_label_point_X("N2") - picture.facePoints.find_label_point_X("F4")) * 0.6;

        CvScalar red = new CvScalar();

        red.Val0 = 0;
        red.Val1 = 0;
        red.Val2 = 255;

        CvPoint tmpleft = new CvPoint();

        tmpleft.X = leftCenter.X - picture.facePoints.find_label_point_X("N4");
        tmpleft.Y = leftCenter.Y / 3;
        CvSize tmpleftSize = new CvSize();

        tmpleftSize.Width  = (int)leftLength;
        tmpleftSize.Height = 20;

        CvPoint tmpright = new CvPoint();

        tmpright.X = rightCenter.X - picture.facePoints.find_label_point_X("N2");
        tmpright.Y = rightCenter.Y / 3;
        CvSize tmprightSize = new CvSize();

        tmprightSize.Width  = (int)rightLength;
        tmprightSize.Height = 20;

        Cv.Ellipse(copyImage, tmpleft, tmpleftSize, 0.0, 90.0, 270.0, red, -1);
        Cv.Ellipse(copyImage, tmpright, tmprightSize, 0.0, -90.0, 90.0, red, -1);

        Cv.AddWeighted(copyImage, 0.05, tmpImage, 0.95, 0, tmpImage);
        return(tmpImage);
    }
Exemplo n.º 46
0
        /// <summary>
        /// sample of new C++ style wrapper
        /// </summary>
        private void SampleCpp()
        {
            // (1) Load the image
            using (Mat imgGray = new Mat(Const.ImageGoryokaku, LoadMode.GrayScale))
            using (Mat imgStd = new Mat(Const.ImageGoryokaku, LoadMode.Color))
            using (Mat imgProb = imgStd.Clone())
            {
                // (2) Preprocessing for the Hough transform
                CvCpp.Canny(imgGray, imgGray, 50, 200, ApertureSize.Size3, false);

                // (3) Detect lines with the standard Hough transform and draw them
                CvLineSegmentPolar[] segStd = CvCpp.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
                int limit = Math.Min(segStd.Length, 10);
                for (int i = 0; i < limit; i++ )
                {
                    float rho = segStd[i].Rho;
                    float theta = segStd[i].Theta;

                    double a = Math.Cos(theta);
                    double b = Math.Sin(theta);
                    double x0 = a * rho;
                    double y0 = b * rho;
                    CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
                    CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
                    imgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
                }

                // (4) Detect line segments with the probabilistic Hough transform and draw them
                CvLineSegmentPoint[] segProb = CvCpp.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
                foreach (CvLineSegmentPoint s in segProb)
                {
                    imgProb.Line(s.P1, s.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
                }


                // (5) Create windows and show the detection results
                using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, imgStd.ToIplImage()))
                using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, imgProb.ToIplImage()))
                {
                    CvWindow.WaitKey(0);
                }
            }
        }
Exemplo n.º 47
0
        /// <summary>
        /// sample of new C++ style wrapper
        /// </summary>
        private void SampleCpp()
        {
            // (1) Load the image
            using (Mat imgGray = new Mat(FilePath.Goryokaku, LoadMode.GrayScale))
            using (Mat imgStd = new Mat(FilePath.Goryokaku, LoadMode.Color))
            using (Mat imgProb = imgStd.Clone())
            {
                // (2) Preprocess: edge detection with Canny
                Cv2.Canny(imgGray, imgGray, 50, 200, 3, false);

                // (3) Run Standard Hough Transform 
                CvLineSegmentPolar[] segStd = Cv2.HoughLines(imgGray, 1, Math.PI / 180, 50, 0, 0);
                int limit = Math.Min(segStd.Length, 10);
                for (int i = 0; i < limit; i++ )
                {
                    // Draw the result line: in the Hough parameterization a line is
                    // x*cos(theta) + y*sin(theta) = rho, so (x0, y0) is the foot of the
                    // perpendicular from the origin and (-sin(theta), cos(theta)) is the
                    // line's unit direction.
                    float rho = segStd[i].Rho;
                    float theta = segStd[i].Theta;
                    double a = Math.Cos(theta);
                    double b = Math.Sin(theta);
                    double x0 = a * rho;
                    double y0 = b * rho;
                    // Step +-1000 px along the direction to get two far-apart endpoints
                    CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
                    CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
                    imgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
                }

                // (4) Run Probabilistic Hough Transform
                CvLineSegmentPoint[] segProb = Cv2.HoughLinesP(imgGray, 1, Math.PI / 180, 50, 50, 10);
                foreach (CvLineSegmentPoint s in segProb)
                {
                    imgProb.Line(s.P1, s.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
                }

                // (5) Show results
                using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, imgStd.ToIplImage()))
                using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, imgProb.ToIplImage()))
                {
                    CvWindow.WaitKey(0);
                }
            }
        }
Exemplo n.º 48
0
        public LineIterator()
        {
            using (IplImage image = new IplImage(FilePath.Image.Lenna, LoadMode.Color))
            {
                CvPoint pt1 = new CvPoint(30, 100);
                CvPoint pt2 = new CvPoint(500, 400);

                CvScalar result;
                result = SumLinePixelsNative(image, pt1, pt2);      // native style
                result = SumLinePixelsManaged(image, pt1, pt2);     // wrapper style
                Console.WriteLine(result.ToString());

                Cv.Line(image, pt1, pt2, CvColor.Red, 3, LineType.Link8);

                using (new CvWindow("line", image))
                {
                    Cv.WaitKey();
                }
            }
        }
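
        // SumLinePixelsManaged and PrintCoordinate are not included in this excerpt.
        // Below is a managed-style sketch under the assumption that it mirrors the
        // native version without raw pointers: it walks the 8-connected line with a
        // hand-rolled Bresenham stepper and reads pixels through the CvScalar indexer
        // (the same indexer used elsewhere in these samples, e.g. img[i, j]).
        private CvScalar SumLinePixelsManaged(IplImage image, CvPoint pt1, CvPoint pt2)
        {
            double blueSum = 0, greenSum = 0, redSum = 0;
            int x = pt1.X, y = pt1.Y;
            int dx = Math.Abs(pt2.X - pt1.X), sx = pt1.X < pt2.X ? 1 : -1;
            int dy = -Math.Abs(pt2.Y - pt1.Y), sy = pt1.Y < pt2.Y ? 1 : -1;
            int err = dx + dy;
            while (true)
            {
                CvScalar pixel = image[y, x];   // BGR order: Val0=B, Val1=G, Val2=R
                blueSum += pixel.Val0;
                greenSum += pixel.Val1;
                redSum += pixel.Val2;
                if (x == pt2.X && y == pt2.Y)
                    break;
                int e2 = 2 * err;
                if (e2 >= dy) { err += dy; x += sx; }
                if (e2 <= dx) { err += dx; y += sy; }
            }
            return new CvScalar(blueSum, greenSum, redSum);
        }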
Exemplo n.º 49
0
 public static double GetDistance(CvPoint leftPoint, CvPoint rightPoint)
 {
     if (Math.Abs(rightPoint.Y - leftPoint.Y) > 50 || (leftPoint.X < 320 && rightPoint.X > 320))
     {
         return double.NaN;
     }
     else
     {
         double hX, hY, angle1, angle2;
         if (rightPoint.X < 320 && leftPoint.X < 320)
         {
             angle1 = GetAngle(rightPoint);
             angle2 = GetAngle(leftPoint);
             hX = GetXDistance(angle1, angle2);
             hY = GetYDistance(angle1, hX);
         }
         else if (rightPoint.X < 320 && leftPoint.X >= 320)
         {
             angle1 = GetAngle(rightPoint);
             angle2 = GetAngle(leftPoint);
             hX = GetXDistance(angle1, angle2, true);
             var h = hX > Config.DistanceBetweenCameras / 2
                 ? Config.DistanceBetweenCameras / 2 - (Config.DistanceBetweenCameras - hX)
                 : Config.DistanceBetweenCameras / 2 - hX;
             hY = GetYDistance(angle1, hX);
         }
         else
         {
             angle1 = GetAngle(rightPoint);
             angle2 = GetAngle(leftPoint);
             hX = GetXDistance(angle2, angle1);
             hY = GetYDistance(angle2, hX);
         }
         var e = Math.Sqrt(Math.Pow(hY, 2) + Math.Pow(hX, 2));
         var o = Math.Pow(e/20, 2);
         Debug.WriteLine("e = {0}, o = {1}", e, o);
         return (e > 136) ? double.PositiveInfinity : e - 1.05 * o;
     }
 }
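
 // GetAngle, GetXDistance and GetYDistance are not included in this excerpt. The
 // sketch below shows the kind of two-camera triangulation the method above appears
 // to perform; the frame width (640, whose center column is the 320 tested above),
 // the half field of view and the baseline are assumptions, not original values.
 const double FrameWidth = 640.0;  // pixels
 const double HalfFovDeg = 30.0;   // assumed half horizontal field of view
 const double Baseline = 20.0;     // assumed camera separation (cm)

 // Pixel column -> ray angle from the camera's optical axis (radians)
 public static double PixelToAngle(double x)
 {
     double half = FrameWidth / 2;
     return Math.Atan((x - half) / half * Math.Tan(HalfFovDeg * Math.PI / 180));
 }

 // Intersect the two rays: cameras at x = 0 and x = Baseline, both looking along +Y
 public static void Triangulate(double angleLeft, double angleRight, out double hX, out double hY)
 {
     // Left ray: x = y*tan(angleLeft); right ray: x = Baseline + y*tan(angleRight)
     double tl = Math.Tan(angleLeft);
     double tr = Math.Tan(angleRight);
     hY = Baseline / (tl - tr);    // forward distance from the camera baseline
     hX = hY * tl;                 // lateral offset from the left camera
 }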
Exemplo n.º 50
0
        public SVM()
        {
            // CvSVM
            // Solve a 3-class classification problem over 2D vectors with an SVM

            const int S = 1000;
            const int SIZE = 400;
            CvRNG rng = new CvRNG((ulong)DateTime.Now.Ticks);

            // (1) Allocate and initialize the image
            using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
            {
                img.Zero();
                // (2) Generate the training data
                CvPoint[] pts = new CvPoint[S];
                int[] res = new int[S];
                for (int i = 0; i < S; i++)
                {
                    pts[i].X = (int)(rng.RandInt() % SIZE);
                    pts[i].Y = (int)(rng.RandInt() % SIZE);
                    if (pts[i].Y > 50 * Math.Cos(pts[i].X * Cv.PI / 100) + 200)
                    {
                        img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                        img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(255, 0, 0));
                        res[i] = 1;
                    }
                    else
                    {
                        if (pts[i].X > 200)
                        {
                            img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                            img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 255, 0));
                            res[i] = 2;
                        }
                        else
                        {
                            img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                            img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), new CvColor(0, 0, 255));
                            res[i] = 3;
                        }
                    }
                }

                // (3) Display the training data
                Cv.NamedWindow("SVM", WindowMode.AutoSize);
                Cv.ShowImage("SVM", img);
                Cv.WaitKey(0);

                // (4) Build the training parameters
                float[] data = new float[S * 2];
                for (int i = 0; i < S; i++)
                {
                    data[i * 2] = ((float)pts[i].X) / SIZE;
                    data[i * 2 + 1] = ((float)pts[i].Y) / SIZE;
                }

                // (5) Train the SVM
                using (CvSVM svm = new CvSVM())
                {
                    CvMat data_mat = new CvMat(S, 2, MatrixType.F32C1, data);
                    CvMat res_mat = new CvMat(S, 1, MatrixType.S32C1, res);
                    CvTermCriteria criteria = new CvTermCriteria(1000, float.Epsilon);
                    // (svmType, kernelType, degree, gamma, coef0, C, nu, p, classWeights, termCrit)
                    CvSVMParams param = new CvSVMParams(SVMType.CSvc, SVMKernelType.Rbf, 10.0, 8.0, 1.0, 10.0, 0.5, 0.1, null, criteria);
                    svm.Train(data_mat, res_mat, null, null, param);

                    // (6) Draw the decision regions over the whole image
                    for (int i = 0; i < SIZE; i++)
                    {
                        for (int j = 0; j < SIZE; j++)
                        {
                            float[] a = { (float)j / SIZE, (float)i / SIZE };
                            CvMat m = new CvMat(1, 2, MatrixType.F32C1, a);
                            float ret = svm.Predict(m);
                            CvColor color = new CvColor();
                            switch ((int)ret)
                            {
                                case 1:
                                    color = new CvColor(100, 0, 0); break;
                                case 2:
                                    color = new CvColor(0, 100, 0); break;
                                case 3:
                                    color = new CvColor(0, 0, 100); break;
                            }
                            img[i, j] = color;
                        }
                    }

                    // (7) Redraw the training data
                    for (int i = 0; i < S; i++)
                    {
                        CvColor color = new CvColor();
                        switch (res[i])
                        {
                            case 1:
                                color = new CvColor(255, 0, 0); break;
                            case 2:
                                color = new CvColor(0, 255, 0); break;
                            case 3:
                                color = new CvColor(0, 0, 255); break;
                        }
                        img.Line(new CvPoint(pts[i].X - 2, pts[i].Y - 2), new CvPoint(pts[i].X + 2, pts[i].Y + 2), color);
                        img.Line(new CvPoint(pts[i].X + 2, pts[i].Y - 2), new CvPoint(pts[i].X - 2, pts[i].Y + 2), color);
                    }

                    // (8) Draw the support vectors
                    int sv_num = svm.GetSupportVectorCount();
                    for (int i = 0; i < sv_num; i++)
                    {
                        var support = svm.GetSupportVector(i);
                        img.Circle(new CvPoint((int)(support[0] * SIZE), (int)(support[1] * SIZE)), 5, new CvColor(200, 200, 200));
                    }

                    // (9) Show the image
                    Cv.NamedWindow("SVM", WindowMode.AutoSize);
                    Cv.ShowImage("SVM", img);
                    Cv.WaitKey(0);
                    Cv.DestroyWindow("SVM");

                }
            }

        }
Exemplo n.º 51
0
    // Convert _outBox.BoxPoints (type CvPoint2D32f) into CvPoint[][] for use
    // in DrawPolyLine
    CvPoint[][] rectangleBoxPoint(CvPoint2D32f[] _box)
    {
        CvPoint[] pts = new CvPoint[_box.Length];
        for (int i = 0; i < _box.Length; i++)
            pts[i] = _box[i];  // Get the box coordinates (CvPoint)

        // Now we've got the 4 corners of the tracking box returned by CamShift
        // in a format that DrawPolyLine can use
        return (new CvPoint[][] { pts });
    }
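
    // A hypothetical call site (illustrative, not original code): rotatedBoxToTrack
    // is the CvBox2D produced by CamShift elsewhere in this script, and
    // videoSourceImage is the current frame. The Cv.BoxPoints / Cv.DrawPolyLine
    // calls assume OpenCvSharp's C-style wrappers.
    void DrawTrackingBox()
    {
        CvPoint2D32f[] corners;
        Cv.BoxPoints(rotatedBoxToTrack, out corners);  // 4 corners of the rotated box
        Cv.DrawPolyLine(videoSourceImage, rectangleBoxPoint(corners), true, CvColor.Red, 2);
    }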
Exemplo n.º 52
0
        /// <summary>
        /// sample of C style wrapper 
        /// </summary>
        private void SampleC()
        {
            // cvHoughLines2

            using (IplImage srcImgGray = new IplImage(FilePath.Image.Goryokaku, LoadMode.GrayScale))
            using (IplImage srcImgStd = new IplImage(FilePath.Image.Goryokaku, LoadMode.Color))
            using (IplImage srcImgProb = srcImgStd.Clone())
            {
                Cv.Canny(srcImgGray, srcImgGray, 50, 200, ApertureSize.Size3);
                using (CvMemStorage storage = new CvMemStorage())
                {
                    // Standard algorithm
                    CvSeq lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Standard, 1, Math.PI / 180, 50, 0, 0);
                    // wrapper style
                    //CvLineSegmentPolar[] lines = src_img_gray.HoughLinesStandard(1, Math.PI / 180, 50, 0, 0);

                    int limit = Math.Min(lines.Total, 10);
                    for (int i = 0; i < limit; i++)
                    {
                        // native code style
                        /*
                        unsafe
                        {
                            float* line = (float*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                            float rho = line[0];
                            float theta = line[1];
                        }
                        //*/

                        // wrapper style
                        CvLineSegmentPolar elem = lines.GetSeqElem<CvLineSegmentPolar>(i).Value;
                        float rho = elem.Rho;
                        float theta = elem.Theta;

                        double a = Math.Cos(theta);
                        double b = Math.Sin(theta);
                        double x0 = a * rho;
                        double y0 = b * rho;
                        CvPoint pt1 = new CvPoint { X = Cv.Round(x0 + 1000 * (-b)), Y = Cv.Round(y0 + 1000 * (a)) };
                        CvPoint pt2 = new CvPoint { X = Cv.Round(x0 - 1000 * (-b)), Y = Cv.Round(y0 - 1000 * (a)) };
                        srcImgStd.Line(pt1, pt2, CvColor.Red, 3, LineType.AntiAlias, 0);
                    }

                    // Probabilistic algorithm
                    lines = srcImgGray.HoughLines2(storage, HoughLinesMethod.Probabilistic, 1, Math.PI / 180, 50, 50, 10);
                    // wrapper style
                    //CvLineSegmentPoint[] lines = src_img_gray.HoughLinesProbabilistic(1, Math.PI / 180, 50, 0, 0);

                    for (int i = 0; i < lines.Total; i++)
                    {
                        // native code style
                        /*
                        unsafe
                        {
                            CvPoint* point = (CvPoint*)lines.GetElem<IntPtr>(i).Value.ToPointer();
                            src_img_prob.Line(point[0], point[1], CvColor.Red, 3, LineType.AntiAlias, 0);
                        }
                        //*/

                        // wrapper style
                        CvLineSegmentPoint elem = lines.GetSeqElem<CvLineSegmentPoint>(i).Value;
                        srcImgProb.Line(elem.P1, elem.P2, CvColor.Red, 3, LineType.AntiAlias, 0);
                    }
                }

                using (new CvWindow("Hough_line_standard", WindowMode.AutoSize, srcImgStd))
                using (new CvWindow("Hough_line_probabilistic", WindowMode.AutoSize, srcImgProb))
                {
                    CvWindow.WaitKey(0);
                }
            }
        }
Exemplo n.º 53
0
    // Update and OnGUI are the main loops
    void Update()
    {
        if (DrawThresholdImageFlag)
            DrawThresholdImage(videoSourceImage);
        if (DoFaceTrack)
            DrawFaceTracking(videoSourceImage);

        FindObjectScreenPosition();

        if (_webcamTexture.isPlaying)
        {

            if (_webcamTexture.didUpdateThisFrame)
            {
                //convert Unity 2D texture from webcam to CvMat
                Texture2DToCvMat();

                // Do some image processing with OpenCVSharp on this image frame
                ProcessImage(videoSourceImage);
            }

        }
        else
        {
            Debug.Log("Can't find camera!");
        }

        if (Input.GetKeyDown(KeyCode.H))  // "h" key turns histogram screen on/off
            histoWindowFlag = !histoWindowFlag;

        if (trackFlag)
        {
            if (Input.GetKeyDown(KeyCode.B))  // "b" key turns back projection on/off
                backprojWindowFlag = !backprojWindowFlag;
            if (Input.GetKeyDown(KeyCode.T))  // "t" key turns tracking openCV window on
                trackWindowFlag = !trackWindowFlag;

            // Move an external game object based on the ROI being tracked
            if (gameObjectTracker)
                ROIScreenToGameObject(rotatedBoxToTrack, gameObjectTracker);

        }

        if (Input.GetMouseButtonDown(1))
        { // Right mouse button
            Debug.Log("Tracking off");
            trackFlag = false;
            _mouseIsDown = false;
        }
        else if (Input.GetMouseButtonDown(0))
        {  // Left mouse button

            if (!_mouseIsDown)
            {
                _mouseDownPos = Input.mousePosition;
                trackFlag = false;

            }

            _mouseIsDown = true;
        }

        if (Input.GetMouseButtonUp(0))
        {  // Left mouse button is up

            // If mouse went from down to up, then update the region of interest using the box
            if (_mouseIsDown)
            {

                // Calculate the histogram for the selected region of interest (ROI)
                _rectToTrack = CheckROIBounds(ConvertRect2CvRect(MakePixelBox(_mouseDownPos, _mouseLastPos)));

                if (DisplayROIFlag)
                {
                    // Draw the region of interest to track
                    DrawROIBox(videoSourceImage);
                }

                // Use Hue/Saturation histogram (not just the Hue dimension)
                _histogramToTrack = CalculateHSVHistogram(GetROI(videoSourceImage, _rectToTrack));

                // Use Hue channel histogram only
                //_histogramToTrack = CalculateOneChannelHistogram (GetROI (videoSourceImage, _rectToTrack), 0, 179);

                lastPosition = new CvPoint(Mathf.FloorToInt(_rectToTrack.X), Mathf.FloorToInt(_rectToTrack.Y));
                InitializeKalmanFilter();

                trackFlag = true;
            }

            _mouseIsDown = false;

        }
    }
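
    // MakePixelBox, ConvertRect2CvRect and CheckROIBounds are not shown in this
    // excerpt. Below is a plausible sketch of MakePixelBox only; the Y flip is the
    // usual Unity-to-image coordinate fix, and everything here is an assumption
    // rather than the original helper.
    Rect MakePixelBox(Vector3 a, Vector3 b)
    {
        float x = Mathf.Min(a.x, b.x);
        float yTop = Mathf.Max(a.y, b.y);            // higher point on screen
        float w = Mathf.Abs(a.x - b.x);
        float h = Mathf.Abs(a.y - b.y);
        // Unity mouse Y grows upward; image Y grows downward
        return new Rect(x, _webcamTexture.height - yTop, w, h);
    }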
Exemplo n.º 54
0
#if LANG_JP
        /// <summary>
        /// Approximates an elliptic arc with a polyline
        /// </summary>
        /// <param name="center">Center of the arc</param>
        /// <param name="axes">Lengths of the ellipse axes</param>
        /// <param name="angle">Rotation angle of the ellipse</param>
        /// <param name="arc_start">Starting angle of the elliptic arc</param>
        /// <param name="arc_end">Ending angle of the elliptic arc</param>
        /// <param name="pts">The array of points filled in by this function</param>
        /// <param name="delta">Angle between consecutive polyline vertices (approximation accuracy). The total number of output points is at most ceil((end_angle - start_angle)/delta) + 1.</param>
        /// <returns>The number of points written to pts</returns>
#else
        /// <summary>
        /// Approximates elliptic arc with polyline
        /// </summary>
        /// <param name="center">Center of the arc. </param>
        /// <param name="axes">Half-sizes of the arc. See cvEllipse. </param>
        /// <param name="angle">Rotation angle of the ellipse in degrees. See cvEllipse. </param>
        /// <param name="arc_start">Starting angle of the elliptic arc. </param>
        /// <param name="arc_end">Ending angle of the elliptic arc. </param>
        /// <param name="pts">The array of points, filled by the function. </param>
        /// <param name="delta">Angle between the subsequent polyline vertices, approximation accuracy. So, the total number of output points will ceil((end_angle - start_angle)/delta) + 1 at max. </param>
        /// <returns>The function cvEllipse2Poly computes vertices of the polyline that approximates the specified elliptic arc. It is used by cvEllipse. It returns the numbers of output points.</returns>
#endif
        public static int Ellipse2Poly(CvPoint center, CvSize axes, int angle, int arc_start, int arc_end, out CvPoint[] pts, int delta)
        {
            // nb_pts is an upper bound (ceil((arc_end - arc_start)/delta) + 1); the native
            // call returns the actual vertex count, which may be smaller, so trailing
            // elements of pts can be left unused.
            int nb_pts = (int)Math.Ceiling(((arc_end - arc_start) / (float)delta) + 1);
            pts = new CvPoint[nb_pts];
            nb_pts = CvInvoke.cvEllipse2Poly(center, axes, angle, arc_start, arc_end, pts, delta);
            //pts = new CvPoint[nb_pts];
            //Array.ConstrainedCopy(pts2, 0, pts, 0, nb_pts);
            return nb_pts;
        }
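
        // A minimal usage sketch (illustrative, not original code): approximate a full
        // ellipse with a vertex every 10 degrees and draw the polyline on an IplImage.
        public static void Ellipse2PolySample(IplImage img)
        {
            CvPoint[] pts;
            int n = Ellipse2Poly(new CvPoint(160, 120), new CvSize(100, 60), 0, 0, 360, out pts, 10);
            for (int i = 1; i < n; i++)
            {
                img.Line(pts[i - 1], pts[i], CvColor.Green, 1);
            }
        }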
Exemplo n.º 55
0
#if LANG_JP
        /// <summary>
        /// Draws a simple or thick elliptic arc, or fills an ellipse sector
        /// </summary>
        /// <param name="img">Image on which the ellipse is drawn</param>
        /// <param name="center">Center of the ellipse</param>
        /// <param name="axes">Lengths of the ellipse axes</param>
        /// <param name="angle">Rotation angle</param>
        /// <param name="start_angle">Starting angle of the elliptic arc</param>
        /// <param name="end_angle">Ending angle of the elliptic arc</param>
        /// <param name="color">Ellipse color</param>
        /// <param name="thickness">Thickness of the arc line</param>
        /// <param name="line_type">Type of the arc line</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and axis lengths</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="center">Center of the ellipse. </param>
        /// <param name="axes">Length of the ellipse axes. </param>
        /// <param name="angle">Rotation angle. </param>
        /// <param name="start_angle">Starting angle of the elliptic arc. </param>
        /// <param name="end_angle">Ending angle of the elliptic arc. </param>
        /// <param name="color">Ellipse color. </param>
        /// <param name="thickness">Thickness of the ellipse arc. </param>
        /// <param name="line_type">Type of the ellipse boundary.</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and axes' values. </param>
#endif
        public static void DrawEllipse(CvArr img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness, LineType line_type, int shift)
        {
            Ellipse(img, center, axes, angle, start_angle, end_angle, color, thickness, line_type, shift);
        }
Exemplo n.º 56
0
#if LANG_JP
        /// <summary>
        /// Draws a simple or thick elliptic arc, or fills an ellipse sector
        /// </summary>
        /// <param name="img">Image on which the ellipse is drawn</param>
        /// <param name="center">Center of the ellipse</param>
        /// <param name="axes">Lengths of the ellipse axes</param>
        /// <param name="angle">Rotation angle</param>
        /// <param name="start_angle">Starting angle of the elliptic arc</param>
        /// <param name="end_angle">Ending angle of the elliptic arc</param>
        /// <param name="color">Ellipse color</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="center">Center of the ellipse. </param>
        /// <param name="axes">Length of the ellipse axes. </param>
        /// <param name="angle">Rotation angle. </param>
        /// <param name="start_angle">Starting angle of the elliptic arc. </param>
        /// <param name="end_angle">Ending angle of the elliptic arc. </param>
        /// <param name="color">Ellipse color. </param>
#endif
        public static void DrawEllipse(CvArr img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color)
        {
            Ellipse(img, center, axes, angle, start_angle, end_angle, color, 1, LineType.Link8, 0);
        }
Exemplo n.º 57
0
#if LANG_JP
        /// <summary>
        /// Draws a simple or thick elliptic arc, or fills an ellipse sector
        /// </summary>
        /// <param name="img">Image on which the ellipse is drawn</param>
        /// <param name="center">Center of the ellipse</param>
        /// <param name="axes">Lengths of the ellipse axes</param>
        /// <param name="angle">Rotation angle</param>
        /// <param name="start_angle">Starting angle of the elliptic arc</param>
        /// <param name="end_angle">Ending angle of the elliptic arc</param>
        /// <param name="color">Ellipse color</param>
        /// <param name="thickness">Thickness of the arc line</param>
        /// <param name="line_type">Type of the arc line</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and axis lengths</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="center">Center of the ellipse. </param>
        /// <param name="axes">Length of the ellipse axes. </param>
        /// <param name="angle">Rotation angle. </param>
        /// <param name="start_angle">Starting angle of the elliptic arc. </param>
        /// <param name="end_angle">Ending angle of the elliptic arc. </param>
        /// <param name="color">Ellipse color. </param>
        /// <param name="thickness">Thickness of the ellipse arc. </param>
        /// <param name="line_type">Type of the ellipse boundary.</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and axes' values. </param>
#endif
        public static void Ellipse(CvArr img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness, LineType line_type, int shift)
        {
            if (img == null)
            {
                throw new ArgumentNullException("img");
            }
            CvInvoke.cvEllipse(img.CvPtr, center, axes, angle, start_angle, end_angle, color, thickness, line_type, shift);
        }
Exemplo n.º 58
0
        public void update_mhi(IplImage imgMain, ref IplImage imgDst, int diff_threshold)
        {
            double timestamp = (double)DateTime.Now.Second;
            CvSize size = new CxCore.CvSize(imgMain.width, imgMain.height);
            int i, idx1 = last, idx2;
            IplImage silh;
            CvSeq seq;
            CvRect comp_rect;
            double count;
            double angle;
            CvPoint center;
            double magnitude;
            CvScalar color;

            //allocate images at the beginning or reallocate them if the frame size is changed
            if (mhi.ptr == null || mhi.width != size.width || mhi.height != size.height)
            {
                for (i = 0; i < N; i++)
                {
                    buf[i] = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_8U, 1);
                    cxcore.CvZero(ref buf[i]);
                }
                cxcore.CvReleaseImage(ref mhi);
                cxcore.CvReleaseImage(ref orient);
                cxcore.CvReleaseImage(ref segmask);
                cxcore.CvReleaseImage(ref mask);

                mhi = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
                cxcore.CvZero(ref mhi);
                orient = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
                segmask = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
                mask = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
            }

            cv.CvCvtColor(ref imgMain, ref buf[last], cvtypes.CV_BGR2GRAY);

            idx2 = (last + 1) % N;
            last = idx2;

            silh = buf[idx2];
            cxcore.CvAbsDiff(ref buf[idx1], ref buf[idx2], ref silh);

            cv.CvThreshold(ref silh, ref silh, diff_threshold, 1, cv.CV_THRESH_BINARY);
            cv.CvUpdateMotionHistory(ref silh, ref mhi, timestamp, MHI_DURATION);

            cxcore.CvConvertScale(ref mhi, ref mask, 255 / MHI_DURATION, (MHI_DURATION - timestamp) * 255 / MHI_DURATION);
            cxcore.CvZero(ref imgDst);
            cxcore.CvMerge(ref mask, ref imgDst);
            cv.CvCalcMotionGradient(ref mhi, ref mask, ref orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
            if (storage.ptr == null)
                storage = cxcore.CvCreateMemStorage();
            else
                cxcore.CvClearMemStorage(ref storage);
            seq = cv.CvSegmentMotion(ref mhi, ref segmask, ref storage, timestamp, MAX_TIME_DELTA);
            // i == -1 processes the whole image; i >= 0 processes each segmented motion component
            for (i = -1; i < seq.total; i++)
            {
                if (i < 0)
                {
                    comp_rect = new CvRect(0, 0, size.width, size.height);
                    color = cxcore.CV_RGB(255, 255, 255);
                    magnitude = 100;
                }
                else
                {
                    IntPtr ptr = cxcore.CvGetSeqElem(ref seq, i);
                    CvConnectedComp c = (CvConnectedComp)cvconvert.PtrToType(ptr, typeof(CvConnectedComp));
                    comp_rect = c.rect;
                    if (comp_rect.width + comp_rect.height < 100)
                        continue;
                    color = cxcore.CV_RGB(255, 0, 0);
                    magnitude = 30;
                }

                //select component ROI
                cxcore.CvSetImageROI(ref silh, comp_rect);
                cxcore.CvSetImageROI(ref mhi, comp_rect);
                cxcore.CvSetImageROI(ref orient, comp_rect);
                cxcore.CvSetImageROI(ref mask, comp_rect);

                //calculate orientation
                angle = cv.CvCalcGlobalOrientation(ref orient, ref mask, ref mhi, timestamp, MHI_DURATION);
                angle = 360 - angle;

                count = cxcore.CvNorm(ref silh); //<<<<<<<<<<<<<<< recheck

                cxcore.CvResetImageROI(ref mhi);
                cxcore.CvResetImageROI(ref orient);
                cxcore.CvResetImageROI(ref mask);
                cxcore.CvResetImageROI(ref silh);

                //check for the case of little motion
                if (count < comp_rect.width * comp_rect.height * 0.05)
                    continue;

                //draw a clock with arrow indicating the direction
                center = new CvPoint((comp_rect.x + comp_rect.width / 2), (comp_rect.y + comp_rect.height / 2));

                cxcore.CvCircle(ref imgDst, center, cxcore.CvRound(magnitude * 1.2), color, 3, cxcore.CV_AA, 0);
                cxcore.CvLine(ref imgDst, center,
                    new CvPoint(cxcore.CvRound(center.x + magnitude * Math.Cos(angle * Math.PI / 180)),
                    cxcore.CvRound(center.y - magnitude * Math.Sin(angle * Math.PI / 180))),
                    color, 3, cxcore.CV_AA, 0);
            }
        }
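
        // The fields used by update_mhi are not part of this excerpt. Below is a
        // plausible declaration block, with the constants taken from OpenCV's
        // motempl.c sample that this method mirrors; the names and values are
        // assumptions consistent with the code above, not the original source.
        const double MHI_DURATION = 1;       // seconds a silhouette persists in the MHI
        const double MAX_TIME_DELTA = 0.5;   // max time delta for the motion gradient
        const double MIN_TIME_DELTA = 0.05;  // min time delta for the motion gradient
        const int N = 4;                     // size of the cyclic frame buffer
        IplImage[] buf = new IplImage[N];    // cyclic buffer of grayscale frames
        int last = 0;                        // index of the latest frame in buf
        IplImage mhi, orient, segmask, mask; // MHI and images derived from it
        CvMemStorage storage;                // storage for CvSegmentMotion results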
Exemplo n.º 59
0
 /// <summary>
 /// Tracks a face between frames (delegates to Cv.TrackFace)
 /// </summary>
 /// <param name="imgGray">Input grayscale frame</param>
 /// <param name="pRects">Rectangles of the face elements being tracked</param>
 /// <param name="ptRotate">Output rotation center point</param>
 /// <param name="dbAngleRotate">Output rotation angle</param>
 /// <returns>True if the face was tracked successfully</returns>
 public bool TrackFace(IplImage imgGray, CvRect[] pRects, out CvPoint ptRotate, out double dbAngleRotate)
 {
     return Cv.TrackFace(this, imgGray, pRects, out ptRotate, out dbAngleRotate);
 }
Exemplo n.º 60
0
#if LANG_JP
        /// <summary>
        /// Performs advanced morphological transformations
        /// </summary>
        /// <param name="op">Type of morphological operation</param>
        /// <param name="element">Structuring element</param>
        /// <param name="anchor">Position of the anchor within the element. The default value (-1, -1) means the anchor is at the element center.</param>
        /// <param name="iterations">Number of times erosion and dilation are applied. [default: 1]</param>
        /// <param name="borderType">Pixel extrapolation method. [default: BorderType.Constant]</param>
        /// <param name="borderValue">Pixel value used in constant border mode. The default value has a special meaning. [default: CvCpp.MorphologyDefaultBorderValue()]</param>
        /// <returns>Output image of the same size and type as src</returns>
#else
        /// <summary>
        /// Performs advanced morphological transformations
        /// </summary>
        /// <param name="op">Type of morphological operation</param>
        /// <param name="element">Structuring element</param>
        /// <param name="anchor">Position of the anchor within the element. The default value (-1, -1) means that the anchor is at the element center</param>
        /// <param name="iterations">Number of times erosion and dilation are applied. [By default this is 1]</param>
        /// <param name="borderType">The pixel extrapolation method. [By default this is BorderType.Constant]</param>
        /// <param name="borderValue">The border value in case of a constant border. The default value has a special meaning. [By default this is CvCpp.MorphologyDefaultBorderValue()]</param>
        /// <returns>Destination image. It will have the same size and the same type as src</returns>
#endif
        public Mat MorphologyEx(MorphologyOperation op, InputArray element,
            CvPoint? anchor = null, int iterations = 1, BorderType borderType = BorderType.Constant,
            CvScalar? borderValue = null)
        {
            var dst = new Mat();
            Cv2.MorphologyEx(this, dst, op, element, anchor, iterations, borderType, borderValue);
            return dst;
        }