コード例 #1
0
ファイル: EyeRects.cs プロジェクト: mind0n/hive
 /// <summary>
 /// Registers a rectangle in both index lists, keeping <c>xs</c> sorted by X
 /// and <c>ys</c> sorted by Y (ascending, stable for ties).
 /// </summary>
 /// <param name="r">Rectangle to register.</param>
 public void AddRect(CvRect r)
 {
     // FIX: the original inserted the wrong element (c instead of r) into ys,
     // and silently dropped r from either list whenever it sorted past the
     // last element (the for-loop ended without ever inserting).
     int xi = 0;
     while (xi < xs.Count && xs[xi].X < r.X)
     {
         xi++;
     }
     // Insert(Count, r) appends, so the largest X is kept as well.
     xs.Insert(xi, r);

     int yi = 0;
     while (yi < ys.Count && ys[yi].Y < r.Y)
     {
         yi++;
     }
     ys.Insert(yi, r);
 }
コード例 #2
0
        /// <summary>
        /// Opens camera #1 and runs Haar-cascade eye detection on each frame,
        /// circling every detected eye, until a key is pressed in the window.
        /// </summary>
        public EyeDetect()
        {
            // Colors cycled per detection index (i % 8).
            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };

            const double Scale        = 1.25;   // detection runs on a 1/Scale downscaled frame
            const double ScaleFactor  = 2.5;
            const int    MinNeighbors = 2;

            using (CvCapture cap = CvCapture.FromCamera(1))
            using (CvWindow w = new CvWindow("Eye Tracker"))
            // FIX: load the cascade and allocate the work storage once, instead of
            // re-parsing the XML file and re-allocating storage on every frame.
            using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("C:\\Program Files\\OpenCV\\data\\haarcascades\\haarcascade_eye.xml"))
            using (CvMemStorage storage = new CvMemStorage())
            {
                while (CvWindow.WaitKey(10) < 0)
                {
                    using (IplImage img = cap.QueryFrame())
                    using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
                    {
                        // Detector input: grayscale -> downscale -> equalize.
                        using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                        {
                            Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                            Cv.Resize(gray, smallImg, Interpolation.Linear);
                            Cv.EqualizeHist(smallImg, smallImg);
                        }

                        storage.Clear();

                        Stopwatch watch = Stopwatch.StartNew();
                        CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                        watch.Stop();
                        //Console.WriteLine("detection time = {0}msn", watch.ElapsedMilliseconds);

                        // Map each hit back to full-resolution coordinates and draw it.
                        for (int i = 0; i < eyes.Total; i++)
                        {
                            CvRect r = eyes[i].Value.Rect;
                            CvPoint center = new CvPoint
                            {
                                X = Cv.Round((r.X + r.Width * 0.5) * Scale),
                                Y = Cv.Round((r.Y + r.Height * 0.5) * Scale)
                            };
                            int radius = Cv.Round((r.Width + r.Height) * 0.25 * Scale);
                            img.Circle(center, radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                        }

                        w.Image = img;
                    }
                }
            }
        }
コード例 #3
0
ファイル: LatentSVM.cs プロジェクト: inohiroki/opencvsharp
        /// <summary>
        /// Runs the latent-SVM cat detector over the sample image, outlines the
        /// resulting bounding boxes in red, and shows the result in a window.
        /// </summary>
        public LatentSVM()
        {
            using (CvLatentSvmDetector detector = new CvLatentSvmDetector(Const.XmlLatentSVMCat))
            using (IplImage imageSrc = new IplImage(Const.ImageCat, LoadMode.Color))
            using (IplImage imageDst = imageSrc.Clone())
            using (CvMemStorage storage = new CvMemStorage())
            {
                Console.WriteLine("Running LatentSVM...");

                // Time the detection call.
                Stopwatch watch = Stopwatch.StartNew();
                CvSeq<CvObjectDetection> result = detector.DetectObjects(imageSrc, storage, 0.5f, 2);
                watch.Stop();
                Console.WriteLine("Elapsed time: {0}ms", watch.ElapsedMilliseconds);

                // Outline every detection on the copy.
                foreach (CvObjectDetection det in result)
                {
                    CvRect box = det.Rect;
                    CvPoint topLeft = new CvPoint(box.X, box.Y);
                    CvPoint bottomRight = new CvPoint(box.X + box.Width, box.Y + box.Height);
                    imageDst.Rectangle(topLeft, bottomRight, CvColor.Red, 3);
                }

                using (new CvWindow("LatentSVM result", imageDst))
                {
                    Cv.WaitKey();
                }
            }
        }
コード例 #4
0
        /// <summary>
        /// Extracts a copy of the <paramref name="subRect"/> region of
        /// <paramref name="ipl"/> by temporarily setting the image's ROI.
        /// The ROI is always reset before returning.
        /// </summary>
        /// <param name="ipl">Source image.</param>
        /// <param name="subRect">Region to copy; must lie inside the image.</param>
        /// <returns>A new image containing the region.</returns>
        /// <exception cref="ArgumentNullException">ipl is null.</exception>
        /// <exception cref="InvalidOperationException">subRect falls outside ipl.</exception>
        public static OpenCvSharp.IplImage GetSub(this OpenCvSharp.IplImage ipl, OpenCvSharp.CvRect subRect)
        {
            if (ipl == null)
            {
                throw new ArgumentNullException("ipl", "ipl is null.");
            }

            var bounds = new CvRect(0, 0, ipl.Width, ipl.Height);
            if (!bounds.Contains(subRect))
            {
                throw new InvalidOperationException("subRect is outside of ipl");
            }

            try
            {
                // Restrict the source to the region, then copy it into a fresh
                // image of matching depth and channel count.
                ipl.SetROI(subRect);
                var sub = new IplImage(ipl.GetSize(), ipl.Depth, ipl.NChannels);
                ipl.Copy(sub);
                return sub;
            }
            finally
            {
                ipl.ResetROI();
            }
        }
コード例 #5
0
ファイル: DrawToHDC.cs プロジェクト: inohiroki/opencvsharp
        /// <summary>
        /// Renders a 100x100 ROI of the Yalta sample image onto a GDI+ bitmap
        /// through a raw device context, overlays text and an ellipse, and
        /// shows both the source and the composed result.
        /// </summary>
        public DrawToHdc()
        {
            CvRect roi = new CvRect(320, 260, 100, 100);        // region of roosevelt's face

            using (IplImage src = new IplImage(Const.ImageYalta, LoadMode.Color))
            using (IplImage dst = new IplImage(roi.Size, BitDepth.U8, 3))
            {
                src.ROI = roi;

                using (Bitmap bitmap = new Bitmap(roi.Width, roi.Height, PixelFormat.Format32bppArgb))
                using (Graphics g = Graphics.FromImage(bitmap))
                {
                    //BitmapConverter.DrawToGraphics(src, g, new CvRect(new CvPoint(0, 0), roi.Size));
                    // Blit the ROI via the HDC; release it before using g for
                    // any further GDI+ drawing.
                    IntPtr hdc = g.GetHdc();
                    BitmapConverter.DrawToHdc(src, hdc, new CvRect(new CvPoint(0, 0), roi.Size));
                    g.ReleaseHdc(hdc);

                    g.DrawString("Roosevelt", new Font(FontFamily.GenericSerif, 12), Brushes.Red, 20, 0);
                    g.DrawEllipse(new Pen(Color.Red, 4), new Rectangle(20, 20, roi.Width / 2, roi.Height / 2));

                    dst.CopyFrom(bitmap);
                }

                src.ResetROI();

                using (new CvWindow("src", src))
                using (new CvWindow("dst", dst))
                {
                    Cv.WaitKey();
                }
            }
        }
コード例 #6
0
        /// <summary>
        /// Draw a contour by walking its chain code and painting one pixel per step.
        /// </summary>
        /// <param name="img">Image to draw on; must be an 8-bit, 3-channel image.</param>
        /// <param name="color">Color to draw (default, white).</param>
        /// <exception cref="ArgumentNullException">img is null.</exception>
        /// <exception cref="ArgumentException">img is not U8 / 3-channel.</exception>
        public void Render(IplImage img, CvScalar color)
        {
            if (img == null)
            {
                throw new ArgumentNullException("img");
            }
            if (img.Depth != BitDepth.U8 || img.NChannels != 3)
            {
                throw new ArgumentException("Invalid img format (U8 3-channels)");
            }

            int    step   = img.WidthStep;      // bytes per image row
            CvRect roi    = img.ROI;
            int    width  = roi.Width;          // NOTE(review): width/height are unused below — confirm whether clipping was intended
            int    height = roi.Height;
            int    offset = (3 * roi.X) + (roi.Y * step);   // byte offset of the ROI origin

            unsafe
            {
                // Start from the contour's starting point (coordinates relative
                // to the ROI origin) and follow the chain code, writing the
                // three color components of each visited pixel.
                byte *imgData = img.ImageDataPtr + offset;
                int   x       = StartingPoint.X;
                int   y       = StartingPoint.Y;
                foreach (CvChainCode cc in ChainCode)
                {
                    imgData[3 * x + step * y + 0] = (byte)(color.Val0);
                    imgData[3 * x + step * y + 1] = (byte)(color.Val1);
                    imgData[3 * x + step * y + 2] = (byte)(color.Val2);
                    // Advance along the chain-code direction table.
                    x += CvBlobConst.ChainCodeMoves[(int)cc][0];
                    y += CvBlobConst.ChainCodeMoves[(int)cc][1];
                }
            }
        }
コード例 #7
0
        /// <summary>
        /// Grabs (or refreshes) the current frame and crops an obsSize x obsSize
        /// template centered on each image point, appending each crop to the
        /// matching per-point template list.
        /// </summary>
        /// <param name="imagePoints">Crop centers, one per template list.</param>
        /// <param name="frame">Frame to crop from.</param>
        private void CaptureImages(PointF [] imagePoints, Bitmap frame)
        {
            try
            {
                // Reuse the wrapper after the first frame instead of re-allocating.
                if (curFrame == null)
                {
                    curFrame = new CvImageWrapper(frame);
                }
                else
                {
                    curFrame.setImage(frame);
                }

                for (int i = 0; i < imagePoints.Length; i++)
                {
                    PointF imagePoint = imagePoints[i];

                    // Square crop window centered on the point.
                    CvRect cropDimensions = new CvRect();
                    cropDimensions.x      = (int)imagePoint.X - obsSize / 2;
                    cropDimensions.y      = (int)imagePoint.Y - obsSize / 2;
                    cropDimensions.width  = obsSize;
                    cropDimensions.height = obsSize;

                    CvImageWrapper curObs = curFrame.cropSubImage(cropDimensions);

                    this.templatesList[i].Add(curObs);
                }
            }
            // FIX: drop the unused exception variable the original bound.
            // Capture is best-effort: failures (e.g. a crop window falling
            // outside the frame) are deliberately swallowed rather than
            // crashing the caller.
            catch (Exception)
            {
            }
        }
コード例 #8
0
 /// <summary>
 /// Estimates the percentage of skin-colored pixels in the image at
 /// <paramref name="filePath"/>. The image is first reduced to its detected
 /// border region (unless that region is implausibly small), then run
 /// through the adaptive skin detector.
 /// </summary>
 /// <param name="filePath">Path of the image to analyze.</param>
 /// <param name="debugImageFilePath">Optional path to save the skin mask to.</param>
 /// <returns>Skin pixels as a percentage (0-100) of the analyzed region.</returns>
 public double GetPercentageOfSkinInImage(string filePath, string debugImageFilePath = null)
 {
     // FIX: removed an unused CvMemStorage that was allocated and disposed
     // without ever being used.
     using (IplImage imgSrc = new IplImage(filePath))
     {
         CvRect rect;
         using (IplImage imgGray = new IplImage(filePath, LoadMode.GrayScale))
         {
             rect = FindBorder(imgGray);
         }
         // Fall back to the whole image when the detected border is tiny
         // (less than 10% of either dimension).
         if (rect.Width < imgSrc.Width * 0.10 || rect.Height < imgSrc.Height * 0.10)
         {
             rect = new CvRect(0, 0, imgSrc.Width, imgSrc.Height);
         }
         using (var subImg = imgSrc.GetSubImage(rect))
         using (IplImage imgHueMask = new IplImage(subImg.Size, BitDepth.U8, 1))
         {
             CvAdaptiveSkinDetector detector = new CvAdaptiveSkinDetector(1, MorphingMethod.ErodeDilate);
             detector.Process(subImg, imgHueMask);
             int count   = CountSkinPoints(imgHueMask, CvColor.White);
             var percent = ((double)count / (double)(subImg.Width * subImg.Height) * 100);
             if (debugImageFilePath != null)
             {
                 imgHueMask.SaveImage(debugImageFilePath);
             }
             return percent;
         }
     }
 }
コード例 #9
0
ファイル: FileStorage.cs プロジェクト: qxp1011/opencvsharp
        /// <summary>
        /// Writes image data to a file storage (cvWrite / cvWriteComment sample).
        /// </summary>
        /// <param name="fileName">XML or YAML file to write to.</param>
        private static void SampleFileStorageWriteImage(string fileName)
        {
            // Persists IplImage structures into a CvFileStorage file.

            // (1) Load the source image and prepare a grayscale buffer.
            using (IplImage colorImg = new IplImage(Const.ImageLenna, LoadMode.Color))
            using (IplImage grayImg = new IplImage(colorImg.Size, BitDepth.U8, 1))
            {
                // (2) Binarize the grayscale copy and restrict both images to
                // the top-left quadrant via an ROI.
                colorImg.CvtColor(grayImg, ColorConversion.BgrToGray);
                CvRect roi = new CvRect(0, 0, colorImg.Width / 2, colorImg.Height / 2);
                grayImg.SetROI(roi);
                colorImg.SetROI(roi);
                grayImg.Threshold(grayImg, 90, 255, ThresholdType.Binary);
                // (3) Write both images to the storage file, one stream each.
                using (CvFileStorage fs = new CvFileStorage(fileName, null, FileStorageMode.Write))
                {
                    fs.WriteComment("This is a comment line.", false);
                    fs.Write("color_img", colorImg);
                    fs.StartNextStream();
                    fs.Write("gray_img", grayImg);
                }
                // (4) Optionally open the written file:
                //using (Process p = Process.Start(fileName)) {
                //    p.WaitForExit();
                //}
            }
        }
コード例 #10
0
        /// <summary>
        /// Crops a region out of an image.
        /// </summary>
        /// <param name="src">Source image to crop from.</param>
        /// <param name="centerPosition">Center of the crop region.</param>
        /// <param name="snipWidth">Width of the crop.</param>
        /// <param name="snipHeight">Height of the crop.</param>
        /// <returns>The cropped image, or null when the region does not fit inside the source.</returns>
        private IplImage SnipFaceImage(IplImage src, ColorImagePoint centerPosition, int snipWidth, int snipHeight)
        {
            int faceX = centerPosition.X - snipWidth / 2;
            int faceY = centerPosition.Y - snipHeight / 2;

            // Skip cropping when the region sticks out of the image.
            // FIX: the original only rejected overflow past the top/left edges;
            // a region past the right/bottom edge produced an invalid ROI.
            if (faceX < 0 || faceY < 0 ||
                faceX + snipWidth > src.Width ||
                faceY + snipHeight > src.Height)
            {
                return null;
            }

            // Copy the region out through a temporary ROI.
            var faceRect = new CvRect(faceX, faceY, snipWidth, snipHeight);
            // NOTE(review): 1 channel — assumes src is single-channel; confirm
            // against callers (Cv.Copy requires matching channel counts).
            var part = new IplImage(faceRect.Size, BitDepth.U8, 1);

            src.SetROI(faceRect);
            Cv.Copy(src, part);
            src.ResetROI();

            return part;
        }
コード例 #11
0
        /// <summary>
        /// Runs the Haar cascade face detector over src and returns a copy with
        /// a black circle drawn around every detected face.
        /// </summary>
        /// <param name="src">Source image (BGR).</param>
        /// <returns>The annotated copy (also stored in the haarface field).</returns>
        public IplImage FaceDetection(IplImage src)
        {
            haarface = new IplImage(src.Size, BitDepth.U8, 3);
            Cv.Copy(src, haarface);

            // Detector input: equalized grayscale.
            gray = this.GrayScale(src);
            Cv.EqualizeHist(gray, gray);

            double scaleFactor  = 1.139;
            int    minNeighbors = 1;

            // FIX: dispose the cascade and the detection storage — both were
            // leaked (never disposed) on every call.
            using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("../../haarcascade_frontalface_alt.xml"))
            using (CvMemStorage storage = new CvMemStorage())
            {
                CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(gray, cascade, storage, scaleFactor, minNeighbors, HaarDetectionType.ScaleImage, new CvSize(90, 90), new CvSize(0, 0));

                // Circle each detected face at its center.
                for (int i = 0; i < faces.Total; i++)
                {
                    CvRect r = faces[i].Value.Rect;

                    int cX     = Cv.Round(r.X + r.Width * 0.5);
                    int cY     = Cv.Round(r.Y + r.Height * 0.5);
                    int radius = Cv.Round((r.Width + r.Height) * 0.25);

                    Cv.DrawCircle(haarface, new CvPoint(cX, cY), radius, CvColor.Black, 3);
                }
            }
            return haarface;
        }
コード例 #12
0
ファイル: SVMManage.cs プロジェクト: tome-beta/FaceJudge
        // Debug helper: visualize the trained dictionary (SVM model) as a plot.
        public void Debug_DispPredict()
        {
            // NOTE(review): this early return disables the entire method — all
            // code below is unreachable dead code, presumably toggled off on
            // purpose. Remove the return to re-enable the visualization.
            return;

            // Load the dictionary (model) file.
            this.libSVM_model = SVM.LoadModel(@"libsvm_model.xml");

            using (IplImage retPlot = new IplImage(300, 300, BitDepth.U8, 3))
            {
                // Classify every point of a 300x300 grid (normalized to [0,1))
                // and plot class 1 in red, class 2 in green-yellow.
                for (int x = 0; x < 300; x++)
                {
                    for (int y = 0; y < 300; y++)
                    {
                        float[] sample = { x / 300f, y / 300f };
                        // Build the SVM problem (one node per feature).
                        SVMNode[] node_array = new SVMNode[2];
                        node_array[0] = new SVMNode(1, sample[0]);
                        node_array[1] = new SVMNode(2, sample[1]);
                        int    ret_double = (int)SVM.Predict(libSVM_model, node_array);
                        int    ret_i      = (int)ret_double;
                        // y flipped so the plot origin is at the bottom-left.
                        CvRect plotRect   = new CvRect(x, 300 - y, 1, 1);
                        if (ret_i == 1)
                        {
                            retPlot.Rectangle(plotRect, CvColor.Red);
                        }
                        else if (ret_i == 2)
                        {
                            retPlot.Rectangle(plotRect, CvColor.GreenYellow);
                        }
                    }
                }
                CvWindow.ShowImages(retPlot);
            }
        }
コード例 #13
0
        /// <summary>
        /// Normalizes a character image: crops it to its bounding box, pads the
        /// crop onto a white square (preserving aspect ratio), then scales the
        /// square to new_width x new_height with nearest-neighbor resampling.
        /// </summary>
        /// <param name="imgSrc">Source image.</param>
        /// <param name="new_width">Output width.</param>
        /// <param name="new_height">Output height.</param>
        /// <returns>The scaled, square-padded image.</returns>
        public IplImage preprocess(IplImage imgSrc, int new_width, int new_height)
        {
            IplImage result;
            IplImage scaledResult;

            // A = aspect ratio maintained
            CvMat  data  = new CvMat();
            CvMat  dataA = new CvMat();
            CvRect bb    = new CvRect();
            CvRect bbA   = new CvRect();    // NOTE(review): assigned but never used below

            bb = cariBB(imgSrc);
            // View onto the bounding-box region of the source.
            cxcore.CvGetSubRect(ref imgSrc, ref data, new CvRect(bb.x, bb.y, bb.width, bb.height));

            // Square image sized to the larger bounding-box side.
            int size = (bb.width > bb.height) ? bb.width : bb.height;

            result = cxcore.CvCreateImage(new CvSize(size, size), 8, 1);
            cxcore.CvSet(ref result, new CvScalar(255, 255, 255));    // white background

            // Offsets that center the bounding box inside the square.
            int x = (int)Math.Floor((size - bb.width) / 2.0f);
            int y = (int)Math.Floor((size - bb.height) / 2.0f);

            cxcore.CvGetSubRect(ref result, ref dataA, new CvRect(x, y, bb.width, bb.height));
            cxcore.CvCopy(ref data, ref dataA);

            scaledResult = cxcore.CvCreateImage(new CvSize(new_width, new_height), 8, 1);
            cv.CvResize(ref result, ref scaledResult, cv.CV_INTER_NN);
            // NOTE(review): result and the temporary views are never released
            // here — possible native-memory leak; verify the wrapper's semantics.

            return(scaledResult);
        }
コード例 #14
0
ファイル: Program.cs プロジェクト: tome-beta/FaceJudge
            /// <summary>
            /// Detects faces in an image file, outlines each one in red on the
            /// image, and writes every face rectangle out as its own .bmp file.
            /// </summary>
            /// <param name="file_name">Path of the image to scan.</param>
            /// <param name="read_count">Sequence number used in output file names.</param>
            private void DetectFace(String file_name, int read_count)
            {
                // FIX: the cascade and the memory storage were never disposed
                // (native resources leaked once per call).
                using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(@"C:\opencv2.4.8\sources\data\haarcascades\haarcascade_frontalface_alt.xml"))
                using (CvMemStorage strage = new CvMemStorage(0))
                using (IplImage img = new IplImage(file_name))
                using (IplImage gray_image = Cv.CreateImage(new CvSize(img.Width, img.Height), BitDepth.U8, 1))
                {
                    // Detection runs on a grayscale copy.
                    Cv.CvtColor(img, gray_image, ColorConversion.BgrToGray);

                    // Rectangles found by the classifier.
                    var result = Cv.HaarDetectObjects(gray_image, cascade, strage);
                    for (int i = 0; i < result.Total; i++)
                    {
                        // Outline the hit on the original image.
                        CvRect rect = result[i].Value.Rect;
                        Cv.Rectangle(img, rect, new CvColor(255, 0, 0));

                        // Save just the face rectangle to its own file.
                        img.ROI = rect;
                        string out_name = this.OutputFoldaName + @"\out" + read_count + @"_" + i + @".bmp";
                        Cv.SaveImage(out_name, img);
                        // FIX: reset the ROI so the next iteration draws and
                        // saves relative to the full image, not the previous face.
                        img.ResetROI();
                    }
                }
            }
コード例 #15
0
        /// <summary>
        /// Sample for cvWrite / cvWriteComment: persists IplImage data into an
        /// XML or YAML file storage.
        /// </summary>
        /// <param name="fileName">XML or YAML file to write.</param>
        private static void SampleFileStorageWriteImage(string fileName)
        {
            // (1) Load the source image and prepare a grayscale buffer.
            using (IplImage colorImg = new IplImage(Const.ImageLenna, LoadMode.Color))
            using (IplImage grayImg = new IplImage(colorImg.Size, BitDepth.U8, 1))
            {
                // (2) Binarize the grayscale copy and restrict both images to
                // the top-left quadrant via an ROI.
                colorImg.CvtColor(grayImg, ColorConversion.BgrToGray);
                CvRect quadrant = new CvRect(0, 0, colorImg.Width / 2, colorImg.Height / 2);
                grayImg.SetROI(quadrant);
                colorImg.SetROI(quadrant);
                grayImg.Threshold(grayImg, 90, 255, ThresholdType.Binary);

                // (3) Write both images, each into its own stream.
                using (CvFileStorage fs = new CvFileStorage(fileName, null, FileStorageMode.Write))
                {
                    fs.WriteComment("This is a comment line.", false);
                    fs.Write("color_img", colorImg);
                    fs.StartNextStream();
                    fs.Write("gray_img", grayImg);
                }

                // (4) Optionally open the written file:
                //using (Process p = Process.Start(fileName)) {
                //    p.WaitForExit();
                //}
            }
        }
コード例 #16
0
        /// <summary>
        /// Returns a copy of the <paramref name="subRect"/> portion of
        /// <paramref name="ipl"/>, leaving the image's ROI as it found it.
        /// </summary>
        /// <param name="ipl">Source image.</param>
        /// <param name="subRect">Region to extract; must lie inside the image.</param>
        /// <returns>A new image containing the region.</returns>
        /// <exception cref="ArgumentNullException">ipl is null.</exception>
        /// <exception cref="InvalidOperationException">subRect falls outside ipl.</exception>
        public static OpenCvSharp.IplImage GetSub(this OpenCvSharp.IplImage ipl, OpenCvSharp.CvRect subRect)
        {
            if (ipl == null)
            {
                throw new ArgumentNullException("ipl", "ipl is null.");
            }

            var imageBounds = new CvRect(0, 0, ipl.Width, ipl.Height);
            if (!imageBounds.Contains(subRect))
            {
                throw new InvalidOperationException("subRect is outside of ipl");
            }

            try
            {
                // Restrict the source to the requested region, then clone that
                // region into a fresh image of matching depth and channels.
                ipl.SetROI(subRect);
                var size = ipl.GetSize();
                var region = new IplImage(size, ipl.Depth, ipl.NChannels);
                ipl.Copy(region);
                return region;
            }
            finally
            {
                // Always restore the full-image ROI, even on failure.
                ipl.ResetROI();
            }
        }
コード例 #17
0
 /// <summary>
 /// Draws a rectangle on the full image regardless of the current ROI,
 /// then restores the caller's ROI.
 /// </summary>
 /// <param name="ipl">Image to draw on.</param>
 /// <param name="rect">Rectangle in full-image coordinates.</param>
 /// <param name="color">Outline color.</param>
 /// <param name="thickNess">Outline thickness.</param>
 public static void DrawRect(this IplImage ipl, CvRect rect, CvColor color, int thickNess)
 {
     var roi = ipl.ROI;
     ipl.ResetROI();
     try
     {
         ipl.DrawRect(rect.X, rect.Y, rect.X + rect.Width, rect.Y + rect.Height, color, thickNess);
     }
     finally
     {
         // FIX: restore the caller's ROI even if drawing throws.
         ipl.SetROI(roi);
     }
 }
コード例 #18
0
        /// <summary>
        /// Runs the Haar cascade vehicle detector over src and returns a copy
        /// with every detected vehicle outlined in red.
        /// </summary>
        /// <param name="src">Source image (BGR).</param>
        /// <returns>The annotated copy (also stored in the haarvehicle field).</returns>
        public IplImage VehicleDetect(IplImage src)
        {
            haarvehicle = new IplImage(src.Size, BitDepth.U8, 3);
            Cv.Copy(src, haarvehicle);

            // Detector input: equalized grayscale.
            gray = new IplImage(src.Size, BitDepth.U8, 1);
            Cv.CvtColor(src, gray, ColorConversion.BgrToGray);
            Cv.EqualizeHist(gray, gray);

            double scaleFactor  = 1.139;
            int    minNeighbors = 1;

            // FIX: dispose the cascade and the detection storage — both were
            // leaked (never disposed) on every call.
            using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("../../../cars.xml"))
            using (CvMemStorage storage = new CvMemStorage())
            {
                CvSeq<CvAvgComp> vehicles = Cv.HaarDetectObjects(gray, cascade, storage, scaleFactor, minNeighbors, HaarDetectionType.ScaleImage, new CvSize(90, 90), new CvSize(0, 0));

                // Outline each hit. (Removed the unused center/radius locals that
                // only fed a commented-out DrawCircle call.)
                for (int i = 0; i < vehicles.Total; i++)
                {
                    CvRect r = vehicles[i].Value.Rect;
                    //Cv.DrawCircle(haarvehicle, center, radius, CvColor.Red, 3);
                    Cv.DrawRect(haarvehicle, r, CvColor.Red, 5);
                }
            }

            return haarvehicle;
        }
コード例 #19
0
ファイル: FaceDetect.cs プロジェクト: simonmssu/opencvsharp
        /// <summary>
        /// CvHaarClassifierCascade / cvHaarDetectObjects sample: detects faces
        /// in the Yalta image and circles each one in a cycling color.
        /// </summary>
        public FaceDetect()
        {
            CheckMemoryLeak();

            // Per-detection colors, cycled via i % 8.
            CvColor[] colors = new CvColor[] {
                new CvColor(0, 0, 255),
                new CvColor(0, 128, 255),
                new CvColor(0, 255, 255),
                new CvColor(0, 255, 0),
                new CvColor(255, 128, 0),
                new CvColor(255, 255, 0),
                new CvColor(255, 0, 0),
                new CvColor(255, 0, 255),
            };

            const double Scale        = 1.14;
            const double ScaleFactor  = 1.0850;
            const int    MinNeighbors = 2;

            using (IplImage img = new IplImage(FilePath.Image.Yalta, LoadMode.Color))
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / Scale), Cv.Round(img.Height / Scale)), BitDepth.U8, 1))
            {
                // Build the detector input: grayscale, downscaled, equalized.
                using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                    Cv.EqualizeHist(smallImg, smallImg);
                }

                using (var cascade = CvHaarClassifierCascade.FromFile(FilePath.Text.HaarCascade))
                using (var storage = new CvMemStorage())
                {
                    storage.Clear();

                    // Detect faces, timing the call.
                    Stopwatch stopwatch = Stopwatch.StartNew();
                    CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                    stopwatch.Stop();
                    Console.WriteLine("detection time = {0}ms\n", stopwatch.ElapsedMilliseconds);

                    // Circle each hit, mapped back to full-resolution coordinates.
                    for (int i = 0; i < faces.Total; i++)
                    {
                        CvRect rect = faces[i].Value.Rect;
                        int centerX = Cv.Round((rect.X + rect.Width * 0.5) * Scale);
                        int centerY = Cv.Round((rect.Y + rect.Height * 0.5) * Scale);
                        int radius  = Cv.Round((rect.Width + rect.Height) * 0.25 * Scale);
                        img.Circle(new CvPoint(centerX, centerY), radius, colors[i % 8], 3, LineType.AntiAlias, 0);
                    }
                }

                // Show the annotated image.
                CvWindow.ShowImages(img);
            }
        }
コード例 #20
0
ファイル: DrawToHDC.cs プロジェクト: healtech/opencvsharp
        /// <summary>
        /// Draws a 100x100 ROI of the Yalta image into a GDI+ bitmap through a
        /// raw HDC, overlays text and an ellipse, and shows source and result.
        /// </summary>
        public DrawToHdc()
        {
            CvRect roi = new CvRect(320, 260, 100, 100);        // region of roosevelt's face

            using (IplImage src = new IplImage(Const.ImageYalta, LoadMode.Color))
            using (IplImage dst = new IplImage(roi.Size, BitDepth.U8, 3))
            {
                src.ROI = roi;

                using (Bitmap bitmap = new Bitmap(roi.Width, roi.Height, PixelFormat.Format32bppArgb))
                using (Graphics g = Graphics.FromImage(bitmap))
                {
                    //BitmapConverter.DrawToGraphics(src, g, new CvRect(new CvPoint(0, 0), roi.Size));
                    // Blit through the device context; the HDC must be released
                    // before g is used for further GDI+ drawing.
                    IntPtr hdc = g.GetHdc();
                    BitmapConverter.DrawToHdc(src, hdc, new CvRect(new CvPoint(0,0), roi.Size));
                    g.ReleaseHdc(hdc);

                    g.DrawString("Roosevelt", new Font(FontFamily.GenericSerif, 12), Brushes.Red, 20, 0);
                    g.DrawEllipse(new Pen(Color.Red, 4), new Rectangle(20, 20, roi.Width/2, roi.Height/2));

                    dst.CopyFrom(bitmap);
                }

                src.ResetROI();

                using (new CvWindow("src", src))
                using (new CvWindow("dst", dst))
                {
                    Cv.WaitKey();
                }
            }
        }
コード例 #21
0
ファイル: Extract.cs プロジェクト: yudaping/ddm_projects_old
        /// <summary>
        /// Detects faces in the _src image, fills the Faces list with one 64x64
        /// crop per detection, and returns the number of faces found.
        /// </summary>
        /// <returns>Number of detected faces.</returns>
        public int Detect()
        {
            const double Scale        = 1.04;
            const double ScaleFactor  = 1.139;
            const int    MinNeighbors = 2;

            int total;
            // FIX: the temporary images were never disposed (leaked per call).
            using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(_src.Width / Scale), Cv.Round(_src.Height / Scale)), BitDepth.U8, 1))
            {
                // Detector input: grayscale, downscaled, equalized.
                using (IplImage gray = new IplImage(_src.Size, BitDepth.U8, 1))
                {
                    Cv.CvtColor(_src, gray, ColorConversion.BgrToGray);
                    Cv.Resize(gray, smallImg, Interpolation.Linear);
                }
                Cv.EqualizeHist(smallImg, smallImg);

                CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
                // FIX: capture the count before storage.Clear() below — the
                // original read faces.Total after clearing the storage that
                // backs the sequence.
                total = faces.Total;

                Faces.Clear();
                for (int i = 0; i < total; i++)
                {
                    // Map the detection back to full-resolution coordinates.
                    CvRect r = faces[i].Value.Rect;
                    r.X      = Cv.Round(r.X * Scale);
                    r.Y      = Cv.Round(r.Y * Scale);
                    r.Width  = Cv.Round(r.Width * Scale);
                    r.Height = Cv.Round(r.Height * Scale);

                    // Crop the face via a temporary ROI into a 64x64 image.
                    _src.SetROI(r);
                    IplImage p = new IplImage(64, 64, _src.Depth, 3);
                    Cv.Resize(_src, p);
                    Faces.Add(p);
                    _src.ResetROI();
                }
            }
            storage.Clear();
            return total;
        }
コード例 #22
0
        //--------------------------------------------------------------------------------------
        // private
        //---------------------------------------------------------------------------------------

        /// <summary>
        /// Debug helper: classifies every point of a 300x300 grid with the
        /// trained SVM and plots class 1 in red, class 2 in green-yellow.
        /// </summary>
        private void Debug_DispPredict()
        {
            using (IplImage retPlot = new IplImage(300, 300, BitDepth.U8, 3))
            {
                for (int x = 0; x < 300; x++)
                {
                    for (int y = 0; y < 300; y++)
                    {
                        // Sample is the grid point normalized to [0, 1).
                        float[] sample    = { x / 300f, y / 300f };
                        CvMat   sampleMat = new CvMat(1, 2, MatrixType.F32C1, sample);
                        int     ret       = (int)svm.Predict(sampleMat);
                        // y flipped so the plot origin is at the bottom-left.
                        CvRect  plotRect  = new CvRect(x, 300 - y, 1, 1);
                        if (ret == 1)
                        {
                            retPlot.Rectangle(plotRect, CvColor.Red);
                        }
                        else if (ret == 2)
                        {
                            retPlot.Rectangle(plotRect, CvColor.GreenYellow);
                        }
                    }
                }
                CvWindow.ShowImages(retPlot);
            }
        }
コード例 #23
0
ファイル: EyeDetection.cs プロジェクト: zhaoyingju/resizer
        /// <summary>
        /// Expands an eye-pair rectangle (100% vertically, 20% per side
        /// horizontally, clamped to the image bounds), then searches that
        /// region for individual eyes. When any are found, the original pair
        /// rectangle is appended as an EyePair result.
        /// </summary>
        /// <param name="img">Image being scanned.</param>
        /// <param name="storage">Detection work storage.</param>
        /// <param name="eyePair">Eye-pair candidate from a previous pass.</param>
        /// <returns>Detected eye rectangles, plus the pair rectangle when eyes were found.</returns>
        private List <ObjRect> DetectFeaturesInPair(IplImage img, CvMemStorage storage, CvAvgComp eyePair)
        {
            List <ObjRect> eyes = new List <ObjRect>();
            CvRect         pair = eyePair.Rect;

            //Inflate 100% vertically, centering
            pair.Top    -= pair.Height / 2;
            pair.Height *= 2;
            if (pair.Top < 0)
            {
                // Clamp to the top edge, shrinking by the amount that overflowed.
                pair.Height += pair.Top; pair.Top = 0;
            }
            if (pair.Height >= img.Height)
            {
                pair.Height = img.Height;
            }
            if (pair.Bottom >= img.Height)
            {
                // Slide up so the bottom edge stays inside the image.
                pair.Top = img.Height - pair.Height;
            }

            //Inflate 20% on each side, centering
            pair.Left  -= pair.Width / 5;
            pair.Width += pair.Width / 5 * 2;
            pair.Left   = Math.Max(0, pair.Left);
            pair.Width  = Math.Min(img.Width - pair.Left, pair.Width);

            eyes.AddRange(DetectEyesInRegion(img, storage, pair));

            // Report the (unexpanded) pair rectangle only when it contained eyes.
            if (eyes.Count > 0)
            {
                eyes.Add(new ObjRect(eyePair.Rect.ToRectangleF(), FeatureType.EyePair));
            }
            return(eyes);
        }
コード例 #24
0
 /// <summary>
 /// Registers a rectangle in both sorted index lists: <c>xs</c> ordered by X,
 /// <c>ys</c> ordered by Y.
 /// </summary>
 /// <param name="r">Rectangle to register.</param>
 public void AddRect(CvRect r)
 {
     // Find the first element with a coordinate >= the new rect's and insert
     // before it; if none qualifies, the rect belongs at the end (List.Insert
     // at Count appends, which also covers the empty-list case).
     //
     // Fixes two bugs in the original: (1) a rect sorting after every existing
     // element was silently dropped (the loop finished without inserting), and
     // (2) the Y branch inserted the existing element `c` instead of `r`,
     // duplicating an entry and losing the new rect.
     int xi = 0;
     while (xi < xs.Count && xs[xi].X < r.X)
     {
         xi++;
     }
     xs.Insert(xi, r);

     int yi = 0;
     while (yi < ys.Count && ys[yi].Y < r.Y)
     {
         yi++;
     }
     ys.Insert(yi, r);
 }
コード例 #25
0
 /// <summary>
 /// Maps a rectangle from image coordinates into box coordinates by dividing
 /// each component by the corresponding image-to-box scale factor.
 /// </summary>
 /// <param name="sourceRect">Rectangle in image coordinates.</param>
 /// <param name="resultRect">Receives the rectangle in box coordinates.</param>
 /// <param name="pImageVsBoxX">Horizontal image-to-box ratio.</param>
 /// <param name="pImageVsBoxY">Vertical image-to-box ratio.</param>
 public void ImageToBox(CvRect sourceRect, ref Rectangle resultRect, float pImageVsBoxX, float pImageVsBoxY)
 {
     // Convert.ToInt32 rounds to the nearest integer (banker's rounding),
     // matching the original conversion semantics exactly.
     float scaledX = sourceRect.X / pImageVsBoxX;
     float scaledY = sourceRect.Y / pImageVsBoxY;
     float scaledW = sourceRect.Width / pImageVsBoxX;
     float scaledH = sourceRect.Height / pImageVsBoxY;

     resultRect.X      = Convert.ToInt32(scaledX);
     resultRect.Y      = Convert.ToInt32(scaledY);
     resultRect.Width  = Convert.ToInt32(scaledW);
     resultRect.Height = Convert.ToInt32(scaledH);
 }
コード例 #26
0
ファイル: Contour.cs プロジェクト: shimat/opencvsharp_2410
        /// <summary>
        /// Demo: builds a random closed polyline, then computes and displays its
        /// bounding rectangle, enclosed area (cvContourArea) and perimeter length
        /// (cvArcLength).
        /// </summary>
        public Contour()
        {
            // cvContourArea, cvArcLength
            // Compute the area enclosed by the contour and the contour's length.

            const int SIZE = 500;

            // (1) Allocate and clear the canvas image.
            using (CvMemStorage storage = new CvMemStorage())
                using (IplImage img = new IplImage(SIZE, SIZE, BitDepth.U8, 3))
                {
                    img.Zero();
                    // (2) Generate 20 points on a randomly perturbed circle and link them.
                    CvSeq <CvPoint> points = new CvSeq <CvPoint>(SeqType.PolyLine, storage);
                    CvRNG           rng    = new CvRNG((ulong)DateTime.Now.Ticks);
                    double          scale  = rng.RandReal() + 0.5;
                    CvPoint         pt0    = new CvPoint
                    {
                        X = (int)(Math.Cos(0) * SIZE / 4 * scale + SIZE / 2),
                        Y = (int)(Math.Sin(0) * SIZE / 4 * scale + SIZE / 2)
                    };
                    img.Circle(pt0, 2, CvColor.Green);
                    points.Push(pt0);
                    for (int i = 1; i < 20; i++)
                    {
                        scale = rng.RandReal() + 0.5;
                        CvPoint pt1 = new CvPoint
                        {
                            X = (int)(Math.Cos(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2),
                            Y = (int)(Math.Sin(i * 2 * Math.PI / 20) * SIZE / 4 * scale + SIZE / 2)
                        };
                        img.Line(pt0, pt1, CvColor.Green, 2);
                        pt0.X = pt1.X;
                        pt0.Y = pt1.Y;
                        img.Circle(pt0, 3, CvColor.Green, Cv.FILLED);
                        points.Push(pt0);
                    }
                    // Close the polyline back to the first generated point.
                    img.Line(pt0, points.GetSeqElem(0).Value, CvColor.Green, 2);
                    // (3) Compute bounding rectangle, area and perimeter length.
                    CvRect rect   = points.BoundingRect(false);
                    double area   = points.ContourArea();
                    double length = points.ArcLength(CvSlice.WholeSeq, 1);
                    // (4) Draw the results onto the image.
                    img.Rectangle(new CvPoint(rect.X, rect.Y), new CvPoint(rect.X + rect.Width, rect.Y + rect.Height), CvColor.Red, 2);
                    string text_area   = string.Format("Area:   wrect={0}, contour={1}", rect.Width * rect.Height, area);
                    string text_length = string.Format("Length: rect={0}, contour={1}", 2 * (rect.Width + rect.Height), length);
                    using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.7, 0.7, 0, 1, LineType.AntiAlias))
                    {
                        img.PutText(text_area, new CvPoint(10, img.Height - 30), font, CvColor.White);
                        img.PutText(text_length, new CvPoint(10, img.Height - 10), font, CvColor.White);
                    }
                    // (5) Show the image and wait for a key press before exiting.
                    using (CvWindow window = new CvWindow("BoundingRect", WindowMode.AutoSize))
                    {
                        window.Image = img;
                        CvWindow.WaitKey(0);
                    }
                }
        }
コード例 #27
0
        /// <summary>
        /// Draws a rectangle in full-image coordinates, temporarily clearing any
        /// active ROI so the coordinates are not interpreted relative to it, then
        /// restores the original ROI.
        /// </summary>
        /// <param name="ipl">Image to draw on.</param>
        /// <param name="rect">Rectangle in full-image coordinates.</param>
        /// <param name="color">Outline color.</param>
        /// <param name="thickNess">Line thickness.</param>
        public static void DrawRect(this IplImage ipl, CvRect rect, CvColor color, int thickNess)
        {
            // Remember the current ROI so it can be reinstated afterwards.
            var savedRoi = ipl.ROI;
            ipl.ResetROI();

            int left   = rect.X;
            int top    = rect.Y;
            int right  = rect.X + rect.Width;
            int bottom = rect.Y + rect.Height;
            ipl.DrawRect(left, top, right, bottom, color, thickNess);

            ipl.SetROI(savedRoi);
        }
コード例 #28
0
        /// <summary>
        /// Shows a copy of <paramref name="scr"/> with a red rectangle marking where
        /// the template matched (top-left corner at <paramref name="maxPoint"/>,
        /// size taken from <paramref name="tmpl"/>).
        /// </summary>
        /// <param name="scr">Source image; not modified.</param>
        /// <param name="tmpl">Template whose size defines the marker rectangle.</param>
        /// <param name="maxPoint">Best-match location (top-left of the marker).</param>
        void ShowMarkedImages(IplImage scr, IplImage tmpl, CvPoint maxPoint)
        {
            // Clone so the caller's image is not painted on; dispose the clone
            // when the window closes (the original leaked it).
            using (var marked = scr.Clone())
            {
                var rect = new CvRect(maxPoint, tmpl.Size);
                marked.DrawRect(rect, new CvScalar(0, 0, 255), 2);
                CvWindow.ShowImages(marked);
            }
        }
コード例 #29
0
 /// <summary>
 /// set initial object rectangle (must be called before initial calculation of the histogram)
 /// </summary>
 /// <param name="window">Object rectangle, in image coordinates.</param>
 /// <returns>True when the native tracker accepted the window.</returns>
 public bool SetWindow(CvRect window)
 {
     if (disposed)
     {
         throw new ObjectDisposedException("CvCamShiftTracker");
     }

     // The native call reports success as a non-zero integer.
     int result = NativeMethods.legacy_CvCamShiftTracker_set_window(ptr, window);
     return result != 0;
 }
コード例 #30
0
        /// <summary>
        /// Wires the ViewModel's events once the DataContext is set: script execution
        /// requests are forwarded to the embedded Python console, and camera-frame
        /// draw requests render overlays onto the camera bitmap on the UI thread.
        /// </summary>
        public MainWindow()
        {
            InitializeComponent();

            // Delegate used by the ViewModel to run scripts in the embedded console.
            DataContextChanged += (o, e) =>
            {
                ViewModel vm = DataContext as ViewModel;
                if (vm != null)
                {
                    vm._ExecuteScript += (sender, arg) =>
                    {
                        // Console interaction must happen on the UI thread.
                        Dispatcher.Invoke(new Action(() => { pythonConsole.Pad.Console.RunStatements(arg.cmd); }));
                    };

                    vm._DrawCameraBitmap += (sender, arg) =>
                    {
                        Dispatcher.BeginInvoke(new Action(() =>
                        {
                            IplImage img = vm.VisionControl.GetCameraImage();

                            DrawCameraViewEventArgs a = arg as DrawCameraViewEventArgs;

                            // _draw == 1: rectangle overlay; _draw == 2: crosshair overlay.
                            if (a._draw == 1)
                            {
                                CvRect rect = new CvRect(a._x1, a._y1, a._x2, a._y2);
                                img.DrawRect(rect, new CvScalar(255, 0, 0), 2);
                            }
                            else if (a._draw == 2)
                            {
                                // Crosshair centered at (_x1, _y1); _x2/_y2 are the arm lengths.
                                int x1 = a._x1 - a._x2 / 2;
                                int x2 = a._x1 + a._x2 / 2;
                                int y1 = a._y1 - a._y2 / 2;
                                int y2 = a._y1 + a._y2 / 2;
                                img.DrawLine(x1, a._y1, x2, a._y1, new CvScalar(255, 0, 0), 2);
                                img.DrawLine(a._x1, y1, a._x1, y2, new CvScalar(255, 0, 0), 2);
                            }

                            // Optional fixed center cross; hard-coded for a 640x640 view — TODO confirm.
                            if (VM.CenterLine == true)
                            {
                                img.DrawLine(0, 320, 640, 320, new CvScalar(255, 0, 0, 0), 2);
                                img.DrawLine(320, 0, 320, 640, new CvScalar(255, 0, 0, 0), 2);
                            }

                            WriteableBitmapConverter.ToWriteableBitmap(img, _col_wb);

                            cameraImage.Source = _col_wb;

                            img.Dispose();

                            //cameraImage.Source = vm.VisionControl.GetCameraBitmap();
                        }));
                    };
                }
            };

            pythonConsole.Pad.Host.ConsoleCreated += new PythonConsoleControl.ConsoleCreatedEventHandler(Host_ConsoleCreated);
        }
コード例 #31
0
        /// <summary>
        /// Detects faces in <paramref name="src"/> with a Haar cascade and blacks
        /// each one out with a filled circle; the censored image is kept in the
        /// <c>FindFace</c> member and returned as a 24bpp bitmap.
        /// </summary>
        /// <param name="src">Source BGR image; it is not modified.</param>
        /// <returns>Bitmap copy of the image with detected faces covered.</returns>
        public System.Drawing.Bitmap FaceDetect(IplImage src)
        {
            // Detection parameters: the image is shrunk by `scale` before detection,
            // and the detector grows its search window by `scaleFactor` per pass.
            // (The unused `colors` palette from the original was removed.)
            const double scale        = 1.04;
            const double scaleFactor  = 1.139;
            const int    minNeighbors = 1;

            using (IplImage img = src.Clone())
                using (IplImage smallImg = new IplImage(new CvSize(Cv.Round(img.Width / scale), Cv.Round(img.Height / scale)), BitDepth.U8, 1))
                {
                    // Build the grayscale, downscaled, histogram-equalized detection image.
                    using (IplImage gray = new IplImage(img.Size, BitDepth.U8, 1))
                    {
                        Cv.CvtColor(img, gray, ColorConversion.BgrToGray);
                        Cv.Resize(gray, smallImg, Interpolation.Linear);
                        Cv.EqualizeHist(smallImg, smallImg);
                    }

                    using (CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile(System.IO.Path.Combine(Environment.CurrentDirectory, "haarcascade_frontalface_alt.xml")))
                        using (CvMemStorage storage = new CvMemStorage())
                        {
                            // Detect faces on the reduced image.
                            CvSeq <CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascade, storage, scaleFactor, minNeighbors, 0, new CvSize(20, 20));

                            // Cover every detection with a filled black circle, mapping
                            // coordinates back to full resolution.
                            for (int i = 0; i < faces.Total; i++)
                            {
                                CvRect  r      = faces[i].Value.Rect;
                                CvPoint center = new CvPoint
                                {
                                    X = Cv.Round((r.X + r.Width * 0.5) * scale),
                                    Y = Cv.Round((r.Y + r.Height * 0.5) * scale)
                                };
                                int radius = Cv.Round((r.Width + r.Height) * 0.25 * scale);
                                img.Circle(center, radius, new CvColor(0, 0, 0), -1, LineType.Link8, 0);
                            }
                        }
                    FindFace = img.Clone();

                    // Convert the censored IplImage to a bitmap for the caller.
                    return(FindFace.ToBitmap(System.Drawing.Imaging.PixelFormat.Format24bppRgb));
                }
        }
コード例 #32
0
    /// <summary>
    /// Unity IMGUI handler: shows usage instructions, draws the rubber-band
    /// selection box while the mouse is down, and overlays the CamShift tracking
    /// box (rotated to match the tracked object) once tracking is active.
    /// </summary>
    void OnGUI()
    {
        // Display instructions when not tracking
        if (!trackFlag)
        {
            GUI.Box(new Rect(Screen.width - 600, Screen.height - 30, 500, 30),
                    "Hold the mouse button down and drag to select a rectangular region to track");
        }
        else
        {
            GUI.Box(new Rect(0, 0, 230, 100),
                    "Keyboard shortcuts\n" +
                    "======================\n\n" +
                    "h = Show histogram\n" +
                    "b = Show back projection\n" +
                    "t = Show Camshift tracking window");
        }

        // Draw the selection box to identify region to track
        if (_mouseIsDown)
        {
            _mouseLastPos = Input.mousePosition;

            // Find the corner of the box
            Vector2 origin;

            origin.x = Mathf.Min(_mouseDownPos.x, _mouseLastPos.x);
            // GUI and mouse coordinates are the opposite way around.
            origin.y = Mathf.Max(_mouseDownPos.y, _mouseLastPos.y);

            //Compute size of box
            Vector2 size = _mouseDownPos - _mouseLastPos;

            // Flip Y because GUI space has its origin at the top-left.
            Rect rectBox = new Rect(origin.x, Screen.height - origin.y, Mathf.Abs(size.x), Mathf.Abs(size.y));

            GUI.Box(rectBox, "Region\nto\nTrack");  // Draw empty box as GUI overlay
        }
        // If tracking with CamShift, then draw GUI box over the tracked object
        else if (trackFlag)
        {
            // Figure out where the tracking box is relative to top-left corner of gameObject

            CvPoint p1    = rotatedBoxToTrack.BoxPoints()[1]; // Top left corner
            CvRect  _rect = ConvertRect2CvRect(new Rect(p1.X, p1.Y, rotatedBoxToTrack.Size.Width, rotatedBoxToTrack.Size.Height));

            Vector2 origin;
            origin.x = objectScreenPosition.position.x + scaleObjectWidth(_rect.X);
            origin.y = objectScreenPosition.position.y + scaleObjectHeight(_rect.Y);


            // Rotate the GUI around the box origin so the overlay matches the
            // tracked object's orientation.
            GUIUtility.RotateAroundPivot(rotatedBoxToTrack.Angle, origin);
            GUI.Box(new Rect(origin.x, origin.y, scaleObjectWidth(_rect.Width),
                             scaleObjectHeight(_rect.Height)), "");

            // Rotate GUI opposite way so that successive GUI calls won't be rotated.
            GUIUtility.RotateAroundPivot(-rotatedBoxToTrack.Angle, origin);
        }
    }
コード例 #33
0
        // Digit recognition
        /// <summary>
        /// Classifies a binarized seven-segment-style digit image (0-9) by probing
        /// whether specific rows, columns and sub-regions are lit or dark.
        /// The checks are ordered: each test assumes all earlier ones failed.
        /// </summary>
        /// <param name="image">Binary digit glyph; nonzero pixels are "lit".</param>
        /// <returns>The recognized digit, 0 through 9.</returns>
        internal static int recognizeDigit(CvMat image)
        {
            int nonzero = 0;

            nonzero = image.GetCols( image.Cols-2, image.Cols ).CountNonZero();
            if ( image.Rows * 2 == nonzero )
                // 1: the rightmost two columns are fully lit
                return 1;

            nonzero = image.GetRows( image.Rows-2, image.Rows ).CountNonZero();
            if ( image.Cols * 2 == nonzero )
                // 2: the bottom two rows are fully lit
                return 2;

            nonzero = image.GetRows ( 0, 2 ).CountNonZero();
            if ( image.Cols * 2 - 2 < nonzero )
                // 7: the top two rows are fully lit, allowing up to one missing pixel
                return 7;

            nonzero = image.GetCols ( image.Cols-3, image.Cols-1 ).CountNonZero();
            if ( image.Rows * 2 == nonzero )
                // 4: the two columns just left of the right edge are fully lit
                return 4;

            CvRect rect = new CvRect( 0, 0, 1, image.Rows*2/3 );
            CvMat subarr;
            nonzero = image.GetSubArr ( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 3: the top two-thirds of the leftmost column is fully dark
                return 3;

            rect = new CvRect ( 0, image.Rows/2, 3, 2 );
            nonzero = image.GetSubArr ( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 5: a 3-wide, 2-tall patch at the start of the lower-left half is fully dark
                return 5;

            rect = new CvRect ( image.Cols/2, image.Rows/2-1, 1, 3 );
            nonzero = image.GetSubArr( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 0: the middle three pixels of the center column are fully dark
                return 0;

            rect = new CvRect ( image.Cols-1, 0, 1, image.Rows*2/5 );
            nonzero = image.GetSubArr( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 6: the top two-fifths of the rightmost column is fully dark
                return 6;

            rect = new CvRect ( image.Cols-1, image.Rows-3, 1, 3 );
            nonzero = image.GetSubArr( out subarr, rect ).CountNonZero();
            if ( 0 == nonzero )
                // 9: the bottom three pixels of the rightmost column are fully dark
                return 9;

            // 8: none of the above conditions matched
            return 8;
        }
コード例 #34
0
ファイル: GpuMat.cs プロジェクト: inohiroki/opencvsharp
 /// <summary>
 /// Extracts the sub-matrix covered by <paramref name="roi"/> as a new Mat
 /// (wraps the native GpuMat_opRange1 call).
 /// </summary>
 /// <param name="roi">Region of interest, in this matrix's coordinates.</param>
 /// <returns>A Mat for the requested region.</returns>
 public virtual Mat this[CvRect roi]
 {
     get
     {
         // The native call fills the freshly created Mat with the ROI view.
         var region = new Mat();
         GpuInvoke.GpuMat_opRange1(ptr, roi, region.CvPtr);
         return region;
     }
 }
コード例 #35
0
 // P/Invoke binding for cv::stereoRectify (InputArray overload): computes the
 // rectification rotations (R1, R2), projection matrices (P1, P2) and the
 // disparity-to-depth mapping Q for a calibrated stereo pair, and reports the
 // valid pixel ROIs after rectification via the two out parameters.
 // NOTE(review): the [DllImport] attribute is expected to precede this
 // declaration — confirm it is present in the full file.
 public static extern void calib3d_stereoRectify_InputArray(
     IntPtr cameraMatrix1, IntPtr distCoeffs1,
     IntPtr cameraMatrix2, IntPtr distCoeffs2,
     CvSize imageSize, IntPtr R, IntPtr T,
     IntPtr R1, IntPtr R2,
     IntPtr P1, IntPtr P2,
     IntPtr Q, int flags,
     double alpha, CvSize newImageSize,
     out CvRect validPixROI1, out CvRect validPixROI2);
コード例 #36
0
ファイル: CvBlobs.cs プロジェクト: shimat/opencvsharp_2410
        /// <summary>
        /// Calculates mean color of a blob in an image. (cvBlobMeanColor)
        /// </summary>
        /// <param name="targetBlob">The target blob</param>
        /// <param name="originalImage">Original image (8-bit, 3-channel BGR).</param>
        /// <returns>Mean color of the blob's pixels.</returns>
        public CvScalar BlobMeanColor(CvBlob targetBlob, IplImage originalImage)
        {
            if (targetBlob == null)
            {
                throw new ArgumentNullException(nameof(targetBlob));
            }
            if (originalImage == null)
            {
                throw new ArgumentNullException(nameof(originalImage));
            }
            if (originalImage.Depth != BitDepth.U8)
            {
                // Message fixed: the original referred to a nonexistent "imgOut" parameter.
                throw new ArgumentException("originalImage.Depth != BitDepth.U8");
            }
            if (originalImage.NChannels != 3)
            {
                throw new ArgumentException("originalImage.NChannels != 3");
            }
            if (Labels == null)
            {
                throw new ArgumentException("blobs.Labels == null");
            }

            int    step   = originalImage.WidthStep;
            CvRect roi    = originalImage.ROI;
            int    width  = roi.Width;
            int    height = roi.Height;
            int    offset = roi.X + (roi.Y * step);

            // Per-channel sums over every pixel labeled with this blob.
            int mb = 0;
            int mg = 0;
            int mr = 0;

            unsafe
            {
                byte *imgData = originalImage.ImageDataPtr + offset;
                for (int r = 0; r < height; r++)
                {
                    for (int c = 0; c < width; c++)
                    {
                        if (Labels[r, c] == targetBlob.Label)
                        {
                            // Interleaved B,G,R byte layout.
                            mb += imgData[3 * c + 0];
                            mg += imgData[3 * c + 1];
                            mr += imgData[3 * c + 2];
                        }
                    }
                    imgData += step;
                }
            }

            GC.KeepAlive(originalImage);

            int pixels = targetBlob.Area;

            // Guard: an empty blob previously crashed with DivideByZeroException.
            if (pixels <= 0)
            {
                throw new ArgumentException("targetBlob.Area must be positive", nameof(targetBlob));
            }

            return(new CvColor((byte)(mr / pixels), (byte)(mg / pixels), (byte)(mb / pixels)));
        }
コード例 #37
0
ファイル: HOG.cs プロジェクト: neoxeo/opencvsharp
        /// <summary>
        /// Demo: runs the default HOG people detector over a sample image, shrinks
        /// each reported rectangle slightly (the detector over-estimates), draws the
        /// results and shows them fullscreen. A GPU code path is kept commented out.
        /// </summary>
        public HOG()
        {
            CPP.Mat img = CPP.CvCpp.ImRead(Const.ImageAsahiyama, LoadMode.Color);

            /*
            if (GPU.CvGpu.IsEnabled)
            {
                GPU.GpuMat imgGpu = new GPU.GpuMat(img);

                GPU.HOGDescriptor hog = new GPU.HOGDescriptor();
                hog.SetSVMDetector(OpenCvSharp.CPlusPlus.HOGDescriptor.GetDefaultPeopleDetector());

                //bool b = hog.CheckDetectorSize();
                //b.ToString();
            }
            else
            //*/
            {
                CPP.HOGDescriptor hog = new CPP.HOGDescriptor();
                hog.SetSVMDetector(CPP.HOGDescriptor.GetDefaultPeopleDetector());

                bool b = hog.CheckDetectorSize();
                // No-op use of b; presumably silences an unused-variable warning — TODO confirm.
                b.ToString();

                Stopwatch watch = Stopwatch.StartNew();

                // run the detector with default parameters. to get a higher hit-rate
                // (and more false alarms, respectively), decrease the hitThreshold and
                // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
                CvRect[] found = hog.DetectMultiScale(img, 0, new CvSize(8, 8), new CvSize(24, 16), 1.05, 2);

                watch.Stop();
                Console.WriteLine("Detection time = {0}ms", watch.ElapsedMilliseconds);
                Console.WriteLine("{0} region(s) found", found.Length);

                foreach (CvRect rect in found)
                {
                    // the HOG detector returns slightly larger rectangles than the real objects.
                    // so we slightly shrink the rectangles to get a nicer output.
                    CvRect r = new CvRect
                    {
                        X = rect.X + (int)Math.Round(rect.Width * 0.1),
                        Y = rect.Y + (int)Math.Round(rect.Height * 0.1),
                        Width = (int)Math.Round(rect.Width * 0.8),
                        Height = (int)Math.Round(rect.Height * 0.8)
                    };
                    img.Rectangle(r.TopLeft, r.BottomRight, CvColor.Red, 3, LineType.Link8, 0);
                }

                // Show the annotated image fullscreen until a key is pressed.
                using (CvWindow window = new CvWindow("people detector", WindowMode.None, img.ToIplImage()))
                {
                    window.SetProperty(WindowProperty.Fullscreen, 1);
                    Cv.WaitKey(0);
                }
            }
        }
コード例 #38
0
        /// <summary>
        /// Runs the face searcher over a single region of the image and returns the
        /// bounding rectangle of every face found.
        /// </summary>
        /// <param name="img">Image to search.</param>
        /// <param name="searcher">Face search engine.</param>
        /// <param name="rectToLookin">Region of the image to restrict the search to.</param>
        /// <returns>Bounding rectangles of all detected faces.</returns>
        public static CvRect[] LocateFaces(this IplImage img, FaceSearchWrapper.FaceSearch searcher, CvRect rectToLookin)
        {
            // Wrap the image in a frame and register the region of interest as a
            // motion rectangle before handing it to the searcher.
            var frame = new Common.Frame(img);
            frame.MotionRectangles.Add(rectToLookin);

            var detected = searcher.SearchFace(frame.GetImage());
            return detected.Select(f => f.Bounds).ToArray();
        }
コード例 #39
0
        /// <summary>
        /// Initializes a face tracker (cvInitFaceTracker) from a grayscale image and
        /// exactly three feature rectangles.
        /// </summary>
        /// <param name="imgGray">Grayscale source image.</param>
        /// <param name="pRects">Exactly three feature rectangles.</param>
        public CvFaceTracker(IplImage imgGray, CvRect[] pRects)
        {
            if (imgGray == null)
            {
                throw new ArgumentNullException("imgGray");
            }
            if (pRects == null)
            {
                throw new ArgumentNullException("pRects");
            }
            if (pRects.Length != 3)
            {
                throw new ArgumentException("pRects.Length must be 3");
            }

            // A zero pointer from the native initializer means construction failed.
            _ptr = CvInvoke.cvInitFaceTracker(IntPtr.Zero, imgGray.CvPtr, pRects, 3);
            if (_ptr == IntPtr.Zero)
            {
                throw new OpenCvSharpException("Failed to create CvFaceTracker");
            }
        }
コード例 #40
0
ファイル: Delaunay.cs プロジェクト: qxp1011/opencvsharp
        /// <summary>
        /// Demo: incrementally inserts 200 random points into a Delaunay
        /// subdivision, animating point location, the triangulation and the Voronoi
        /// diagram after each insertion.
        /// </summary>
        public Delaunay()
        {
            CvRect rect = new CvRect(0, 0, 600, 600);
            CvColor activeFacetColor = new CvColor(255, 0, 0);
            CvColor delaunayColor = new CvColor(0, 0, 0);
            CvColor voronoiColor = new CvColor(0, 180, 0);
            CvColor bkgndColor = new CvColor(255, 255, 255);
            Random rand = new Random();
            
            using (CvMemStorage storage = new CvMemStorage(0))
            using (IplImage img = new IplImage(rect.Size, BitDepth.U8, 3))
            using (CvWindow window = new CvWindow("delaunay"))
            {
                img.Set(bkgndColor);
                CvSubdiv2D subdiv = new CvSubdiv2D(rect, storage);
                for (int i = 0; i < 200; i++)
                {
                    // Random point kept a few pixels inside the bounding rect.
                    CvPoint2D32f fp = new CvPoint2D32f
                    {
                        X = (float)rand.Next(5, rect.Width - 10),
                        Y = (float)rand.Next(5, rect.Height - 10)
                    };
                    // Highlight the facet containing the point before inserting it.
                    LocatePoint(subdiv, fp, img, activeFacetColor);
                    window.Image = img;

                    // Any key press aborts the animation.
                    if (CvWindow.WaitKey(100) >= 0)
                    {
                        break;
                    }
                    subdiv.Insert(fp);
                    subdiv.CalcVoronoi2D();
                    img.Set(bkgndColor);
                    DrawSubdiv(img, subdiv, delaunayColor, voronoiColor);
                    window.Image = img;
                    if (CvWindow.WaitKey(100) >= 0)
                    {
                        break;
                    }
                }
                // Final frame: filled Voronoi cells.
                img.Set(bkgndColor);
                PaintVoronoi(subdiv, img);
                window.Image = img;

                CvWindow.WaitKey(0);
            }
        }
コード例 #41
0
        /// <summary>
        /// Demo: cvPyrSegmentation — builds an image pyramid and uses it to segment
        /// the image, trying pyramid levels 1 through 4 and showing each result.
        /// </summary>
        public PyrSegmentation()
        {
            // cvPyrSegmentation
            // Segment the image using a pyramid with the specified number of levels.

            const double threshold1 = 255.0;
            const double threshold2 = 50.0; 

            // (1) Load the source image.
            using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
            {
                // Try pyramid levels 1 through 4.
                IplImage[] dstImg = new IplImage[4];
                for (int level = 0; level < dstImg.Length; level++)
                {
                    // (2) Set the ROI so width/height are divisible by 2^(level+1),
                    //     as required by the pyramid decomposition.
                    CvRect roi = new CvRect()
                    {
                        X = 0,
                        Y = 0,
                        Width = srcImg.Width & -(1 << (level + 1)),
                        Height = srcImg.Height & -(1 << (level + 1))
                    };
                    srcImg.ROI = roi;
                    // (3) Allocate the output image and run the segmentation.
                    dstImg[level] = srcImg.Clone();
                    Cv.PyrSegmentation(srcImg, dstImg[level], level + 1, threshold1, threshold2);
                }

                // (4) Show the input image and every segmentation result.
                // NOTE(review): wSrc/wDst are never disposed; DestroyAllWindows closes
                // the windows but the wrapper objects leak — confirm this is intended.
                CvWindow wSrc = new CvWindow("src", srcImg);
                CvWindow[] wDst = new CvWindow[dstImg.Length];
                for (int i = 0; i < dstImg.Length; i++)
                {
                    wDst[i] = new CvWindow("dst" + i, dstImg[i]);
                }
                CvWindow.WaitKey();
                CvWindow.DestroyAllWindows();

                foreach (IplImage item in dstImg)
                {
                    item.Dispose();
                }
            }

        }
コード例 #42
0
        /// <summary>
        /// Demo: cvPyrMeanShiftFiltering — segments an image with mean-shift
        /// filtering over an image pyramid and shows the input next to the result.
        /// </summary>
        /// <exception cref="NotSupportedException">
        /// The loaded image is not 8-bit 3-channel (required by the algorithm).
        /// </exception>
        public PyrMeanShiftFiltering()
        {
            // cvPyrMeanShiftFiltering
            // Segment the image using mean-shift filtering over a pyramid.

            const int level = 2;

            // (1) Load the source image.
            using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
            {
                // The algorithm requires an 8-bit, 3-channel image; fail with a
                // descriptive exception instead of the original bare `new Exception()`.
                if (srcImg.NChannels != 3)
                {
                    throw new NotSupportedException("PyrMeanShiftFiltering requires a 3-channel image");
                }
                if (srcImg.Depth != BitDepth.U8)
                {
                    throw new NotSupportedException("PyrMeanShiftFiltering requires an 8-bit image");
                }

                // (2) Set the ROI so width/height are divisible by 2^level,
                //     as required by the pyramid decomposition.
                CvRect roi = new CvRect
                {
                    X = 0,
                    Y = 0,
                    Width = srcImg.Width & -(1 << level),
                    Height = srcImg.Height & -(1 << level)
                };
                srcImg.ROI = roi;

                // (3) Allocate the output image and run the segmentation.
                using (IplImage dstImg = srcImg.Clone())
                {
                    Cv.PyrMeanShiftFiltering(srcImg, dstImg, 30.0, 30.0, level, new CvTermCriteria(5, 1));
                    // (4) Show input and result until a key is pressed.
                    using (CvWindow wSrc = new CvWindow("Source", srcImg))
                    using (CvWindow wDst = new CvWindow("MeanShift", dstImg))
                    {
                        CvWindow.WaitKey();
                    }
                }
            }

        }
コード例 #43
0
        // スコア表からキャラクタ単位に切り分ける
        static void extractScoreRows(CvMat scoreTable, ref List<CvMat> scoreRows)
        {
            CvMat scoreRow;
            CvRect rect;
            // ランキングスコア10件
            for ( int i = 0; i < ScoreTop10Rows; i++ )
            {
                rect = new CvRect ( 0, i * ( ScoreRowHeight + ScoreRowInterval ),
                    scoreTable.Cols, ScoreRowHeight );
                scoreTable.GetSubArr ( out scoreRow, rect );

                scoreRows.Add( scoreRow );
            }

            // プレイヤースコア
            rect = new CvRect ( 0, scoreTable.Rows - ScoreRowHeight, scoreTable.Cols, ScoreRowHeight );
            scoreTable.GetSubArr ( out scoreRow, rect );

            scoreRows.Add ( scoreRow );
        }
コード例 #44
0
        /// <summary>
        /// Smoke test: runs the face searcher over every *.jpg in G:\pic and reports
        /// the average detection time per picture via Debug output.
        /// </summary>
        public void Test()
        {
            var faceSearcher = new FaceSearchWrapper.FaceSearch();

            int count = 0;
            var timer = new System.Diagnostics.Stopwatch();
            timer.Start();

            foreach (var file in System.IO.Directory.EnumerateFiles(@"G:\pic", "*.jpg"))
            {
                // Dispose each image; the original leaked one IplImage per file.
                using (var img = IplImage.FromFile(file))
                {
                    var rect = new CvRect(0, 0, img.Width, img.Height);
                    var faces = faceSearcher.SearchFace(img, rect);
                    System.Diagnostics.Debug.WriteLine(faces.Length);
                    count++;
                }
            }

            // Guard against an empty directory (the original divided by zero).
            if (count > 0)
            {
                var msPerPic = timer.ElapsedMilliseconds / count;
                System.Diagnostics.Debug.WriteLine("millisecond per picture: " + msPerPic);
            }
            else
            {
                System.Diagnostics.Debug.WriteLine("no pictures found");
            }
        }
コード例 #45
0
ファイル: CvSubdiv2D.cs プロジェクト: neoxeo/opencvsharp
        /// <summary>
	    /// cvCreateSubdivDelaunay2Dで初期化
	    /// </summary>
        /// <param name="rect"></param>
	    /// <param name="storage"></param>
#else
        /// <summary>
        /// Initializes using cvCreateSubdivDelaunay2D
        /// </summary>
        /// <param name="rect"></param>
        /// <param name="storage"></param>
#endif
        public CvSubdiv2D(CvRect rect, CvMemStorage storage)
        {
            // Fail fast and name the offending argument; the original threw a
            // bare ArgumentNullException with no parameter name.
            if (storage == null)
            {
                throw new ArgumentNullException("storage");
            }

            // Allocate the subdivision header inside the caller's storage.
            IntPtr subdiv = CvInvoke.cvCreateSubdiv2D(
                SeqType.KindSubdiv2D, CvSubdiv2D.SizeOf, CvSubdiv2DPoint.SizeOf, CvQuadEdge2D.SizeOf, storage.CvPtr
            );

            if (subdiv == IntPtr.Zero)
            {
                throw new OpenCvSharpException("Failed to create CvSubdiv2D");
            }

            // Seed the Delaunay triangulation with the bounding rectangle.
            CvInvoke.cvInitSubdivDelaunay2D(subdiv, rect);

            Initialize(subdiv);
            // Keep a reference so the storage stays alive as long as this
            // subdivision points into it.
            holdingStorage = storage;
        }
コード例 #46
0
ファイル: FileStorage.cs プロジェクト: 0sv/opencvsharp
        /// <summary>
        /// 
        /// </summary>
        /// <param name="fileName"></param>
        /// <summary>
        /// Demonstrates cvWrite / cvWriteComment: writes a color image and a
        /// thresholded grayscale ROI into the same file storage, in two streams.
        /// </summary>
        /// <param name="fileName">Path of the storage file to create.</param>
        private static void SampleFileStorageWriteImage(string fileName)
        {
            // cvWrite, cvWriteComment
            using (IplImage color = new IplImage(FilePath.Image.Lenna, LoadMode.Color))
            using (IplImage gray = new IplImage(color.Size, BitDepth.U8, 1))
            {
                color.CvtColor(gray, ColorConversion.BgrToGray);

                // Restrict both images to the top-left quadrant before writing.
                CvRect quadrant = new CvRect(0, 0, color.Width / 2, color.Height / 2);
                gray.SetROI(quadrant);
                color.SetROI(quadrant);
                gray.Threshold(gray, 90, 255, ThresholdType.Binary);

                using (CvFileStorage storage = new CvFileStorage(fileName, null, FileStorageMode.Write))
                {
                    storage.WriteComment("This is a comment line.", false);
                    storage.Write("color_img", color);
                    storage.StartNextStream();
                    storage.Write("gray_img", gray);
                }
            }
        }
コード例 #47
0
ファイル: PyrSegmentation.cs プロジェクト: 0sv/opencvsharp
        /// <summary>
        /// Demonstrates Cv.PyrSegmentation at pyramid levels 1..4 and shows
        /// the source next to each segmentation result.
        /// </summary>
        public PyrSegmentation()
        {
            const double threshold1 = 255.0;
            const double threshold2 = 50.0;

            using (IplImage srcImg = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
            {
                IplImage[] dstImg = new IplImage[4];
                for (int level = 0; level < dstImg.Length; level++)
                {
                    // cvPyrSegmentation requires width/height divisible by
                    // 2^(level+1); masking with -(1 << (level+1)) clears the
                    // low bits to satisfy that.
                    CvRect roi = new CvRect()
                    {
                        X = 0,
                        Y = 0,
                        Width = srcImg.Width & -(1 << (level + 1)),
                        Height = srcImg.Height & -(1 << (level + 1))
                    };
                    srcImg.ROI = roi;

                    dstImg[level] = srcImg.Clone();
                    Cv.PyrSegmentation(srcImg, dstImg[level], level + 1, threshold1, threshold2);
                }

                // CvWindow is IDisposable; the original leaked every window.
                using (CvWindow wSrc = new CvWindow("src", srcImg))
                {
                    CvWindow[] wDst = new CvWindow[dstImg.Length];
                    try
                    {
                        for (int i = 0; i < dstImg.Length; i++)
                        {
                            wDst[i] = new CvWindow("dst" + i, dstImg[i]);
                        }
                        CvWindow.WaitKey();
                    }
                    finally
                    {
                        foreach (CvWindow w in wDst)
                        {
                            if (w != null)
                            {
                                w.Dispose();
                            }
                        }
                        CvWindow.DestroyAllWindows();
                    }
                }

                foreach (IplImage item in dstImg)
                {
                    item.Dispose();
                }
            }
        }
コード例 #48
0
        // Compute the bounding box of the content in imgSrc via the X/Y
        // projection scans, capping the height at 1.5x the width.
        public CvRect cariBB(IplImage imgSrc)
        {
            int xmin = 0, xmax = 0, ymin = 0, ymax = 0;

            // Horizontal and vertical extent scans (sibling helpers).
            cariX(imgSrc, ref xmin, ref xmax);
            cariY(imgSrc, ref ymin, ref ymax);

            int width = xmax - xmin;
            int height = ymax - ymin;

            // Never let the box grow taller than 1.5 times its width.
            double cap = width * 1.5;
            if (height >= cap)
            {
                height = (int)cap;
            }

            //form.WriteLine("height = " + height.ToString(), true, true);
            //form.WriteLine("width = " + width.ToString(), true, true);

            return new CvRect(xmin, ymin, width, height);
        }
コード例 #49
0
        /// <summary>
        /// Demonstrates Cv.PyrMeanShiftFiltering: mean-shift segmentation of
        /// an 8-bit color image over a 2-level pyramid, shown beside the source.
        /// </summary>
        public PyrMeanShiftFiltering()
        {
            const int level = 2;

            using (var srcImg = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
            {
                // cvPyrMeanShiftFiltering only accepts 8-bit 3-channel input.
                // The original threw a bare `new Exception()` with no message.
                if (srcImg.NChannels != 3)
                {
                    throw new NotSupportedException("PyrMeanShiftFiltering requires a 3-channel image.");
                }
                if (srcImg.Depth != BitDepth.U8)
                {
                    throw new NotSupportedException("PyrMeanShiftFiltering requires an 8-bit (U8) image.");
                }

                // Width/height must be divisible by 2^level; mask off low bits.
                CvRect roi = new CvRect
                {
                    X = 0,
                    Y = 0,
                    Width = srcImg.Width & -(1 << level),
                    Height = srcImg.Height & -(1 << level)
                };
                srcImg.ROI = roi;

                using (IplImage dstImg = srcImg.Clone())
                {
                    Cv.PyrMeanShiftFiltering(srcImg, dstImg, 30.0, 30.0, level, new CvTermCriteria(5, 1));

                    using (new CvWindow("Source", srcImg))
                    using (new CvWindow("MeanShift", dstImg))
                    {
                        CvWindow.WaitKey();
                    }
                }
            }

        }
コード例 #50
0
        // n番目アイコン部分を返す
        // Return the nationality-icon region of the n-th score row.
        // Rows 0-9 come from the ranking area; any other index selects the
        // player's own row at the bottom of the table.
        static CvMat extractColorNationality(CvMat scoreTable, int n)
        {
            // Only the vertical offset differs between the two cases.
            int top = (n < 10)
                ? n * (ScoreRowHeight + ScoreRowInterval)
                : scoreTable.Rows - ScoreRowHeight;

            CvRect rect = new CvRect(
                Score.NationalityOffset, top,
                Score.NationalityWidth, ScoreRowHeight);

            CvMat scoreRow;
            scoreTable.GetSubArr(out scoreRow, rect);
            return scoreRow;
        }
コード例 #51
0
    // Return a region of interest (_rect_roi) from within the image _image
    //  This doesn't need to be its own function, but I had so much trouble
    //  finding a method that didn't crash the program that I separated it.
    // Extract the sub-matrix described by rect_roi from _image.
    // Kept as a dedicated helper: GetSubRect was the only extraction call
    // found that didn't crash the program.
    CvMat GetROI(CvMat _image, CvRect rect_roi)
    {
        CvMat region;
        _image.GetSubRect(out region, rect_roi);
        return region;
    }
コード例 #52
0
    // Determine if pixel box (ROI) is within the bounds of the image
    // Bounds are (0, 0, imWidth, imHeight)
    CvRect CheckROIBounds(CvRect _roi)
    {
        // Work on local copies.  Abs() guards against a box dragged
        // right-to-left / bottom-to-top (negative width/height).
        // NOTE: the clamps below test the ORIGINAL _roi fields but write the
        // locals, so statement order matters — e.g. a negative _roi.Width is
        // first made positive here and only forced to >= 2 if the original
        // (negative) value was < 2, which it always is.
        int _x = _roi.X, _y = _roi.Y,
        _width = Mathf.Abs(_roi.Width), _height = Mathf.Abs(_roi.Height);

        // Pull a negative origin back to the image edge.
        if (_roi.X < 0)
        {
            _x = 0;
            //Debug.LogWarning("X is outside of image");
        }

        if (_roi.Y < 0)
        {
            _y = 0;
           // Debug.LogWarning("Y is outside of image");
        }

        // Enforce a minimum 2x2 box so downstream histogram/ROI code has
        // something to work with.
        if (_roi.Width < 2)
            _width = 2;
        if (_roi.Height < 2)
            _height = 2;

        // Shrink the box if it spills past the right/bottom image edge.
        // NOTE(review): if _x > imWidth the Abs() yields a positive but
        // meaningless width — presumably callers never pass an origin beyond
        // the image; verify against call sites.
        if ((_x + _width) > imWidth)
        {
            _width = Mathf.Abs(imWidth - _x);
            //Debug.LogWarning("Width is outside of image");
        }

        if ((_y + _height) > imHeight)
        {
            _height = Mathf.Abs(imHeight - _y);
            //Debug.LogWarning("Height is outside of image");
        }
        //Debug.Log (new CvRect (_x, _y, _width, _height));

        return new CvRect(_x, _y, _width, _height);
    }
コード例 #53
0
    // Converts Unity's Rect type to CvRect
    // CVRect has type int and Rect has type float
    // Convert Unity's float-based Rect into an int-based CvRect by
    // flooring each component.
    CvRect ConvertRect2CvRect(Rect _roi)
    {
        int x = Mathf.FloorToInt(_roi.x);
        int y = Mathf.FloorToInt(_roi.y);
        int w = Mathf.FloorToInt(_roi.width);
        int h = Mathf.FloorToInt(_roi.height);

        return new CvRect(x, y, w, h);
    }
コード例 #54
0
ファイル: VideoCaptureScript.cs プロジェクト: Titoulion/Shoal
    // Update and OnGUI are the main loops
    // Per-frame driver: grabs the webcam frame, runs the OpenCV processing
    // pipeline, and handles the keyboard/mouse protocol that selects and
    // controls the tracked region of interest.
    void Update()
    {
        // Optional debug overlays, driven by inspector flags.
        if (DrawThresholdImageFlag)
            DrawThresholdImage(videoSourceImage);
        if (DoFaceTrack)
            DrawFaceTracking(videoSourceImage);

        FindObjectScreenPosition();

        if (_webcamTexture.isPlaying)
        {

            if (_webcamTexture.didUpdateThisFrame)
            {
                //convert Unity 2D texture from webcam to CvMat
                Texture2DToCvMat();

                // Do some image processing with OpenCVSharp on this image frame
                ProcessImage(videoSourceImage);
            }

        }
        else
        {
            Debug.Log("Can't find camera!");
        }

        if (Input.GetKeyDown(KeyCode.H))  // "h" key turns histogram screen on/off
            histoWindowFlag = !histoWindowFlag;

        if (trackFlag)
        {
            if (Input.GetKeyDown(KeyCode.B))  // "b" key turns back projection on/off
                backprojWindowFlag = !backprojWindowFlag;
            if (Input.GetKeyDown(KeyCode.T))  // "t" key turns tracking openCV window on
                trackWindowFlag = !trackWindowFlag;

            // Move an external game object based on the ROI being tracked
            if (gameObjectTracker)
                ROIScreenToGameObject(rotatedBoxToTrack, gameObjectTracker);

        }

        // Mouse protocol: right button cancels tracking; a left-button drag
        // selects the region of interest, committed on button release.
        if (Input.GetMouseButtonDown(1))
        { // Right mouse button
            Debug.Log("Tracking off");
            trackFlag = false;
            _mouseIsDown = false;
        }
        else if (Input.GetMouseButtonDown(0))
        {  // Left mouse button

            // Record the drag start point on the first frame of the press.
            if (!_mouseIsDown)
            {
                _mouseDownPos = Input.mousePosition;
                trackFlag = false;

            }

            _mouseIsDown = true;
        }

        if (Input.GetMouseButtonUp(0))
        {  // Left mouse button is up

            // If mouse went from down to up, then update the region of interest using the box
            if (_mouseIsDown)
            {

                // Calculate the histogram for the selected region of interest (ROI)
                _rectToTrack = CheckROIBounds(ConvertRect2CvRect(MakePixelBox(_mouseDownPos, _mouseLastPos)));

                if (DisplayROIFlag)
                {
                    // Draw the region of interest to track
                    DrawROIBox(videoSourceImage);
                }

                // Use Hue/Saturation histogram (not just the Hue dimension)
                _histogramToTrack = CalculateHSVHistogram(GetROI(videoSourceImage, _rectToTrack));

                // Use Hue channel histogram only
                //_histogramToTrack = CalculateOneChannelHistogram (GetROI (videoSourceImage, _rectToTrack), 0, 179);

                // Seed the Kalman filter from the ROI's top-left corner.
                lastPosition = new CvPoint(Mathf.FloorToInt(_rectToTrack.X), Mathf.FloorToInt(_rectToTrack.Y));
                InitializeKalmanFilter();

                trackFlag = true;
            }

            _mouseIsDown = false;

        }
    }
コード例 #55
0
ファイル: VideoCaptureScript.cs プロジェクト: Titoulion/Shoal
    //  Use the CamShift algorithm to track to base histogram throughout the
    // succeeding frames
    // Track _histogramToTrack through the current frame with CamShift,
    // smoothing the raw estimate with a Kalman filter; updates _rectToTrack
    // and rotatedBoxToTrack and draws the tracking overlay into _image.
    void CalculateCamShift(CvMat _image)
    {
        // Back-project the tracked histogram onto the frame: bright pixels
        // are likely to belong to the tracked object.
        CvMat _backProject = CalculateBackProjection(_image, _histogramToTrack);

        // Create convolution kernel for erosion and dilation
        // NOTE(review): these kernels are created on every frame and never
        // released — this looks like a native-memory leak; confirm whether
        // IplConvKernel requires ReleaseStructuringElement/Dispose.
        IplConvKernel elementErode = Cv.CreateStructuringElementEx(10, 10, 5, 5, ElementShape.Rect, null);
        IplConvKernel elementDilate = Cv.CreateStructuringElementEx(4, 4, 2, 2, ElementShape.Rect, null);

        // Try eroding and then dilating the back projection
        // Hopefully this will get rid of the noise in favor of the blob objects.
        Cv.Erode(_backProject, _backProject, elementErode, 1);
        Cv.Dilate(_backProject, _backProject, elementDilate, 1);

        if (backprojWindowFlag)
        {
            Cv.ShowImage("Back Projection", _backProject);
        }

        // Parameters returned by Camshift algorithm
        CvBox2D _outBox;
        CvConnectedComp _connectComp;

        // Set the criteria for the CamShift algorithm
        // Maximum 10 iterations and at least 1 pixel change in centroid
        CvTermCriteria term_criteria = Cv.TermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, 10, 1);

        // Draw object center based on Kalman filter prediction
        CvMat _kalmanPrediction = _kalman.Predict();

        int predictX = Mathf.FloorToInt((float)_kalmanPrediction.GetReal2D(0, 0));
        int predictY = Mathf.FloorToInt((float)_kalmanPrediction.GetReal2D(1, 0));

        // Run the CamShift algorithm
        if (Cv.CamShift(_backProject, _rectToTrack, term_criteria, out _connectComp, out _outBox) > 0)
        {

            // Use the CamShift estimate of the object center to update the Kalman model
            CvMat _kalmanMeasurement = Cv.CreateMat(2, 1, MatrixType.F32C1);
            // Update Kalman model with raw data from Camshift estimate
            _kalmanMeasurement.Set2D(0, 0, _outBox.Center.X); // Raw X position
            _kalmanMeasurement.Set2D(1, 0, _outBox.Center.Y); // Raw Y position
            //_kalmanMeasurement.Set2D (2, 0, _outBox.Center.X - lastPosition.X);
            //_kalmanMeasurement.Set2D (3, 0, _outBox.Center.Y - lastPosition.Y);

            lastPosition.X = Mathf.FloorToInt(_outBox.Center.X);
            lastPosition.Y = Mathf.FloorToInt(_outBox.Center.Y);

            _kalman.Correct(_kalmanMeasurement); // Correct Kalman model with raw data

            // CamShift function returns two values: _connectComp and _outBox.

            //	_connectComp contains is the newly estimated position and size
            //  of the region of interest. This is passed into the subsequent
            // call to CamShift
            // Update the ROI rectangle with CamShift's new estimate of the ROI
            _rectToTrack = CheckROIBounds(_connectComp.Rect);

            // Draw a rectangle over the tracked ROI
            // This method will draw the rectangle but won't rotate it.
            _image.DrawRect(_rectToTrack, CvColor.Aqua);
            _image.DrawMarker(predictX, predictY, CvColor.Aqua);

            // _outBox contains a rotated rectangle esimating the position, size, and orientation
            // of the object we want to track (specified by the initial region of interest).
            // We then take this estimation and draw a rotated bounding box.
            // This method will draw the rotated rectangle
            rotatedBoxToTrack = _outBox;

            // Draw a rotated rectangle representing Camshift's estimate of the
            // object's position, size, and orientation.
            _image.DrawPolyLine(rectangleBoxPoint(_outBox.BoxPoints()), true, CvColor.Red);

        }
        else
        {
            // CamShift lost the object: coast on the Kalman prediction and
            // re-center the search window on it for the next frame.

            //Debug.Log ("Object lost by Camshift tracker");

            _image.DrawMarker(predictX, predictY, CvColor.Purple, MarkerStyle.CircleLine);

            _rectToTrack = CheckROIBounds(new CvRect(predictX - Mathf.FloorToInt(_rectToTrack.Width / 2),
                                           predictY - Mathf.FloorToInt(_rectToTrack.Height / 2),
                                        _rectToTrack.Width, _rectToTrack.Height));
            _image.DrawRect(_rectToTrack, CvColor.Purple);

        }

        if (trackWindowFlag)
            Cv.ShowImage("Image", _image);
    }
コード例 #56
0
ファイル: LabelData.cs プロジェクト: kaorun55/opencvsharp
 /// <summary>
 /// Initializes label data from an existing label matrix and its ROI.
 /// </summary>
 /// <param name="values">Label matrix; a defensive copy is stored.</param>
 /// <param name="roi">Region of interest the labels refer to.</param>
 public LabelData(int[,] values, CvRect roi)
 {
     Roi = roi;
     Values = (int[,])values.Clone();
 }
コード例 #57
0
ファイル: LabelData.cs プロジェクト: kaorun55/opencvsharp
 /// <summary>
 /// Initializes label data from an existing label matrix with an empty ROI.
 /// </summary>
 /// <param name="values">Label matrix; a defensive copy is stored.</param>
 public LabelData(int[,] values)
 {
     Roi = new CvRect();
     Values = (int[,])values.Clone();
 }
コード例 #58
0
ファイル: LabelData.cs プロジェクト: kaorun55/opencvsharp
 /// <summary>
 /// Initializes zero-filled label data of the given size with the given ROI.
 /// </summary>
 /// <param name="rows">Number of label rows.</param>
 /// <param name="cols">Number of label columns.</param>
 /// <param name="roi">Region of interest the labels refer to.</param>
 public LabelData(int rows, int cols, CvRect roi)
 {
     Roi = roi;
     Values = new int[rows, cols];
 }
コード例 #59
0
ファイル: LabelData.cs プロジェクト: kaorun55/opencvsharp
 /// <summary>
 /// Initializes zero-filled label data of the given size with an empty ROI.
 /// </summary>
 /// <param name="rows">Number of label rows.</param>
 /// <param name="cols">Number of label columns.</param>
 public LabelData(int rows, int cols)
 {
     Roi = new CvRect();
     Values = new int[rows, cols];
 }
コード例 #60
0
        /// <summary>
        /// Updates the motion-history image (MHI) from the current frame and
        /// draws per-component motion indicators (a circle with a direction
        /// arrow) into imgDst.  C# port of OpenCV's motempl.c sample.
        /// </summary>
        /// <param name="imgMain">Current BGR input frame.</param>
        /// <param name="imgDst">Destination image for the motion overlay.</param>
        /// <param name="diff_threshold">Binarization threshold for the frame difference.</param>
        public void update_mhi(IplImage imgMain, ref IplImage imgDst, int diff_threshold)
        {
            // Seconds since midnight: monotonic within a day and small enough
            // not to lose precision in the 32-bit-float MHI.  The original used
            // DateTime.Now.Second, which wraps 59 -> 0 every minute and
            // corrupted the motion history each time.
            double timestamp = DateTime.Now.TimeOfDay.TotalSeconds;
            CvSize size = new CxCore.CvSize(imgMain.width, imgMain.height);
            int i, idx1 = last, idx2;
            IplImage silh;
            CvSeq seq;
            CvRect comp_rect;
            double count;
            double angle;
            CvPoint center;
            double magnitude;
            CvScalar color;

            //allocate images at the beginning or reallocate them if the frame size is changed
            if (mhi.ptr == null || mhi.width != size.width || mhi.height != size.height)
            {
                for (i = 0; i < N; i++)
                {
                    // Release any previous ring-buffer frame before allocating
                    // a new one (the original leaked the old frames on resize;
                    // the wrapper tolerates releasing an empty image, as the
                    // mhi release below shows on the first call).
                    cxcore.CvReleaseImage(ref buf[i]);
                    buf[i] = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_8U, 1);
                    cxcore.CvZero(ref buf[i]);
                }
                cxcore.CvReleaseImage(ref mhi);
                cxcore.CvReleaseImage(ref orient);
                cxcore.CvReleaseImage(ref segmask);
                cxcore.CvReleaseImage(ref mask);

                mhi = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
                cxcore.CvZero(ref mhi);
                orient = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
                segmask = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
                mask = cxcore.CvCreateImage(size, (int)cxtypes.IPL_DEPTH_32F, 1);
            }

            // Grayscale the new frame into the ring buffer slot "last".
            cv.CvCvtColor(ref imgMain, ref buf[last], cvtypes.CV_BGR2GRAY);

            idx2 = (last + 1) % N;
            last = idx2;

            // Silhouette = |previous frame - current frame|, thresholded to a
            // binary motion mask, then folded into the MHI.
            silh = buf[idx2];
            cxcore.CvAbsDiff(ref buf[idx1], ref buf[idx2], ref silh);

            cv.CvThreshold(ref silh, ref silh, diff_threshold, 1, cv.CV_THRESH_BINARY);
            cv.CvUpdateMotionHistory(ref silh, ref mhi, timestamp, MHI_DURATION);

            // Scale the MHI to 0..255 so recent motion appears brightest.
            cxcore.CvConvertScale(ref mhi, ref mask, 255 / MHI_DURATION, (MHI_DURATION - timestamp) * 255 / MHI_DURATION);
            cxcore.CvZero(ref imgDst);
            cxcore.CvMerge(ref mask, ref imgDst);
            cv.CvCalcMotionGradient(ref mhi, ref mask, ref orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3);
            if (storage.ptr == null)
                storage = cxcore.CvCreateMemStorage();
            else
                cxcore.CvClearMemStorage(ref storage);
            // Segment the MHI into independently moving components.
            seq = cv.CvSegmentMotion(ref mhi, ref segmask, ref storage, timestamp, MAX_TIME_DELTA);
            // i == -1 processes the whole image; i >= 0 processes component i.
            for (i = -1; i < seq.total; i++)
            {
                if (i < 0)
                {
                    comp_rect = new CvRect(0, 0, size.width, size.height);
                    color = cxcore.CV_RGB(255, 255, 255);
                    magnitude = 100;
                }
                else
                {
                    IntPtr ptr = cxcore.CvGetSeqElem(ref seq, i);
                    CvConnectedComp c = (CvConnectedComp)cvconvert.PtrToType(ptr, typeof(CvConnectedComp));
                    comp_rect = c.rect;
                    // Ignore tiny components.
                    if (comp_rect.width + comp_rect.height < 100)
                        continue;
                    color = cxcore.CV_RGB(255, 0, 0);
                    magnitude = 30;
                }

                //select component ROI
                cxcore.CvSetImageROI(ref silh, comp_rect);
                cxcore.CvSetImageROI(ref mhi, comp_rect);
                cxcore.CvSetImageROI(ref orient, comp_rect);
                cxcore.CvSetImageROI(ref mask, comp_rect);

                //calculate orientation
                angle = cv.CvCalcGlobalOrientation(ref orient, ref mask, ref mhi, timestamp, MHI_DURATION);
                angle = 360 - angle; // adjust for images with top-left origin

                count = cxcore.CvNorm(ref silh); //<<<<<<<<<<<<<<< recheck

                cxcore.CvResetImageROI(ref mhi);
                cxcore.CvResetImageROI(ref orient);
                cxcore.CvResetImageROI(ref mask);
                cxcore.CvResetImageROI(ref silh);

                //check for the case of little motion
                if (count < comp_rect.width * comp_rect.height * 0.05)
                    continue;

                //draw a clock with arrow indicating the direction
                center = new CvPoint((comp_rect.x + comp_rect.width / 2), (comp_rect.y + comp_rect.height / 2));

                cxcore.CvCircle(ref imgDst, center, cxcore.CvRound(magnitude * 1.2), color, 3, cxcore.CV_AA, 0);
                cxcore.CvLine(ref imgDst, center,
                    new CvPoint(cxcore.CvRound(center.x + magnitude * Math.Cos(angle * Math.PI / 180)),
                    cxcore.CvRound(center.y - magnitude * Math.Sin(angle * Math.PI / 180))),
                    color, 3, cxcore.CV_AA, 0);
            }
        }