Example #1
        public static OpenCvSharp.IplImage GetSub(this OpenCvSharp.IplImage ipl, OpenCvSharp.CvRect subRect)
        {
            if (ipl == null)
                throw new ArgumentNullException("ipl", "ipl is null.");

            var boundingRect = new CvRect(0, 0, ipl.Width, ipl.Height);

            if (!boundingRect.Contains(subRect))
                throw new InvalidOperationException("subRect is outside of ipl");

            try
            {
                ipl.SetROI(subRect);

                OpenCvSharp.IplImage sub = new IplImage(
                    ipl.GetSize(),
                    ipl.Depth,
                    ipl.NChannels);

                ipl.Copy(sub);
                return sub;
            }
            finally
            {
                ipl.ResetROI();
            }
        }
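A minimal usage sketch for the extension above, assuming the classic OpenCvSharp 2.x IplImage API; the file names are hypothetical:

        // Copy a 128x128 region out of an image; GetSub returns a deep copy,
        // so the source can be disposed independently of the result.
        using (var src = new OpenCvSharp.IplImage("input.png"))
        using (var face = src.GetSub(new OpenCvSharp.CvRect(40, 40, 128, 128)))
        {
            face.SaveImage("face.png");
        }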
Example #2
 /// <summary>
 /// The AKAZE constructor
 /// </summary>
 /// <param name="descriptorType">Type of the extracted descriptor</param>
 /// <param name="descriptorSize">Size of the descriptor in bits. 0 -> full size</param>
 /// <param name="descriptorChannels">Number of channels in the descriptor (1, 2, 3)</param>
 /// <param name="threshold">Detector response threshold to accept point</param>
 /// <param name="nOctaves">Maximum octave evolution of the image</param>
 /// <param name="nOctaveLayers">Default number of sublevels per scale level</param>
 /// <param name="diffusivity">Diffusivity type</param>
 public static AKAZE Create(
     OpenCvSharp.AKAZEDescriptorType descriptorType = OpenCvSharp.AKAZEDescriptorType.MLDB,
     int descriptorSize = 0,
     int descriptorChannels = 3,
     float threshold = 0.001f,
     int nOctaves = 4,
     int nOctaveLayers = 4,
     KAZEDiffusivity diffusivity = KAZEDiffusivity.DiffPmG2)
 {
     IntPtr ptr = NativeMethods.features2d_AKAZE_create(
         (int) descriptorType, descriptorSize, descriptorChannels,
         threshold, nOctaves, nOctaveLayers, (int) diffusivity);
     return new AKAZE(ptr);
 }
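A short sketch of how this factory might be used, assuming OpenCvSharp 3.x where AKAZE derives from Feature2D; the image path is hypothetical:

     // Detect keypoints and compute MLDB descriptors on a grayscale image.
     using (var img = new Mat("scene.png", ImreadModes.GrayScale))
     using (var akaze = AKAZE.Create(threshold: 0.001f))
     using (var descriptors = new Mat())
     {
         KeyPoint[] keypoints;
         akaze.DetectAndCompute(img, null, out keypoints, descriptors);
     }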
Example #3
        public static OpenCvSharp.IplImage SubImage(
            this OpenCvSharp.IplImage whole,
            OpenCvSharp.CvRect region)
        {
            try
            {
                whole.SetROI(region);

                OpenCvSharp.IplImage part =
                    new OpenCvSharp.IplImage(new OpenCvSharp.CvSize(region.Width, region.Height),
                                             whole.Depth, whole.NChannels);

                whole.Copy(part);

                return part;
            }
            finally
            {
                // Reset the ROI even if Copy throws (mirrors GetSub in Example #1).
                whole.ResetROI();
            }
        }
Example #4
    public CvPoint2D64f convertKinectToProjector(OpenCvSharp.CvPoint3D64f kinectPoint)
    {
        CvPoint2D64f outp = new CvPoint2D64f();
        //Debug.Log("In: " + kinectPoint.X + " " +kinectPoint.Y + " " +kinectPoint.Z);
        //xp = (q1*xk + q2*yk + q3*zk + q4)/(q9*xk + q10*yk + q11*zk + 1)
        //outp.X = (result.get(0,0)*kinectPoint.x + result.get(0,1)*kinectPoint.y + result.get(0,2)*kinectPoint.z + result.get(0,3))/
        //        (result.get(0,8)*kinectPoint.x + result.get(0,9)*kinectPoint.y + result.get(0,10)*kinectPoint.z + 1);
        outp.X = ((result.Get<double>(0, 0) * kinectPoint.X) + (result.Get<double>(0, 1) * kinectPoint.Y) + (result.Get<double>(0, 2) * kinectPoint.Z) + result.Get<double>(0, 3)) /
                ((result.Get<double>(0, 8) * kinectPoint.X) + (result.Get<double>(0, 9) * kinectPoint.Y) + (result.Get<double>(0, 10) * kinectPoint.Z) + 1);

        //yp = (q5*xk + q6*yk + q7*zk + q8)/(q9*xk + q10*yk + q11*zk + 1)
        //outp.y = (result.get(0, 4) * kinectPoint.x + result.get(0, 5) * kinectPoint.y + result.get(0, 6) * kinectPoint.z + result.get(0, 7)) /
        //        (result.get(0, 8) * kinectPoint.x + result.get(0, 9) * kinectPoint.y + result.get(0, 10) * kinectPoint.z + 1);
        outp.Y = ((result.Get<double>(0, 4) * kinectPoint.X) + (result.Get<double>(0, 5) * kinectPoint.Y) + (result.Get<double>(0, 6) * kinectPoint.Z) + result.Get<double>(0, 7)) /
                ((result.Get<double>(0, 8) * kinectPoint.X) + (result.Get<double>(0, 9) * kinectPoint.Y) + (result.Get<double>(0, 10) * kinectPoint.Z) + 1);
        //Debug.Log(outp.X + " " + outp.Y);

        //outp.X = ((result.Get<double>(0, 0) * kinectPoint.X) + (result.Get<double>(1,0) * kinectPoint.Y) + (result.Get<double>(2,0) * kinectPoint.Z) + result.Get<double>(3,0)) /
        //        ((result.Get<double>(8,0) * kinectPoint.X) + (result.Get<double>(9,0) * kinectPoint.Y) + (result.Get<double>(10,0) * kinectPoint.Z) + 1);
        //outp.Y = ((result.Get<double>(4,0) * kinectPoint.X) + (result.Get<double>(5,0) * kinectPoint.Y) + (result.Get<double>(6,0) * kinectPoint.Z) + result.Get<double>(7,0)) /
        //        ((result.Get<double>(8,0) * kinectPoint.X) + (result.Get<double>(9,0) * kinectPoint.Y) + (result.Get<double>(10,0) * kinectPoint.Z) + 1);

        return outp;
    }
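The mapping above is a 3D-to-2D projective transform with eleven coefficients q1..q11 (the twelfth entry is fixed to 1), read from row 0 of a Mat named result. A minimal sketch of the assumed layout, with hypothetical identity-like coefficients:

    // result is assumed to be a 1x11 row of doubles holding q1..q11.
    var result = new OpenCvSharp.CPlusPlus.Mat(1, 11, OpenCvSharp.CPlusPlus.MatType.CV_64FC1, new OpenCvSharp.CPlusPlus.Scalar(0));
    result.Set<double>(0, 0, 1.0); // q1: xp tracks xk
    result.Set<double>(0, 5, 1.0); // q6: yp tracks yk
    // xp = (q1*xk + q2*yk + q3*zk + q4) / (q9*xk + q10*yk + q11*zk + 1)
    // yp = (q5*xk + q6*yk + q7*zk + q8) / (q9*xk + q10*yk + q11*zk + 1)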
Example #5
 private static bool IsStaticFrame(OpenCvSharp.CvRect rect)
 {
     return rect.Width == 0 || rect.Height == 0;
 }
Example #6
        // Get the 3D camera-space position for a point in the color image
        Microsoft.Kinect.CameraSpacePoint GetCenterPosition(OpenCvSharp.CPlusPlus.Point colorImagePoint, ushort[] depthBuffer, int imgW, int imgH)
        {
            Microsoft.Kinect.KinectSensor kinect = Microsoft.Kinect.KinectSensor.GetDefault();

            Microsoft.Kinect.CameraSpacePoint[] bodyPosition = new Microsoft.Kinect.CameraSpacePoint[imgW * imgH];
            kinect.CoordinateMapper.MapColorFrameToCameraSpace(depthBuffer, bodyPosition);

            Microsoft.Kinect.CameraSpacePoint centerPoint = bodyPosition[colorImagePoint.X + colorImagePoint.Y * imgW];
            //Console.WriteLine(centerPoint.X.ToString());

            return centerPoint;
        }
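A hedged usage sketch; the buffers and resolutions below assume a Kinect v2 (512x424 depth, 1920x1080 color) and are illustrative only. Note that bodyPosition must hold one CameraSpacePoint per color pixel, which is why the method allocates imgW * imgH entries:

        // Fill depthData from a DepthFrame (e.g. CopyFrameDataToArray) before calling.
        ushort[] depthData = new ushort[512 * 424];
        var colorPoint = new OpenCvSharp.CPlusPlus.Point(960, 540);
        var center = GetCenterPosition(colorPoint, depthData, 1920, 1080);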
Example #7
        // Binarization (grayscale conversion + thresholding)
        private OpenCvSharp.CPlusPlus.Mat procBinary(OpenCvSharp.CPlusPlus.Mat matSrc)
        {
            OpenCvSharp.CPlusPlus.Mat matDst = matSrc.Clone();
            OpenCvSharp.CPlusPlus.Mat matGray = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);
            OpenCvSharp.CPlusPlus.Mat matBinary = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);

            OpenCvSharp.CPlusPlus.Cv2.CvtColor(matSrc, matGray, OpenCvSharp.ColorConversion.BgraToGray, 1);
            OpenCvSharp.CPlusPlus.Cv2.Threshold(matGray, matBinary, 0, 255, OpenCvSharp.ThresholdType.Binary | OpenCvSharp.ThresholdType.Otsu);

            OpenCvSharp.CPlusPlus.Cv2.CvtColor(matBinary, matDst, OpenCvSharp.ColorConversion.GrayToBgra, 3);

            return matDst;
        }
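The proc* helpers in this file convert from BGRA (note the BgraToGray calls), so an image loaded as BGR needs one extra conversion first. A sketch from within the same form class, with a hypothetical input file:

            var bgr = new OpenCvSharp.CPlusPlus.Mat("input.png", OpenCvSharp.LoadMode.Color);
            var bgra = new OpenCvSharp.CPlusPlus.Mat();
            OpenCvSharp.CPlusPlus.Cv2.CvtColor(bgr, bgra, OpenCvSharp.ColorConversion.BgrToBgra);
            var binary = procBinary(bgra);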
Example #8
        private void CompareFace(
            Damany.Util.DateTimeRange range,
            OpenCvSharp.IplImage targetImage, OpenCvSharp.CvRect rect)
        {
            try
            {
                IsRunning = true;

                targetImage.ROI = rect;
                int count = 0;

                var gray = targetImage.GetSub(rect).CvtToGray();
                var poi = new PersonOfInterest(targetImage, gray);
                var repo = new PersonOfInterest[] { poi };
                this._comparer.Load(repo.ToList());

                //foreach (var p in portraits)
                //{
                //    if (Exit)
                //    {
                //        break;
                //    }

                //    this.view.CurrentImage = p.GetIpl().ToBitmap();

                //    var colorImg = p.GetIpl();
                //    var imgFromRepository = colorImg.GetSub(p.FaceBounds).CvtToGray();

                //    var result = this._comparer.CompareTo(imgFromRepository);

                //    foreach (var repositoryCompareResult in result)
                //    {
                //        if (repositoryCompareResult.Similarity > Thresholds[ThresholdIndex])
                //        {
                //            count++;
                //            this.view.AddPortrait(p);
                //            this.view.SetStatusText(string.Format("Found {0} targets", count));
                //        }

                //    }
                //}

            }
            finally
            {
                IsRunning = false;

            }
        }
Example #9
 public void AddVideoNode(OpenCvSharp.VideoCapture video, string video_name, int index)
 {
     XmlNode video_node = _MetaData.CreateNode(XmlNodeType.Element, "video", null);
     XmlAttribute attribute = _MetaData.CreateAttribute("seq");
     attribute.Value = index.ToString();
     video_node.Attributes.Append(attribute);
     XmlAttribute attribute_frame = _MetaData.CreateAttribute("frame");
     attribute_frame.Value = video.Fps.ToString();
     video_node.Attributes.Append(attribute_frame);
     XmlAttribute attribute_width = _MetaData.CreateAttribute("width");
     attribute_width.Value = video.FrameWidth.ToString();
     video_node.Attributes.Append(attribute_width);
     XmlAttribute attribute_height = _MetaData.CreateAttribute("height");
     attribute_height.Value = video.FrameHeight.ToString();
     video_node.Attributes.Append(attribute_height);
     video_node.InnerText = video_name;
     VideoNode.AppendChild(video_node);
 }
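For reference, a call such as the sketch below would append an element shaped like <video seq="0" frame="30" width="1920" height="1080">clip.mp4</video> under VideoNode; the file name is hypothetical:

     using (var cap = new OpenCvSharp.VideoCapture("clip.mp4"))
     {
         AddVideoNode(cap, "clip.mp4", 0);
     }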
Example #10
        // Image segmentation by mean-shift filtering
        // Parameters: spatial window radius, color window radius, max pyramid level used for segmentation
        private OpenCvSharp.CPlusPlus.Mat procPyrMeanShiftFiltering(OpenCvSharp.CPlusPlus.Mat matSrc, double sp, double sr, int level)
        {
            // Termination criteria
            OpenCvSharp.CPlusPlus.TermCriteria term = new OpenCvSharp.CPlusPlus.TermCriteria(OpenCvSharp.CriteriaType.Iteration, 5, 1);
            //OpenCvSharp.CPlusPlus.TermCriteria term = new OpenCvSharp.CPlusPlus.TermCriteria(OpenCvSharp.CriteriaType.Epsilon, 5, 1);

            // Prepare the destination Mat
            OpenCvSharp.CPlusPlus.Mat matDst = matSrc.Clone();

            OpenCvSharp.CPlusPlus.Cv2.PyrMeanShiftFiltering(matSrc, matDst, sp, sr, level, term);

            return matDst;
        }
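Note that OpenCV's PyrMeanShiftFiltering expects an 8-bit 3-channel source, so this helper should be fed BGR data rather than the BGRA used elsewhere in this file. A sketch with illustrative parameter values, where matBgr is a hypothetical 8UC3 Mat (larger sp/sr merge more regions):

            // Spatial radius 20, color radius 40, 2 pyramid levels.
            var segmented = procPyrMeanShiftFiltering(matBgr, 20.0, 40.0, 2);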
Example #11
        // Probabilistic Hough transform (Canny + Hough) + per-cell statistics
        private OpenCvSharp.CPlusPlus.Mat procHoughStat(OpenCvSharp.CPlusPlus.Mat matSrc, int votes, double minLength, double maxGap)
        {
            OpenCvSharp.CPlusPlus.Mat matDst = matSrc.Clone();
            OpenCvSharp.CPlusPlus.Mat matGray = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);
            OpenCvSharp.CPlusPlus.Mat matBinary = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);

            OpenCvSharp.CPlusPlus.Cv2.CvtColor(matSrc, matGray, OpenCvSharp.ColorConversion.BgraToGray, 1);
            OpenCvSharp.CPlusPlus.Cv2.Threshold(matGray, matBinary, 0, 255, OpenCvSharp.ThresholdType.Binary | OpenCvSharp.ThresholdType.Otsu);

            // Hough transform
            double rho = 1.0;               // distance resolution
            double theta = Math.PI / 180.0; // angle resolution
            OpenCvSharp.CvLineSegmentPoint[] lines = OpenCvSharp.CPlusPlus.Cv2.HoughLinesP(matBinary, rho, theta, votes, minLength, maxGap);

            // Draw the detected segments in random colors
            Random rnd = new Random();
            foreach (OpenCvSharp.CvLineSegmentPoint it in lines)
            {
                //matDst.Line(it.P1, it.P2, new OpenCvSharp.CPlusPlus.Scalar(0, 0, 255), 1, OpenCvSharp.LineType.AntiAlias, 0);
                matDst.Line(it.P1, it.P2, new OpenCvSharp.CPlusPlus.Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255)), 1, OpenCvSharp.LineType.AntiAlias, 0);
            }

            // Compute and display per-grid-cell line count, mean length, and max length
            int divNumR = 10;
            int divNumC = 10;
            int divSizeR = matDst.Rows / divNumR;
            int divSizeC = matDst.Cols / divNumC;

            double[,] sum = new double[divNumR, divNumC];
            double[,] max = new double[divNumR, divNumC];
            int[,] num = new int[divNumR, divNumC];

            foreach (OpenCvSharp.CvLineSegmentPoint it in lines)
            {
                double midR = (it.P1.Y + it.P2.Y) / 2.0;
                double midC = (it.P1.X + it.P2.X) / 2.0;
                double dist = it.P1.DistanceTo(it.P2);

                for (int r = 0; r < divNumR; r++)
                {
                    for (int c = 0; c < divNumC; c++)
                    {
                        if (midR >= divSizeR * r && midR < divSizeR * (r + 1) && midC >= divSizeC * c && midC < divSizeC * (c + 1))
                        {
                            sum[r, c] += dist;
                            num[r, c]++;
                            if (max[r, c] < dist)
                            {
                                max[r, c] = dist;
                            }
                        }
                    }
                }
            }

            for (int r = 0; r < divNumR; r++)
            {
                matDst.Line(new OpenCvSharp.CPlusPlus.Point(0, divSizeR * r), new OpenCvSharp.CPlusPlus.Point(matDst.Cols, divSizeR * r), new OpenCvSharp.CPlusPlus.Scalar(0, 0, 255), 1, OpenCvSharp.LineType.AntiAlias, 0);
                for (int c = 0; c < divNumC; c++)
                {
                    matDst.Line(new OpenCvSharp.CPlusPlus.Point(divSizeC * c, 0), new OpenCvSharp.CPlusPlus.Point(divSizeC * c, matDst.Rows), new OpenCvSharp.CPlusPlus.Scalar(0, 0, 255), 1, OpenCvSharp.LineType.AntiAlias, 0);

                    if (num[r, c] > 0)
                    {
                        OpenCvSharp.CPlusPlus.Cv2.PutText(matDst, num[r, c].ToString(), new OpenCvSharp.CPlusPlus.Point(10 + divSizeC * c, 20 + divSizeR * r), OpenCvSharp.FontFace.HersheySimplex, 0.5, new OpenCvSharp.CPlusPlus.Scalar(0, 0, 255), 2, OpenCvSharp.LineType.AntiAlias);
                        OpenCvSharp.CPlusPlus.Cv2.PutText(matDst, (sum[r, c] / num[r, c]).ToString("F2"), new OpenCvSharp.CPlusPlus.Point(10 + divSizeC * c, 40 + divSizeR * r), OpenCvSharp.FontFace.HersheySimplex, 0.5, new OpenCvSharp.CPlusPlus.Scalar(0, 0, 255), 2, OpenCvSharp.LineType.AntiAlias);
                        OpenCvSharp.CPlusPlus.Cv2.PutText(matDst, max[r, c].ToString("F2"), new OpenCvSharp.CPlusPlus.Point(10 + divSizeC * c, 60 + divSizeR * r), OpenCvSharp.FontFace.HersheySimplex, 0.5, new OpenCvSharp.CPlusPlus.Scalar(0, 0, 255), 2, OpenCvSharp.LineType.AntiAlias);
                    }
                }
            }

            return matDst;
        }
Example #12
        // Probabilistic Hough transform (Canny + Hough)
        private OpenCvSharp.CPlusPlus.Mat procHough(OpenCvSharp.CPlusPlus.Mat matSrc, int votes, double minLength, double maxGap)
        {
            OpenCvSharp.CPlusPlus.Mat matDst = matSrc.Clone();
            OpenCvSharp.CPlusPlus.Mat matCanny = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);

            OpenCvSharp.CPlusPlus.Cv2.Canny(matSrc, matCanny, 100, 200, 3);

            // Hough transform
            double rho = 1.0;               // distance resolution
            double theta = Math.PI / 180.0; // angle resolution
            OpenCvSharp.CvLineSegmentPoint[] lines = OpenCvSharp.CPlusPlus.Cv2.HoughLinesP(matCanny, rho, theta, votes, minLength, maxGap);

            // Draw the detected segments in random colors
            Random rnd = new Random();
            foreach (OpenCvSharp.CvLineSegmentPoint it in lines)
            {
                //matDst.Line(it.P1, it.P2, new OpenCvSharp.CPlusPlus.Scalar(0, 0, 255), 1, OpenCvSharp.LineType.AntiAlias, 0);
                matDst.Line(it.P1, it.P2, new OpenCvSharp.CPlusPlus.Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255)), 1, OpenCvSharp.LineType.AntiAlias, 0);
            }

            return matDst;
        }
Example #13
        // Grayscale conversion
        private OpenCvSharp.CPlusPlus.Mat procGrayScale(OpenCvSharp.CPlusPlus.Mat matSrc)
        {
            OpenCvSharp.CPlusPlus.Mat matDst = matSrc.Clone();
            OpenCvSharp.CPlusPlus.Mat matGray = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);

            OpenCvSharp.CPlusPlus.Cv2.CvtColor(matSrc, matGray, OpenCvSharp.ColorConversion.BgraToGray, 1);

            OpenCvSharp.CPlusPlus.Cv2.CvtColor(matGray, matDst, OpenCvSharp.ColorConversion.GrayToBgra, 3);

            return matDst;
        }
Example #14
        // Contour extraction (grayscale + binarization + contour extraction); returns an image containing only the contours
        private OpenCvSharp.CPlusPlus.Mat procContour(OpenCvSharp.CPlusPlus.Mat matSrc)
        {
            OpenCvSharp.CPlusPlus.Mat matDst = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC3, new OpenCvSharp.CPlusPlus.Scalar(0, 0, 0));
            OpenCvSharp.CPlusPlus.Mat matGray = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);
            OpenCvSharp.CPlusPlus.Mat matBinary = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);

            OpenCvSharp.CPlusPlus.Cv2.CvtColor(matSrc, matGray, OpenCvSharp.ColorConversion.BgraToGray, 1);
            OpenCvSharp.CPlusPlus.Cv2.Threshold(matGray, matBinary, 0, 255, OpenCvSharp.ThresholdType.Binary | OpenCvSharp.ThresholdType.Otsu);

            // Extract contours
            OpenCvSharp.CPlusPlus.Mat[] contours;
            OpenCvSharp.CPlusPlus.Mat hierarchy = new OpenCvSharp.CPlusPlus.Mat();
            OpenCvSharp.CPlusPlus.Cv2.FindContours(matBinary, out contours, hierarchy, OpenCvSharp.ContourRetrieval.Tree, OpenCvSharp.ContourChain.ApproxNone);
            //OpenCvSharp.CPlusPlus.Cv2.FindContours(matBinary, out contours, hierarchy, OpenCvSharp.ContourRetrieval.Tree, OpenCvSharp.ContourChain.ApproxSimple);
            //OpenCvSharp.CPlusPlus.Cv2.FindContours(matBinary, out contours, hierarchy, OpenCvSharp.ContourRetrieval.Tree, OpenCvSharp.ContourChain.ApproxTC89KCOS);
            //OpenCvSharp.CPlusPlus.Cv2.FindContours(matBinary, out contours, hierarchy, OpenCvSharp.ContourRetrieval.Tree, OpenCvSharp.ContourChain.ApproxTC89L1);

            // Draw the contours
            //OpenCvSharp.CPlusPlus.Cv2.DrawContours(matDst, contours, -1, new OpenCvSharp.CPlusPlus.Scalar(255, 255, 255), OpenCvSharp.Cv.FILLED, OpenCvSharp.LineType.AntiAlias, hierarchy);
            OpenCvSharp.CPlusPlus.Cv2.DrawContours(matDst, contours, -1, new OpenCvSharp.CPlusPlus.Scalar(255, 255, 255), 1, OpenCvSharp.LineType.AntiAlias, hierarchy);

            return matDst;
        }
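ApproxNone keeps every boundary pixel; when only the shape outline matters, a lighter variant can request endpoint-compressed outer contours instead. A sketch, where binaryCopy stands in for a scratch binary Mat (FindContours modifies its input):

            OpenCvSharp.CPlusPlus.Mat[] outer;
            var h = new OpenCvSharp.CPlusPlus.Mat();
            OpenCvSharp.CPlusPlus.Cv2.FindContours(binaryCopy, out outer, h,
                OpenCvSharp.ContourRetrieval.External, OpenCvSharp.ContourChain.ApproxSimple);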
Example #15
        // Edge extraction (Canny)
        private OpenCvSharp.CPlusPlus.Mat procCanny(OpenCvSharp.CPlusPlus.Mat matSrc, double threshold1, double threshold2, int apertureSize)
        {
            OpenCvSharp.CPlusPlus.Mat matDst = matSrc.Clone();
            OpenCvSharp.CPlusPlus.Mat matCanny = new OpenCvSharp.CPlusPlus.Mat(matSrc.Rows, matSrc.Cols, OpenCvSharp.CPlusPlus.MatType.CV_8UC1);

            OpenCvSharp.CPlusPlus.Cv2.Canny(matSrc, matCanny, threshold1, threshold2, apertureSize);

            OpenCvSharp.CPlusPlus.Cv2.CvtColor(matCanny, matDst, OpenCvSharp.ColorConversion.GrayToBgra, 3);

            return matDst;
        }
Example #16
        // Shared handler for the left/right processing panes
        private void buttonProcLR_Click(ComboBox cb, ref OpenCvSharp.CPlusPlus.Mat mat, ref Bitmap bm, TextBox tb1, TextBox tb2, TextBox tb3)
        {
            if (mat == null) return;

            OpenCvSharp.CPlusPlus.Mat matDst;

            if (cb.Text.Equals("none (reset)"))
            {
                mat = this.matOrg.Clone();
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("PyrSegmentation"))
            {
                // Image segmentation using an image pyramid
                matDst = this.procPyrSegmentation(mat, int.Parse(tb1.Text), double.Parse(tb2.Text), double.Parse(tb3.Text));
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("PyrMeanShiftFiltering"))
            {
                // Image segmentation by mean-shift filtering
                matDst = this.procPyrMeanShiftFiltering(mat, double.Parse(tb1.Text), double.Parse(tb2.Text), int.Parse(tb3.Text));
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("Watershed"))
            {
                // Image segmentation with the watershed algorithm
                matDst = this.procWatershed(mat, int.Parse(tb1.Text), int.Parse(tb2.Text), int.Parse(tb3.Text));
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("GrayScale"))
            {
                // Grayscale conversion
                matDst = this.procGrayScale(mat);
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("Canny"))
            {
                // Edge extraction (Canny)
                matDst = this.procCanny(mat, double.Parse(tb1.Text), double.Parse(tb2.Text), int.Parse(tb3.Text));
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("Binary"))
            {
                // Binarization (grayscale conversion + thresholding)
                matDst = this.procBinary(mat);
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("HoughStat"))
            {
                // Probabilistic Hough transform (Canny + Hough) + statistics
                matDst = this.procHoughStat(mat, int.Parse(tb1.Text), double.Parse(tb2.Text), int.Parse(tb3.Text));
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("Hough"))
            {
                // Probabilistic Hough transform (Canny + Hough)
                matDst = this.procHough(mat, int.Parse(tb1.Text), double.Parse(tb2.Text), int.Parse(tb3.Text));
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
            else if (cb.Text.Equals("Contour"))
            {
                // Contour extraction (grayscale + binarization + contour extraction)
                matDst = this.procContour(mat);
                mat = matDst;
                bm = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }
        }
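A sketch of how this shared handler would typically be wired up; the control and field names (comboBoxL, matL, bmL, pictureBoxL, ...) are hypothetical:

        private void buttonProcL_Click(object sender, EventArgs e)
        {
            // Run the selected operation on the left pane's working image.
            this.buttonProcLR_Click(this.comboBoxL, ref this.matL, ref this.bmL,
                this.textBoxL1, this.textBoxL2, this.textBoxL3);
            this.pictureBoxL.Image = this.bmL;
        }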
Example #17
        // Image segmentation using an image pyramid
        // Parameters: pyramid level, pixel-linking threshold, cluster-range threshold
        private OpenCvSharp.CPlusPlus.Mat procPyrSegmentation(OpenCvSharp.CPlusPlus.Mat matSrc, int level, double threshold1, double threshold2)
        {
            // Destination Mat (assigned from the C-API result below)
            OpenCvSharp.CPlusPlus.Mat matDst;

            // Prepare IplImages for the C API
            OpenCvSharp.IplImage iplSrc = matSrc.ToIplImage();
            OpenCvSharp.IplImage iplDst = iplSrc.Clone();

            // Set an ROI sized to a multiple of 2^level, as cvPyrSegmentation requires
            OpenCvSharp.CvRect roi;
            roi.X = 0;
            roi.Y = 0;
            roi.Width = iplSrc.Width & -(1 << level);
            roi.Height = iplSrc.Height & -(1 << level);
            iplSrc.SetROI(roi);
            iplDst.SetROI(roi);

            OpenCvSharp.Cv.PyrSegmentation(iplSrc, iplDst, level, threshold1, threshold2);

            // Convert the IplImage back to a Mat
            matDst = new OpenCvSharp.CPlusPlus.Mat(iplDst);

            return matDst;
        }
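The expression iplSrc.Width & -(1 << level) rounds the size down to a multiple of 2^level. A quick check of the arithmetic:

            int level = 2;
            int mask = -(1 << level);   // -4, i.e. ...11111100 in two's complement
            int w1 = 641 & mask;        // 640
            int w2 = 640 & mask;        // 640 (already a multiple of 4)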
Example #18
        private static OpenCvSharp.CvRect FrameToPortrait(OpenCvSharp.CvRect bounds, OpenCvSharp.CvRect faceBounds)
        {
            faceBounds.X -= bounds.X;
            faceBounds.Y -= bounds.Y;

            return faceBounds;
        }
Example #19
        // Image segmentation with the watershed algorithm
        // Parameters: horizontal divisions, vertical divisions, marker size
        private OpenCvSharp.CPlusPlus.Mat procWatershed(OpenCvSharp.CPlusPlus.Mat matSrc, int wdiv, int hdiv, int msize)
        {
            // Destination Mat (assigned from the C-API result below)
            OpenCvSharp.CPlusPlus.Mat matDst;

            // Prepare IplImages for the C API
            OpenCvSharp.IplImage iplSrc = matSrc.ToIplImage();
            OpenCvSharp.IplImage iplDst = iplSrc.Clone();

            // Prepare the marker image
            OpenCvSharp.IplImage iplMarker = new OpenCvSharp.IplImage(iplSrc.Size, OpenCvSharp.BitDepth.S32, 1);
            iplMarker.Zero();

            // Place markers on an evenly spaced grid
            OpenCvSharp.CvPoint[,] mpt = new OpenCvSharp.CvPoint[wdiv, hdiv];
            for (int i = 0; i < wdiv; i++)
            {
                for (int j = 0; j < hdiv; j++)
                {
                    mpt[i, j] = new OpenCvSharp.CvPoint((int)(iplSrc.Width / wdiv * (i + 0.5)), (int)(iplSrc.Height / hdiv * (j + 0.5)));
                    // Unique positive label per marker (watershed reserves 0 and -1)
                    iplMarker.Circle(mpt[i, j], msize, OpenCvSharp.CvScalar.ScalarAll(i * hdiv + j + 1), OpenCvSharp.Cv.FILLED, OpenCvSharp.LineType.Link8, 0);
                }
            }

            // Run the watershed segmentation
            OpenCvSharp.Cv.Watershed(iplSrc, iplMarker);

            // Draw the markers
            for (int i = 0; i < wdiv; i++)
            {
                for (int j = 0; j < hdiv; j++)
                {
                    iplDst.Circle(mpt[i, j], msize, OpenCvSharp.CvColor.White, 3, OpenCvSharp.LineType.Link8, 0);
                }
            }

            // Draw the region boundaries (watershed marks them with -1)
            for (int i = 0; i < iplMarker.Height; i++)
            {
                for (int j = 0; j < iplMarker.Width; j++)
                {
                    int idx = (int)(iplMarker.Get2D(i, j).Val0);
                    if (idx == -1)
                    {
                        iplDst.Set2D(i, j, OpenCvSharp.CvColor.Red);
                    }
                }
            }

            // Convert the IplImage back to a Mat
            matDst = new OpenCvSharp.CPlusPlus.Mat(iplDst);

            return matDst;
        }
Example #20
        private Dictionary<int, List<MarkerStructure>> MarkerTracker(OpenCvSharp.Mat frame, int video_idx, int frame_number)
        {
            if (frame.Empty())
                return null;

            using (var detector = new Aruco.Net.MarkerDetector())
            {
                Dictionary<int, List<MarkerStructure>> MarkerDict = new Dictionary<int, List<MarkerStructure>>();

                var cameraMatrix = new OpenCV.Net.Mat(3, 3, OpenCV.Net.Depth.F32, 1);   // placeholder intrinsics (no calibration data supplied)
                var distortion = new OpenCV.Net.Mat(1, 4, OpenCV.Net.Depth.F32, 1);     // placeholder distortion coefficients

                detector.ThresholdMethod = ThresholdMethod.AdaptiveThreshold;
                detector.Param1 = 7.0;
                detector.Param2 = 7.0;
                detector.MinSize = 0.04f;
                detector.MaxSize = 0.5f;
                detector.CornerRefinement = CornerRefinementMethod.Lines;

                // Detect markers in a sequence of camera images.
                var markerSize = 10;
                var image2 = 
                    new OpenCV.Net.Mat(new OpenCV.Net.Size(frame.Width, frame.Height), 
                    (OpenCV.Net.Depth)frame.Depth(), frame.Channels(), frame.Data);

                try {
                    var detectedMarkers = detector.Detect(image2, cameraMatrix, distortion, markerSize);
                    foreach (var marker in detectedMarkers)
                    {
                        // Group detections by marker id. TryGetValue leaves the out
                        // variable null when the id has not been seen yet, so the
                        // list is created on first sight and appended to afterwards.
                        // (The original branching re-added existing keys with
                        // Dictionary.Add, which throws on duplicate ids.)
                        List<MarkerStructure> entries;
                        if (!MarkerDict.TryGetValue(marker.Id, out entries))
                        {
                            entries = new List<MarkerStructure>();
                            MarkerDict[marker.Id] = entries;
                        }
                        entries.Add(new MarkerStructure(marker.Id, video_idx, frame_number,
                            new OpenCV.Net.Point2f(marker.Center.X - (marker.Size / 2), marker.Center.Y - (marker.Size / 2)),
                            new OpenCV.Net.Size((int)marker.Size, (int)marker.Size)));
                    }
                } catch(Exception)
                {
                    // Detection errors for this frame are swallowed; any markers
                    // found before the exception remain in MarkerDict.
                }

                return MarkerDict;
            }
        }
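A usage sketch, assuming frames come from an OpenCvSharp VideoCapture; the file path is hypothetical:

            using (var cap = new OpenCvSharp.VideoCapture("markers.mp4"))
            using (var frame = new OpenCvSharp.Mat())
            {
                int frameNo = 0;
                while (cap.Read(frame) && !frame.Empty())
                {
                    var markers = MarkerTracker(frame, 0, frameNo++);
                }
            }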
Example #21
 public static extern bool IsFace(IntPtr faceImg, IntPtr backImg, OpenCvSharp.CvRect SubRect);
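As written, the extern declaration will not compile without a DllImport attribute naming the native library; a sketch of the full P/Invoke form, where the DLL name is a placeholder:

 // Requires: using System.Runtime.InteropServices;
 [DllImport("FaceCompare.dll", CallingConvention = CallingConvention.Cdecl)]
 public static extern bool IsFace(IntPtr faceImg, IntPtr backImg, OpenCvSharp.CvRect SubRect);

 // A typical call passes the raw CvPtr of each IplImage:
 // bool hit = IsFace(face.CvPtr, background.CvPtr, new OpenCvSharp.CvRect(0, 0, face.Width, face.Height));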