A wrapper for the CvStructuringElementEx structure in OpenCV.
Inheritance: Emgu.Util.UnmanagedObject
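
A minimal usage sketch (an illustration written for this page, not taken from any of the projects below; it assumes the legacy Emgu CV 2.x API used in the examples, and the file names are placeholders). Because StructuringElementEx derives from UnmanagedObject, the kernel is disposed with a using block.

using Emgu.CV;
using Emgu.CV.Structure;

class StructuringElementExDemo
{
    static void Main()
    {
        // Load a grayscale image (placeholder path)
        using (Image<Gray, byte> img = new Image<Gray, byte>("input.png"))
        // 3x3 rectangular kernel anchored at its center (1, 1)
        using (StructuringElementEx kernel = new StructuringElementEx(3, 3, 1, 1, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT))
        {
            // Erode, then dilate, in place: a morphological opening that removes small specks
            CvInvoke.cvErode(img, img, kernel, 1);
            CvInvoke.cvDilate(img, img, kernel, 1);
            img.Save("output.png");
        }
    }
}
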
Code Example #1
        //::::::::::::::::: end of variables ::::::::::::::::::::::::::::::::::::::::::::::::::


        //::::::::::::: Morphological operations ::::::::::::::::::::::::::::::::::::::::::::::

        private Image<Gray, Byte> openingOperation(Image<Gray, Byte> binaryFrame)
        {
            // 3x9 rectangular kernel anchored at (1, 4)
            StructuringElementEx SElement = new StructuringElementEx(3, 9, 1, 4, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);

            // Opening (erode, then dilate) removes small foreground noise in place
            binaryFrame._MorphologyEx(SElement, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_OPEN, 1);

            return binaryFrame; 
        } //end openingOperation()
Code Example #2
        private Image<Gray, Byte> closeOperation(Image<Gray, Byte> binaryFrame)
        {
            // 3x11 rectangular kernel anchored at (1, 5)
            StructuringElementEx SElement = new StructuringElementEx(3, 11, 1, 5, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);

            // Closing (dilate, then erode) fills small gaps in the foreground in place
            binaryFrame._MorphologyEx(SElement, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, 1);

            return binaryFrame; 
        }
Code Example #3
 public override Image<Gray, byte> DetectSkin(Image<Bgr, byte> Img, IColor min, IColor max)
 {
     Image<Ycc, Byte> currentYCrCbFrame = Img.Convert<Ycc, Byte>();
     Image<Gray, byte> skin = currentYCrCbFrame.InRange((Ycc)min, (Ycc)max);
     StructuringElementEx rect_12 = new StructuringElementEx(12, 12, 6, 6, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
     CvInvoke.cvErode(skin, skin, rect_12, 1);
     StructuringElementEx rect_6 = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
     CvInvoke.cvDilate(skin, skin, rect_6, 2);
     return skin;
 }
Code Example #4
File: MainForm.cs Project: ArnoldPMolenaar/LeafZone
        public Image<Gray, Byte> GetContoursHard()
        {
            Image<Gray, Byte> cannyEdges = this.GrayscaleImage.Canny(this.trackBarThreshold.Value, this.trackBarThresholdLinking.Value);

            StructuringElementEx element = new StructuringElementEx(3, 3, 1, 1, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_CROSS);

            // CV_MOP_GRADIENT requires a temporary image, so allocate one instead of passing IntPtr.Zero
            Image<Gray, Byte> temp = new Image<Gray, Byte>(cannyEdges.Size);
            CvInvoke.cvMorphologyEx(cannyEdges, cannyEdges, temp, element, CV_MORPH_OP.CV_MOP_GRADIENT, 1);

            this.Contours = cannyEdges.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL);

            return cannyEdges;
        }
Code Example #5
        public override Image<Gray, byte> DetectSkin(Image<Bgr, byte> Img, IColor min, IColor max)
        {
            Image<Hsv, Byte> currentHsvFrame = Img.Convert<Hsv, Byte>();
            Image<Gray, byte> skin = currentHsvFrame.InRange((Hsv)min, (Hsv)max);

            // 3x3 rectangular kernel used to dilate the skin mask
            StructuringElementEx rect_6 = new StructuringElementEx(3, 3, 1, 1, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            CvInvoke.cvDilate(skin, skin, rect_6, 2);


            return skin;
        }
Code Example #6
        public Dictionary<Card, Point> LocateCards(Image<Bgr, Byte> table, Settings settings)
        {
            #region process image
            //Convert the image to grayscale and filter out the noise
            Image<Gray, Byte> gray = table.Convert<Gray, Byte>();

            Gray cannyThreshold = new Gray(180);
            Gray cannyThresholdLinking = new Gray(120);
            Gray circleAccumulatorThreshold = new Gray(120);

            Image<Gray, Byte> cannyEdges = gray.Canny(cannyThreshold, cannyThresholdLinking);

            StructuringElementEx el = new StructuringElementEx(3, 3, 1, 1, CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            cannyEdges = cannyEdges.MorphologyEx(el, CV_MORPH_OP.CV_MOP_CLOSE, 1);
            #endregion

            Contour<Point> contours = cannyEdges.FindContours(
                CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, //was CV_CHAIN_APPROX_SIMPLE
                RETR_TYPE.CV_RETR_TREE);

            ContourNode tree = new ContourNode(contours);

            Dictionary<Card, Point> cardlocs = new Dictionary<Card, Point>();
            foreach (KeyValuePair<Card, CardContour> pair in GiveCards(tree))
            {
                ContourNode node = pair.Value.Node;
                Card card = pair.Key;

                PointF fcenter = node.Contour.GetMinAreaRect().center;
                Point center = new Point((int)fcenter.X, (int)fcenter.Y);

                cardlocs.Add(card, center);
            }

            #region draw
            #if DEBUG
            TreeViz.VizualizeTree(tree);
            ContourAnalyzer.DrawContours(tree, table);
            //ImageViewer.Show(table);
            #endif
            #endregion

            return cardlocs;
        }
Code Example #7
        public override Image<Gray, byte> DetectSkin(Image<Bgr, byte> Img, IColor min, IColor max)
        {
            //Code adapted from here
            // http://blog.csdn.net/scyscyao/archive/2010/04/09/5468577.aspx
            // Look at this paper for reference (Chinese!!!!!)
            // http://www.chinamca.com/UploadFile/200642991948257.pdf

            Image<Ycc, Byte> currentYCrCbFrame = Img.Convert<Ycc, Byte>();
            Image<Gray, Byte> skin = new Image<Gray, Byte>(Img.Width, Img.Height);

            int y, cr, cb, x1, y1, value;

            int rows = Img.Rows;
            int cols = Img.Cols;
            Byte[,,] YCrCbData = currentYCrCbFrame.Data;
            Byte[,,] skinData = skin.Data;

            for (int i = 0; i < rows; i++)
                for (int j = 0; j < cols; j++)
                {
                    y = YCrCbData[i, j, 0];
                    cr = YCrCbData[i, j, 1];
                    cb = YCrCbData[i, j, 2];

                    cb -= 109;
                    cr -= 152;
                    x1 = (819 * cr - 614 * cb) / 32 + 51;
                    y1 = (819 * cr + 614 * cb) / 32 + 77;
                    x1 = x1 * 41 / 1024;
                    y1 = y1 * 73 / 1024;
                    value = x1 * x1 + y1 * y1;
                    if (y < 100)
                        skinData[i, j, 0] = (value < 700) ? (byte)255 : (byte)0;
                    else
                        skinData[i, j, 0] = (value < 850) ? (byte)255 : (byte)0;

                }
            StructuringElementEx rect_6 = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            CvInvoke.cvErode(skin, skin, rect_6, 1);
            CvInvoke.cvDilate(skin, skin, rect_6, 2);
            return skin;

        }
Code Example #8
        void processFrameAndUpdateGUI(object sender, EventArgs arg)
        {
            imgOriginal = capWebcam.QueryFrame();               // get next frame from the webcam

            if (imgOriginal == null)                            // if we did not get a frame
            {                                                   // show error via message box
                MessageBox.Show("unable to read from webcam" + Environment.NewLine + Environment.NewLine +
                                "exiting program");
                Environment.Exit(0);                            // and exit program
            }

            imgBlurredBGR = imgOriginal.SmoothGaussian(5);          // blur

            imgProcessed = imgBlurredBGR.InRange(new Bgr(0, 0, 175), new Bgr(100, 100, 256));       // filter on color

            imgProcessed = imgProcessed.SmoothGaussian(5);          // blur again

            StructuringElementEx structuringElementEx = new StructuringElementEx(5, 5, 1, 1, CV_ELEMENT_SHAPE.CV_SHAPE_RECT);       // declare structuring element to use in dilate and erode

            CvInvoke.cvDilate(imgProcessed, imgProcessed, structuringElementEx, 1);             // close image (dilate, then erode)
            CvInvoke.cvErode(imgProcessed, imgProcessed, structuringElementEx, 1);              // closing "closes" (i.e. fills in) foreground gaps

            CircleF[] circles = imgProcessed.HoughCircles(new Gray(100), new Gray(50), 2, imgProcessed.Height / 4, 10, 400)[0];     // fill variable circles with all circles in the processed image

            foreach (CircleF circle in circles)                     // for each circle
            {
                if (txtXYRadius.Text != "") txtXYRadius.AppendText(Environment.NewLine);        // if we are not on the first line in the text box then insert a new line char

                txtXYRadius.AppendText("ball position = x " + circle.Center.X.ToString().PadLeft(4) +           // print ball position and radius
                                       ", y = " + circle.Center.Y.ToString().PadLeft(4) +                       //
                                       ", radius = " + circle.Radius.ToString("###.000").PadLeft(7));           //

                txtXYRadius.ScrollToCaret();                // scroll down in text box so most recent line added (at the bottom) will be shown

                // draw a small green circle at the center of the detected object
                CvInvoke.cvCircle(imgOriginal, new Point((int)circle.Center.X, (int)circle.Center.Y), 3, new MCvScalar(0, 255, 0), -1, LINE_TYPE.CV_AA, 0);

                imgOriginal.Draw(circle, new Bgr(Color.Red), 3);        // draw a red circle around the detected object
            }
            ibOriginal.Image = imgOriginal;             // update image boxes on form
            ibProcessed.Image = imgProcessed;           //
        }
Code Example #9
        public override Image<Gray, byte> DetectSkin(Image<Bgr, byte> Img, IColor min, IColor max)
        {
            Image<Ycc, Byte> currentYCrCbFrame = Img.Convert<Ycc, Byte>();
            Image<Gray, Byte> skin = new Image<Gray, Byte>(Img.Width, Img.Height);

            int y, cr, cb, x1, y1, value;

            int rows = Img.Rows;
            int cols = Img.Cols;
            Byte[, ,] YCrCbData = currentYCrCbFrame.Data;
            Byte[, ,] skinData = skin.Data;

            for (int i = 0; i < rows; i++)
                for (int j = 0; j < cols; j++)
                {
                    y = YCrCbData[i, j, 0];
                    cr = YCrCbData[i, j, 1];
                    cb = YCrCbData[i, j, 2];

                    cb -= 109;
                    cr -= 152;
                    x1 = (819 * cr - 614 * cb) / 32 + 51;
                    y1 = (819 * cr + 614 * cb) / 32 + 77;
                    x1 = x1 * 41 / 1024;
                    y1 = y1 * 73 / 1024;
                    value = x1 * x1 + y1 * y1;
                    if (y < 100)
                        skinData[i, j, 0] = (value < 700) ? (byte)255 : (byte)0;
                    else
                        skinData[i, j, 0] = (value < 850) ? (byte)255 : (byte)0;

                }
            StructuringElementEx rect_6 = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            CvInvoke.cvErode(skin, skin, rect_6, 1);
            CvInvoke.cvDilate(skin, skin, rect_6, 2);
            return skin;
        }
Code Example #10
File: Form1.cs Project: s9908/naiMorse
        public frmMain()
        {
            InitializeComponent();
            Status = new frmStatus(); // window that shows a preview of what the application is doing
            obraz1 = new Image<Bgr, byte>(new Size(640, 480));
            tlo = new Image<Bgr, byte>(new Size(640, 480));
            // initialize the camera
            kamerka = new Capture(0);

            // thread that grabs frames from the camera
            th_pobierzObraz = new Thread(pobierzObraz);

            // thread that counts elapsed time
            th_liczCzas = new Thread(liczCzas);

            rect_12 = new StructuringElementEx(12, 12, 6, 6, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            rect_6 = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            lNapis.Text = "";
        }
Code Example #11
        private Image<Gray, Single> _testMorphological(Image<Gray, Single> img, int threshold, int maxGrayVal, int closingIteration, int cols, int rows, int anchorX, int anchorY, int shape)
        {
            img = img.Sobel(0, 1, 3);
            StructuringElementEx element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            if (shape == 1)
                element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_CROSS);
            else if (shape == 2)
                element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE);

            img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
            img = img.Rotate(90.0, new Gray(255), false);
            img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
            img = img.Rotate(-90.0, new Gray(255), false);
            if (threshold >= 0 && threshold <= maxGrayVal)
            {
                img = img.ThresholdBinary(new Gray(threshold), new Gray(255));
            }
            else
            {
                MessageBox.Show("Threshold is not appropriate, please choose another!");
            }
            return img;
        }
Code Example #12
        public void _testSobelParameters(Image<Gray, Single> img, int threshold, int maxGrayVal, int closingIteration, int cols, int rows, int anchorX, int anchorY, int shape)
        {
            int xOrder = 0, yOrder = 0, appetureSize = 1;
            for (appetureSize = 3; appetureSize < 8; appetureSize += 2)
            {
                for (xOrder = 0; xOrder < appetureSize-1; xOrder++)
                    for (yOrder = 0; yOrder < appetureSize-1; yOrder++)
                    {
                        img = img.Sobel(xOrder, yOrder, appetureSize);
                        StructuringElementEx element;
                        if (shape == 0)
                            element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
                        else if (shape == 1)
                            element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_CROSS);
                        else
                            element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE);

                        img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
                        img = img.Rotate(90.0, new Gray(255), false);
                        img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
                        img = img.Rotate(-90.0, new Gray(255), false);
                        if (threshold >= 0 && threshold <= maxGrayVal)
                        {
                            img = img.ThresholdBinary(new Gray(threshold), new Gray(255));
                        }
                        else
                        {
                            MessageBox.Show("Threshold is not appropriate, please choose another!");
                        }
                        img.Bitmap.Save(@"Sobel\" + xOrder + " " + yOrder + " " + appetureSize + ".png");
                    }
            }
        }
Code Example #13
 /// <summary>
 /// Applies a Sobel filter, a two-orientation morphological closing, and a binary threshold.
 /// </summary>
 /// <param name="img">Input Image</param>
 /// <param name="threshold">Threshold value (with 8-bit image is from 0 to 255)</param>
 /// <param name="maxGrayVal">Maximum Gray Value</param>
 /// <param name="closingIteration">number of Iterations</param>
 /// <returns>Processed Image</returns>
 public Image<Gray, Single> _imageProcessing(Image<Gray, Single> img, int threshold, int maxGrayVal, int closingIteration)
 {
     img = img.Sobel(0, 1, 3);
     StructuringElementEx element = new StructuringElementEx(3, 3, -1, -1, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE);
     img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
     img = img.Rotate(-90.0, new Gray(255), false);
     img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
     img = img.Rotate(90.0, new Gray(255), false);
     if (threshold >= 0 && threshold <= maxGrayVal)
     {
         img = img.ThresholdBinary(new Gray(threshold), new Gray(maxGrayVal));
     }
     else
     {
         //MessageBox.Show("Threshold is not appropriate, please choose another!");
          throw new ArgumentOutOfRangeException("threshold", "Threshold is not appropriate, please choose another threshold value!");
     }
     return img;
 }
Code Example #14
        public void testClosingParameters(Image<Gray, Single> inputImg)
        {
            int rows, colums, anchorX, anchorY, shape, closingIteration, threshold;
            int count = 0;
            Image<Gray, float> img = inputImg;
            for (closingIteration = 6; closingIteration < 7; closingIteration++)
            {
                for (shape = 0; shape < 1; shape++)
                {
                    for (rows = 5; rows < 7; rows++)
                        for (colums = 5; colums < 7; colums++)
                        {
                            for (anchorX = 0; anchorX < colums; anchorX++)
                                for (anchorY = 0; anchorY < rows; anchorY++)
                                {

                                    for (threshold = 80; threshold < 180; threshold += 20)
                                    {
                                        img = inputImg.Sobel(0, 1, 3);
                                        img = inputImg.Sobel(1, 0, 3);
                                        StructuringElementEx element = new StructuringElementEx(colums, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
                                        if (shape == 1)
                                            element = new StructuringElementEx(colums, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_CROSS);
                                        else if (shape == 2)
                                            element = new StructuringElementEx(colums, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE);

                                        img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
                                        img = img.Rotate(90.0, new Gray(255), false);
                                        img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
                                        img = img.Rotate(-90.0, new Gray(255), false);
                                        if (threshold >= 0 && threshold <= 255)
                                        {
                                            img = img.ThresholdBinary(new Gray(threshold), new Gray(255));
                                        }
                                        else
                                        {
                                            MessageBox.Show("Threshold is not appropriate, please choose another!");
                                        }
                                        string str_shape;
                                        if (shape == 0)
                                            str_shape = "RECTANGLE";
                                        else if (shape == 1)
                                            str_shape = "CROSS";
                                        else str_shape = "ELLIPSE";
                                        img.Bitmap.Save(@"Result\" + closingIteration + "_" + str_shape + "_" + rows + "_" + colums + "_" + anchorX + "_" + anchorY + "_" + threshold + ".bmp");
                                        count++;
                                    }
                                }
                        }
                }
            }
            MessageBox.Show("Done " + count + " images saved!");
        }
Code Example #15
        /// <summary>
        /// LocateCards works by analyzing the contours in the image. 
        /// For instance, the Diamond in Set is a polygon with exactly 4 vertices. 
        /// The Oval has no such distinguishing features. 
        /// The Squiggle is not convex but concave, and has edges in a 'right bend' instead of only 'left bends'.
        /// 
        /// All these shapes are inside the contour of a (white) card, which is a rounded square. 
        /// Cards may also be the (only) exterior boundaries.
        /// </summary>
        /// <param name="table">An image displaying the table with the Set cards</param>
        /// <returns>A dict locating which cards are present where in the image</returns>
        public Dictionary<Card, Point> LocateCards(Image<Bgr, Byte> table, Settings settings)
        {
            classifier = new BgrHsvClassifier();
            classifier.Train();

            #region process image
            //Convert the image to grayscale and filter out the noise
            Image<Gray, Byte> gray = table.Convert<Gray, Byte>();

            Gray cannyThreshold = new Gray(50); //180
            Gray cannyThresholdLinking = new Gray(30); //120
            Gray circleAccumulatorThreshold = new Gray(100); //120
            #region old
            Image<Gray, Byte> cannyEdges = gray.Canny(cannyThreshold, cannyThresholdLinking);
            if (settings.debuglevel >= 3)
            {
                ImageViewer.Show(cannyEdges, "cannyEdges before Closing");
            }
            #endregion
            //#region new
            //Image<Gray, Byte> thresholded = new Image<Gray, byte>(gray.Size);
            //CvInvoke.cvAdaptiveThreshold(gray.Ptr, thresholded.Ptr,
            //    255,
            //    ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
            //    THRESH.CV_THRESH_BINARY_INV, 9, 5);
            //StructuringElementEx el1 = new StructuringElementEx(3, 3, 1, 1, CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            //thresholded = thresholded.Erode(1);//thresholded.MorphologyEx(el1, CV_MORPH_OP.CV_MOP_CLOSE, 1);//
            //Image<Gray, Byte> cannyEdges = thresholded;//.Canny(new Gray(1), new Gray(10));
            //#endregion

            //StructuringElementEx el = new StructuringElementEx(5, 5, 2, 2, CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            StructuringElementEx el = new StructuringElementEx(3, 3, 1, 1, CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            cannyEdges = cannyEdges.MorphologyEx(el, CV_MORPH_OP.CV_MOP_CLOSE, 1);
            if (settings.debuglevel >= 3)
            {
                ImageViewer.Show(cannyEdges, "cannyEdges after Closing");
            }
            #endregion

            Contour<Point> contours = cannyEdges.FindContours(
                CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, //was CV_CHAIN_APPROX_SIMPLE
                RETR_TYPE.CV_RETR_TREE);

            ContourNode tree = new ContourNode(contours);

            FilterTree(tree);
            #region debug
            if (settings.debuglevel >= 3)
            {
                var debug = table.Clone();
                DrawContours(tree, debug);
                ImageViewer.Show(debug, "Contours after filtering");
            }
            #endregion

            AssignShapes(tree);
            AssignImages(tree, table, true);
            #region debug
            if (settings.debuglevel >= 3)
            {
                var debug1 = table.Clone();
                DrawContours(tree, debug1);
                ImageViewer.Show(debug1);
            }
            #endregion

            FilterTree(tree);

            #region debug
            if (settings.debuglevel >= 3)
            {
                var debug2 = table.Clone();
                DrawContours(tree, debug2);
                ImageViewer.Show(debug2);
            }
            #endregion

            AssignColors(tree, table, settings);

            #region debug
            if (settings.debuglevel >= 3)
            {
                TreeViz.VizualizeTree(tree);
            }
            #endregion

            AssignFills(tree);

            Dictionary<Card, Point> cardlocs = new Dictionary<Card, Point>();
            foreach (KeyValuePair<Card, ContourNode> pair in GiveCards(tree))
            {
                ContourNode node = pair.Value;
                Card card = pair.Key;

                PointF fcenter = node.Contour.GetMinAreaRect().center;
                Point center = new Point((int)fcenter.X, (int)fcenter.Y);

                cardlocs.Add(card, center);
            }

            #region debug
            if (settings.debuglevel >= 1)
            {
                TreeViz.VizualizeTree(tree);
            }
            if (settings.debuglevel >= 2)
            {
                DrawContours(tree, table);
                ImageViewer.Show(table);
            }
            #endregion
            return cardlocs;
        }
Code Example #16
File: Form1.cs Project: srivera4/imageWithSpeech
        private Image<Gray, Byte> erodeDilateImage(Image<Gray, Byte> img)
        {
            if (erodeDimensionTB.Text != "" && erodeDimensionTB.Text != "0" && erodeOffsetTB.Text != "" && erodeOffsetTB.Text != "0")
            {
                erodeDimension = Convert.ToInt32(erodeDimensionTB.Text);
                erodeOffset = Convert.ToInt32(erodeOffsetTB.Text);

                if (erodeDimension > erodeOffset)
                {
                    StructuringElementEx rectErode = new StructuringElementEx(erodeDimension, erodeDimension, erodeOffset, erodeOffset, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
                    CvInvoke.cvErode(img, img, rectErode, 1);
                }
            }

            if (dilateDimensionTB.Text != "" && dilateDimensionTB.Text != "0" && dilateOffsetTB.Text != "" && dilateOffsetTB.Text != "0")
            {
                dilateDimension = Convert.ToInt32(dilateDimensionTB.Text);
                dilateOffset = Convert.ToInt32(dilateOffsetTB.Text);

                if (dilateDimension > dilateOffset)
                {
                    StructuringElementEx rectDilate = new StructuringElementEx(dilateDimension, dilateDimension, dilateOffset, dilateOffset, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
                    CvInvoke.cvDilate(img, img, rectDilate, 2);
                }
            }

            return img;
        }
Code Example #17
File: Form1.cs Project: mrcancer91/Driving-Bind-Spot
        private Image<Gray, Single> _testMorphological(Image<Gray, Single> img, int threshold, int maxGrayVal, int closingIteration, int cols, int rows, int anchorX, int anchorY, int shape)
        {
            int xOrder = Int32.Parse(txtXorder.Text), yOrder = Int32.Parse(txtYorder.Text),
                appetureSize = Int32.Parse(txtAppetureSize.Text);
            //img = img.Sobel(0, 1, 5);
            //img = img.Sobel(xOrder, yOrder, appetureSize);
            img = sobel_Filter(img);

            StructuringElementEx element;
            if (shape == 0)
                element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            else if (shape == 1)
                element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_CROSS);
            else
                element = new StructuringElementEx(cols, rows, anchorX, anchorY, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE);

            img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
            img = img.Rotate(90.0, new Gray(255), false);
            img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
            img = img.Rotate(-90.0, new Gray(255), false);
            if (threshold >= 0 && threshold <= maxGrayVal)
            {
                img = img.ThresholdBinary(new Gray(threshold), new Gray(255));
            }
            else
            {
                MessageBox.Show("Threshold is not appropriate, please choose another!");
            }
            //img.Bitmap.Save(@"Sobel\" + xOrder + " " + yOrder + " " + appetureSize + ".png");
            return img;
        }
Code Example #18
File: AutoTestVarious.cs Project: samuto/UnityOpenCV
 public void TestMorphEx()
 {
     StructuringElementEx element1 = new StructuringElementEx(3, 3, 1, 1, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_CROSS);
      StructuringElementEx element2 = new StructuringElementEx(new int[3, 3] { { 0, 1, 0 }, { 1, 0, 1 }, { 0, 1, 0 } }, 1, 1);
      Image<Bgr, Byte> tmp = new Image<Bgr, byte>(100, 100);
      Image<Bgr, Byte> tmp2 = tmp.MorphologyEx(element1, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_GRADIENT, 1);
      Image<Bgr, Byte> tmp3 = tmp.MorphologyEx(element2, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_BLACKHAT, 1);
 }
Code Example #19
        // Finds the skin contours and bounding boxes in a supplied image
        // This part was developed by ourselves
        // It detects and isolates the skin regions of the image supplied to the class, based on HSV colors
        // The remaining pixels are then eroded and dilated to obtain a "cleaner" image and reduce noise and imperfections
        // All contours in the resulting image are then searched, and the largest one is set aside so it can be processed later
        public void FindSkinContours()
        {
            // Convert the image colors to HSV
            Image<Hsv, Byte> hsvImg = imgOrg.Convert<Hsv, Byte>();

            // Filter the image pixels to keep only those close to skin color
            // We found these values on the Internet and adapted them slightly for our use
            imgSkin = hsvImg.InRange(new Hsv(0, 48, 80), new Hsv(20, 255, 255));

            imgSkin = imgSkin.ThresholdBinary(new Gray(200), new Gray(255));

            // Erode and dilate to remove imperfections
            // We found these values by trial and error, aiming for the cleanest possible hand contour
            StructuringElementEx erodeStrctEl = new StructuringElementEx(4, 4, 2, 2, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            CvInvoke.cvErode(imgSkin, imgSkin, erodeStrctEl, 1);
            StructuringElementEx dilateStrctEl = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
            CvInvoke.cvDilate(imgSkin, imgSkin, dilateStrctEl, 3);

            // Loop over all contours found in the image
            for (Contour<Point> contours = imgSkin.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, contoursStorage); contours != null; contours = contours.HNext)
            {
                // If a contour is larger than 20x20 pixels, process it
                if (contours.BoundingRectangle.Width > 20 && contours.BoundingRectangle.Height > 20)
                {
                    // Get the bounding box around the contour
                    Rectangle rect = contours.BoundingRectangle;

                    // Enlarge the bounding box and make sure it stays inside the image
                    rect.X = rect.X - 10;
                    rect.X = (rect.X < 0) ? 0 : rect.X;
                    rect.Y = rect.Y - 10;
                    rect.Y = (rect.Y < 0) ? 0 : rect.Y;

                    rect.Height = (rect.Height + 20);
                    rect.Width = (rect.Width + 20);

                    // Add the bounding box and the contour to the lists
                    boundingBoxes.Add(rect);
                    contoursList.Add(contours);
                }
            }

            // If contours were found, look for the largest one
            if(boundingBoxes.Count > 0)
            {
                FindBiggestBoundingBox();
                FindBiggestContour();
                isHandDetected = true;
            }
            else
            {
                // Otherwise, nothing was detected
                isHandDetected = false;
                fingerNum = 0;
            }
        }
Code Example #20
        private GrayImage removeGradient(GrayImage image)
        {
            if (!preprocessorRemoveGrading)
                return image;

            // TODO: test whether this morphological step improves results
            GrayImage grayImage = image.Copy();
            StructuringElementEx element = new StructuringElementEx(2, 2, 0, 0, CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE);
            grayImage._MorphologyEx(element, CV_MORPH_OP.CV_MOP_CLOSE, 1);
            saveDebugImage(grayImage, "after Morph");
            element.Dispose();

            //grayImage._ThresholdBinary(new Gray(120), new Gray(255));
            GrayImage paddedImage = new GrayImage(350, 350);
            CvInvoke.cvCopyMakeBorder(grayImage, paddedImage, new Point(25, 25), BORDER_TYPE.CONSTANT, new MCvScalar(0));
            //grayImage.Dispose();
            grayImage = paddedImage;

            saveDebugImage(grayImage, "Binary image (larger)");

            for (var contour = grayImage.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_CCOMP); contour != null; contour = contour.HNext)
            {
                Point[] points = contour.ToArray();
                MCvBox2D box = PointCollection.MinAreaRect(Array.ConvertAll(points, item => (PointF)item));

                if (box.size.Height < 5 || box.size.Width < 5)
                    grayImage.FillConvexPoly(points, new Gray(0));
            }

            grayImage._ThresholdBinaryInv(new Gray(128), new Gray(255));
            for (var contour = grayImage.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_CCOMP); contour != null; contour = contour.HNext)
            {
                Point[] points = contour.ToArray();
                MCvBox2D box = PointCollection.MinAreaRect(Array.ConvertAll(points, item => (PointF)item));

                //Console.WriteLine("hight: " + box.size.Height.ToString() + " Witdh: " + box.size.Width.ToString());
                if (box.size.Height < 5 || box.size.Width < 5)
                    grayImage.FillConvexPoly(points, new Gray(0));
            }
            grayImage._ThresholdBinaryInv(new Gray(128), new Gray(255));
            saveDebugImage(grayImage, "Grading removed");

            return grayImage.GetSubRect(new Rectangle(25, 25, 300, 300));
        }
Code Example #21
File: Form1.cs Project: vutiendung/Test-Find
 private void cogian()
 {
     Image<Gray, Byte> eroded = new Image<Gray, byte>(grayImg.Size);
     Image<Gray, Byte> temp = new Image<Gray, byte>(grayImg.Size);
     Image<Gray, Byte> skel = new Image<Gray, byte>(grayImg.Size);
     //skel.SetValue(0);
     // CvInvoke.cvThreshold(grayImg, grayImg, 127, 256, 0);
     StructuringElementEx element = new StructuringElementEx(10, 10, 1, 1, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_CROSS);
     CvInvoke.cvErode(grayImg, eroded, element, 1);
     CvInvoke.cvDilate(eroded, temp, element, 1);
     temp = grayImg.Sub(temp);
     skel = skel | temp;
     grayImg = eroded;
     pictureBox3.Image = grayImg.ToBitmap();
 }
Code Example #22
File: MovementBrain.cs Project: kovacshuni/slrt
 private Image<Gray, Byte> detectSkin(Image<Bgr, Byte> Img)
 {
     Image<Ycc, Byte> currentYCrCbFrame = Img.Convert<Ycc, Byte>();
     Image<Gray, byte> skin = new Image<Gray, byte>(Img.Width, Img.Height);
     skin = currentYCrCbFrame.InRange(new Ycc(0, 131, 80), new Ycc(255, 185, 135));
     StructuringElementEx rect_12 = new StructuringElementEx(12, 12, 6, 6, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
     CvInvoke.cvErode(skin, skin, rect_12, 1);
     StructuringElementEx rect_6 = new StructuringElementEx(6, 6, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
     CvInvoke.cvDilate(skin, skin, rect_6, 2);
     return skin;
 }
Code Example #23
File: Form1.cs Project: mrcancer91/Driving-Bind-Spot
 private Image<Gray, Single> morfologyEx(Image<Gray, Single> img)
 {
     StructuringElementEx element = new StructuringElementEx(5, 5, 3, 3, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
     img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
     return img;
 }
Code Example #24
File: Form1.cs Project: mrcancer91/Driving-Bind-Spot
 /// <summary>
 /// 
 /// </summary>
 /// <param name="img">Input Image</param>
 /// <param name="threshold">Threshold value (with 8-bit image is from 0 to 255)</param>
 /// <param name="maxGrayVal">Maximum Gray Value</param>
 /// <param name="closingIteration">number of Iterations</param>
 /// <returns>Processed Image</returns>
 private Image<Gray, Single> _imageProcessing(Image<Gray, Single> img, int threshold, int maxGrayVal, int closingIteration)
 {
      img = img.Sobel(0, 1, 3); // Sobel aperture size must be odd (1, 3, 5 or 7)
     StructuringElementEx element = new StructuringElementEx(4, 4, 1, 1, Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_RECT);
     //MessageBox.Show((int)Emgu.CV.CvEnum.CV_ELEMENT_SHAPE.CV_SHAPE_ELLIPSE + "");
     img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
     img = img.Rotate(90.0, new Gray(maxGrayValue), false);
     img = img.MorphologyEx(element, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, closingIteration);
     img = img.Rotate(-90.0, new Gray(maxGrayValue), false);
     if (threshold >= 0 && threshold <= maxGrayVal)
     {
         img = img.ThresholdBinary(new Gray(threshold), new Gray(maxGrayValue));
     }
     else
     {
         MessageBox.Show("Threshold is not appropriate, please choose another!");
     }
     return img;
 }
Code Example #25
File: MatchImage.cs Project: pakerliu/sharp-context
        public double MatchIteration(Matrix X, Matrix Y, Matrix V1, Matrix V2, Matrix t1, Matrix t2)
        {
            timer.Clear();
            double timeused = 0;

            timer.Restart();
            var tk = t1.Clone(); // tk=t1;
            int w = 4;
            int ndum = (int)(nsamp * ndum_frac);
            nsamp1 = nsamp2 = nsamp;

            #region demo2 iteration
            Matrix Xk = X.Clone();
            int N1 = V1.Rows, N2 = V2.Columns;

            if (display_flag) {
                Draw(X, Y, V1, V2, @"D:\Play Data\Iteration\原图.bmp", "原始图像和采样");
                //DrawGradient(X, Y, t1, t2, N2, N1, @"D:\Play Data\Iteration\原图梯度.bmp", "原始图像切向量");
            }

            int k = 0;
            var out_vec_1 = Utils.InitArray<bool>(nsamp1, false);//out_vec_1=zeros(1,nsamp1);
            var out_vec_2 = Utils.InitArray<bool>(nsamp2, false);//out_vec_2=zeros(1,nsamp2);

            double ori_weight = 0.1;
            double tan_eps = 1.0;
            bool affine_start_flag = true;
            bool polarity_flag = true;
            double matchcost = double.MaxValue;
            double sc_cost = 0, aff_cost = 0, E = 0;

            Matrix cx = null, cy = null; // cx, cy are the two solution columns of the interpolation linear system
            Matrix axt = null, wxt = null, ayt = null, wyt = null, d2 = null, U = null,
                X2 = null, Y2 = null, X2b = null, X3b = null, Y3 = null;
            double mean_dist_1 = 0, mean_dist_2 = 0;
            var min1 = new[] { new { Val = 0.0, Idx = 0 } };
            var min2 = min1;

            #region Coordinates used for drawing the grid
            Matrix coordX = null, coordY = null;
            int coordMargin = (int)(N1 * coordMarginRate);
            MatrixUtils.CreateGrid(N1 + coordMargin * 2, N2 + coordMargin * 2, out coordX, out coordY);
            coordX = coordX.Each(v => v - coordMargin * 2);
            coordY = coordY.Each(v => v - coordMargin * 2);
            //int MM = N1 * N2 / 25; // M=length(x);
            #endregion

            timeused += timer.StopAndSay("初始化");

            while (k < n_iter) {
                Debug("Iter={0}", k);

                #region Compute the two shape contexts
                timer.Restart();
                // [BH1,mean_dist_1]=sc_compute(Xk',zeros(1,nsamp),mean_dist_global,nbins_theta,nbins_r,r_inner,r_outer,out_vec_1);
                var BH1 = ComputeSC(Xk.Transpose(), Zeros(1, nsamp), mean_dist_global, out mean_dist_1, out_vec_1);
                //var BH1 = ComputeSC(Xk.Transpose(), t1.Transpose(), mean_dist_global, out mean_dist_1, out_vec_1);

                // [BH2,mean_dist_2]=sc_compute(Y',zeros(1,nsamp),mean_dist_global,nbins_theta,nbins_r,r_inner,r_outer,out_vec_2);
                var BH2 = ComputeSC(Y.Transpose(), Zeros(1, nsamp), mean_dist_global, out mean_dist_2, out_vec_2);
                //var BH2 = ComputeSC(Y.Transpose(), t2.Transpose(), mean_dist_global, out mean_dist_2, out_vec_2);

                timeused += timer.StopAndSay("计算两个形状上下文");

                Debug("Mean_dist_1:{0:F4}", mean_dist_1);
                Debug("Mean_dist_2:{0:F4}", mean_dist_2);
                #endregion

                #region Compute lambda_o and beta_k
                double lambda_o;
                if (affine_start_flag) {
                    if (k == 0)
                        lambda_o = 1000;
                    else
                        lambda_o = beta_init * Math.Pow(r, k - 1); // lambda_o=beta_init*r^(k-2);
                } else {
                    lambda_o = beta_init * Math.Pow(r, k); // lambda_o=beta_init*r^(k-1);
                }
                double beta_k = mean_dist_2 * mean_dist_2 * lambda_o;
                #endregion

                #region Compute the cost matrix
                timer.Restart();
                var costmat_shape = HistCost(BH1, BH2); // costmat_shape = hist_cost_2(BH1, BH2);

                // theta_diff=repmat(tk,1,nsamp)-repmat(t2',nsamp,1);
                var theta_diff = tk.RepMat(1, nsamp) - t2.Transpose().RepMat(nsamp, 1);

                Matrix costmat_theta;
                if (polarity_flag) {
                    // costmat_theta=0.5*(1-cos(theta_diff));
                    //costmat_theta = 0.5 * (Ones(costmat_shape.Rows, costmat_shape.Columns) - theta_diff.Each(v => Math.Cos(v)));
                    costmat_theta = theta_diff.Each(v => 0.5 * (1 - Math.Cos(v)));
                } else {
                    // costmat_theta=0.5*(1-cos(2*theta_diff));
                    //costmat_theta = 0.5 * (Ones(costmat_shape.Rows, costmat_shape.Columns) - theta_diff.Each(v => Math.Cos(2 * v)));
                    costmat_theta = theta_diff.Each(v => 0.5 * (1 - Math.Cos(2 * v)));
                }
                // costmat=(1-ori_weight)*costmat_shape+ori_weight*costmat_theta;
                var costmat = (1 - ori_weight) * costmat_shape + ori_weight * costmat_theta;

                int nptsd = nsamp + ndum; // nptsd=nsamp+ndum;
                var costmat2 = new DenseMatrix(nptsd, nptsd, eps_dum); // costmat2=eps_dum*ones(nptsd,nptsd);
                costmat2.SetSubMatrix(0, nsamp, 0, nsamp, costmat); // costmat2(1:nsamp,1:nsamp)=costmat;
                timeused += timer.StopAndSay("计算代价矩阵");
                #endregion

                #region Hungarian algorithm
                timer.Restart();
                var costmat_int = new int[nptsd, nptsd];
                for (int i = 0; i < nptsd; ++i) {
                    for (int j = 0; j < nptsd; ++j) {
                        costmat_int[i, j] = (int)(costmat2[i, j] * 10000);
                    }
                }
                var km = new KM(nptsd, costmat_int);
                km.Match(false);
                matchcost = km.MatchResult / 10000.0;
                int[] cvec = km.MatchPair; // cvec=hungarian(costmat2);
                timeused += timer.StopAndSay("匈牙利算法");
                #endregion

                #region Compute the outlier flag vectors and reorder the matched points
                timer.Restart();
                int[] cvec2 = cvec.Select((v, i) => new { Val = v, Idx = i })
                                  .OrderBy(v => v.Val)
                                  .Select(v => v.Idx)
                                  .ToArray();// [a,cvec2]=sort(cvec);
                out_vec_1 = cvec2.Take(nsamp1).Select(v => v > nsamp2).ToArray(); // out_vec_1=cvec2(1:nsamp1)>nsamp2;
                out_vec_2 = cvec.Take(nsamp2).Select(v => v > nsamp1).ToArray(); // out_vec_2=cvec(1:nsamp2)>nsamp1;

                //X2 = NaNs(nptsd, 2); // X2=NaN*ones(nptsd,2);
                //X2.SetSubMatrix(0, nsamp1, 0, X2.Columns, Xk); // X2(1:nsamp1,:)=Xk;
                //X2 = X2.SortRowsBy(cvec); // X2=X2(cvec,:);

                X2b = NaNs(nptsd, 2); // X2b=NaN*ones(nptsd,2);
                X2b.SetSubMatrix(0, nsamp1, 0, X2b.Columns, X); // X2b(1:nsamp1,:)=X;
                X2b = X2b.SortRowsBy(cvec); // X2b=X2b(cvec,:);

                Y2 = NaNs(nptsd, 2); // Y2=NaN*ones(nptsd,2);
                Y2.SetSubMatrix(0, nsamp2, 0, Y2.Columns, Y); // Y2(1:nsamp2,:)=Y;

                var ind_good = X2b.GetColumn(1).Take(nsamp).FindIdxBy(v => !double.IsNaN(v)); // ind_good=find(~isnan(X2b(1:nsamp,1)));
                int n_good = ind_good.Length; // n_good=length(ind_good);

                X3b = X2b.FilterRowsBy(ind_good);//  X3b=X2b(ind_good,:);
                Y3 = Y2.FilterRowsBy(ind_good); // Y3=Y2(ind_good,:);
                timeused += timer.StopAndSay("计算野点标记向量,重排匹配点");
                #endregion

                #region figure 2
                if (display_flag) {
                    //figure(2)
                    //plot(X2(:,1),X2(:,2),'b+',Y2(:,1),Y2(:,2),'ro')
                    //hold on
                    //h=plot([X2(:,1) Y2(:,1)]',[X2(:,2) Y2(:,2)]','k-');

                    //if display_flag
                    //%	 set(h,'linewidth',1)
                    //quiver(Xk(:,1),Xk(:,2),cos(tk),sin(tk),0.5,'b') // draw arrows
                    //quiver(Y(:,1),Y(:,2),cos(t2),sin(t2),0.5,'r')
                    DrawGradient(Xk, Y, tk, t2, N2, N1,
                                 String.Format(@"D:\Play Data\Iteration\梯度\{0}.bmp", k),
                                 String.Format("Iter={0}梯度方向\n匹配点数{1}", k, n_good));
                    //end
                    //hold off
                    //axis('ij')
                    //title([int2str(n_good) ' correspondences (warped X)'])
                    //axis([1 N2 1 N1])
                    //drawnow
                }
                #endregion

                #region figure 3: show the correspondences between the untransformed images
                if (display_flag) {
                    //% show the correspondences between the untransformed images
                    //figure(3)
                    //plot(X(:,1),X(:,2),'b+',Y(:,1),Y(:,2),'ro')
                    //ind=cvec(ind_good);
                    //hold on
                    //plot([X2b(:,1) Y2(:,1)]',[X2b(:,2) Y2(:,2)]','k-')
                    //hold off
                    //axis('ij')
                    //title([int2str(n_good) ' correspondences (unwarped X)'])
                    //axis([1 N2 1 N1])
                    //drawnow
                    Draw(X, Y, cvec, null, N2, N1, String.Format(@"D:\Play Data\Iteration\匹配\{0}.bmp", k),
                         String.Format("Iter={0}\n匹配代价{1},匹配数{2}", k, matchcost, n_good));
                }
                #endregion

                #region Solve for the transformation matrix
                timer.Restart();
                Bookstein(X3b, Y3, beta_k, ref cx, ref cy, ref E); // [cx,cy,E]=bookstein(X3b,Y3,beta_k);
                timeused += timer.StopAndSay("求解变换矩阵");
                #endregion

                #region Transform the points and gradients using the solved transformation
                timer.Restart();
                // % calculate affine cost
                var A = MatrixUtils.RankHorizon(
                    cx.GetSubMatrix(n_good + 1, 2, 0, 1), cy.GetSubMatrix(n_good + 1, 2, 0, 1)
                );//A=[cx(n_good+2:n_good+3,:) cy(n_good+2:n_good+3,:)];
                var s = new Svd(A, true).S(); // s=svd(A);
                aff_cost = Math.Log(s[0] / s[1]); // aff_cost=log(s(1)/s(2));

                // % calculate shape context cost
                min1 = costmat.GetColumns().Select(col => {
                    int minwhere = 0;
                    for (int i = 1; i < col.Count; ++i) {
                        if (col[i] < col[minwhere]) minwhere = i;
                    }
                    return new { Val = col[minwhere], Idx = minwhere };
                }).ToArray();// [a1,b1]=min(costmat,[],1);
                min2 = costmat.GetRows().Select(row => {
                    int minwhere = 0;
                    for (int i = 1; i < row.Count; ++i) {
                        if (row[i] < row[minwhere]) minwhere = i;
                    }
                    return new { Val = row[minwhere], Idx = minwhere };
                }).ToArray(); // [a2,b2]=min(costmat,[],2);}
                sc_cost = Math.Max(min1.Average(a => a.Val), min2.Average(a => a.Val)); // sc_cost=max(mean(a1),mean(a2));

                // % warp each coordinate
                axt = cx.GetSubMatrix(n_good, 3, 0, 1).Transpose(); // axt is the last three entries of cx, i.e. the x component of the affine part a
                wxt = cx.GetSubMatrix(0, n_good, 0, 1).Transpose(); // wxt is the first n_good entries of cx, i.e. the x component of the warp coefficients w
                ayt = cy.GetSubMatrix(n_good, 3, 0, 1).Transpose();
                wyt = cy.GetSubMatrix(0, n_good, 0, 1).Transpose();

                d2 = Dist2(X3b, X).Each(v => v > 0 ? v : 0); // d2=max(dist2(X3b,X),0);
                U = d2.PointMultiply(d2.Each(v => Math.Log(v + Epsilon))); // U=d2.*log(d2+eps);

                Debug("MatchCost:{0:F4}\taff_cost:{1:F4}\tsc_cost:{2:F4}\tE:{3:F8}", matchcost, aff_cost, sc_cost, E);

                var Z = Transformation(X, U, axt, wxt, ayt, wyt);

                //% apply the warp to the tangent vectors to get the new angles
                var Xtan = X + tan_eps * MatrixUtils.RankHorizon(t1.Each(Math.Cos), t1.Each(Math.Sin)); // Xtan=X+tan_eps*[cos(t1) sin(t1)];
                d2 = Dist2(X3b, Xtan).Each(v => v > 0 ? v : 0); // d2=max(dist2(X3b,Xtan),0);
                U = d2.PointMultiply(d2.Each(v => Math.Log(v + Epsilon))); // U=d2.*log(d2+eps);
                //Transformation(Xtan, U, axt, wxt, ayt, wyt, out fx, out fy);
                //var Ztan = MatrixUtils.RankVertical(fx, fy).Transpose(); // Ztan=[fx; fy]';
                var Ztan = Transformation(Xtan, U, axt, wxt, ayt, wyt);
                for (int i = 0; i < nsamp; ++i) {
                    tk[i, 0] = Math.Atan2(Ztan[i, 1] - Z[i, 1], Ztan[i, 0] - Z[i, 0]);
                }//tk=atan2(Ztan(:,2)-Z(:,2),Ztan(:,1)-Z(:,1));
                timeused += timer.StopAndSay("通过解出来的变换对点和梯度进行变换");
                #endregion

                #region figure 4: show the warped point set
                if (display_flag) {
                    //figure(4)
                    //plot(Z(:,1),Z(:,2),'b+',Y(:,1),Y(:,2),'ro');
                    //axis('ij')
                    //title(['k=' int2str(k) ', \lambda_o=' num2str(lambda_o) ', I_f=' num2str(E) ', aff.cost=' num2str(aff_cost) ', SC cost=' num2str(sc_cost)])
                    //axis([1 N2 1 N1])
                    //% show warped coordinate grid
                    //fx_aff=cx(n_good+1:n_good+3)'*[ones(1,M); x'; y'];
                    //d2=dist2(X3b,[x y]);
                    //fx_wrp=cx(1:n_good)'*(d2.*log(d2+eps));
                    //fx=fx_aff+fx_wrp;
                    //fy_aff=cy(n_good+1:n_good+3)'*[ones(1,M); x'; y'];
                    //fy_wrp=cy(1:n_good)'*(d2.*log(d2+eps));
                    //fy=fy_aff+fy_wrp;
                    //hold on
                    //plot(fx,fy,'k.','markersize',1)
                    //hold off
                    //drawnow
                    d2 = Dist2(X3b, MatrixUtils.RankHorizon(coordX, coordY));//d2=dist2(X3b,[x y]);
                    U = d2.PointMultiply(d2.Each(v => Math.Log(v + Epsilon)));
                    //Transformation(MatrixUtils.RankHorizon(coordX, coordY), U, axt, wxt, ayt, wyt, out fx, out fy);
                    //var coordsT = MatrixUtils.RankVertical(fx, fy).Transpose();
                    var coordsT = Transformation(MatrixUtils.RankHorizon(coordX, coordY), U, axt, wxt, ayt, wyt);

                    Draw(Z, Y, null, coordsT, N2, N1, String.Format(@"D:\Play Data\Iteration\变换\{0}.bmp", k),
                        String.Format("Iter={0}\nλo={1:F4},If={2:F4},aff_cost{3:F4},sc_cost{4:F4}", k, lambda_o, E, aff_cost, sc_cost));
                }
                #endregion

                Xk = Z.Clone();
                ++k;
            }

            #endregion

            #region Cost computation after the iterations finish

            #region My attempt to compute Dsc
            // Xk is the result of transforming Q, and Y is the template shape P
            /*
             * Dsc = Avg_p_in_P( min_q_in_Q C(p, T(q)) ) + Avg_q_in_Q( min_p_in_P C(p, T(q)) )
             */
            double mean_dist_final_1;
            var scQ = ComputeSC(Xk.Transpose(), Zeros(1, nsamp), mean_dist_global, out mean_dist_final_1, out_vec_1);
            //var scQ = ComputeSC(Xk.Transpose(), Zeros(1, nsamp), mean_dist_1, out mean_dist_final_1, out_vec_1);
            //var scQ = ComputeSC(Xk.Transpose(), t1.Transpose(), mean_dist_1, out mean_dist_final_1, out_vec_1);

            double mean_dist_final_2;
            var scP = ComputeSC(Y.Transpose(), Zeros(1, nsamp), mean_dist_global, out mean_dist_final_2, out_vec_2);
            //var scP = ComputeSC(Y.Transpose(), Zeros(1, nsamp), mean_dist_2, out mean_dist_final_2, out_vec_2);
            //var scP = ComputeSC(Y.Transpose(), t2.Transpose(), mean_dist_2, out mean_dist_final_2, out_vec_2);

            var costmat_final = HistCost(scQ, scP);
            double distance_sc = costmat_final.GetRows().Select(row => row.Min()).Average()
                               + costmat_final.GetColumns().Select(col => col.Min()).Average();
            Debug("distance_sc:{0}", distance_sc);

            #endregion

            #region Image warping and interpolation
            timer.Restart();
            //[x,y]=meshgrid(1:N2,1:N1);
            //x=x(:);y=y(:);
            Matrix x = null, y = null;
            MatrixUtils.CreateGrid(N1, N2, out x, out y);
            //int M = N1 * N2; // M=length(x);
            d2 = Dist2(X3b, MatrixUtils.RankHorizon(x, y));//d2=dist2(X3b,[x y]);
            U = d2.PointMultiply(d2.Each(v => Math.Log(v + Epsilon)));
            //Transformation(MatrixUtils.RankHorizon(x, y), U, axt, wxt, ayt, wyt, out fx, out fy);
            var fxy = Transformation(MatrixUtils.RankHorizon(x, y), U, axt, wxt, ayt, wyt);

            //disp('computing warped image...')
            //V1w=griddata(reshape(fx,N1,N2),reshape(fy,N1,N2),V1,reshape(x,N1,N2),reshape(y,N1,N2));
            Matrix V1w = Interpolation(
                fxy.GetSubMatrix(0, fxy.Rows, 0, 1).Reshape(N1, N2),
                fxy.GetSubMatrix(0, fxy.Rows, 1, 1).Reshape(N1, N2),
                V1
            );

            #region This makeshift interpolation can leave cracks in the image; try to repair them with a closing operation
            Image<Gray, Byte> img = new Image<Gray, byte>(N2, N1);
            for (int i = 0; i < N2; ++i) {
                for (int j = 0; j < N1; ++j) {
                    img[i, j] = new Gray(V1w[i, j] * 255);
                }
            }
            var see = new StructuringElementEx(new int[,] { { 1, 1, 1 }, { 1, 1, 1 }, { 1, 1, 1 } }, 1, 1);
            //img = img.MorphologyEx(see, Emgu.CV.CvEnum.CV_MORPH_OP.CV_MOP_CLOSE, 1);
            img = img.Dilate(1).Erode(1);
            for (int i = 0; i < N2; ++i) {
                for (int j = 0; j < N1; ++j) {
                    V1w[i, j] = img[i, j].Intensity / 255;
                }
            }
            img.Dispose();
            #endregion
            timeused += timer.StopAndSay("图像变换和插值");
            #endregion

            //fz=find(isnan(V1w));
            //V1w(fz)=0;
            var ssd = (V2 - V1w).Each(v => v * v); // ssd=(V2-V1w).^2;   %%% the SSD is computed here
            var ssd_global = ssd.SumAll();//ssd_global=sum(ssd(:));
            Debug("ssd_global:{0}", ssd_global);

            #region figure 5
            if (display_flag) {
                //   figure(5)
                //   subplot(2,2,1)
                //   im(V1)
                //   subplot(2,2,2)
                //   im(V2)
                //   subplot(2,2,4)
                //   im(V1w)
                //   title('V1 after warping')
                //   subplot(2,2,3)
                //   im(V2-V1w)
                //   h=title(['SSD=' num2str(ssd_global)]);
                //   colormap(cmap)
            }
            #endregion

            #region Windowed SSD comparison

            timer.Restart();
            //%%%
            //%%% windowed SSD comparison
            //%%%
            var wd = 2 * w + 1;//wd=2*w+1;
            var win_fun = MatrixUtils.GaussianKernal(wd); //win_fun=gaussker(wd);
            //% extract sets of blocks around each coordinate
            //% first do 1st shape; need to use transformed coords.
            var win_list_1 = Zeros(nsamp, wd * wd);//win_list_1=zeros(nsamp,wd^2);
            for (int qq = 0; qq < nsamp; ++qq) {
                int row_qq = (int)Xk[qq, 1],//   row_qq=round(Xk(qq,2));
                    col_qq = (int)Xk[qq, 0];//   col_qq=round(Xk(qq,1));
                row_qq = Math.Max(w + 1, Math.Min(N1 - w - 1, row_qq));//   row_qq=max(w+1,min(N1-w,row_qq));
                col_qq = Math.Max(w + 1, Math.Min(N2 - w - 1, col_qq));//   col_qq=max(w+1,min(N2-w,col_qq));
                //   tmp=V1w(row_qq-w:row_qq+w,col_qq-w:col_qq+w);
                var tmp = V1w.GetSubMatrix(row_qq - w, w * 2 + 1, col_qq - w, w * 2 + 1);
                tmp = win_fun.PointMultiply(tmp);//   tmp=win_fun.*tmp;
                //   win_list_1(qq,:)=tmp(:)';
                win_list_1.SetSubMatrix(qq, 1, 0, win_list_1.Columns, tmp.Reshape(1, tmp.Rows * tmp.Columns));
            }

            //% now do 2nd shape
            var win_list_2 = Zeros(nsamp, wd * wd);//win_list_2=zeros(nsamp,wd^2);
            for (int qq = 0; qq < nsamp; ++qq) {
                int row_qq = (int)Y[qq, 1],//   row_qq = round(Y(qq, 2));
                    col_qq = (int)Y[qq, 0];//   col_qq = round(Y(qq, 1));
                row_qq = Math.Max(w + 1, Math.Min(N1 - w - 1, row_qq));//   row_qq=max(w+1,min(N1-w,row_qq));
                col_qq = Math.Max(w + 1, Math.Min(N2 - w - 1, col_qq));//   col_qq=max(w+1,min(N2-w,col_qq));
                //   tmp=V2(row_qq-w:row_qq+w,col_qq-w:col_qq+w)
                var tmp = V2.GetSubMatrix(row_qq - w, w * 2 + 1, col_qq - w, w * 2 + 1);
                tmp = win_fun.PointMultiply(tmp);//   tmp=win_fun.*tmp;
                win_list_2.SetSubMatrix(qq, 1, 0, win_list_2.Columns, tmp.Reshape(1, tmp.Rows * tmp.Columns));// win_list_2(qq,:)=tmp(:)';
            }

            var ssd_all = Dist2(win_list_1, win_list_2).Each(Math.Sqrt);//ssd_all=sqrt(dist2(win_list_1,win_list_2));
            timeused += timer.StopAndSay("窗口SSD比较");
            #endregion

            #region Final KNN step (not sure what it is for)

            timer.Restart();
            // %%% the KNN step is here
            //% loop over nearest neighbors in both directions, project in
            //% both directions, take maximum
            double cost_1 = 0, cost_2 = 0;
            //List<double> cost_1 = new List<double>(), cost_2 = new List<double>();
            for (int qq = 0; qq < nsamp; ++qq) {
                cost_1 += ssd_all[qq, min2[qq].Idx];//   cost_1=cost_1+ssd_all(qq,b2(qq));
                cost_2 += ssd_all[min1[qq].Idx, qq];//   cost_2=cost_2+ssd_all(b1(qq),qq);
                //cost_1.Add(ssd_all[qq, min2[qq].Idx]);
                //cost_2.Add(ssd_all[min1[qq].Idx, qq]);
            }
            var ssd_local = (1.0 / nsamp) * Math.Max(cost_1, cost_2);
            var ssd_local_avg = (1.0 / nsamp) * 0.5 * (cost_1 + cost_2);
            //var ssd_local = (1.0 / nsamp) * Math.Max(cost_1.Average(), cost_2.Average());//ssd_local=(1/nsamp)*max(mean(cost_1),mean(cost_2));
            //var ssd_local_avg = (1.0 / nsamp) * 0.5 * (cost_1.Average() + cost_2.Average());//ssd_local_avg=(1/nsamp)*0.5*(mean(cost_1)+mean(cost_2));
            Debug("ssd_local:{0}", ssd_local);
            Debug("ssd_local_avg:{0}", ssd_local_avg);
            timeused += timer.StopAndSay("计算ssd_local");
            #region Final combined figure
            //if display_flag
            //%   set(h,'string',['local SSD=' num2str(ssd_local) ', avg. local SSD=' num2str(ssd_local_avg)])
            //   set(h,'string',['local SSD=' num2str(ssd_local)])
            //end
            if (display_flag) {
                Draw(V1, @"D:\Play Data\Iteration\结果\0-V1.bmp", "V1");
                Draw(V2, @"D:\Play Data\Iteration\结果\1-V2.bmp", "V2");
                Draw(V1w, @"D:\Play Data\Iteration\结果\2-V1w.bmp", "V1 after warping");
                Draw(V2 - V1w, @"D:\Play Data\Iteration\结果\3-V2-V1w.bmp",
                     String.Format("local SSD={0:F8}", ssd_local_avg));
            }
            #endregion
            #endregion

            #endregion

            var distance = 1.6 * ssd_local_avg + distance_sc + 0.3 * E; // not sure what the final distance formula is supposed to be
            Debug("distance={0:F8}", distance);
            return distance;
        }