Code example #1
File: Window1.xaml.cs Project: samuto/UnityOpenCV
        private void image1_Initialized(object sender, EventArgs e)
        {
            Image<Bgr, Byte> image = new Image<Bgr, byte>(400, 100, new Bgr(255, 255, 255));
            MCvFont f = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_PLAIN, 3.0, 3.0);
            image.Draw("Hello, world", ref f, new System.Drawing.Point(10, 50), new Bgr(255.0, 0.0, 0.0));

            image1.Source = ToBitmapSource(image);
        }
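The ToBitmapSource helper is not part of this listing. A minimal sketch of the usual Emgu-to-WPF conversion, assuming the project follows the standard interop pattern (DeleteObject is a gdi32 P/Invoke needed to release the native HBITMAP):

        [System.Runtime.InteropServices.DllImport("gdi32")]
        private static extern bool DeleteObject(IntPtr hObject);

        public static BitmapSource ToBitmapSource(Emgu.CV.IImage image)
        {
            using (System.Drawing.Bitmap source = image.Bitmap)
            {
                IntPtr ptr = source.GetHbitmap(); // native HBITMAP
                BitmapSource bs = System.Windows.Interop.Imaging.CreateBitmapSourceFromHBitmap(
                    ptr, IntPtr.Zero, Int32Rect.Empty,
                    System.Windows.Media.Imaging.BitmapSizeOptions.FromEmptyOptions());
                DeleteObject(ptr); // release the GDI handle
                return bs;
            }
        }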
Code example #2
File: Window1.xaml.cs Project: LoyVanBeek/SetVision
        private static void DrawSet(Image<Bgr, Byte> table, Dictionary<Card, System.Drawing.Point> cards, Random rnd, List<Card> set)
        {
            Bgr setcolor = new Bgr(rnd.Next(255), rnd.Next(255), rnd.Next(255));
            List<System.Drawing.Point> centers = new List<System.Drawing.Point>();

            foreach (Card card in set)
            {
                System.Drawing.Point p = cards[card];
                PointF center = new PointF(p.X, p.Y);
                centers.Add(p);
                CircleF circle = new CircleF(center, 50);
                table.Draw(circle, setcolor, 2);
            }

            table.DrawPolyline(centers.ToArray(), true, setcolor, 5);
        }
Code example #3
        // FaceDetection in the normal way
        public override void DoNormalDetection(string imagePath)
        {
            _image = new Image<Bgr, byte>(imagePath); //Read the files as an 8-bit Bgr image  
            _egray = _image.Convert<Gray, Byte>(); //Convert it to Grayscale
            _gray = _egray.Copy();    // Copy image in Grayscale            
            _egray._EqualizeHist(); // Equalize
            Image<Gray, Byte> tempgray = _egray.Copy();

            MCvAvgComp[][] facesDetected = _egray.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));


            foreach (MCvAvgComp f in facesDetected[0])
            {
                if (f.neighbors > 100)
                {
                    //_image.Draw(f.rect, new Bgr(System.Drawing.Color.Blue), 2); // face
                    tempgray.ROI = f.rect; //Set the region of interest on the faces
                    MCvAvgComp[][] eyesDetected = tempgray.DetectHaarCascade(_eyes, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));
                    if (eyesDetected[0].Length != 0)
                    {
                        foreach (MCvAvgComp e in eyesDetected[0])
                        {
                            if (e.neighbors > 100)
                            {
                                System.Drawing.Rectangle eyeRect = e.rect;
                                eyeRect.Offset(f.rect.X, f.rect.Y);
                                _image.Draw(eyeRect, new Bgr(System.Drawing.Color.Red), 2);
                            }
                        }
                    }

                }
            }

            this._processedImages = new IImage[3];
            this._processedImages[0] = _gray;
            this._processedImages[1] = _egray;
            this._processedImages[2] = _image;

        }
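The _faces and _eyes cascade fields are assumed rather than shown; a plausible setup (a sketch using the stock OpenCV cascade files) would be:

        private readonly HaarCascade _faces = new HaarCascade("haarcascade_frontalface_default.xml"); // assumed
        private readonly HaarCascade _eyes = new HaarCascade("haarcascade_eye.xml"); // assumed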
Code example #4
File: MainWindow.xaml.cs Project: kadron/rat-trek
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            //TODO Keep the previous frame image as well,
            //Compare both on a background process and save it to the worksheet
            //Convert x&y differences to millimeters according to depth data (distance)
            //and some trigonometry
            BitmapSource depthBmp = null;
            blobCount = 0;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {

                        blobCount = 0;

                        depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value);

                        Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                        Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                        if (running)
                        {
                            wsheet.Cells[1, frameCount + 1].Value = "Frame " + frameCount;
                            frameCount++;
                            using (MemStorage stor = new MemStorage())
                            {
                                //Find external contours only (no holes); use CV_RETR_CCOMP instead to also detect holes
                                Contour<System.Drawing.Point> contours = gray_image.FindContours(
                                 Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                                 Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                                 stor);

                                //Conversion of depthPixels to skeletonPoints, which contain all three dimensions in meters.
                                //The conversion and copying are assumed to be costly, but I could not find a single-pixel-to-single-point conversion.
                                depthFrame.CopyDepthImagePixelDataTo(depthPixels);
                                //mapper.MapDepthFrameToSkeletonFrame(depthFormat, depthPixels, skeletonPoints);

                                for (int i = 0; contours != null; contours = contours.HNext)
                                {
                                    i++;

                                    if ((contours.Area > Math.Pow(sliderMinSize.Value, 2)) && (contours.Area < Math.Pow(sliderMaxSize.Value, 2)))
                                    {
                                        MCvBox2D box = contours.GetMinAreaRect();
                                        //Draw red box around blob
                                        openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                                        blobCount++;
                                        int x = (int) box.center.X;
                                        int y = (int) box.center.Y;
                                        DepthImagePoint p = new DepthImagePoint();
                                        p.X = x;
                                        p.Y = y;
                                        p.Depth = depthPixels[x + 640 * y].Depth;
                                        SkeletonPoint s = mapper.MapDepthPointToSkeletonPoint(depthFormat, p);

                                        //TODO Conversion from absolute coordinates to relative coordinates

                                        addCoordData(3 * blobCount - 1, frameCount, s.X, s.Y, s.Z);
                                        /*if (KinectSensor.IsKnownPoint(s))
                                        {
                                            addCoordData(3 * blobCount - 1, frameCount, s.X, s.Y, s.Z);
                                        }*/

                                    }
                                }

                            }
                        }

                        this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);
                        txtBlobCount.Text = blobCount.ToString();

                        getNext().RunWorkerAsync(openCVImg);
                    }
                }

                if (colorFrame != null)
                {

                      colorFrame.CopyPixelDataTo(this.colorPixels);
                      this.colorBitmap.WritePixels(
                          new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                          this.colorPixels,
                          this.colorBitmap.PixelWidth * sizeof(int),
                          0);

                }
            }
        }
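Note that p.Depth = depthPixels[x + 640 * y].Depth above hard-codes a 640-pixel stride. A width-agnostic variant (a sketch; valid where depthFrame is still in scope) would be:

    // use the frame's actual width instead of assuming a 640x480 depth stream
    p.Depth = depthPixels[x + depthFrame.Width * y].Depth;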
Code example #5
        // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            #region get image and skeleton data from Kinect

            // if in the middle of shutting down, there is nothing to do
            if (null == this.sensor) return;

            bool depthReceived = false;
            bool colorReceived = false;

            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (null != depthFrame)
                {
                    // Copy the pixel data from the image to a temporary array
                    depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);

                    depthReceived = true;
                }
            }

            int colorFrameNum = -1;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (null != colorFrame)
                {
                    // Copy the pixel data from the image to a temporary array
                    colorFrame.CopyPixelDataTo(this.colorPixels);
                    colorFrameNum = colorFrame.FrameNumber;
                    colorReceived = true;
                }
            }

            Skeleton[] skeletons = new Skeleton[0];
            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                if (skeletonFrame != null)
                {
                    skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
                    skeletonFrame.CopySkeletonDataTo(skeletons);
                }
            }

            #endregion

            #region convert kinect frame to emgu images

            if (colorReceived)
            {
                // get Emgu colour image
                colourImage = new Image<Bgr, byte>(ToBitmap(colorPixels,
                    sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight));
            }

            if (depthReceived)
            {
                // create Emgu depth and playerMask images
                depthImage = new Image<Gray, float>(sensor.DepthStream.FrameWidth, this.sensor.DepthStream.FrameHeight);
                playerMasks = new Image<Gray, Byte>(sensor.DepthStream.FrameWidth, this.sensor.DepthStream.FrameHeight);

                int maxdepth = sensor.DepthStream.MaxDepth;

                // loop over each row and column of the depth
                // this can be slow, but only way to preserve full depth information
                int depthW = sensor.DepthStream.FrameWidth;
                int depthH = sensor.DepthStream.FrameHeight;
                int i;
                for (int y = 0; y < depthH; y++)
                {
                    for (int x = 0; x < depthW; x++)
                    {
                        // calculate index into depth array
                        i = x + (y * depthW);
                        DepthImagePixel depthPixel = this.depthPixels[i];

                        // save pixel to images
                        depthImage.Data[y, x, 0] = (depthPixel.Depth < maxdepth) ? depthPixel.Depth : maxdepth;
                        playerMasks.Data[y, x, 0] = (byte)depthPixel.PlayerIndex;
                    }
                }

                // HACK set one pixel to max value to stop flickering when
                // converting float image to byte or Bgr
                depthImage.Data[0, 0, 0] = sensor.DepthStream.MaxDepth;

                // should work, but causes grey values to be pseudo randomly transposed
                //playerMasks = new Image<Gray, byte>(ToBitmap(playerPixelData,
                //    sensor.DepthStream.FrameWidth, this.sensor.DepthStream.FrameHeight,
                //    System.Drawing.Imaging.PixelFormat.Format8bppIndexed));
            }

            #endregion

            // pick which skeleton to track
            int playerId = TrackClosestSkeleton(sensor, skeletons);
            Skeleton skeleton = null;
            if (playerId > 0) skeleton = skeletons.First(s => s.TrackingId == playerId);

            #region use the emgu images to do something interesting

            int demo = 2;

            // DEMO 0: You
            if (demo == 0 && skeleton != null && colourImage != null && depthImage != null)
            {

                displayImage = colourImage.Copy();

                if (ShowDebug)
                {

                }
            }

            // DEMO 1: blur and boost
            else if (demo == 1 && skeleton != null && colourImage != null
                && depthImage != null)
            {
                SkeletonPoint sleft = skeleton.Joints[JointType.HandLeft].Position;
                SkeletonPoint sright = skeleton.Joints[JointType.HandRight].Position;
                double hand_x_dist = Math.Abs(sleft.X - sright.X);
                double hand_y_dist = Math.Abs(sleft.Y - sright.Y);

                // downscale by 2x to speed up
                displayImage = colourImage.Resize(0.5, INTER.CV_INTER_NN);
                // displayImage = colourImage.Copy(); // slower

                // boost the RGB values based on vertical hand distance
                float boost = 3 - (float)(hand_y_dist * 5);
                displayImage = displayImage.Convert(delegate(Byte b)
                    { return (byte)((b * boost < 255) ? (b * boost) : 255); });

                // blur based on horizontal hand distance
                int blur = (int)(hand_x_dist * 20);
                if (blur > 0)
                    displayImage = displayImage.SmoothBlur(blur, blur);

                // show debug
                if (ShowDebug)
                {
                    debugImg2 = depthImage.Convert<Bgr, Byte>();
                    DepthImagePoint dp;
                    dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(sleft,
                        sensor.DepthStream.Format);
                    debugImg2.Draw(new CircleF(dp.ToPointF(), 20), new Bgr(Color.Coral), 1);
                    dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(sright,
                        sensor.DepthStream.Format);
                    debugImg2.Draw(new CircleF(dp.ToPointF(), 20), new Bgr(Color.LightGreen), 1);
                    Utilities.WriteDebugText(debugImg2, 10, 40, "{0:0.00}m {1:0.00}m",
                        hand_x_dist, hand_y_dist);
                }

            }

            // DEMO 2: Painting
            else if (demo == 2 && skeleton != null &&
                colourImage != null && depthImage != null)
            {
                // create a player mask for player we want
                byte playerIndex = (byte)(Array.IndexOf(skeletons, skeleton) + 1);

                //double[] min, max;
                //Point[] pmin, pmax;
                //playerMasks.MinMax(out min, out max, out pmin, out pmax);

                // pick the player mask for the skeleton we're tracking
                Image<Gray, Byte> playerMask = playerMasks.Convert(delegate(Byte b)
                    { return (Byte)(b == playerIndex ? 255 : 0); });

                // register depth to Rgb using Emgu
                // compute homography if first frame
                if (depthToRGBHomography == null)
                    depthToRGBHomography = ComputeDepthToRGBHomography(
                        depthImage.Convert<Gray, byte>(), sensor);
                // do the registration warp
                Image<Gray, byte> registeredplayerMask = playerMask.WarpPerspective(
                    depthToRGBHomography, INTER.CV_INTER_CUBIC, WARP.CV_WARP_DEFAULT,
                    new Gray(0));

                // create the display image background
                // blended RGB and player mask
                displayImage = colourImage.AddWeighted(
                    registeredplayerMask.Resize(2, INTER.CV_INTER_NN).Convert<Bgr, byte>(),
                    0.7, 0.3, 0);

                // blur it out
                displayImage = displayImage.SmoothBlur(5, 5);

                // get body depth (in m)
                SkeletonPoint sposition = skeleton.Position;
                // get the left and right hand  positions (m)
                SkeletonPoint sleft = skeleton.Joints[JointType.HandLeft].Position;
                SkeletonPoint sright = skeleton.Joints[JointType.HandRight].Position;
                // head position
                SkeletonPoint shead = skeleton.Joints[JointType.Head].Position;

                // mask out depth except playermask
                Image<Gray, float> playerDepth = depthImage.Copy();
                playerDepth.SetValue(new Gray(0), playerMask.Not());

                // erase all interaction
                // - - - - - - - - - - - - -
                // raising both hands over head, when hands are close together, erases

                // get the distance between the hands
                double hand_distance = Utilities.Distance(sleft, sright);

                // if hands closer than 30cm and over head, then erase
                if (hand_distance < 0.3 && sleft.Y > shead.Y && sright.Y > shead.Y)
                {
                    paintingImage.SetZero();
                }

                // paint with hands interaction
                // - - - - - - - - - - - - -

                // hands become a brush or eraser when more than 250 mm closer to the kinect
                float brushThresh = (sposition.Z * 1000) - 250;

                // the brush is the player depth image, but only body parts below the
                // brush threshold
                brushMask = playerDepth.Copy();
                brushMask = brushMask.ThresholdToZeroInv(new Gray(brushThresh));

                // create brush image if first time through
                if (brushImage == null) brushImage =
                    new Image<Bgr, byte>(displayImage.Width, displayImage.Height);

                // update the brush image
                brushImage.SetZero();
                brushImage.SetValue(brushColour,
                    brushMask.Convert<Gray, byte>().Resize(2, INTER.CV_INTER_NN));
                brushImage = brushImage.SmoothBlur(10, 10);

                // create the painting image if first time
                if (paintingImage == null)
                    paintingImage =
                        new Image<Bgr, byte>(displayImage.Width, displayImage.Height);

                // layer on a bit of paint each frame
                paintingImage = paintingImage.Add(brushImage * 0.05);

                // display the painting on top of the RGB image
                displayImage = displayImage.AddWeighted(paintingImage, 0.5, 0.5, 0.0);

                // show debug
                if (ShowDebug)
                {
                    debugImg2 = depthImage.Convert<Bgr, Byte>();
                    DepthImagePoint dp;
                    dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(
                        sleft, sensor.DepthStream.Format);
                    debugImg2.Draw(new CircleF(dp.ToPointF(), 20),
                        new Bgr(Color.Coral), 1);
                    dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(sright, sensor.DepthStream.Format);
                    debugImg2.Draw(new CircleF(dp.ToPointF(), 20),
                        new Bgr(Color.LightGreen), 1);
                    dp = sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(shead, sensor.DepthStream.Format);
                    debugImg2.Draw(new CircleF(dp.ToPointF(), 20),
                        new Bgr(Color.Cyan), 1);
                    Utilities.WriteDebugText(debugImg2, 10, 40, "Player: {0} {1}",
                        playerId, playerIndex);

                }

                // colour picker interaction
                // - - - - - - - - - - - - -
                // pick colours by creating a "depth hole" by joining index finger and
                // thumb of opposite hands

                // hands can form a picker hole when more than 150 mm closer to kinect
                float pickerThresh = (sposition.Z * 1000) - 150;

                // isolate part of depth where picker can appear
                Image<Gray, float> pickerImage = playerDepth.ThresholdToZeroInv(
                    new Gray(pickerThresh));

                // clean up small pixels and noise with morphological operations
                pickerImage = pickerImage.Dilate(2).Erode(2);

                if (ShowDebug)
                {
                    debugImg1 = new Image<Bgr, byte>(pickerImage.Width, pickerImage.Height);
                    debugImg1.SetValue(new Bgr(Color.Yellow),
                        pickerImage.Convert<Gray, Byte>());
                    debugImg1.SetValue(new Bgr(Color.OrangeRed),
                        brushMask.Convert<Gray, Byte>());
                }

                // use contours to look for hole
                Contour<Point> contours = null;

                // contours only work on byte image, need to convert
                Image<Gray, byte> pickerImageByte = pickerImage.Convert<Gray, byte>();

                // the picker point (if we find one)
                PointF pickerPoint = PointF.Empty;

                // detect two levels of image contours (RETR_TYPE.CV_RETR_CCOMP)
                // contours is a list of all contours found at top level
                for (contours = pickerImageByte.FindContours(
                    CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                    RETR_TYPE.CV_RETR_CCOMP);
                        contours != null;
                        contours = contours.HNext)
                {

                    // VNext is 'vertical' traversal down in contour tree
                    // if there's a hole, VNext won't be null
                    Contour<Point> hole = contours.VNext;
                    if (hole != null && hole.Area > 50)
                    {
                        if (ShowDebug)
                            debugImg1.Draw(hole, new Bgr(0, 0, 255), 2);

                        // get the centre of the hole
                        MCvMoments moments = hole.GetMoments();
                        MCvPoint2D64f p = moments.GravityCenter;

                        // set the point
                        pickerPoint = new PointF((float)p.x, (float)p.y);
                    }
                }

                // if we found a hole, we can get the colour in the RGB image
                // at that position
                if (pickerPoint != PointF.Empty)
                {
                    // get the RGB pixel at the picker depth point
                    DepthImagePoint dp = new DepthImagePoint();
                    dp.X = (int)pickerPoint.X;
                    dp.Y = (int)pickerPoint.Y;
                    dp.Depth = (int)depthImage[dp.Y, dp.X].Intensity;
                    ColorImagePoint cp =
                        sensor.CoordinateMapper.MapDepthPointToColorPoint(
                        sensor.DepthStream.Format, dp, sensor.ColorStream.Format);

                    // if we got a valid RGB point, get the colour
                    if (cp.X > 0 &&
                        cp.X < colourImage.Width &&
                        cp.Y > 0 &&
                        cp.Y < colourImage.Height)
                    {
                        // get the Emgu colour
                        Bgr c = colourImage[cp.Y, cp.X];

                        // convert to HSV so we can boost S and V
                        double hue, sat, val;
                        Color cc = Color.FromArgb((int)c.Red, (int)c.Green, (int)c.Blue);
                        Utilities.ColorToHSV(cc, out hue, out sat, out val);
                        // boost
                        sat = Math.Min(sat * 2, 1);
                        val = Math.Min(val * 2, 1);
                        // convert back to RGB colour
                        cc = Utilities.HSVToColor(hue, sat, val);

                        // display some feedback to show that picking is working
                        CircleF circle = new CircleF(new PointF(cp.X, cp.Y), 10);
                        displayImage.Draw(circle, new Bgr(cc), -1);
                        displayImage.Draw(circle, new Bgr(255, 255, 255), 1);

                        // set the new brush colour
                        brushColour = new Bgr(cc);
                    }
                }

            }
            // waiting for skeleton, just show RGB
            else if (colourImage != null && depthImage != null)
            {
                displayImage = colourImage.Copy();

                if (ShowDebug)
                {
                    debugImg1 = displayImage.Copy();
                    debugImg2 = depthImage.Convert<Bgr, Byte>();
                    Utilities.WriteDebugText(debugImg2, 10, 40, "No Skeleton");
                }
            }
            // something's wrong, we don't have RGB and depth
            else
            {
                displayImage.SetValue(new Bgr(Color.CadetBlue));
            }

            #endregion

            // display image
            if (displayImage != null)
            {
                // convert Emgu image to WPF image source
                // This is handy to incorporate Emgu with standard UI or layer WPF graphics
                // on top. In this demo, it lets us make the window resize.
                if (DisplayImageHelper == null)
                {
                    // InteropBitmapHelper is a helper class to do the conversion
                    DisplayImageHelper =
                        new InteropBitmapHelper(displayImage.Width,
                            displayImage.Height, displayImage.Bytes, PixelFormats.Bgr24);
                    DisplayImage.Source = DisplayImageHelper.InteropBitmap;
                }
                // convert here using the helper object
                DisplayImageHelper.UpdateBits(displayImage.Bytes);
            }

            // display Emgu debug images
            if (ShowDebug)
            {
                CvInvoke.cvShowImage("debugImg1", debugImg1);
                CvInvoke.cvShowImage("debugImg2", debugImg2);
            }
        }
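TrackClosestSkeleton is not included in this listing. A minimal sketch of what it presumably does: pick the tracked skeleton nearest the sensor and return its TrackingId (the explicit ChooseSkeletons call is an assumption):

        private static int TrackClosestSkeleton(KinectSensor sensor, Skeleton[] skeletons)
        {
            int closestId = 0;
            float closestZ = float.MaxValue;
            foreach (Skeleton s in skeletons)
            {
                if (s.TrackingState != SkeletonTrackingState.Tracked) continue;
                if (s.Position.Z < closestZ)
                {
                    closestZ = s.Position.Z;
                    closestId = s.TrackingId;
                }
            }
            // lock tracking onto the chosen skeleton when the app controls selection
            if (closestId > 0 && sensor.SkeletonStream.AppChoosesSkeletons)
                sensor.SkeletonStream.ChooseSkeletons(closestId);
            return closestId;
        }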
Code example #6
    private void DrawFace(Image<Bgr, Byte> image) {
      if (null == Sensor.FPoints) { return; }

      var pts = Sensor.FPoints;
      var color = Sensor.Mood > 0 ? bgR : Sensor.Mood < 0 ? bGr : BGR;
      foreach (var pt in pts) {
        image.Draw(new CircleF(new PointF(pt.X, pt.Y), 1.0f), color, 2);
      }
    }
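The bgR, bGr, and BGR colour fields are not shown; the capitalization suggests which channel dominates. A hypothetical declaration:

    private static readonly Bgr bgR = new Bgr(0, 0, 255);     // red: positive mood (assumed)
    private static readonly Bgr bGr = new Bgr(0, 255, 0);     // green: negative mood (assumed)
    private static readonly Bgr BGR = new Bgr(255, 255, 255); // white: neutral (assumed)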
Code example #7
        private Image<Bgr, byte> Canny(Image<Bgr, byte> newImg)
        {
            Image<Ycc, Byte> ycc = newImg.Convert<Ycc, byte>();

            // threshold the YCrCb image to a typical skin-colour range
            var skin = ycc.InRange(new Ycc(0, 131, 80), new Ycc(255, 185, 135));
            // Erode and Dilate return new images, so the results must be assigned
            skin = skin.Erode(2);
            skin = skin.Dilate(2);

            var contours = skin.FindContours();
            if (contours != null)
            {
                List<Seq<Point>> allContours = new List<Seq<Point>>();
                for (Seq<Point> c = contours; c != null; c = c.HNext)
                {
                    allContours.Add(c);
                }
                allContours.Sort((a, b) => b.Total - a.Total);

                var biggest = allContours.Take(2);

                foreach (Seq<Point> points in biggest)
                {
                    var hull = points.GetConvexHull(ORIENTATION.CV_CLOCKWISE);

                    newImg.Draw(hull, new Bgr(Color.Red), 2);
                }
            }

            return newImg;
        }
Code example #8
    private void DrawFaces(Image<Bgr, Byte> image) {
      MCvAvgComp[] Faces = Sensor.FaceManager.Faces;
      String[] Names = Sensor.FaceManager.Names;
      Image<Gray, byte>[] Thumbs = Sensor.FaceManager.Thumbs;
      for (int i = 0; i < Faces.Length; i++) {

        // Draw Rect
        MCvAvgComp f = Faces[i];
        if (f.rect.IsEmpty) { continue; } // Rectangle is a struct and can never be null
        image.Draw(f.rect, border, 2);

        // Draw text
        var rect = new Rectangle(f.rect.X, f.rect.Y + f.rect.Height + 20, f.rect.Width, f.rect.Height);
        DrawText(image, rect, Names[i] != null ? Names[i] : "");

        // Draw thumb
        if (Thumbs[i] != null) {
          this.imageTrainedFace.Source = ToBitmapSource(Thumbs[i]);
        }
      }
    }
Code example #9
    private void DrawJoints(Image<Bgr, Byte> image) {
      if (!Chx_Skeleton.IsChecked.GetValueOrDefault(true)) { return;  }
      if (null == Sensor.GestureManager) { return; }
      if (null == Sensor.GestureManager.Skeleton) { return; }

      Sensor.Copy2DJoints = true;
      var pts = Sensor.Joints2D;
      try {
        image.DrawPolyline(new Point[] { pts[JointType.Head], pts[JointType.ShoulderCenter], pts[JointType.Spine], pts[JointType.HipCenter] }, false, bGr, 2);
        image.DrawPolyline(new Point[] { pts[JointType.HipCenter], pts[JointType.HipLeft], pts[JointType.KneeLeft], pts[JointType.AnkleLeft], pts[JointType.FootLeft] }, false, bGr, 2);
        image.DrawPolyline(new Point[] { pts[JointType.HipCenter], pts[JointType.HipRight], pts[JointType.KneeRight], pts[JointType.AnkleRight], pts[JointType.FootRight] }, false, bGr, 2);
        image.DrawPolyline(new Point[] { pts[JointType.ShoulderCenter], pts[JointType.ShoulderLeft], pts[JointType.ElbowLeft], pts[JointType.WristLeft], pts[JointType.HandLeft] }, false, bGr, 2);
        image.DrawPolyline(new Point[] { pts[JointType.ShoulderCenter], pts[JointType.ShoulderRight], pts[JointType.ElbowRight], pts[JointType.WristRight], pts[JointType.HandRight] }, false, bGr, 2);
      }
      catch (Exception) { /* accessing the joint map may throw */ }
      foreach (JointType t in jointInterest) {
        image.Draw(new CircleF(new PointF(pts[t].X, pts[t].Y), 1.0f), bgR, 10);
      }
    }
Code example #10
        private void DrawLinesHorizontal(List<System.Drawing.PointF> markersF, Image<Bgr, byte> outImg1, bool captureAngles)
        {


            for (int i = 0; i < markersF.Count; i++)
            {
                LineSegment2D line1 = new LineSegment2D(new System.Drawing.Point(0, (int)markersF[i].Y),
                                                    new System.Drawing.Point((int)markersF[i].X, (int)markersF[i].Y));

                outImg1.Draw(line1, new Bgr(System.Drawing.Color.Red), 1);
            }
            
          
            countFrames++;
        }
Code example #11
File: MainWindow.xaml.cs Project: jlark/cvlock
        /// <summary>
        /// Used for drawing hot-corner boxes on screen
        /// </summary>
        /// <param name="img"></param>
        /// <returns></returns>
        public Image<Bgr, Byte> drawOnscreen(Image<Bgr, Byte> img)
        {
            //Assuming the image we're using is 640X480
            MCvBox2D boxUpR = new MCvBox2D(new PointF(200, 200), boxSize, 0);

            img.Draw(boxUpR, new Bgr(System.Drawing.Color.Green), 2);

            return img;
        }
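The boxSize field is an assumption; given the 640x480 comment, it is presumably something like:

        private static readonly System.Drawing.SizeF boxSize = new System.Drawing.SizeF(100, 100); // hypothetical corner-box size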
Code example #12
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            BitmapSource depthBmp = null;
            blobCount = 0;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {

                        blobCount = 0;

                        depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value);

                        Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                        Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                        //Find contours
                        using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                        {
                            CvInvoke.FindContours(gray_image, contours, new Mat(), Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

                            for (int i = 0; i < contours.Size; i++)
                            {
                                VectorOfPoint contour = contours[i];
                                double area = CvInvoke.ContourArea(contour, false);

                                if ((area > Math.Pow(sliderMinSize.Value, 2)) && (area < Math.Pow(sliderMaxSize.Value, 2)))
                                {
                                    System.Drawing.Rectangle box = CvInvoke.BoundingRectangle(contour);
                                    openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                                    blobCount++;
                                }
                            }
                        }

                        this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);
                        txtBlobCount.Text = blobCount.ToString();
                    }
                }

                if (colorFrame != null)
                {

                    colorFrame.CopyPixelDataTo(this.colorPixels);
                    this.colorBitmap.WritePixels(
                        new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                        this.colorPixels,
                        this.colorBitmap.PixelWidth * sizeof(int),
                        0);

                }
            }
        }
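This variant uses the Emgu CV 3.x API (CvInvoke.FindContours with VectorOfVectorOfPoint), unlike the 2.x Contour listings in examples #4 and #18, and it draws the axis-aligned bounding rectangle. If the rotated minimum-area box of those listings is wanted, a 3.x sketch would be:

    RotatedRect box = CvInvoke.MinAreaRect(contour);
    System.Drawing.PointF[] corners = box.GetVertices();
    // draw the four edges of the rotated rectangle in red
    for (int j = 0; j < 4; j++)
        CvInvoke.Line(openCVImg,
            System.Drawing.Point.Round(corners[j]),
            System.Drawing.Point.Round(corners[(j + 1) % 4]),
            new MCvScalar(0, 0, 255), 2);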
Code example #13
        private void CompositionTarget_Rendering(object sender, EventArgs e)
        {
            _status.Fill = _rd;

            #region Recognition
            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
            gray = currentFrame.Convert<Gray, Byte>();

            var size = new System.Drawing.Size(20, 20);
            var window = new System.Drawing.Size(grabber.Width, grabber.Height);

            _rects = _faceClassifier.DetectMultiScale(gray, 1.2, 10, size, window);

            foreach (var f in _rects)
            {
                result = currentFrame.Copy(f).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                _status.Fill = new SolidColorBrush(Colors.Green);
                currentFrame.Draw(f, new Bgr(System.Drawing.Color.Red), 2);

                //if we have already trained
                if (CommonData.TrainingImages.Count > 0)
                {
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
                    //Eigen face recognizer (NOTE: rebuilding it on every frame is expensive;
                    //in real use, cache it until the training set changes)
                    recognizer = new EigenObjectRecognizer(
                       CommonData.TrainingImages.ToArray(),
                       CommonData.Names.ToArray(),
                       3000,
                       ref termCrit);

                    string name = recognizer.Recognize(result);
                    currentFrame.Draw(name, ref font, new System.Drawing.Point(f.X - 2, f.Y - 2),
                        new Bgr(System.Drawing.Color.LightGreen));
                }

                //finally draw the source
                _imgCamera.Source = ImageHelper.ToBitmapSource(currentFrame);
            }
            #endregion
        }
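The font field used for the name label is not shown; the classic Emgu face-recognition samples declare it roughly as follows (an assumption for this project):

        private MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);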
Code example #14
    public void RepaintColorFrame(Image<Bgra, Byte> buffer, NBody body) {

      // Face & Name
      var head = body.GetJoint(NJointType.Head).Area;
      if (head.Width > 0) {
        buffer.Draw(head, brdGreen, 4);
        if (body.Name != null) {
          var txt = new Rectangle(head.X, head.Y + head.Height + 20, head.Width, head.Height); 
          DrawText(buffer, txt, body.Name);
        }
      }

      // Hands
      var right = body.GetJoint(NJointType.HandRight).Area;
      if (right.Width > 0) {
        buffer.Draw(right, brdGreen, 4);
      }

      var left = body.GetJoint(NJointType.HandLeft).Area;
      if (left.Width > 0) {
        buffer.Draw(left, brdGreen, 4);
      }

      // Joints
      foreach (var joint in body.Joints.Values) {
        if (joint == null) { continue; }
        if (joint.Tracking == NTrackingState.NotTracked) { continue; }
        var color = joint.Tracking == NTrackingState.Inferred ? brdRed : brdGreen;
        buffer.Draw(new CircleF(joint.Position2D, 10.0f), color, 10);
      }

      // Torso
      DrawBone(body, NJointType.Head, NJointType.Neck);
      DrawBone(body, NJointType.Neck, NJointType.SpineShoulder);
      DrawBone(body, NJointType.SpineShoulder, NJointType.SpineMid);
      DrawBone(body, NJointType.SpineMid, NJointType.SpineBase);
      DrawBone(body, NJointType.SpineShoulder, NJointType.ShoulderRight);
      DrawBone(body, NJointType.SpineShoulder, NJointType.ShoulderLeft);
      DrawBone(body, NJointType.SpineBase, NJointType.HipRight);
      DrawBone(body, NJointType.SpineBase, NJointType.HipLeft);

      // Right Arm    
      DrawBone(body, NJointType.ShoulderRight, NJointType.ElbowRight);
      DrawBone(body, NJointType.ElbowRight, NJointType.WristRight);
      DrawBone(body, NJointType.WristRight, NJointType.HandRight);
      DrawBone(body, NJointType.HandRight, NJointType.HandTipRight);
      DrawBone(body, NJointType.WristRight, NJointType.ThumbRight);

      // Left Arm
      DrawBone(body, NJointType.ShoulderLeft, NJointType.ElbowLeft);
      DrawBone(body, NJointType.ElbowLeft, NJointType.WristLeft);
      DrawBone(body, NJointType.WristLeft, NJointType.HandLeft);
      DrawBone(body, NJointType.HandLeft, NJointType.HandTipLeft);
      DrawBone(body, NJointType.WristLeft, NJointType.ThumbLeft);

      // Right Leg
      DrawBone(body, NJointType.HipRight, NJointType.KneeRight);
      DrawBone(body, NJointType.KneeRight, NJointType.AnkleRight);
      DrawBone(body, NJointType.AnkleRight, NJointType.FootRight);

      // Left Leg
      DrawBone(body, NJointType.HipLeft, NJointType.KneeLeft);
      DrawBone(body, NJointType.KneeLeft, NJointType.AnkleLeft);
      DrawBone(body, NJointType.AnkleLeft, NJointType.FootLeft);
    }
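DrawBone is not included in the listing. A plausible sketch under two assumptions: the joint object returned by GetJoint exposes the same Tracking/Position2D members used above, and the target buffer is kept in a field between calls:

    private Image<Bgra, Byte> buffer; // assumed field, set from RepaintColorFrame

    private void DrawBone(NBody body, NJointType a, NJointType b) {
      var ja = body.GetJoint(a);
      var jb = body.GetJoint(b);
      if (ja == null || jb == null) { return; }
      if (ja.Tracking == NTrackingState.NotTracked ||
          jb.Tracking == NTrackingState.NotTracked) { return; }
      // inferred joints draw in red, fully tracked ones in green
      var color = (ja.Tracking == NTrackingState.Inferred ||
                   jb.Tracking == NTrackingState.Inferred) ? brdRed : brdGreen;
      buffer.Draw(new LineSegment2DF(ja.Position2D, jb.Position2D), color, 3);
    }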
Code example #15
        private Bitmap faceDetect(Bitmap bitmap)
        {
            // NOTE: loading the cascade from disk on every call is expensive;
            // in real use, cache it in a field
            HaarCascade haar = new HaarCascade("haarcascade_frontalface_alt.xml");
            Image<Bgr, byte> temp = new Image<Bgr, byte>(bitmap);
            Image<Gray, byte> grayframe = temp.Convert<Gray, byte>();
            var faces =
                    grayframe.DetectHaarCascade(
                            haar, 1.4, 4,
                            HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                            new System.Drawing.Size(temp.Width / 8, temp.Height / 8)
                            )[0];

            foreach (var face in faces)
            {
                temp.Draw(face.rect, new Bgr(0, 255, 0), 3); // saturated green
            }
           return temp.ToBitmap();
        }
Code example #16
        // FaceDetection in the normal way
        public override void DoNormalDetection(string imagePath)
        {   
            _image = new Image<Bgr, byte>(imagePath); //Read the files as an 8-bit Bgr image  
            _egray = _image.Convert<Gray, Byte>(); //Convert it to Grayscale
            _gray = _egray.Copy();    // Copy image in Grayscale            
            _egray._EqualizeHist(); // Equalize

            MCvAvgComp[][] facesDetected = _egray.DetectHaarCascade(_faces, 1.1, 1, Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING, new System.Drawing.Size(20, 20));

            foreach (MCvAvgComp f in facesDetected[0])
            {
                if (f.neighbors > 100)
                {
                    //_egray.ROI = f.rect; // test
                    //_gray.ROI = f.rect; // test
                    _image.Draw(f.rect, new Bgr(System.Drawing.Color.Blue), 2);

                }
            }


            this._processedImages = new IImage[3];
            this._processedImages[0] = _gray;
            this._processedImages[1] = _egray;
            this._processedImages[2] = _image;
            
        }
Code example #17
        void FrameGrabber(object sender, EventArgs e)
        {
            NamePersons.Add("");

            currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            //Convert it to Grayscale
            gray = currentFrame.Convert<Gray, Byte>();

            //Face Detector
            MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
              face,
              1.2,
              10,
              Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
              new System.Drawing.Size(20, 20));

            Console.WriteLine(facesDetected[0].Length);

            //Action for each element detected
            foreach (MCvAvgComp f in facesDetected[0])
            {
                t = t + 1;
                result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                //draw the detected face on the colour frame in red
                currentFrame.Draw(f.rect, new Bgr(System.Drawing.Color.Red), 2);

                if (trainingImages.ToArray().Length != 0)
                {
                    //TermCriteria for face recognition; the number of trained images acts as the max iteration count
                    MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);

                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                       trainingImages.ToArray(),
                       labels.ToArray(),
                       3000,
                       ref termCrit);

                    name = recognizer.Recognize(result);

                    //Draw the label for each face detected and recognized
                    currentFrame.Draw(name, ref font, new System.Drawing.Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(System.Drawing.Color.LightGreen));

                }

                NamePersons[t - 1] = name;
                NamePersons.Add("");

                label3.Text = facesDetected[0].Length.ToString();

                if (result != null)
                {
                    dispatcherTimer.Stop();
                    break;
                }

            }
            t = 0;

            //Concatenate the names of the recognized persons
            for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
            {
                names = names + NamePersons[nnn] + ", ";
            }

            imageBoxFrameGrabber.Source = ConvertImage(currentFrame.ToBitmap());
            label4.Text = names;
            names = "";
            //Clear the list(vector) of names
            NamePersons.Clear();
        }
Code example #18
File: MainWindow.xaml.cs Project: jlark/cvlock
        /// <summary>
        /// This is the frame event handler; it runs for every captured frame. The meat of the app is here.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            BitmapSource depthBmp = null;
            blobCount = 0;
            int tempP = index % currentPass.Length;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {

                        blobCount = 0;

                        depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value);

                        Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                        Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                        using (MemStorage stor = new MemStorage())
                        {
                            //Find external contours only (no holes); use CV_RETR_CCOMP instead to also detect holes
                            Contour<System.Drawing.Point> contours = gray_image.FindContours(
                             Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                             Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                             stor);

                            for (int i = 0; contours != null; contours = contours.HNext)
                            {
                                i++;

                                //Get window for depth image
                                if ((contours.Area > Math.Pow(sliderMinSize.Value, 2)) && (contours.Area < Math.Pow(sliderMaxSize.Value, 2)))
                                {
                                    MCvBox2D box = contours.GetMinAreaRect();

                                    //Get the bounding rectangle of the contour
                                    System.Drawing.Rectangle roi2 = contours.BoundingRectangle;

                                    openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                                    blobCount++;

                                    if (isTracking)
                                    {
                                        //Need another conversion to gray and byte here from the rectangle
                                        result = gray_image.Copy(roi2).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                                        this.toTrackIMG.Source = ImageHelpers.ToBitmapSource(result);
                                        if (Eigen_Recog.IsTrained)
                                        {
                                            string name = Eigen_Recog.Recognise(result);

                                            //Looks like we found a number; add it to the password and check whether it's correct
                                            if (!name.Equals("") && !name.Equals(lastChar))
                                            {
                                                currentPass[tempP] = (char)name[0];
                                                index++;
                                                ImageHelpers.saveBitMapToDisk(result.ToBitmap(), Convert.ToInt32(name), "Test", 1);
                                                displayCurrPass();

                                            }

                                            openCVImg.Draw(lastChar, ref font, new System.Drawing.Point((int)box.center.X, (int)box.center.Y), new Bgr(System.Drawing.Color.OrangeRed));
                                            if (!name.Equals(""))
                                            {
                                                //preserve the recognized symbol for future reference
                                                lastChar = name;
                                            }
                                        }

                                    }
                                    // Display bounding box for the training sequence; an easy way to filter out noise
                                    else if (isTraining)
                                    {
                                        result = gray_image.Copy(roi2).Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                                        this.toTrackIMG.Source = ImageHelpers.ToBitmapSource(result);

                                    }
                                }
                            }
                            //Draw bounding box. Do this down here so that it's not mixed up with the calculation of contours and eigenvectors
                            if (isTraining)
                            {
                                openCVImg = drawOnscreen(openCVImg);
                            }
                        }
                        //Draw on screen
                        this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);
                        txtBlobCount.Text = blobCount.ToString();
                    }
                }
                //Draw color image
                if (colorFrame != null)
                {

                      colorFrame.CopyPixelDataTo(this.colorPixels);
                      this.colorBitmap.WritePixels(
                          new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                          this.colorPixels,
                          this.colorBitmap.PixelWidth * sizeof(int),
                          0);

                }
            }
        }
Code example #19
        private void DrawLines(List<System.Drawing.PointF> markersF, Image<Bgr, byte> outImg1, bool captureAngles)
        {



            LineSegment2D line1 = new LineSegment2D(new System.Drawing.Point((int)markersF[0].X, (int)markersF[0].Y),
                                                    new System.Drawing.Point((int)markersF[1].X, (int)markersF[1].Y));
            LineSegment2D line2 = new LineSegment2D(new System.Drawing.Point((int)markersF[1].X, (int)markersF[1].Y),
                                                    new System.Drawing.Point((int)markersF[2].X, (int)markersF[2].Y));

            outImg1.Draw(line1, new Bgr(System.Drawing.Color.Red), 1);
            outImg1.Draw(line2, new Bgr(System.Drawing.Color.Red), 1);
            double angleEmgu = line1.GetExteriorAngleDegree(line2);
            double angle = findAngle(markersF);
            // double angle = angleEmgu;

            MCvFont f = new MCvFont(FONT.CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0);
            outImg1.Draw(((int)angle).ToString(), ref f, new System.Drawing.Point((int)markersF[1].X, (int)markersF[1].Y), new Bgr(121, 116, 40));

            if(captureAngles)
                angles.Add(angle);

            //========================== activate a BOOL to start capturing the angles, and deactivate it when pressed again
            //========================== then save the list of angles and show it in the graph

            //tbxResults.AppendText("line.AddPoint(" + (countFrames * 1.0).ToString() + "," + Math.Abs(angle).ToString() + ");");
            tbxResults.AppendText(Math.Abs(angle).ToString() + ",");
            tbxResults.AppendText(Environment.NewLine);

            /*
            ListBoxItem item = new ListBoxItem();
            item.Content = "AddPoint(" + (countFrames * 1.0).ToString() + "," + angle.ToString() + ");";
            listResults.Items.Add(item);
            */
            countFrames++;
        }
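findAngle is not part of the listing; judging from the call site it returns the angle at the middle marker. A minimal sketch using Math.Atan2 (an assumed implementation, not the original):

        private static double findAngle(List<System.Drawing.PointF> m)
        {
            // angle between segments m[1]->m[0] and m[1]->m[2], in degrees
            double a1 = Math.Atan2(m[0].Y - m[1].Y, m[0].X - m[1].X);
            double a2 = Math.Atan2(m[2].Y - m[1].Y, m[2].X - m[1].X);
            double deg = (a2 - a1) * 180.0 / Math.PI;
            // normalize to (-180, 180]
            if (deg > 180.0) deg -= 360.0;
            if (deg <= -180.0) deg += 360.0;
            return deg;
        }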
Code example #20
        private void helloWorldTestButton_Click(object sender, RoutedEventArgs e)
        {
            //Create an image of 400x200 of Blue color
            using (Image<Bgr, Byte> img = new Image<Bgr, byte>(400, 200, new Bgr(255, 0, 0)))
            {
                //Create the font
                MCvFont f = new MCvFont(FONT.CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0);

                //Draw "Hello, world." on the image using the specific font
                img.Draw("Hello, world", ref f, new Point(10, 80), new Bgr(0, 255, 0));

                //Show the image in a highgui window via CvInvoke.cvShowImage
                CvInvoke.cvShowImage("Hello World Test Window", img.Ptr);
            }

            // Test crop
            using (Image<Gray, Byte> img = new Image<Gray, byte>(400, 200, new Gray(0)))
            {
                MCvFont f = new MCvFont(FONT.CV_FONT_HERSHEY_COMPLEX, 1.0, 1.0);
                img.Draw("Hello, world", ref f, new Point(10, 80), new Gray(255));
                using (Image<Gray, Byte> img2 = Utilities.stripBorder(img, new Gray(100)))
                    CvInvoke.cvShowImage("After crop", img2.Ptr);
            }
        }
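Utilities.stripBorder is assumed to crop the image to the bounding box of pixels brighter than the given threshold, trimming the uniform border. A brute-force sketch:

        public static Image<Gray, Byte> stripBorder(Image<Gray, Byte> img, Gray threshold)
        {
            byte t = (byte)threshold.Intensity;
            int minX = img.Width, minY = img.Height, maxX = -1, maxY = -1;
            byte[, ,] data = img.Data;
            // find the bounding box of all pixels above the threshold
            for (int y = 0; y < img.Height; y++)
                for (int x = 0; x < img.Width; x++)
                    if (data[y, x, 0] > t)
                    {
                        if (x < minX) minX = x;
                        if (x > maxX) maxX = x;
                        if (y < minY) minY = y;
                        if (y > maxY) maxY = y;
                    }
            if (maxX < 0) return img.Copy(); // nothing above threshold; keep as-is
            return img.Copy(new System.Drawing.Rectangle(
                minX, minY, maxX - minX + 1, maxY - minY + 1));
        }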
Code example #21
        //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::


        //:::::::Display the stuff:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        private void CompositionTarget_Rendering(object sender, EventArgs e)
        {
            //---------------------------------------------------------------
            List<Object> returnHandDetectorFrontActual = new List<object>(2);
            List<Object> returnHandDetectorSideActual = new List<object>(2);
            System.Drawing.Rectangle[] RoiKinectFrontActual;
            System.Drawing.Rectangle[] RoiKinectSideActual;
            Image<Gray, Byte> imagenKinectGray1;
            Image<Gray, Byte> imagenKinectGray2;
            Image<Gray, Byte> imageRoi1 = new Image<Gray,Byte>(200, 200);
            Image<Gray, Byte> imageRoi2 = new Image<Gray, Byte>(200, 200);
            
            List<Object> returnGettingSegK1 = new List<object>(); 
            List<Object> returnGettingSegK2 = new List<object>(); 
            
            bool noPositiveFalses;
            bool flag_execute = false;
            bool FrameHandDetected = false;
            bool detectRoiFront = false;
            bool detectRoiSide = false;

            //to get the position of the hand  
            PointF centerHandFront = new PointF(); 
            PointF centerHandSide = new PointF(); 
            PointF positionRoi1Front;  
            PointF positionRoi1Side;  
            //-------------------------------------------------------------------------------

            //-------------------------------------------------------------------------------
            //Get the current frame and the next frame, to discard frames with false positives.
            imagenKinectGray1 = new Image<Gray, byte>(pathFront + "Front" + "_" + numFrames.ToString() + "_1_1" + ".png"); //Set the folder
            imagenKinectGray2 = new Image<Gray, byte>(pathSide + "Side" + "_" + numFrames.ToString() + "_1_1" + ".png"); 

            returnHandDetectorFrontActual = HandDetection.Detection(imagenKinectGray1, positionCenterHandF);
            returnHandDetectorSideActual = HandDetection.Detection(imagenKinectGray2, positionCenterHandS);

            //Cast the return of each frame                                                      (TODO: modify)
            RoiKinectFrontActual = (System.Drawing.Rectangle[])returnHandDetectorFrontActual[0]; //Include all the rectangles that are returned
            RoiKinectSideActual = (System.Drawing.Rectangle[])returnHandDetectorSideActual[0];

            //Set the bool variables,
            if (RoiKinectFrontActual.Length != 0)
                detectRoiFront = true;
            if (RoiKinectSideActual.Length != 0)
                detectRoiSide = true; 

            //To compare two consecutive frames.
            if ( (numFrames == 1) || (FrameHandDetected == false) ) 
            {   
                RectArrayPrev = RoiKinectFrontActual; 
            }

            //Save the images to see how many it detects
            //{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{ 
            int numHsFront = RoiKinectFrontActual.Length;
            int numHsSide = RoiKinectSideActual.Length;

            imagenKinectGray1.Save(pathFront + "DFront_" + numFrames.ToString() + ".png");
            imagenKinectGray2.Save(pathSide + "DSide_" + numFrames.ToString() + ".png"); 

            switch (numHsFront)
            {
                case 0:
                    imagenKinectGray1.Save(pathFront + @"CeroH\" + numFrames.ToString() + "_" + sec.ToString() + "_" + fps.ToString() + ".png");
                    break;
                case 1:
                    imagenKinectGray1.Save(pathFront + @"OneH\" + numFrames.ToString() + "_" + sec.ToString() + "_" + fps.ToString() + ".png");
                    break;
                default:
                    imagenKinectGray1.Save(pathFront + @"TwoH\" + numFrames.ToString() + "_" + sec.ToString() + "_" + fps.ToString() + ".png");
                    break;
            }

            switch (numHsSide)
            {
                case 0:
                    imagenKinectGray2.Save(pathSide + @"CeroH\" + numFrames.ToString() + "_" + sec.ToString() + "_" + fps.ToString() + ".png");
                    break;
                case 1:
                    imagenKinectGray2.Save(pathSide + @"OneH\" + numFrames.ToString() + "_" + sec.ToString() + "_" + fps.ToString() + ".png");
                    break;
                default:
                    imagenKinectGray2.Save(pathSide + @"TwoH\" + numFrames.ToString() + "_" + sec.ToString() + "_" + fps.ToString() + ".png");
                    break;
            }
            //}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}


            if ( (detectRoiFront) || (detectRoiSide) ) //This runs if the Kinect frame was detected.
            {
                if (detectRoiFront)
                {
                    returnGettingSegK1 = GettingSegmentationK1.HandConvexHull(imagenKinectGray1, RoiKinectFrontActual[0]);
                    
                    if (returnGettingSegK1 != null)
                    {
                        centerHandFront = (PointF)returnGettingSegK1[0];                //TODO: move this block into a helper function

                        positionRoi1Front = RoiKinectFrontActual[0].Location;
                        positionCenterHandF.X = positionRoi1Front.X + centerHandFront.X;
                        positionCenterHandF.Y = positionRoi1Front.Y + centerHandFront.Y;

                        CircleF centrillo = new CircleF(positionCenterHandF, 5f);

                        imagenKinectGray1.Draw(centrillo, new Gray(150), 3);

                        //SaveFeaturesText.SaveFeaturesTraining("2", returnGettingSegK1, pathFront+"TrainingFront2.txt"); 
                        SaveFeaturesText.SaveFeaturesTest(numFrames, returnGettingSegK1, pathFront + "Test1Front.txt");
                    }  
                }

                if (detectRoiSide)
                {
                    returnGettingSegK2 = GettingSegmentationK2.HandConvexHull(imagenKinectGray2, RoiKinectSideActual[0]);

                    if (returnGettingSegK2 != null)
                    {
                        centerHandSide = (PointF)returnGettingSegK2[0];                //TODO: move this block into a helper function

                        positionRoi1Side = RoiKinectSideActual[0].Location;
                        positionCenterHandS.X = positionRoi1Side.X + centerHandSide.X;
                        positionCenterHandS.Y = positionRoi1Side.Y + centerHandSide.Y;

                        CircleF centrillo = new CircleF(positionCenterHandS, 5f);

                        imagenKinectGray2.Draw(centrillo, new Gray(150), 3);

                        //SaveFeaturesText.SaveFeaturesTraining("2", returnGettingSegK1, pathFront+"TrainingFront2.txt"); 
                        SaveFeaturesText.SaveFeaturesTest(numFrames, returnGettingSegK2, pathSide + "Test1Side.txt");
                    } 
                }

                if ((RoiKinectFrontActual.Length == 1) && (RoiKinectSideActual.Length == 1)) //To count how many frames both views detect a hand simultaneously
                {
                    //SaveFeaturesText.SaveFeaturesTraining("2", returnGettingSegK1, pathFront+"TrainingFront2.txt"); 
                    SaveFeaturesText.FeaturesTwoKinectTest(numFrames, returnGettingSegK1, returnGettingSegK2, pathFront + "Test1.txt"); 

                    //Save the images
                    imagenKinectGray1.Save(pathFront + "F_" + numFrames.ToString() + "_"+ ".png");
                    imagenKinectGray2.Save(pathSide + "S_" + numFrames.ToString() + "_"+ ".png");
                }
            } 

            DepthImageK1.Source = imagetoWriteablebitmap(imagenKinectGray1);
            DepthImageK2.Source = imagetoWriteablebitmap(imagenKinectGray2); 

            if (fps == 30)
            {
                fps = 0; //reset so the post-increment below restarts the per-second count at 1
                sec++;
            }
            fps++;
            numFrames++; 

        } //end CompositionTarget 
コード例 #22
0
        private void button_Click(object sender, RoutedEventArgs e)
        {
            OpenFileDialog openFileDialog = new OpenFileDialog();
            if (openFileDialog.ShowDialog() != true) return; //bail out if the user cancelled the dialog
            var filePath = openFileDialog.FileName;
            Image<Bgr, Byte> image = new Image<Bgr, byte>(filePath); //Read the files as an 8-bit Bgr image
            List<System.Drawing.Rectangle> faces = new List<System.Drawing.Rectangle>();
            List<System.Drawing.Rectangle> eyes = new List<System.Drawing.Rectangle>();

            Detect(image, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml", faces, eyes);

            foreach (System.Drawing.Rectangle face in faces)
                image.Draw(face, new Bgr(System.Drawing.Color.Red), 2);
            foreach (System.Drawing.Rectangle eye in eyes)
                image.Draw(eye, new Bgr(System.Drawing.Color.Blue), 2);

            ImageViewer.Show(image);
            File.WriteAllBytes("test.jpg", image.ToJpegData());

            Image<Gray, Byte> smileImage = new Image<Gray, byte>("happy.jpg"); //Read the file as an 8-bit grayscale image
            Image<Gray, Byte> sadImage = new Image<Gray, byte>("sad.jpg"); //Read the file as an 8-bit grayscale image

            List<Image<Gray, Byte>> trainingList = new List<Image<Gray, byte>>();
            trainingList.Add(smileImage);
            trainingList.Add(sadImage);

            List<string> labelList = new List<string>();
            labelList.Add("happy");
            labelList.Add("sad");
            // labelList.Add(2);

            MCvTermCriteria termCrit = new MCvTermCriteria(10, 0.001); //stop after 10 iterations or once eps drops below 0.001

            //Eigen face recognizer; 5000 is the eigen distance threshold for accepting a match
            EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
                trainingList.ToArray(),
                labelList.ToArray(),
                5000,
                ref termCrit);

            Image<Gray, Byte> inputImage = new Image<Gray, byte>(filePath); //Read the input file as an 8-bit grayscale image
            var resizedImage = inputImage.Resize(smileImage.Width, smileImage.Height, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

            var name = recognizer.Recognize(resizedImage).Label;

            //EigenFaceRecognizer needs integer labels: 1 = happy, 2 = sad (same order as trainingList)
            List<int> temp = new List<int>();
            temp.Add(1);
            temp.Add(2);

            EigenFaceRecognizer recognizer2 = new EigenFaceRecognizer(80, double.PositiveInfinity);
            recognizer2.Train(trainingList.ToArray(), temp.ToArray());
            var dd = recognizer2.Predict(resizedImage);
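            //dd.Label holds the predicted integer label and dd.Distance the match distance (Emgu's PredictionResult)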

            ImageViewer.Show(resizedImage);
        }
コード例 #23
0
        /// <summary>
        /// Main processing routine for frames grabbed from the capture device
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void _Capture_ImageGrabbed(object sender, EventArgs e)
        {
            //let's get a frame from our capture device
            img = _Capture.RetrieveBgrFrame();
            Gray_Frame = img.Convert<Gray, Byte>();

            //apply chess board detection
            if (currentMode == Mode.SavingFrames)
            {
                corners = CameraCalibration.FindChessboardCorners(Gray_Frame, patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
                //we draw the corners manually in the loop below so we can show a colour image rather than a gray one;
                //the built-in alternative: CameraCalibration.DrawChessboardCorners(Gray_Frame, patternSize, corners);

                if (corners != null) //chess board found
                {
                    //make measurements more accurate by using FindCornerSubPix
                    Gray_Frame.FindCornerSubPix(new PointF[1][] { corners }, new System.Drawing.Size(11, 11), new System.Drawing.Size(-1, -1), new MCvTermCriteria(30, 0.1));
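                    //(FindCornerSubPix expects a jagged array, hence wrapping the single corner set)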

                    //if the go button has been pressed, start acquiring frames; otherwise just display the points
                    if (start_Flag)
                    {
                        Frame_array_buffer[frame_buffer_savepoint] = Gray_Frame.Copy(); //store the image
                        frame_buffer_savepoint++; //advance the buffer position

                        //check the state of buffer
                        if (frame_buffer_savepoint == Frame_array_buffer.Length) currentMode = Mode.Caluculating_Intrinsics; //buffer full
                    }

                    //draw the results
                    img.Draw(new CircleF(corners[0], 3), new Bgr(System.Drawing.Color.Yellow), 1);
                    for (int i = 1; i < corners.Length; i++)
                    {
                        img.Draw(new LineSegment2DF(corners[i - 1], corners[i]), line_colour_array[i], 2);
                        img.Draw(new CircleF(corners[i], 3), new Bgr(System.Drawing.Color.Yellow), 1);
                    }
                    //tune this delay based on the buffer size:
                    //a small buffer wants a long delay, a large buffer a short one
                    Thread.Sleep(100);//allow the user to move the board to a different position
                }
                corners = null;
            }
            if (currentMode == Mode.Caluculating_Intrinsics)
            {
                //we can do this in the loop above to increase speed
                for (int k = 0; k < Frame_array_buffer.Length; k++)
                {

                    corners_points_list[k] = CameraCalibration.FindChessboardCorners(Frame_array_buffer[k], patternSize, Emgu.CV.CvEnum.CALIB_CB_TYPE.ADAPTIVE_THRESH);
                    //for accuracy, refine each corner set against the buffered frame it was found in
                    Frame_array_buffer[k].FindCornerSubPix(new PointF[1][] { corners_points_list[k] }, new System.Drawing.Size(11, 11), new System.Drawing.Size(-1, -1), new MCvTermCriteria(30, 0.1));

                    //Fill our objects list with the real-world measurements for the intrinsic calculations
                    //(each chessboard square is taken to be 20 units on a side)
                    List<MCvPoint3D32f> object_list = new List<MCvPoint3D32f>();
                    for (int i = 0; i < height; i++)
                    {
                        for (int j = 0; j < width; j++)
                        {
                            object_list.Add(new MCvPoint3D32f(j * 20.0F, i * 20.0F, 0.0F));
                        }
                    }
                    corners_object_list[k] = object_list.ToArray();
                }

                //our error should be as close to 0 as possible

                double error = CameraCalibration.CalibrateCamera(corners_object_list, corners_points_list, Gray_Frame.Size, IC, Emgu.CV.CvEnum.CALIB_TYPE.CV_CALIB_RATIONAL_MODEL, new MCvTermCriteria(30, 0.1), out EX_Param);
                //If CALIB_TYPE includes CV_CALIB_USE_INTRINSIC_GUESS and/or CV_CALIB_FIX_ASPECT_RATIO, some or all of fx, fy, cx, cy must be initialized before calling the function.
                //If you use the FIX_ASPECT_RATIO and FIX_FOCAL_LENGTH options, these values need to be set in the intrinsic parameters before CalibrateCamera is called; otherwise 0 is used as the default.
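                //A minimal sketch of presetting that guess (assuming the Emgu 2.x IntrinsicCameraParameters API; the focal values are illustrative only):
                //IC.IntrinsicMatrix[0, 0] = 700;                      //fx
                //IC.IntrinsicMatrix[1, 1] = 700;                      //fy
                //IC.IntrinsicMatrix[0, 2] = Gray_Frame.Width / 2.0;   //cx
                //IC.IntrinsicMatrix[1, 2] = Gray_Frame.Height / 2.0;  //cy
                //IC.IntrinsicMatrix[2, 2] = 1.0;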
                System.Windows.Forms.MessageBox.Show("Intrinsic Calculation Error: " + error.ToString(), "Results", MessageBoxButtons.OK, MessageBoxIcon.Information); //display the results to the user
                currentMode = Mode.Calibrated;
                this.Dispatcher.Invoke((Action)(() =>
                {
                    Write_BTN.IsEnabled = true;
                }));
            }
            if (currentMode == Mode.Calibrated)
            {
                //build the undistortion maps from the calibrated intrinsics
                Matrix<float> Map1, Map2;
                IC.InitUndistortMap(img.Width, img.Height, out Map1, out Map2);
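                //Map1/Map2 give, for every output pixel, the source coordinates to sample from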

                //remap the image to the particular intrinsics
                //In the current version of EMGU any pixel that is not corrected is set to transparent allowing the original image to be displayed if the same
                //image is mapped backed, in the future this should be controllable through the flag '0'
                Image<Bgr, Byte> temp = img.CopyBlank();
                CvInvoke.cvRemap(img, temp, Map1, Map2, 0, new MCvScalar(0));
                img = temp.Copy();

                //set up to allow another calculation
                SetButtonState(true);
                start_Flag = false;
            }
            Image<Bgr, byte> mainImage = img.Resize(((double)Main_Picturebox.Width / (double)img.Width), Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR);
            Main_Picturebox.Image = mainImage;
        }
コード例 #24
0
        private void detectFaces(Image<Bgr,Byte> image, int mode)
        {
            /* Check to see that there was a frame collected */
            if (image != null)
            {
                if (mode == CAMERA)
                    mode_name.Content = "Camera";
                else
                    mode_name.Content = "Image";

                /* convert the frame from the camera to a transformed Image that improves facial detection */
                Image<Gray, Byte> grayFrame = image.Convert<Gray, Byte>();
                /* Detect how many faces are there on the image */
                var detectedFaces = grayFrame.DetectHaarCascade(haarCascade)[0]; //the detector returns a jagged array; index 0 holds the detections

                /* update the faces count */
                faces_count.Content = detectedFaces.Length;
                /* loop through all faces that were detected and draw a rectangle */
                foreach (var face in detectedFaces)
                {
                    image.Draw(face.rect, new Bgr(0, double.MaxValue, 0), 3); //effectively full-intensity green
                }
                image_to_filter = image;
                filtered_image = image;
                modified_image = ToBitmapSource(image);
                /* set the transformed image to the image1 object */
                image1.Source = modified_image;
            }
        }
コード例 #25
0
        //:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::


        //:::::::Display the stuff:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        private void CompositionTarget_Rendering(object sender, EventArgs e)
        {
            //---------------------------------------------------------------
            List<Object> returnHandDetectorFrontActual = new List<object>(2);
            List<Object> returnHandDetectorSideActual = new List<object>(2);
            System.Drawing.Rectangle[] RoiKinectFrontActual;
            System.Drawing.Rectangle[] RoiKinectSideActual;
            Image<Gray, Byte> imagenKinectGray1;
            Image<Gray, Byte> imagenKinectGray2;
            Image<Gray, Byte> imageRoi1 = new Image<Gray,Byte>(200, 200);
            Image<Gray, Byte> imageRoi2 = new Image<Gray, Byte>(200, 200);
            
            List<Object> returnGettingSegK1 = new List<object>(); 
            List<Object> returnGettingSegK2 = new List<object>(); 
            
            bool noPositiveFalses;
            bool flag_execute = false;
            bool FrameHandDetected = false;
            bool detectRoiFront = false;
            bool detectRoiSide = false;

            //to get the position of the hand  
            PointF centerHandFront = new PointF(); 
            PointF centerHandSide = new PointF(); 
            PointF positionRoi1Front;  
            PointF positionRoi1Side;  
            //-------------------------------------------------------------------------------

            //-------------------------------------------------------------------------------
            //Get the current frame and the next one, to discard frames with false positives.
            imagenKinectGray1 = new Image<Gray, byte>(pathFront + "Front" + "_" + numFrames.ToString() + "_1_1" + ".png"); //set the source folder here
            imagenKinectGray2 = new Image<Gray, byte>(pathSide + "Side" + "_" + numFrames.ToString() + "_1_1" + ".png");

            returnHandDetectorFrontActual = HandDetection.Detection(imagenKinectGray1, positionCenterHandF);
            returnHandDetectorSideActual = HandDetection.Detection(imagenKinectGray2, positionCenterHandS);

            //Cast the return values of each frame                                               //TODO: rework this
            RoiKinectFrontActual = (System.Drawing.Rectangle[])returnHandDetectorFrontActual[0]; //TODO: include every rectangle that is returned
            RoiKinectSideActual = (System.Drawing.Rectangle[])returnHandDetectorSideActual[0];

            //Set the detection flags.
            if (RoiKinectFrontActual.Length != 0)
                detectRoiFront = true;
            if (RoiKinectSideActual.Length != 0)
                detectRoiSide = true; 

            //Save the images
            //{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{ 
            int numHsFront = RoiKinectFrontActual.Length;
            int numHsSide = RoiKinectSideActual.Length;

            imagenKinectGray1.Save(pathFront + "DF_" + numFrames.ToString() + ".png");
            imagenKinectGray2.Save(pathSide + "DS_" + numFrames.ToString() + ".png"); 
            //}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}

            if (detectRoiFront)
            {
                returnGettingSegK1 = GettingSegmentationK1.HandConvexHull(imagenKinectGray1, RoiKinectFrontActual[0]);

                if (returnGettingSegK1 != null)
                {
                    try
                    {
                        centerHandFront = (PointF)returnGettingSegK1[0];                //TODO: move this block into a helper function

                        positionRoi1Front = RoiKinectFrontActual[0].Location;
                        positionCenterHandF.X = positionRoi1Front.X + centerHandFront.X;
                        positionCenterHandF.Y = positionRoi1Front.Y + centerHandFront.Y;

                        CircleF centrillo = new CircleF(positionCenterHandF, 5f);

                        imagenKinectGray1.Draw(centrillo, new Gray(150), 3);
                    }
                    catch (Exception)
                    { 
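                        //segmentation output had an unexpected shape; skip drawing for this frame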
                    }
                }
            } 

            if (detectRoiSide)
            {
                returnGettingSegK2 = GettingSegmentationK2.HandConvexHull(imagenKinectGray2, RoiKinectSideActual[0]);

                if (returnGettingSegK2 != null )
                {
                    try
                    {
                        centerHandSide = (PointF)returnGettingSegK2[0];                //TODO: move this block into a helper function

                        positionRoi1Side = RoiKinectSideActual[0].Location;
                        positionCenterHandS.X = positionRoi1Side.X + centerHandSide.X;
                        positionCenterHandS.Y = positionRoi1Side.Y + centerHandSide.Y;

                        CircleF centrillo = new CircleF(positionCenterHandS, 5f);

                        imagenKinectGray2.Draw(centrillo, new Gray(150), 3);
                    } 
                    catch (Exception)
                    {
                        //segmentation output had an unexpected shape; skip drawing for this frame
                    }
                }
            }

            SaveFeaturesText.FeaturesTwoKTest(numFrames, returnGettingSegK1, returnGettingSegK2, pathFront + "Clasificar2Features.txt");
            SaveFeaturesText.FeaturesOneTest(numFrames, returnGettingSegK1, pathFront + "Clasificar1feature.txt");
            

            DepthImageK1.Source = imagetoWriteablebitmap(imagenKinectGray1);
            DepthImageK2.Source = imagetoWriteablebitmap(imagenKinectGray2); 

            if (fps == 30)
            {
                fps = 0; //reset so the post-increment below restarts the per-second count at 1
                sec++;
            }
            fps++;
            numFrames++; 

        } //end CompositionTarget 
コード例 #26
0
ファイル: Cage4.xaml.cs プロジェクト: kadron/rat-trek
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame != null)
                {
                    colorFrame.CopyPixelDataTo(this.colorPixels);
                    this.colorBitmap.WritePixels(
                        new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                        this.colorPixels,
                        this.colorBitmap.PixelWidth * sizeof(int),
                        0);
                    int sx = (int)this.sld_c1_sX.Value;
                    int sy = (int)this.sld_c1_sY.Value;
                    int dx = (int)this.sld_c1_dX.Value;
                    int dy = (int)this.sld_c1_dY.Value;
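                    //clamp the selection to a non-negative width/height, then find its centre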
                    int w = 0;
                    int h = 0;
                    if (dx >= sx)
                        w = (dx - sx);
                    if (dy >= sy)
                        h = (dy - sy);
                    float cx = (float)sx + ((float)w) / 2;
                    float cy = (float)sy + ((float)h) / 2;
                    Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(colorBitmap.ToBitmap());
                    box = new MCvBox2D(new PointF(cx, cy), new SizeF(new PointF((float)w, (float)h)), 0);
                    openCVImg.Draw(box, new Bgr(System.Drawing.Color.Green), 4);
                    this.cimg_cage4.Source = ImageHelpers.ToBitmapSource(openCVImg);

                }
            }
        }
コード例 #27
0
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            BitmapSource depthBmp = null;
            blobCount = 0;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {

                        blobCount = 0;

                        depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value); //keep only the depth band between the slider bounds
                        
                        Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                        Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                        using (MemStorage stor = new MemStorage())
                        {
                            //retrieve only the outer contours (CV_RETR_EXTERNAL ignores holes)
                            Contour<System.Drawing.Point> contours = gray_image.FindContours(
                             Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                             Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                             stor);

                            //walk the linked list of detected contours
                            for (; contours != null; contours = contours.HNext)
                            {

                                if ((contours.Area > Math.Pow(sliderMinSize.Value, 2)) && (contours.Area < Math.Pow(sliderMaxSize.Value, 2)))
                                {
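                                    //fit the minimum-area (rotated) bounding box around the contour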
                                    MCvBox2D box = contours.GetMinAreaRect();                                    
                                    openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);                                    
                                    blobCount++;
                                }
                            }
                        }

                        this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);                        
                        txtBlobCount.Text = blobCount.ToString();
                    }
                }


                if (colorFrame != null)
                {
                    
                      colorFrame.CopyPixelDataTo(this.colorPixels);
                      this.colorBitmap.WritePixels(
                          new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                          this.colorPixels,
                          this.colorBitmap.PixelWidth * sizeof(int),
                          0);
                    
                }
            }
        }
コード例 #28
0
        private Image<Hls, Byte> piecesCheck2(Image<Bgr, Byte> img)
        {
            Image<Hls, Byte> result = new Image<Hls, byte>(img.Bitmap).PyrDown().PyrUp();

            Game a = new Game(leftRadio.IsChecked.Value);

            if (gaussian == true)
                result = result.SmoothGaussian(gaussianValue);

            if (contrast == true)
                result._EqualizeHist();

            //result[2] += saturation;

            int countBlack;
            int countWhite;

            List<int> gameState = new List<int>();
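            //start every square as 2 (= empty); the loop below re-marks squares as 0 (white) or 1 (black)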

            for (int i = 0; i < 32; i++)
            {
                gameState.Add(2);
            }

            for (int i = 0; i < 32; i++)
            {
                int x = (int)boxList[i].center.X;
                int y = (int)boxList[i].center.Y;

                countWhite = 0;
                countBlack = 0;

                byte lightness = result.Data[y, x, 1]; //channel 1 of an Hls image is lightness

                if (lightness > whiteLightness)
                {
                    countWhite++;
                    gameState[i] = 0;
                    result.Draw(new CircleF(boxList[i].center, 3), new Hls(120, 50, 100), 3);
                }
                if (lightness < blackLightness)
                {
                    countBlack++;
                    gameState[i] = 1;
                    result.Draw(new CircleF(boxList[i].center, 3), new Hls(220, 60, 100), 3);
                }
            }

            //note: previousGame and currentGame end up referencing the same Game object here
            previousGame = a;

            a.updateStatus(gameState);

            currentGame = a;

            return result;
        }
コード例 #29
0
        private Image<Hls, Byte> piecesCheck(Image<Bgr, Byte> img)
        {
            Image<Hls, Byte> result = new Image<Hls, byte>(img.Bitmap).PyrDown().PyrUp();

            if (gaussian == true)
                result = result.SmoothGaussian(gaussianValue);

            if (contrast == true)
                result._EqualizeHist();

            //result[2] += saturation;

            int countBlack;
            int countWhite;

            for (int i = 0; i < 32; i++)
            {
                int x = (int)boxList[i].center.X;
                int y = (int)boxList[i].center.Y;

                countWhite = 0;
                countBlack = 0;

                byte lightness = result.Data[y, x, 1]; //channel 1 of an Hls image is lightness

                if (lightness > whiteLightness)
                {
                    //countWhite++;
                    result.Draw(new CircleF(boxList[i].center, 3), new Hls(120, 50, 100), 3);
                }
                if (lightness < blackLightness)
                {
                    //countBlack++;
                    result.Draw(new CircleF(boxList[i].center, 3), new Hls(220, 60, 100), 3);
                }
            }

            return result;
        }
コード例 #30
-1
        void myKinect_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame == null) return;

                bmap = OpenCV2WPFConverter.ColorImageFrameToBitmap(colorFrame);

                imgBgr = new Image<Bgr, Byte>(bmap);
                imgHsv = new Image<Hsv, Byte>(bmap);

                if (imgBgr == null || imgHsv == null) return;
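                //InRange yields a binary mask: pixels inside the [min, max] colour band become 255, all others 0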
                processedBgr = imgBgr.InRange(new Bgr(B_min, G_min, R_min), new Bgr(B_max, G_max, R_max));

                processedHsv = imgHsv.InRange(new Hsv(H_min, S_min, V_min), new Hsv(H_max, S_max, V_max));
                //0,130,0 ~ 120, 256, 120 for green color.
                processedBgr = processedBgr.SmoothGaussian(7);
                processedHsv = processedHsv.SmoothGaussian(7);

                //HoughCircles(cannyThreshold, accumulatorThreshold, dp = 2, minDist = height / 8, minRadius = 8, maxRadius = 40)
                CircleF[] circlesBgr = processedBgr.HoughCircles(cannyThreshold, circleAccumulatorThreshold
                    , 2, processedBgr.Height / 8, 8, 40)[0];

                CircleF[] circlesHsv = processedHsv.HoughCircles(cannyThreshold, circleAccumulatorThreshold
                    , 2, processedHsv.Height / 8, 8, 40)[0];

                HsvCircleCount = 0;
                RgbCircleCount = 0;

                // Draw circles detected in the RGB (Bgr) stream
                foreach (CircleF circle in circlesBgr)
                {
                    RgbCircleCount += 1;
                    imgBgr.Draw(circle, new Bgr(System.Drawing.Color.Bisque), 3);
                }

                // Draw circles detected in the HSV stream (also drawn onto the Bgr frame)
                foreach (CircleF circle in circlesHsv)
                {
                    HsvCircleCount += 1;
                    imgBgr.Draw(circle, new Bgr(System.Drawing.Color.Bisque), 3);
                }

                kinectVideo.Source = OpenCV2WPFConverter.ToBitmapSource(imgBgr);
                HsvVideo.Source = OpenCV2WPFConverter.ToBitmapSource(processedHsv);
                RgbVideo.Source = OpenCV2WPFConverter.ToBitmapSource(processedBgr);
                //control the distance of different circles!
                this.HsvCircleUI.Content = HsvCircleCount.ToString();
                this.RgbCircleUI.Content = RgbCircleCount.ToString();

            }
        }