/// <summary>
/// Detects candidate car bounding boxes in <paramref name="frame"/> via background
/// subtraction + morphology + contour filtering.
/// </summary>
/// <param name="frame">Current video frame. When <paramref name="isDebug"/> is true the
/// detected rectangles are drawn directly onto this frame (intentional side effect).</param>
/// <param name="bgSubtractor">Background model used to produce the foreground mask.</param>
/// <param name="groupRectangles">When true, merges overlapping/nested rectangles.</param>
/// <param name="isDebug">When true, draws the detections onto <paramref name="frame"/>.</param>
/// <returns>Bounding rectangles of blobs that pass the size/position filters.</returns>
private List <Rect> DetectCars(Mat frame, BackgroundSubtractorMOG bgSubtractor, bool groupRectangles, bool isDebug = true)
        {
            var cars = new List <Rect>();

            // Mats wrap native OpenCV memory; dispose them deterministically instead of
            // leaking one set of temporaries per processed frame.
            using (var fgMask = new Mat())
            using (var closing = new Mat())
            using (var opening = new Mat())
            using (var hierarchy = new Mat())
            using (var kernel = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(10, 10)))
            {
                //get the mask of what changed.  Everything unchanged will be black
                bgSubtractor.Apply(frame, fgMask);

                // Close small holes, then remove speckle noise, then dilate so detected
                // changes are padded up to roughly the 10x10 kernel size.
                Cv2.MorphologyEx(fgMask, closing, MorphTypes.Close, kernel);
                Cv2.MorphologyEx(closing, opening, MorphTypes.Open, kernel);
                Cv2.Dilate(opening, fgMask, kernel);

                Cv2.FindContours(fgMask, out Mat[] contours, hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxTC89L1);
                foreach (var contour in contours)
                {
                    var boundingRect = Cv2.BoundingRect(contour);
                    // Each contour is itself a Mat and must be released.
                    contour.Dispose();

                    //TODO:  Add to config
                    // Reject blobs whose size/position can't plausibly be a car.
                    if (boundingRect.Width < 130 ||
                        boundingRect.Height < 40 ||
                        boundingRect.Bottom < 120 ||
                        boundingRect.Height > 210)
                    {
                        continue;
                    }

                    cars.Add(boundingRect);
                }
            }

            //group rectangles together so you don't get rectangles inside of rectangles
            if (groupRectangles)
            {
                // GroupRectangles with groupThreshold=1 discards "groups" of a single
                // rectangle, so every rect is deliberately present twice in the list to
                // guarantee each detection survives grouping.
                var duplicateCars = new List <Rect>(cars);
                duplicateCars.AddRange(cars);

                Cv2.GroupRectangles(duplicateCars, 1, 1);
                cars = duplicateCars;
            }

            if (isDebug)
            {
                // Draw directly on the caller's frame for visual inspection.
                foreach (var boundingRect in cars)
                {
                    Cv2.Rectangle(frame, boundingRect, Scalar.Blue, 5);
                }

                //Cv2.ImShow("Mask", fgMask);
                //Cv2.WaitKey(1);
            }

            return(cars);
        }
예제 #2
0
 /// <summary>
 /// Timer/event callback that advances the video by one frame, applies the
 /// background-subtraction mode selected by <c>playingState</c>, and pushes the
 /// result to the WPF <c>ImageViewer</c> on the UI thread.
 /// </summary>
 private void ProcessVideo(object sender, EventArgs e)
 {
     FrameCounter++;
     try
     {
         // Release the capture and stop once the final frame has been reached.
         if (FrameCounter == TotalFrames)
         {
             capturedVideo.Dispose();
             return;
         }

         capturedVideo.Retrieve(originalFrame);

         // Select what to display based on the current playback mode:
         // 0 = raw frame, 1 = MOG foreground mask, 2 = MOG2 foreground mask.
         switch (playingState)
         {
             case 0:
                 displayingFrame = originalFrame.Clone();
                 break;

             case 1:
                 CvInvoke.CvtColor(originalFrame, originalFrame, Emgu.CV.CvEnum.ColorConversion.Bgra2Gray, 1);
                 mog.Apply(originalFrame, thresholdedFrame);
                 displayingFrame = thresholdedFrame.Clone();
                 break;

             case 2:
                 CvInvoke.CvtColor(originalFrame, originalFrame, Emgu.CV.CvEnum.ColorConversion.Bgra2Gray, 1);
                 mog2.Apply(originalFrame, thresholdedFrame, -1);
                 displayingFrame = thresholdedFrame.Clone();
                 break;
         }

         // Marshal the bitmap conversion and display update onto the UI thread.
         this.Dispatcher.Invoke(() =>
         {
             var bmp = new BitmapImage();
             using (var ms = new MemoryStream())
             {
                 displayingFrame.Bitmap.Save(ms, ImageFormat.Png);
                 ms.Position = 0;
                 bmp.BeginInit();
                 bmp.StreamSource = ms;
                 // OnLoad forces the stream to be read fully at EndInit, so the
                 // MemoryStream can be disposed immediately afterwards.
                 bmp.CacheOption = BitmapCacheOption.OnLoad;
                 bmp.EndInit();
             }
             ImageViewer.Source = bmp;
         });
     }
     catch (Exception ex)
     {
         System.Windows.MessageBox.Show("Something went wrong!\n" + ex.ToString(), "Error!", MessageBoxButton.OK, MessageBoxImage.Asterisk);
     }
 }
예제 #3
0
        /// <summary>
        /// Per-tick motion detector: grabs a frame, computes the foreground mask in HSV,
        /// and draws one rectangle spanning all sufficiently-large moving contours.
        /// Fixes the original bug where, with no qualifying contour, the ROI was built
        /// from Int32.MaxValue/MinValue sentinels and produced a garbage rectangle.
        /// </summary>
        private void OnTimerTick(object sender, EventArgs e)
        {
            var workingFrame = capture.QueryFrame();

            PaintImage(workingFrame, camPictureBox);

            // Sentinels: only meaningful once at least one contour passes the filter.
            int xmin = Int32.MaxValue, ymin = Int32.MaxValue;
            int xmax = Int32.MinValue, ymax = Int32.MinValue;

            // Dispose per-tick Mats deterministically (native memory).
            using (var frameHsv = new Mat())
            using (var foregroundMask = new Mat())
            {
                CvInvoke.CvtColor(workingFrame, frameHsv, ColorConversion.Rgb2Hsv);

                backgroundSubtractor.Apply(frameHsv, foregroundMask);
                //CvInvoke.GaussianBlur(foregroundMask, foregroundMask, new Size(11, 11), 0);
                //CvInvoke.Threshold(foregroundMask, foregroundMask, 192, 255, ThresholdType.Binary);
                //PaintImage(foregroundMask, camPictureBox2);

                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                {
                    CvInvoke.FindContours(foregroundMask, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
                    var nContours = contours.Size;

                    for (var i = 0; i < nContours; i++)
                    {
                        var rect = CvInvoke.BoundingRectangle(contours[i]);
                        // Ignore contours covering less than 5% of the frame area.
                        if (rect.Height * rect.Width < 0.05 * workingFrame.Width * workingFrame.Height)
                        {
                            continue;
                        }

                        xmin = Math.Min(xmin, rect.Left);
                        xmax = Math.Max(xmax, rect.Right);
                        ymin = Math.Min(ymin, rect.Top);
                        ymax = Math.Max(ymax, rect.Bottom);
                    }
                }
            }

            //CvInvoke.cvSetImageROI(workingFrame, roi);
            workingFrame.CopyTo(currentFrame);

            // Only draw the ROI when motion was actually detected; otherwise the
            // sentinel values would yield a rectangle with huge negative extents.
            if (xmax > xmin && ymax > ymin)
            {
                var roi = new Rectangle(xmin, ymin, xmax - xmin, ymax - ymin);
                CvInvoke.Rectangle(workingFrame, roi, new MCvScalar(255, 0, 0));
            }

            PaintImage(workingFrame, camPictureBox2);
        }
예제 #4
0
        /// <summary>
        /// Per-frame entry point of the spell-trace state machine. While no valid trace is
        /// active it background-subtracts the infrared frame, detects blobs, and feeds
        /// keypoints to <c>ProcessKeypoints</c>; once a valid trace exists it either saves
        /// the trace image (capture mode) or classifies it with <c>spellAI</c> and plays a
        /// timed trace/art transition effect on <c>traceCanvas</c>.
        /// </summary>
        /// <param name="frameData">Raw 16-bit infrared pixel data for this frame.</param>
        /// <param name="infraredFrameDataSize">Size of the frame buffer (unused here).</param>
        /// <param name="infraredFrameDescription">Width/height metadata for the frame.</param>
        /// <param name="captureSpell">True = save the trace to disk; false = classify it.</param>
        /// <param name="spellName">Filename stem used when saving a captured trace.</param>
        /// <returns>0 while an effect is playing, otherwise the number of detected blobs.</returns>
        internal unsafe int ProcessFrame(ushort *frameData, uint infraredFrameDataSize, FrameDescription infraredFrameDescription, bool captureSpell, string spellName)
        {
            // If Valid Trace has been detected, we either need to process it or complete the effect that follows it.
            if (validTraceDetected)
            {
                // Process the traceFinal produced during the last frame.
                // The trace actually ended last frame, but instead of processing it right away,
                // we store that trace away until the next frame so the user can see their finished
                // trace before the CPU is plugged up with processing.
                if (!validTraceProcessed)
                {
                    if (captureSpell)
                    {
                        // Find the first unused "{spellName}_{counter}.png" path so earlier
                        // captures are never overwritten.
                        bool   path_found = false;
                        string path       = "";
                        int    counter    = 0;
                        while (!path_found)
                        {
                            path = Path.Combine(SAVE_PREFIX, $"{spellName}_{counter}.png");
                            if (!File.Exists(path))
                            {
                                path_found = true;
                            }
                            else
                            {
                                counter++;
                            }
                        }
                        Cv2.ImWrite(path, traceFinal);
                    }
                    else
                    {
                        // Starting the image as a larger image and then dilating/downsizing seems to produce better results than directly drawing the spell small.
                        Mat kernel = new Mat(5, 5, MatType.CV_8UC1);
                        kernel.SetTo(new Scalar(1));
                        Mat squeezed = new Mat();
                        Cv2.Dilate(traceFinal, squeezed, kernel, iterations: 2);
                        Cv2.Resize(squeezed, squeezed, new Size(SpellAI.TRACE_AI_SIZE, SpellAI.TRACE_AI_SIZE));
                        // Flatten the downsized 8-bit image into a float vector for the classifier.
                        int     pixels = SpellAI.TRACE_AI_SIZE * SpellAI.TRACE_AI_SIZE;
                        float[] sample = new float[pixels];
                        byte *  data   = (byte *)squeezed.Data;
                        for (int i = 0; i < pixels; i++)
                        {
                            sample[i] = (float)data[i];
                        }
                        var result = spellAI.Identify(sample);
                        // Fire the game action asynchronously so classification latency
                        // doesn't stall frame processing.
                        Task.Run(() => gameController.TriggerSpell(result));
                        // Load the spell's artwork as 32-bit float in [0, 1) for blending below.
                        spellArt = new Mat();
                        Cv2.ImRead($"{ART_PREFIX}{result}.png", ImreadModes.Grayscale).ConvertTo(spellArt, MatType.CV_32FC1, 1 / 256.0);
                        //Cv2.PutText(traceCanvas, result.ToString(), new Point(5, traceCanvas.Height-5), HersheyFonts.HersheySimplex, 1.5, Scalar.White);
                    }
                    validTraceProcessed = true;
                }
                //traceCanvas.SetTo(new Scalar(0));
                // Elapsed seconds since the trace effect started; drives the phase selection below.
                var current_effect_time = (DateTime.Now - traceDetectedEffectStart).TotalSeconds;
                //Cv2.Circle(traceCanvas,
                //    new Point(infraredFrameDescription.Width / 2, infraredFrameDescription.Height / 2),
                //    (int)(infraredFrameDescription.Width * (current_effect_time / VALID_TRACE_EFFECT_DURATION)),
                //    Scalar.White,
                //    thickness: 5);
                // Effect timeline: show trace -> cross-fade trace into art -> hold art -> reset.
                if (current_effect_time <= EFFECT_TRACE_DURATION)
                {
                    // Do nothing. traceCanvas is set to the preview right as soon as it is created (in EndTrace),
                    // so we don't need to update it here.
                }
                else if (current_effect_time <= EFFECT_TRACE_DURATION + EFFECT_TRANSITION_DURATION && !captureSpell)
                {
                    var ratio = (current_effect_time - EFFECT_TRACE_DURATION) / EFFECT_TRANSITION_DURATION;
                    Cv2.AddWeighted(spellTrace, 1 - ratio, spellArt, ratio, 0, traceCanvas);
                }
                else if (current_effect_time <= EFFECT_TRACE_DURATION + EFFECT_TRANSITION_DURATION + EFFECT_ART_DURATION && !captureSpell)
                {
                    //Yes, this will be repeated a whole bunch of times for no reason, but I don't care enough to fix it. So.
                    spellArt.CopyTo(traceCanvas);
                }
                else
                {
                    // Effect finished (or capture mode skipped the art phases): re-arm detection.
                    validTraceDetected  = false;
                    validTraceProcessed = false;
                }

                return(0);
            }
            else
            {
                //If ValidTraceDetected is false, then we need to work on detecting a new one.
                // Wrap the raw 16-bit IR buffer in a Mat (no copy), then scale down to 8-bit.
                var input     = new Mat(infraredFrameDescription.Height, infraredFrameDescription.Width, MatType.CV_16U, (IntPtr)frameData);
                Mat converted = new Mat();
                input.ConvertTo(converted, MatType.CV_8U, 1.0 / 256.0);

                Mat mask = new Mat();
                mog.Apply(converted, mask);

                var keypoints = blobby.Detect(mask);
                if (!TraceDetected()) // Show the user's beautiful face while no spell is being drawn.
                {
                    //traceCanvas.SetTo(new Scalar(0));
                    //Cv2.BitwiseAnd(converted, mask, converted);
                    foreach (var keypoint in keypoints)
                    {
                        Cv2.Circle(converted, (Point)keypoint.Pt, 10 /*(int)keypoint.Size*/, Scalar.White, 2);
                    }
                    converted.ConvertTo(traceCanvas, MatType.CV_32F, 1.0 / 256.0);
                }

                // This function call produces the traceFinal image, which gets saved or processed by ML.
                // However, it does not do anything with that image; we intentionally wait a frame so that the user has a spell to look at before clogging up the CPU.
                ProcessKeypoints(keypoints);
                converted.Dispose();
                mask.Dispose();
                input.Dispose();
                return(keypoints.Count());
            }
        }
예제 #5
0
        // Procedure uses as timer
        /// <summary>
        /// Per-tick pipeline: updates UI labels, grabs a frame, draws the ROI, runs MOG
        /// background subtraction, then post-processes/crops/tracks, and detects end of
        /// video. Fixes two defects from the original: a possible division by zero when
        /// speedBoxH is 0, and leaked GDI Bitmaps (PictureBox images were replaced every
        /// tick without disposing the previous one).
        /// </summary>
        private void MyProcess(object sender, EventArgs e)
        {
            // Changing UI Labels (Make thread)
            labelCurrent.Invoke((MethodInvoker) delegate {
                // Check Pause button
                if (!play)
                {
                    CapturePaused();
                }
                else
                {
                    CapturePlayed();
                }

                // Frame-Rate Change
                labelCurrent.Text      = FrameCount.ToString();
                labelROI_H.Text        = speedBoxH.ToString();
                labelPixelDensity.Text = PixelFrameDensity.ToString();
            });
            FrameCount++;

            // Capture the frame of video
            _capture.Retrieve(frame);

            // Show an adjustable ROI
            if (ROIEnabled)
            {
                speedBox = new Rectangle(speedBoxX, speedBoxY, speedBoxW, speedBoxH);
                //CvInvoke.PutText(frame, "" + speedBoxY, new Point(speedBoxX, speedBoxY - 10), Emgu.CV.CvEnum.FontFace.HersheyComplexSmall, 1, new MCvScalar(0, 255, 0), 1);
                //CvInvoke.PutText(frame, "" + (speedBoxY + speedBoxH), new Point(speedBoxX, speedBoxY + speedBoxH + 20), Emgu.CV.CvEnum.FontFace.HersheyComplexSmall, 1, new MCvScalar(0, 255, 0), 1);
            }
            else
            {
                speedBox = new Rectangle(0, 0, frame.Width, frame.Height);
            }

            CvInvoke.Rectangle(frame, speedBox, new MCvScalar(0, 255, 0), 2); // Green

            // Guard against a zero-height ROI; otherwise the density is meaningless
            // (or throws, depending on the operand types). Keeps the last good value.
            if (speedBoxH != 0)
            {
                PixelFrameDensity = Math.Round((Ref_dist_m + CalibrationVehicleLength) / speedBoxH, 3);
            }

            // Replace the displayed frame, disposing the previous Bitmap to avoid a
            // per-tick GDI handle leak.
            var previousFrameImage = pictureBox4.Image;
            pictureBox4.Image = new Bitmap(frame.Bitmap);
            if (previousFrameImage != null)
            {
                previousFrameImage.Dispose();
            }

            // Convert Frame to Grayscale
            CvInvoke.CvtColor(frame, frame_gray, Emgu.CV.CvEnum.ColorConversion.Bgra2Gray, 1);

            // Improved Adaptive Background Mixture Model for Real-time Tracking
            mog.Apply(frame_gray, mask);
            //mog2.Apply(frame_gray, mask);

            var previousMaskImage = pictureBoxDiffrence.Image;
            pictureBoxDiffrence.Image = new Bitmap(mask.Bitmap);
            if (previousMaskImage != null)
            {
                previousMaskImage.Dispose();
            }

            // Post Processing
            postProcessing();

            // Crop to ROI
            CropROI();

            // Track
            TrackinROI();

            // Check End of Video
            if (FrameCount == TotalFrames)
            {
                MessageBox.Show("End of video", "", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
                _capture.Dispose();
                // Change UI
                buttonNext.Invoke((MethodInvoker) delegate {
                    buttonNext.Enabled          = true;
                    buttonPrevious.Enabled      = true;
                    button_select_video.Enabled = true;
                });
            }
        }