/// <summary>
/// Opens the RTSP camera over FFMPEG/UDP, initialises the blob-tracking
/// pipeline and hooks frame processing onto the application idle loop.
/// </summary>
void Run()
{
    try
    {
        // Touching BuildInformation forces the Emgu CV native binaries to load
        // before the capture is opened.
        var buildInfo = CvInvoke.BuildInformation;

        // Tell the FFMPEG backend to use UDP transport for the RTSP stream.
        _putenv_s("OPENCV_FFMPEG_CAPTURE_OPTIONS", "rtsp_transport;udp");

        cameraCapture = new VideoCapture("rtsp://*****:*****@192.168.5.49:554/onvif1", VideoCapture.API.Ffmpeg);
    }
    catch (Exception e)
    {
        // Any failure opening the stream is surfaced to the user and aborts setup.
        MessageBox.Show(e.Message);
        return;
    }

    fgDetector = new BackgroundSubtractorMOG2();
    blobDetector = new CvBlobDetector();
    tracker = new CvTracks();
    Application.Idle += ProcessFrame;
}
/// <summary>
/// Starts motion detection: acquires a capture source, reads the minimum
/// motion area from the UI (defaulting to 50 with a warning), builds the
/// background subtractor and kicks off detection.
/// </summary>
private void detectMotionButton_Click(object sender, EventArgs e)
{
    try
    {
        isStopped = false;

        VideoCapture videoCapture = CreateVideoCapture();
        if (videoCapture == null)
        {
            ShowError("Error", "Cannot acquire video!");
            return;
        }

        // Fall back to 50 when the textbox does not hold a valid integer.
        if (int.TryParse(minAreaTxBx.Text, out int parsedMinMotionArea))
        {
            minMotionArea = parsedMinMotionArea;
        }
        else
        {
            minMotionArea = 50;
            ShowWarning("Warning", "Min motion area to detect not specified, default (50) will be used.");
        }

        backgroundSubtractor = CreateBackgroundSubtractor();
        MotionDetection(videoCapture);
    }
    catch (Exception ex)
    {
        ShowError("Error", ex.Message);
    }
}
/// <summary>
/// Wires up the processing-stream collaborators; every dependency is
/// mandatory and a null argument raises <see cref="ArgumentNullException"/>.
/// </summary>
public Streams([NotNull] ICamera camera, [NotNull] IDenoiser denoiser, [NotNull] IBackgroundSubtractor subtractor, [NotNull] ICorrector corrector, [NotNull] IPeopleDetector detector)
{
    // Validate everything up front so a half-constructed instance never escapes.
    if (camera == null) { throw new ArgumentNullException(nameof(camera)); }
    if (denoiser == null) { throw new ArgumentNullException(nameof(denoiser)); }
    if (subtractor == null) { throw new ArgumentNullException(nameof(subtractor)); }
    if (corrector == null) { throw new ArgumentNullException(nameof(corrector)); }
    if (detector == null) { throw new ArgumentNullException(nameof(detector)); }

    Camera = camera;
    Denoiser = denoiser;
    Subtractor = subtractor;
    Corrector = corrector;
    Detector = detector;
}
/// <summary>
/// Playback frame handler: segments the foreground with MOG2, finds external
/// contours, highlights blobs whose size/position fall inside a hard-coded
/// region of interest, and stops the capture a few frames before the end.
/// </summary>
/// <remarks>
/// Fix: the <c>contours</c> field was overwritten with a fresh
/// <c>VectorOfVectorOfPoint</c> every frame without disposing the previous
/// instance, leaking native memory; it is now disposed before reassignment.
/// </remarks>
private void ProcessFrame(object sender, EventArgs arg)
{
    _capture.Retrieve(frame);
    if (frame != null)
    {
        // Normalise the working resolution so the pixel thresholds below are stable.
        CvInvoke.Resize(frame, frame, new Size(640, 480), 0, 0, Inter.Linear);

        // Lazily create the subtractor on the first processed frame.
        if (_forgroundDetector == null)
        {
            _forgroundDetector = new BackgroundSubtractorMOG2();
        }
        _forgroundDetector.Apply(frame, _forgroundMask);

        // VectorOfVectorOfPoint wraps native memory: dispose the previous
        // frame's instance before allocating a new one to avoid a per-frame leak.
        if (contours != null)
        {
            contours.Dispose();
        }
        contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
        CvInvoke.FindContours(_forgroundMask, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

        for (int i = 0; i < contours.Size; i++)
        {
            double area = CvInvoke.ContourArea(contours[i]);
            Rectangle rect = CvInvoke.BoundingRectangle(contours[i]);

            // Gate tuned for the 640x480 frame: only mid-sized blobs in the
            // lower-central band of the image are boxed.
            if (area > 300 && rect.Y > 200 && area < 3500 && rect.X > 100 && rect.X < 400)
            {
                CvInvoke.Rectangle(frame, rect, new MCvScalar(0, 255, 0), 6);
            }
        }

        imageBox1.Image = frame;
        curFrameCount++;

        // Stop a few frames early to avoid reading past the end of the stream.
        if (curFrameCount >= TotalFrame - 10)
        {
            updateButton2Text("Load Video");
            _capture.Stop();
            _capture.Dispose();
            _captureInProgress = false;
            _capture = null;
        }
        updateCurFrameCount("Current Frame # " + curFrameCount);
    }
    else
    {
        // Null frame: pause, detach this handler and tell the user.
        _capture.Pause();
        _capture.ImageGrabbed -= ProcessFrame;
        MessageBox.Show("null frame");
    }
}
/// <summary>
/// Opens the default camera, builds the MOG2 foreground detector and starts
/// per-frame processing on the application idle loop.
/// </summary>
void Run()
{
    try
    {
        _cameraCapture = new VideoCapture();
    }
    catch (Exception e)
    {
        // No camera available: report and abort initialisation.
        MessageBox.Show(e.Message);
        return;
    }

    _fgDetector = new BackgroundSubtractorMOG2();
    Application.Idle += ProcessFrame;
}
/// <summary>
/// Fetches an auth token, opens the default camera and initialises the
/// blob-tracking pipeline, then hooks frame processing onto the idle loop.
/// </summary>
void Run()
{
    TokenStr = Token.GetTotek();

    try
    {
        _cameraCapture = new VideoCapture();
    }
    catch (Exception e)
    {
        // No camera available: report and abort initialisation.
        MessageBox.Show(e.Message);
        return;
    }

    _fgDetector = new BackgroundSubtractorMOG2();
    _blobDetector = new CvBlobDetector();
    _tracker = new CvTracks();
    Application.Idle += ProcessFrame;
}
// Loads this camera's configuration row from the CAMERA table and initialises
// the motion-detection / people-detection machinery.
public cameraSetup()
{
    DataTable dt = null;

    // Two-frames-difference detector with highlighted motion areas.
    motionDetector = new MotionDetector(new TwoFramesDifferenceDetector(), new MotionAreaHighlighting());

    // Pick the configuration row for the camera this instance represents.
    // NOTE(review): isSIDECAM is read here and then possibly overwritten from
    // column 6 below - confirm its initial value is set before this ctor runs.
    if (isSIDECAM) { dt = taCAMERA.getByName("SIDE"); }
    else { dt = taCAMERA.getByName("ATM"); }

    // Columns 2..6 hold textual flags; anything other than the literal string
    // "False" (including empty or garbage text) enables the feature.
    // NOTE(review): dt.Rows[0] throws if the lookup returned no rows - this
    // assumes the table always contains a matching row; TODO confirm.
    if (dt.Rows[0][2].ToString() != "False") { EqualizeHist = true; }
    if (dt.Rows[0][3].ToString() != "False") { GammaCorrect = true; }
    if (dt.Rows[0][4].ToString() != "False") { bodyDetection = true; }
    if (dt.Rows[0][5].ToString() != "False") { detectMotionAnoramlities = true; }
    if (dt.Rows[0][6].ToString() != "False") { isSIDECAM = true; }

    // Numeric settings; TryParse results are deliberately ignored, so on a
    // parse failure the out argument is set to 0.
    int.TryParse(dt.Rows[0][7].ToString(), out freezDelay);
    int tmp = 0;
    int.TryParse(dt.Rows[0][8].ToString(), out tmp);
    // Column 8 is stored as a whole-number percentage; convert to a ratio.
    motionRatio = tmp / 100f;

    fgDetector = new BackgroundSubtractorMOG2();
    // Default HOG-based people detector for body detection.
    descriptor.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
}
/// <summary>
/// Computes a background image from the subtractor's current model.
/// </summary>
/// <param name="subtractor">The background subtractor whose model is queried</param>
/// <param name="backgroundImage">The output background image</param>
/// <remarks>
/// Sometimes the background image can be very blurry, as it contains the
/// average background statistics.
/// </remarks>
public static void GetBackgroundImage(this IBackgroundSubtractor subtractor, IOutputArray backgroundImage)
{
    using (OutputArray oaBackgroundImage = backgroundImage.GetOutputArray())
        CvInvoke.cveBackgroundSubtractorGetBackgroundImage(subtractor.BackgroundSubtractorPtr, oaBackgroundImage);
}
/// <summary>
/// Updates the background model with a new frame and computes the
/// foreground mask.
/// </summary>
/// <param name="subtractor">The background subtractor</param>
/// <param name="image">The image that is used to update the background model</param>
/// <param name="fgMask">The output foreground mask</param>
/// <param name="learningRate">How fast the model adapts; use -1 for the automatic default</param>
public static void Apply(this IBackgroundSubtractor subtractor, IInputArray image, IOutputArray fgMask, double learningRate = -1)
{
    using (InputArray iaImage = image.GetInputArray())
    using (OutputArray oaFgMask = fgMask.GetOutputArray())
        CvInvoke.cveBackgroundSubtractorUpdate(subtractor.BackgroundSubtractorPtr, iaImage, oaFgMask, learningRate);
}
/// <summary>
/// Idle-loop frame handler: updates the MOG2 foreground mask and the motion
/// history, renders a motion-intensity image (blue) with red per-component
/// and green overall motion arrows, then refreshes the three image boxes.
/// </summary>
private void ProcessFrame(object sender, EventArgs e)
{
    Mat image = new Mat();
    _capture.Retrieve(image);

    // Lazily create the subtractor on the first processed frame.
    if (_forgroundDetector == null)
    {
        _forgroundDetector = new BackgroundSubtractorMOG2();
    }
    _forgroundDetector.Apply(image, _forgroundMask);

    //update the motion history
    _motionHistory.Update(_forgroundMask);

    #region get a copy of the motion mask and enhance its color
    double[] minValues, maxValues;
    Point[] minLoc, maxLoc;
    _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
    Mat motionMask = new Mat();
    // Scale so the brightest motion pixel maps to 255 for display.
    using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
        CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
    #endregion

    //create the motion image
    Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
    motionImage.SetTo(new MCvScalar(0));

    //display the motion pixels in blue (first channel)
    CvInvoke.InsertChannel(motionMask, motionImage, 0);

    //Threshold to define a motion area, reduce the value to detect smaller motion
    double minArea = 100;

    Rectangle[] rects;
    using (VectorOfRect boundingRect = new VectorOfRect())
    {
        _motionHistory.GetMotionComponents(_segMask, boundingRect);
        rects = boundingRect.ToArray();
    }

    //iterate through each of the motion component
    foreach (Rectangle comp in rects)
    {
        int area = comp.Width * comp.Height;
        //reject the components that have small area;
        if (area < minArea) { continue; }

        // find the angle and motion pixel count of the specific area
        double angle, motionPixelCount;
        _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

        //reject the area that contains too few motion (under 5% of the box)
        if (motionPixelCount < area * 0.05) { continue; }

        //Draw each individual motion in red
        DrawMotion(motionImage, comp, angle, new Bgr(System.Drawing.Color.Red));
    }

    // find and draw the overall motion angle over the whole frame (green)
    double overallAngle, overallMotionPixelCount;
    _motionHistory.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
    DrawMotion(motionImage, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(System.Drawing.Color.Green));

    // Bail out if the form is shutting down before touching UI controls.
    if (this.Disposing || this.IsDisposed) { return; }

    capturedImageBox.Image = image;
    forgroundImageBox.Image = _forgroundMask;

    //Display the amount of motions found on the current image
    UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", rects.Length, overallMotionPixelCount));

    //Display the image of the motion
    motionImageBox.Image = motionImage;
}
/// <summary>
/// Frame handler for camera two: updates the MOG2 foreground mask and the
/// motion history, builds a motion visualisation image, and publishes a
/// scaled motion count through MotionCountTwo.
/// </summary>
public void ProcessFrameTwo(object sender, EventArgs e)
{
    // Lazily create the subtractor on the first processed frame.
    if (_forgroundDetectorTwo == null)
    {
        _forgroundDetectorTwo = new BackgroundSubtractorMOG2();
    }
    _captureTwo.Retrieve(imageTwo);
    _forgroundDetectorTwo.Apply(imageTwo, _forgroundMaskTwo);
    _motionHistoryTwo.Update(_forgroundMaskTwo, DateTime.Now);

    #region get a copy of the motion mask and enhance its color
    double[] minValues_2, maxValues_2;
    Point[] minLoc_2, maxLoc_2;
    _motionHistoryTwo.Mask.MinMax(out minValues_2, out maxValues_2, out minLoc_2, out maxLoc_2);;
    Mat motionMask_2 = new Mat();
    // Scale so the brightest motion pixel maps to 255 for display.
    using (ScalarArray sa2 = new ScalarArray(255.0 / maxValues_2[0]))
        CvInvoke.Multiply(_motionHistoryTwo.Mask, sa2, motionMask_2, 1, DepthType.Cv8U);
    #endregion

    Mat motionImageTwo = new Mat(motionMask_2.Size.Height, motionMask_2.Size.Width, DepthType.Cv8U, 3);
    MotionImageTwo = motionImageTwo;
    motionImageTwo.SetTo(new MCvScalar(0));
    // Show motion intensity in the blue (first) channel.
    CvInvoke.InsertChannel(motionMask_2, motionImageTwo, 0);

    //Threshold to define a motion area, reduce the value to detect smaller motion
    double minArea_2 = 5000;

    Rectangle[] rects_2;
    using (VectorOfRect boundingRect_2 = new VectorOfRect())
    {
        _motionHistoryTwo.GetMotionComponents(_segMaskTwo, boundingRect_2);
        rects_2 = boundingRect_2.ToArray();
    }

    foreach (Rectangle comp_2 in rects_2)
    {
        int area_2 = comp_2.Width * comp_2.Height;
        //reject the components that have small area;
        if (area_2 < minArea_2) { continue; }

        // find the angle and motion pixel count of the specific area
        double angle_2, motionPixelCount_2;
        _motionHistoryTwo.MotionInfo(_forgroundMaskTwo, comp_2, out angle_2, out motionPixelCount_2);

        //reject the area that contains too few motion
        if (motionPixelCount_2 < area_2 * 0.0005) { continue; }

        //Draw each individual motion in red
        DrawMotion(motionImageTwo, comp_2, angle_2, new Bgr(Color.Red));
    }

    // Overall motion over the whole frame, drawn in green.
    double overallAngle_2, overallMotionPixelCount_2;
    _motionHistoryTwo.MotionInfo(_forgroundMaskTwo, new Rectangle(Point.Empty, motionMask_2.Size), out overallAngle_2, out overallMotionPixelCount_2);
    DrawMotion(motionImageTwo, new Rectangle(Point.Empty, motionMask_2.Size), overallAngle_2, new Bgr(Color.Green));

    // Bail out if the form is shutting down before touching UI controls.
    if (this.Disposing || this.IsDisposed) { return; }

    // Checkbox toggles between the motion visualisation and the raw feed.
    if (cameraTwoCheckBox.Checked == true)
    {
        cameraTwoImageBox.Image = MotionImageTwo;
    }
    else
    {
        cameraTwoImageBox.Image = imageTwo;
    }

    //Display the amount of motions found on the current image
    // NOTE(review): (count * 0.021) * 1.5 is an empirical scaling of the raw
    // component count - the meaning of these constants is not documented here.
    UpdateTextTwo(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", ((rects_2.Length * 0.021) * 1.5), overallMotionPixelCount_2));
    MotionCountTwo = (rects_2.Length * 0.021) * 1.5;
}
//++++++++++++++ Camera Feed Processing / Motion Detection ++++++++++++++//
/// <summary>
/// Frame handler for camera one: updates the MOG2 foreground mask and the
/// motion history, builds a motion visualisation image, and publishes a
/// scaled motion count through MotionCountOne.
/// </summary>
public void ProcessFrame(object sender, EventArgs e)
{
    //Separate Background and Foreground
    if (_forgroundDetectorOne == null)
    {
        _forgroundDetectorOne = new BackgroundSubtractorMOG2();
    }
    _captureOne.Retrieve(imageOne); //Apply camera one feed to object
    _forgroundDetectorOne.Apply(imageOne, _forgroundMask); //detect foreground (object in motion)
    _motionHistoryOne.Update(_forgroundMask, DateTime.Now); //update motion history

    #region get a copy of the motion mask and enhance its color
    double[] minValues, maxValues; //determines minimum rectangle size for motion sectors
    Point[] minLoc, maxLoc;
    _motionHistoryOne.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
    Mat motionMask = new Mat();
    // Scale so the brightest motion pixel maps to 255 for display.
    using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
        CvInvoke.Multiply(_motionHistoryOne.Mask, sa, motionMask, 1, DepthType.Cv8U);
    #endregion

    //create the motion image
    Mat motionImageOne = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
    MotionImageOne = motionImageOne;
    motionImageOne.SetTo(new MCvScalar(0));

    //display the motion pixels in blue (first channel)
    CvInvoke.InsertChannel(motionMask, motionImageOne, 0);

    //Threshold to define a motion area, reduce the value to detect smaller motion
    double minArea = 5000; //Lower = More Sensitive Motion Detection

    Rectangle[] rects;
    using (VectorOfRect boundingRect = new VectorOfRect())
    {
        _motionHistoryOne.GetMotionComponents(_segMask, boundingRect);
        rects = boundingRect.ToArray();
    }

    //iterate through each of the motion component
    foreach (Rectangle comp in rects)
    {
        int area = comp.Width * comp.Height;
        //reject the components that have small area;
        if (area < minArea) { continue; }

        // find the angle and motion pixel count of the specific area
        double angle, motionPixelCount;
        _motionHistoryOne.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

        //reject the area that contains too few motion
        if (motionPixelCount < area * 0.0005)
        {
            continue; //originally (motionPixelCount < area * 0.05)
        }

        //Draw each individual motion in red
        DrawMotion(motionImageOne, comp, angle, new Bgr(Color.Red));
    }

    // find and draw the overall motion angle over the whole frame (green)
    double overallAngle, overallMotionPixelCount;
    _motionHistoryOne.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
    DrawMotion(motionImageOne, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));

    // Checkbox toggles between the motion visualisation and the raw feed.
    if (cameraOneCheckBox.Checked == true)
    {
        cameraOneImageBox.Image = MotionImageOne;
    }
    else
    {
        cameraOneImageBox.Image = imageOne;
    }

    //Display the amount of motions found on the current image
    // NOTE(review): (count * 0.021) * 1.5 is an empirical scaling of the raw
    // component count - the meaning of these constants is not documented here.
    UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", ((rects.Length * 0.021) * 1.5), overallMotionPixelCount));
    MotionCountOne = (rects.Length * 0.021) * 1.5;
}
/// <summary>
/// Throttled frame handler: every <c>allfpsc</c> frames it updates the motion
/// history, counts motion components falling inside <c>Zone</c>, measures the
/// zone's overall motion, and optionally runs pedestrian detection (CUDA when
/// available). The OnMD callback is raised on every frame, with zeroed values
/// until <c>skipfpsc</c> frames have elapsed.
/// </summary>
/// <remarks>
/// Fixes: replaced the non-short-circuit bitwise AND on booleans with the
/// idiomatic logical AND (identical truth table), and removed an unused
/// Image&lt;Bgr, Byte&gt; conversion that allocated a full frame copy on
/// every processed frame.
/// </remarks>
private void ProcessFrame(object sender, EventArgs e)
{
    List<Rectangle> rt = new List<Rectangle>();
    double overallAngle = default(double), overallMotionPixelCount = default(double);
    Mat image = new Mat();
    _capture.Retrieve(image);

    // Only run the expensive analysis every allfpsc frames.
    if (fpsc++ >= allfpsc)
    {
        fpsc = 0;
        if (_forgroundDetector == null)
        {
            _forgroundDetector = new BackgroundSubtractorMOG2();
        }
        _forgroundDetector.Apply(image, _forgroundMask);

        //update the motion history
        _motionHistory.Update(_forgroundMask);

        #region get a copy of the motion mask and enhance its color
        double[] minValues, maxValues;
        Point[] minLoc, maxLoc;
        _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
        Mat motionMask = new Mat();
        // Scale so the brightest motion pixel maps to 255.
        using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
            CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
        #endregion

        //create the motion image
        Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
        motionImage.SetTo(new MCvScalar(0));
        CvInvoke.InsertChannel(motionMask, motionImage, 0);

        //Threshold to define a motion area, reduce the value to detect smaller motion
        double minArea = 100;

        Rectangle[] rects;
        using (VectorOfRect boundingRect = new VectorOfRect())
        {
            _motionHistory.GetMotionComponents(_segMask, boundingRect);
            rects = boundingRect.ToArray();
        }

        //iterate through each of the motion component
        foreach (Rectangle comp in rects)
        {
            // Components fully inside the user-defined zone count toward the result.
            if (Zone.Contains(comp)) { rt.Add(comp); }

            int area = comp.Width * comp.Height;
            //reject the components that have small area;
            if (area < minArea) { continue; }

            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

            //reject the area that contains too few motion
            if (motionPixelCount < area * 0.05) { continue; }
        }

        // Fall back to a small default zone when the configured one is degenerate.
        if (Zone.Width <= 0 && Zone.Height <= 0 && Zone.X < 0 && Zone.Y < 0)
        {
            Zone = new Rectangle(0, 0, 10, 10);
        }
        _motionHistory.MotionInfo(_forgroundMask, Zone, out overallAngle, out overallMotionPixelCount);

        if (DetectPed)
        {
            long processingTime;
            Rectangle[] results;
            // Prefer the CUDA path when a GPU is available.
            if (CudaInvoke.HasCuda)
            {
                using (GpuMat gpuMat = new GpuMat(image))
                    results = FindPedestrian.Find(gpuMat, out processingTime);
            }
            else
            {
                using (UMat uImage = image.GetUMat(AccessType.ReadWrite))
                    results = FindPedestrian.Find(uImage, out processingTime);
            }
            foreach (Rectangle rect in results)
            {
                CvInvoke.Rectangle(image, rect, new Bgr(Color.Red).MCvScalar);
            }
        }
    }

    // Suppress the payload until skipfpsc frames have elapsed.
    if (maxfpsc++ > skipfpsc)
    {
        OnMD?.Invoke((long)overallMotionPixelCount, image, rt.Count);
    }
    else
    {
        OnMD?.Invoke(default(long), image, default(int));
    }
}