/// <summary>
/// Frame-capture handler: grabs the current frame, runs motion detection,
/// and annotates every sufficiently large motion region with its angle.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event args (unused).</param>
void frame(object sender, EventArgs e)
{
    double sum_pix = 0;
    double angle = 0;
    // Fix: dispose the per-frame native containers instead of leaking them
    // on every tick (VectorOfRect and Mat wrap unmanaged memory).
    using (VectorOfRect rects = new VectorOfRect())
    using (Mat _segMask = new Mat())
    {
        _capture.Retrieve(scr);                               // grab the current frame
        // CvInvoke.Resize(scr, scr, new Size(320, 480));
        _motionDetect.Apply(scr, mask);                       // foreground / motion mask
        CvInvoke.MedianBlur(mask, mask, 5);                   // median filter to suppress speckle noise
        _motionhistory.Update(mask);                          // update the motion history image
        _motionhistory.GetMotionComponents(_segMask, rects);  // segmentation mask + motion rectangles

        // Fix: materialize the rectangles once. The original called
        // rects.ToArray() in the loop condition, copying the whole native
        // vector on every iteration.
        Rectangle[] motionRects = rects.ToArray();
        foreach (Rectangle rect in motionRects)
        {
            // Skip small rectangles (area-based threshold, unchanged at 1000).
            if (rect.Width * rect.Height <= 1000)
            {
                continue;
            }
            // Outline the motion region in red on the source frame.
            CvInvoke.Rectangle(scr, rect, new MCvScalar(0, 0, 255));
            // Angle and motion-pixel count for this region.
            // NOTE(review): sibling implementations in this file pass the
            // foreground mask here, not the segmentation mask — confirm
            // _segMask is intended.
            _motionhistory.MotionInfo(_segMask, rect, out angle, out sum_pix);
            // Label the region with its motion angle in green.
            CvInvoke.PutText(scr, "angle : " + (int)angle, rect.Location, Emgu.CV.CvEnum.FontFace.HersheyComplex, 0.5, new MCvScalar(0, 255, 0));
        }
    }
    imageBox1.Image = drar_rect(scr); // annotated frame
    imageBox2.Image = mask;           // raw motion-detection output
}
/// <summary>
/// Queries the motion history for the current motion components and returns
/// their bounding rectangles.
/// </summary>
/// <returns>Bounding rectangles of the detected motion areas.</returns>
private Rectangle[] GetMotionAreas()
{
    using (VectorOfRect boundingRect = new VectorOfRect())
    {
        // Fills _segMask with the segmentation and boundingRect with the areas.
        _motionHistory.GetMotionComponents(_segMask, boundingRect);
        return boundingRect.ToArray();
    }
}
/// <summary>
/// Draws bounding rectangles around objects in motion and records them.
/// </summary>
/// <param name="annotatedImage">Image that receives the bounding rectangles.</param>
/// <param name="data">Collector for the raw bounding-rectangle data.</param>
private void DrawMotion(ref Image <Bgr, byte> annotatedImage, ref List <object> data)
{
    using (var componentRects = new VectorOfRect())
    {
        // Get the motion components (and their bounding rectangles).
        _motionHistory.GetMotionComponents(_segMask, componentRects);

        foreach (var rect in componentRects.ToArray())
        {
            // Skip rectangles below the minimum-area threshold.
            if (rect.Width * rect.Height < _minArea)
            {
                continue;
            }

            // Draw the rectangle and record its raw data.
            annotatedImage.Draw(rect, new Bgr(_annoColor.Color()), _lineThick);
            data.Add(new Box(rect));
        }
    }
}
/// <summary>
/// Grabs the next Kinect color frame, runs background subtraction and the
/// motion-history update, then starts recording / shows the preview when any
/// motion component exceeds the minimum area, or stops both when none does.
/// </summary>
private void Pulse()
{
    using (ColorImageFrame imageFrame = _kinectSensor.ColorStream.OpenNextFrame(200))
    {
        // No frame arrived within the 200 ms timeout — nothing to do.
        if (imageFrame == null)
        {
            return;
        }
        using (Image <Bgr, byte> image = imageFrame.ToOpenCVImage <Bgr, byte>())
        using (MemStorage storage = new MemStorage()) //create storage for motion components
        {
            // Lazily create the background model from the first frame seen.
            if (_forgroundDetector == null)
            {
                _forgroundDetector = new BGStatModel <Bgr>(image
                                                           , Emgu.CV.CvEnum.BG_STAT_TYPE.GAUSSIAN_BG_MODEL);
            }
            _forgroundDetector.Update(image);

            //update the motion history
            _motionHistory.Update(_forgroundDetector.ForgroundMask);

            //get a copy of the motion mask and enhance its color
            double[] minValues, maxValues;
            System.Drawing.Point[] minLoc, maxLoc;
            _motionHistory.Mask.MinMax(out minValues, out maxValues
                                       , out minLoc, out maxLoc);
            // Rescale so the brightest motion pixel maps to 255.
            // NOTE(review): 255.0 / maxValues[0] divides by zero when the mask
            // is entirely motion-free — confirm upstream guarantees otherwise.
            Image <Gray, Byte> motionMask = _motionHistory.Mask
                                            .Mul(255.0 / maxValues[0]);

            //create the motion image
            // NOTE(review): motionImage is built but never displayed or
            // disposed in this method — looks like leftover visualization code.
            Image <Bgr, Byte> motionImage = new Image <Bgr, byte>(motionMask.Size);
            motionImage[0] = motionMask;

            //Threshold to define a motion area
            //reduce the value to detect smaller motion
            double minArea = 100;

            storage.Clear(); //clear the storage
            Seq <MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);
            bool isMotionDetected = false;
            //iterate through the motion components until one is big enough
            for (int c = 0; c < motionComponents.Count(); c++)
            {
                MCvConnectedComp comp = motionComponents[c];
                //reject the components that have small area;
                if (comp.area < minArea)
                {
                    continue;
                }
                OnDetection();
                isMotionDetected = true;
                break; // one qualifying component is enough
            }
            if (isMotionDetected == false)
            {
                // Motion stopped: notify, clear the preview, stop recording.
                OnDetectionStopped();
                this.Dispatcher.Invoke(new Action(() => rgbImage.Source = null));
                StopRecording();
                return;
            }

            // Motion present: show the frame on the UI thread and record it.
            this.Dispatcher.Invoke(
                new Action(() => rgbImage.Source = imageFrame.ToBitmapSource())
                );

            Record(imageFrame);
        }
    }
}
/// <summary>
/// Processes one captured frame: (re)builds the MOG2 background subtractor
/// when its configuration changed, updates the motion history, and collects
/// per-region plus overall motion statistics into the output.
/// </summary>
/// <param name="input">Captured frame plus detection settings.</param>
/// <returns>Motion sections, optional debug images, and overall motion info.</returns>
protected override MotionDetectorOutput DoProcess(MotionDetectorInput input)
{
    var output = new MotionDetectorOutput();

    var subtractorConfig = input.Settings.SubtractorConfig;

    // Recreate the subtractor on first use or whenever its settings changed.
    if (_foregroundDetector == null || !_currentSubtractorConfig.Equals(subtractorConfig))
    {
        if (_foregroundDetector != null)
        {
            _foregroundDetector.Dispose();
        }

        _foregroundDetector = new BackgroundSubtractorMOG2(
            subtractorConfig.History
            , subtractorConfig.Threshold
            , subtractorConfig.ShadowDetection);

        _currentSubtractorConfig = subtractorConfig;
    }

    _foregroundDetector.Apply(input.Captured, _forgroundMask);

    _motionHistory.Update(_forgroundMask);

    #region get a copy of the motion mask and enhance its color

    double[] minValues, maxValues;
    Point[] minLoc, maxLoc;
    _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);

    // Rescale so the brightest history pixel maps to 255.
    // NOTE(review): 255.0 / maxValues[0] divides by zero on a motion-free mask.
    var motionMask = new Mat();

    using (var sa = new ScalarArray(255.0 / maxValues[0]))
    {
        CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
    }

    #endregion

    if (input.SetCapturedImage)
    {
        output.ForegroundImage = _forgroundMask.ToImage <Bgr, byte>();
        output.MotionImage = new Image <Bgr, byte>(motionMask.Size);
        // Show the motion mask in the blue (first) channel of the output image.
        CvInvoke.InsertChannel(motionMask, output.MotionImage, 0);
    }

    // Bounding rectangles of the current motion components.
    Rectangle[] motionComponents;
    using (var boundingRect = new VectorOfRect())
    {
        _motionHistory.GetMotionComponents(_segMask, boundingRect);
        motionComponents = boundingRect.ToArray();
    }

    foreach (Rectangle motionComponent in motionComponents)
    {
        int area = motionComponent.Area();

        //reject the components whose area falls outside the configured bounds
        if (area < input.Settings.MinimumArea || area > input.Settings.MaximumArea)
        {
            continue;
        }

        // find the angle and motion pixel count of the specific area
        double angle, motionPixelCountDouble;
        _motionHistory.MotionInfo(_forgroundMask, motionComponent, out angle, out motionPixelCountDouble);

        int motionPixelCount = (int)motionPixelCountDouble;

        //reject the area that contains too few motion pixels
        if (motionPixelCount < area * input.Settings.MinimumPercentMotionInArea)
        {
            continue;
        }

        var motionSection = new MotionSection();
        motionSection.Area = area;
        motionSection.Region = motionComponent;
        motionSection.Angle = angle;
        motionSection.PixelsInMotionCount = motionPixelCount;

        output.MotionSections.Add(motionSection);
    }

    if (output.IsDetected)
    {
        // Sort descending so the "biggest" motion ends up first.
        switch (input.Settings.BiggestMotionType)
        {
        case BiggestMotionType.Unspecified:
            break;

        case BiggestMotionType.Area:
            output.MotionSections.Sort((x, y) => y.Area.CompareTo(x.Area));
            break;

        case BiggestMotionType.Pixels:
            output.MotionSections.Sort((x, y) => y.PixelsInMotionCount.CompareTo(x.PixelsInMotionCount));
            break;
        }
        output.BiggestMotion = output.MotionSections.FirstOrDefault();
    }

    // Overall motion statistics across the full frame.
    double overallAngle, overallMotionPixelCount;
    _motionHistory.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);

    output.OverallAngle = overallAngle;
    output.OverallMotionPixelCount = Convert.ToInt32(overallMotionPixelCount);

    return(output);
}
/// <summary>
/// Per-frame handler: runs MOG2 background subtraction, updates the motion
/// history, draws each qualifying motion region plus the overall motion on a
/// visualization image, and pushes all images to the UI.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event args (unused).</param>
private void ProcessFrame(object sender, EventArgs e)
{
    // NOTE(review): image, motionMask and motionImage are fresh Mats every
    // frame and are never disposed here — they rely on finalization.
    Mat image = new Mat();

    _capture.Retrieve(image);
    // Lazily create the background subtractor on the first frame.
    if (_forgroundDetector == null)
    {
        _forgroundDetector = new BackgroundSubtractorMOG2();
    }

    _forgroundDetector.Apply(image, _forgroundMask);

    //update the motion history
    _motionHistory.Update(_forgroundMask);

    #region get a copy of the motion mask and enhance its color

    double[] minValues, maxValues;
    Point[] minLoc, maxLoc;
    _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
    // Rescale so the brightest history pixel maps to 255.
    // NOTE(review): 255.0 / maxValues[0] divides by zero on a motion-free mask.
    Mat motionMask = new Mat();
    using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
        CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
    //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
    #endregion

    //create the motion image
    // NOTE(review): this Mat is not zero-initialized (no SetTo) before
    // InsertChannel, so the green/red channels may hold garbage — confirm.
    Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
    //display the motion pixels in blue (first channel)
    //motionImage[0] = motionMask;
    CvInvoke.InsertChannel(motionMask, motionImage, 0);

    //Threshold to define a motion area, reduce the value to detect smaller motion
    double minArea = 100;

    //storage.Clear(); //clear the storage
    Rectangle[] rects;

    using (VectorOfRect boundingRect = new VectorOfRect())
    {
        _motionHistory.GetMotionComponents(_segMask, boundingRect);
        rects = boundingRect.ToArray();
    }

    //iterate through each of the motion component
    foreach (Rectangle comp in rects)
    {
        int area = comp.Width * comp.Height;
        //reject the components that have small area;
        if (area < minArea)
        {
            continue;
        }

        // find the angle and motion pixel count of the specific area
        double angle, motionPixelCount;
        _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

        //reject the area that contains too few motion (less than 5 % of the area)
        if (motionPixelCount < area * 0.05)
        {
            continue;
        }

        //Draw each individual motion in red
        DrawMotion(motionImage, comp, angle, new Bgr(Color.Red));
    }

    // find and draw the overall motion angle
    double overallAngle, overallMotionPixelCount;

    _motionHistory.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
    DrawMotion(motionImage, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));

    // Skip the UI update if the form is tearing down.
    if (this.Disposing || this.IsDisposed)
    {
        return;
    }

    capturedImageBox.Image = image;
    forgroundImageBox.Image = _forgroundMask;

    //Display the amount of motions found on the current image
    UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", rects.Length, overallMotionPixelCount));

    //Display the image of the motion
    motionImageBox.Image = motionImage;
}
/// <summary>
/// Per-frame handler (legacy Emgu 2.x API): runs the FGD foreground detector,
/// updates the motion history, draws each qualifying motion component and the
/// overall motion, and pushes the images and a summary to the UI.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event args (unused).</param>
private void ProcessFrame(object sender, EventArgs e)
{
    using (Image <Bgr, Byte> image = _capture.RetrieveBgrFrame())
    using (MemStorage storage = new MemStorage()) //create storage for motion components
    {
        // Lazily create the foreground detector on the first frame.
        if (_forgroundDetector == null)
        {
            //_forgroundDetector = new BGCodeBookModel<Bgr>();
            _forgroundDetector = new FGDetector <Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
            //_forgroundDetector = new BGStatModel<Bgr>(image, Emgu.CV.CvEnum.BG_STAT_TYPE.FGD_STAT_MODEL);
        }

        _forgroundDetector.Update(image);

        capturedImageBox.Image = image;

        //update the motion history
        _motionHistory.Update(_forgroundDetector.ForegroundMask);
        forgroundImageBox.Image = _forgroundDetector.ForegroundMask;

        #region get a copy of the motion mask and enhance its color
        double[] minValues, maxValues;
        Point[] minLoc, maxLoc;
        _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
        // Rescale so the brightest history pixel maps to 255.
        // NOTE(review): 255.0 / maxValues[0] divides by zero on a motion-free mask.
        Image <Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
        #endregion

        //create the motion image
        Image <Bgr, Byte> motionImage = new Image <Bgr, byte>(motionMask.Size);
        //display the motion pixels in blue (first channel)
        motionImage[0] = motionMask;

        //Threshold to define a motion area, reduce the value to detect smaller motion
        double minArea = 100;

        storage.Clear(); //clear the storage
        Seq <MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);

        //iterate through each of the motion component
        foreach (MCvConnectedComp comp in motionComponents)
        {
            //reject the components that have small area;
            if (comp.area < minArea)
            {
                continue;
            }

            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);

            //reject the area that contains too few motion (less than 5 % of the area)
            if (motionPixelCount < comp.area * 0.05)
            {
                continue;
            }

            //Draw each individual motion in red
            DrawMotion(motionImage, comp.rect, angle, new Bgr(Color.Red));
        }

        // find and draw the overall motion angle
        double overallAngle, overallMotionPixelCount;
        _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);
        DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(Color.Green));

        //Display the amount of motions found on the current image
        UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionComponents.Total, overallMotionPixelCount));

        //Display the image of the motion
        motionImageBox.Image = motionImage;
    }
}
/// <summary>
/// Per-frame motion processing for the game: updates the background model and
/// motion history, optionally emits particles for each motion component,
/// computes an overall motion vector (MotionSum), and — when SetBitmap is
/// true — blends a motion visualization into the frame and updates the debug
/// console.
/// </summary>
/// <param name="image">Frame to analyze; replaced with the annotated frame when SetBitmap is true.</param>
/// <param name="SetBitmap">When true, draw the motion overlays and debug text.</param>
/// <param name="gameTime">Game clock; its milliseconds pick the display channel.</param>
public void ProcessFrame(ref Image <Bgr, Byte> image, bool SetBitmap, GameTime gameTime)
{
    using (MemStorage storage = new MemStorage()) //create storage for motion components
    {
        // Lazily create the background model on the first frame.
        if (_forgroundDetector == null)
        {
            //Whats the differnce between these?
            //_forgroundDetector = new BGCodeBookModel<Bgr>();
            //_forgroundDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
            //_forgroundDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD_SIMPLE);
            //_forgroundDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.MOG);
            //_forgroundDetector = new BGStatModel<Bgr>(image, Emgu.CV.CvEnum.BG_STAT_TYPE.FGD_STAT_MODEL);
            _forgroundDetector = new BGStatModel <Bgr>(image, Emgu.CV.CvEnum.BG_STAT_TYPE.GAUSSIAN_BG_MODEL);
        }

        _forgroundDetector.Update(image);

        //update the motion history
        _motionHistory.Update(_forgroundDetector.ForgroundMask);

        #region get a copy of the motion mask and enhance its color
        double[] minValues, maxValues;
        System.Drawing.Point[] minLoc, maxLoc;
        _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
        // Rescale so the brightest history pixel maps to 255.
        // NOTE(review): 255.0 / maxValues[0] divides by zero on a motion-free mask.
        Image <Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
        #endregion

        //create the motion image
        Image <Bgr, Byte> motionImage = new Image <Bgr, byte>(motionMask.Size);

        //display the motion pixels in one of the three channels,
        //cycling with the game clock for a flickering color effect
        motionImage[gameTime.TotalGameTime.Milliseconds % 3] = motionMask;

        //Threshold to define a motion area, reduce the value to detect smaller motion
        //default 100;
        double minArea = 100;

        storage.Clear(); //clear the storage
        Seq <MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);

        Vector2 partVect, partDir;
        float partRadius, partXDirection, partYDirection;

        //iterate through each of the motion component
        foreach (MCvConnectedComp comp in motionComponents)
        {
            //reject the components that have small area;
            if (comp.area < minArea)
            {
                continue;
            }

            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);

            //Motion Particles: emit a particle from this component's corner,
            //directed along the component's motion angle
            if (ParticleManager.Instance().Enabled)
            {
                partVect = new Vector2(comp.rect.X, comp.rect.Y);
                partVect = FaceController.ScaleFromVideoResolution(partVect);

                //Convert the motion angle (degrees) into a direction vector
                //scaled by a quarter of the frame's width+height
                partRadius = (motionMask.ROI.Width + motionMask.ROI.Height) >> 2;
                partXDirection = (float)(Math.Cos(angle * (Math.PI / 180.0)) * partRadius);
                partYDirection = (float)(Math.Sin(angle * (Math.PI / 180.0)) * partRadius);
                partDir = new Vector2(partXDirection, partYDirection);
                ParticleManager.Instance().ParticleSystems["motionparticles"].AddParticles(partVect, Vector2.Normalize(partDir));
            }

            //reject the area that contains too few motion (less than 5 % of the area)
            if (motionPixelCount < comp.area * 0.05)
            {
                continue;
            }

            //Draw each individual motion in red
            if (SetBitmap)
            {
                DrawMotion(motionImage, comp.rect, angle, new Bgr(System.Drawing.Color.Red));
            }
        }

        // find and draw the overall motion angle
        double overallAngle, overallMotionPixelCount;
        _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);

        //Get the overall motion and set it to a vector2
        float circleRadius = (motionMask.ROI.Width + motionMask.ROI.Height) >> 2;
        float xDirection = (float)(Math.Cos(overallAngle * (Math.PI / 180.0)) * circleRadius);
        float yDirection = (float)(Math.Sin(overallAngle * (Math.PI / 180.0)) * circleRadius);
        MotionSum = new Vector2(xDirection, yDirection);

        OverallMotionPixelCount = (int)overallMotionPixelCount;
        TotalMotionsFound = (int)motionComponents.Total;

        if (SetBitmap)
        {
            DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(System.Drawing.Color.Green));
            // Blend the motion overlay into the caller's frame.
            image = image.Add(motionImage);
            gameConsole.DebugText = String.Format("Total Motions found: {0};\n Motion Pixel count: {1}\nMotionSum:\n{2}"
                                                  , motionComponents.Total, overallMotionPixelCount, MotionSum);
            gameConsole.DebugText += String.Format("\nOverallAngle: {0};", overallAngle);
            gameConsole.DebugText += String.Format("\nMotionSumLength(): {0};", MotionSum.Length());
        }
    }
}
//motion detection processing
/// <summary>
/// Motion-detection pass over one frame: updates the background model and
/// motion history, draws optional grid lines / position labels / timed
/// red-green zone highlights, queues the nearest position for each qualifying
/// motion component, and returns the annotated frame.
/// </summary>
/// <param name="image">Frame to analyze and annotate.</param>
/// <returns>The same frame with annotations drawn on it.</returns>
private Image <Bgr, Byte> ProcessFrame(Image <Bgr, Byte> image)
{
    // using (Image<Bgr, Byte> image = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC))
    using (MemStorage storage = new MemStorage()) //create storage for motion components
    {
        // Lazily create the background model on the first frame.
        if (_forgroundDetector == null)
        {
            //_forgroundDetector = new BGCodeBookModel<Bgr>();
            // _forgroundDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
            _forgroundDetector = new BGStatModel <Bgr>(image, Emgu.CV.CvEnum.BG_STAT_TYPE.FGD_STAT_MODEL);
        }
        _forgroundDetector.Update(image);

        // imageBoxFrameGrabber.Image = image;

        //update the motion history
        _motionHistory.Update(_forgroundDetector.ForgroundMask);

        #region get a copy of the motion mask and enhance its color
        double[] minValues, maxValues;
        Point[] minLoc, maxLoc;
        _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
        // NOTE(review): 255.0 / maxValues[0] divides by zero on a motion-free mask.
        Image <Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
        #endregion

        //create the motion image
        Image <Bgr, Byte> motionImage = new Image <Bgr, byte>(motionMask.Size);
        //display the motion pixels in blue (first channel)
        motionImage[0] = motionMask;

        //Threshold to define a motion area, reduce the value to detect smaller motion
        double minArea = 100;

        storage.Clear(); //clear the storage
        Seq <MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);

        // Optional 2x2 grid overlay splitting the frame into four zones.
        if (showGridLines)
        {
            LineSegment2D line = new LineSegment2D(new Point(0, 169), new Point(520, 169));
            LineSegment2D line2 = new LineSegment2D(new Point(259, 0), new Point(259, 340));
            image.Draw(line, new Bgr(Color.White), 2);
            image.Draw(line2, new Bgr(Color.White), 2);
        }

        // Optional position-number / name labels pulled from the dsPos dataset.
        // NOTE(review): assumes columns are [0]=number, [1]=x, [2]=y, [3]=name — confirm schema.
        if (displayPosNum)
        {
            for (int i = 0; i < dsPos.Tables[0].Rows.Count; i++)
            {
                if (showPos)
                {
                    image.Draw("# " + dsPos.Tables[0].Rows[i][0].ToString(), ref font, new Point(int.Parse(dsPos.Tables[0].Rows[i][1].ToString()) - 120, int.Parse(dsPos.Tables[0].Rows[i][2].ToString()) - 50), new Bgr(Color.Yellow));
                }
                if (showNames)
                {
                    image.Draw(dsPos.Tables[0].Rows[i][3].ToString(), ref font, new Point(int.Parse(dsPos.Tables[0].Rows[i][1].ToString()) - 120, int.Parse(dsPos.Tables[0].Rows[i][2].ToString()) - 70), new Bgr(Color.Yellow));
                }
            }
        }

        // Timed zone highlights: a redN/greenN flag keeps its quadrant outlined
        // for ~100 (red) or ~200 (green) frames, then self-resets.
        if (red1 && red1cnt < 100)
        {
            red1cnt++;
            image.Draw(new Rectangle(0, 0, 255, 165), new Bgr(Color.Red), 3);
            if (red1cnt == 99)
            {
                red1 = false; red1cnt = 0;
            }
        }
        if (red2 && red2cnt < 100)
        {
            red2cnt++;
            image.Draw(new Rectangle(262, 0, 257, 167), new Bgr(Color.Red), 3);
            if (red2cnt == 99)
            {
                red2 = false; red2cnt = 0;
            }
        }
        if (red3 && red3cnt < 100)
        {
            red3cnt++;
            image.Draw(new Rectangle(0, 170, 260, 170), new Bgr(Color.Red), 3);
            if (red3cnt == 99)
            {
                red3 = false; red3cnt = 0;
            }
        }
        if (red4 && red4cnt < 100)
        {
            red4cnt++;
            image.Draw(new Rectangle(260, 170, 260, 170), new Bgr(Color.Red), 3);
            if (red4cnt == 99)
            {
                red4 = false; red4cnt = 0;
            }
        }
        if (green1 && green1cnt < 200)
        {
            green1cnt++;
            image.Draw(new Rectangle(0, 0, 255, 165), new Bgr(Color.Green), 3);
            if (green1cnt == 199)
            {
                green1 = false; green1cnt = 0;
            }
        }
        if (green2 && green2cnt < 200)
        {
            green2cnt++;
            image.Draw(new Rectangle(262, 0, 257, 167), new Bgr(Color.Green), 3);
            if (green2cnt == 199)
            {
                green2 = false; green2cnt = 0;
            }
        }
        if (green3 && green3cnt < 200)
        {
            green3cnt++;
            image.Draw(new Rectangle(0, 170, 260, 170), new Bgr(Color.Green), 3);
            if (green3cnt == 199)
            {
                green3 = false; green3cnt = 0;
            }
        }
        if (green4 && green4cnt < 200)
        {
            green4cnt++;
            image.Draw(new Rectangle(260, 170, 260, 170), new Bgr(Color.Green), 3);
            if (green4cnt == 199)
            {
                green4 = false; green4cnt = 0;
            }
        }

        //iterate through each of the motion component
        foreach (MCvConnectedComp comp in motionComponents)
        {
            //reject the components that have small area;
            if (comp.area < minArea)
            {
                continue;
            }

            // find the angle and motion pixel count of the specific area
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(comp.rect, out angle, out motionPixelCount);

            //if (motionPixelCount > 100000) { image.Draw(l5 , new Bgr(Color.Red), 10); } else
            // NOTE(review): with the condition above commented out, the block
            // below is a bare block that runs unconditionally, drawing the
            // green l5 segment for every component — confirm this is intended.
            {
                image.Draw(l5, new Bgr(Color.Green), 10);
            }

            //reject the area that contains too few motion
            // if (motionPixelCount < comp.area * 0.8) continue;
            if (motionPixelCount < comp.area * 0.05)
            {
                continue;
            }

            // Map the component to the nearest predefined position.
            int nearpos = nearestPosition(comp.rect.X, comp.rect.Y);

            //if (1000 > comp.area) continue;
            //Draw each individual motion in red
            // DrawMotion(motionImage, comp.rect, angle, new Bgr(Color.Red));

            // Positions 3 and 4 use a higher minimum area.
            if (nearpos == 3 && comp.area < 500)
            {
                continue;
            }
            if (nearpos == 4 && comp.area < 500)
            {
                continue;
            }

            // Ignore motion near the top-left border; queue the rest (bounded
            // FIFO of the last 100 positions).
            if (comp.rect.X > 60 && comp.rect.Y > 60)
            {
                if (motionQueue.Count == 100)
                {
                    motionQueue.Dequeue();
                    motionQueue.Enqueue(nearpos);
                }
                else
                {
                    motionQueue.Enqueue(nearpos);
                }
                // LineSegment2D l5 = new LineSegment2D(new Point(comp.rect.X, comp.rect.Y), new Point(comp.rect.X, comp.rect.Y));
                // image.Draw(l5, new Bgr(Color.Red), 10);
                // image.Draw(comp.area.ToString(), ref font, new Point(comp.rect.X, comp.rect.Y), new Bgr(Color.LightGreen));
                if (showMotion)
                {
                    image.Draw(comp.rect, new Bgr(Color.Yellow), 2);
                }
            }
        }

        // find and draw the overall motion angle
        double overallAngle, overallMotionPixelCount;
        _motionHistory.MotionInfo(motionMask.ROI, out overallAngle, out overallMotionPixelCount);

        // DrawMotion(motionImage, motionMask.ROI, overallAngle, new Bgr(Color.Green));
        //Display the amount of motions found on the current image
        // UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", motionComponents.Total, overallMotionPixelCount));
        //Display the image of the motion
        // imageBoxFrameGrabber.Image = motionImage; ///motion image

        return(image);
    }
}
/// <summary>
/// Per-frame handler for camera two: runs MOG2 background subtraction,
/// updates the motion history, draws qualifying motion regions plus the
/// overall motion, and updates the camera-two UI and motion count.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event args (unused).</param>
public void ProcessFrameTwo(object sender, EventArgs e)
{
    // Lazily create the background subtractor on the first frame.
    if (_forgroundDetectorTwo == null)
    {
        _forgroundDetectorTwo = new BackgroundSubtractorMOG2();
    }

    _captureTwo.Retrieve(imageTwo);
    _forgroundDetectorTwo.Apply(imageTwo, _forgroundMaskTwo);

    _motionHistoryTwo.Update(_forgroundMaskTwo, DateTime.Now);
    //if (dims <= 2 && step[0] > 0) { _motionHistoryTwo.Update(_forgroundMaskTwo) };

    #region get a copy of the motion mask and enhance its color

    double[] minValues_2, maxValues_2;
    Point[] minLoc_2, maxLoc_2;
    // NOTE(review): stray second ';' below is an empty statement — harmless but unintended.
    _motionHistoryTwo.Mask.MinMax(out minValues_2, out maxValues_2, out minLoc_2, out maxLoc_2);;
    // Rescale so the brightest history pixel maps to 255.
    // NOTE(review): 255.0 / maxValues_2[0] divides by zero on a motion-free mask.
    Mat motionMask_2 = new Mat();
    using (ScalarArray sa2 = new ScalarArray(255.0 / maxValues_2[0]))
        CvInvoke.Multiply(_motionHistoryTwo.Mask, sa2, motionMask_2, 1, DepthType.Cv8U);
    //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
    #endregion

    // Visualization image: motion mask shown in the blue (first) channel.
    Mat motionImageTwo = new Mat(motionMask_2.Size.Height, motionMask_2.Size.Width, DepthType.Cv8U, 3);
    MotionImageTwo = motionImageTwo;
    motionImageTwo.SetTo(new MCvScalar(0));
    CvInvoke.InsertChannel(motionMask_2, motionImageTwo, 0);

    //Threshold to define a motion area, reduce the value to detect smaller motion
    double minArea_2 = 5000;

    Rectangle[] rects_2;
    using (VectorOfRect boundingRect_2 = new VectorOfRect())
    {
        _motionHistoryTwo.GetMotionComponents(_segMaskTwo, boundingRect_2);
        rects_2 = boundingRect_2.ToArray();
    }

    foreach (Rectangle comp_2 in rects_2)
    {
        int area_2 = comp_2.Width * comp_2.Height;
        //reject the components that have small area;
        if (area_2 < minArea_2)
        {
            continue;
        }

        // find the angle and motion pixel count of the specific area
        double angle_2, motionPixelCount_2;
        _motionHistoryTwo.MotionInfo(_forgroundMaskTwo, comp_2, out angle_2, out motionPixelCount_2);

        //reject the area that contains too few motion
        if (motionPixelCount_2 < area_2 * 0.0005)
        {
            continue;
        }

        //Draw each individual motion in red
        DrawMotion(motionImageTwo, comp_2, angle_2, new Bgr(Color.Red));
    }

    // find and draw the overall motion angle
    double overallAngle_2, overallMotionPixelCount_2;
    _motionHistoryTwo.MotionInfo(_forgroundMaskTwo, new Rectangle(Point.Empty, motionMask_2.Size), out overallAngle_2, out overallMotionPixelCount_2);
    DrawMotion(motionImageTwo, new Rectangle(Point.Empty, motionMask_2.Size), overallAngle_2, new Bgr(Color.Green));

    // Skip the UI update if the form is tearing down.
    if (this.Disposing || this.IsDisposed)
    {
        return;
    }

    // Show either the motion visualization or the raw camera feed.
    if (cameraTwoCheckBox.Checked == true)
    {
        cameraTwoImageBox.Image = MotionImageTwo;
    }
    else
    {
        cameraTwoImageBox.Image = imageTwo;
    }

    //Display the amount of motions found on the current image
    // NOTE(review): 0.021 * 1.5 is an unexplained calibration factor applied
    // to the raw rectangle count — document or name it.
    UpdateTextTwo(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", ((rects_2.Length * 0.021) * 1.5), overallMotionPixelCount_2));
    MotionCountTwo = (rects_2.Length * 0.021) * 1.5;
}
//++++++++++++++ Camera Feed Processing / Motion Detection ++++++++++++++//
/// <summary>
/// Per-frame handler for camera one: runs MOG2 background subtraction,
/// updates the motion history, draws qualifying motion regions plus the
/// overall motion, and updates the camera-one UI and motion count.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event args (unused).</param>
public void ProcessFrame(object sender, EventArgs e)
{
    //Separate Background and Foreground
    // Lazily create the background subtractor on the first frame.
    if (_forgroundDetectorOne == null)
    {
        _forgroundDetectorOne = new BackgroundSubtractorMOG2();
    }

    _captureOne.Retrieve(imageOne);                       //Apply camera one feed to object
    _forgroundDetectorOne.Apply(imageOne, _forgroundMask); //detect foreground (object in motion)

    _motionHistoryOne.Update(_forgroundMask, DateTime.Now); //update motion history

    #region get a copy of the motion mask and enhance its color

    double[] minValues, maxValues;
    Point[] minLoc, maxLoc;
    _motionHistoryOne.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
    // Rescale so the brightest history pixel maps to 255.
    // NOTE(review): 255.0 / maxValues[0] divides by zero on a motion-free mask.
    Mat motionMask = new Mat();
    using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
        CvInvoke.Multiply(_motionHistoryOne.Mask, sa, motionMask, 1, DepthType.Cv8U);
    //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
    #endregion

    //create the motion image
    Mat motionImageOne = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
    MotionImageOne = motionImageOne;
    motionImageOne.SetTo(new MCvScalar(0));

    //display the motion pixels in blue (first channel)
    //motionImage[0] = motionMask;
    CvInvoke.InsertChannel(motionMask, motionImageOne, 0);

    //Threshold to define a motion area, reduce the value to detect smaller motion
    double minArea = 5000; //Lower = More Sensitive Motion Detection

    Rectangle[] rects;
    using (VectorOfRect boundingRect = new VectorOfRect())
    {
        _motionHistoryOne.GetMotionComponents(_segMask, boundingRect);
        rects = boundingRect.ToArray();
    }

    //iterate through each of the motion component
    foreach (Rectangle comp in rects)
    {
        int area = comp.Width * comp.Height;
        //reject the components that have small area;
        if (area < minArea)
        {
            continue;
        }

        // find the angle and motion pixel count of the specific area
        double angle, motionPixelCount;
        _motionHistoryOne.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

        //reject the area that contains too few motion
        if (motionPixelCount < area * 0.0005)
        {
            continue; //originally (motionPixelCount < area * 0.05)
        }

        //Draw each individual motion in red
        DrawMotion(motionImageOne, comp, angle, new Bgr(Color.Red));
    }

    // find and draw the overall motion angle
    double overallAngle, overallMotionPixelCount;
    _motionHistoryOne.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
    DrawMotion(motionImageOne, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));

    // Show either the motion visualization or the raw camera feed.
    if (cameraOneCheckBox.Checked == true)
    {
        cameraOneImageBox.Image = MotionImageOne;
    }
    else
    {
        cameraOneImageBox.Image = imageOne;
    }

    //Display the amount of motions found on the current image
    //UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", rects.Length, overallMotionPixelCount));
    // NOTE(review): 0.021 * 1.5 is an unexplained calibration factor applied
    // to the raw rectangle count — document or name it.
    UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1}", ((rects.Length * 0.021) * 1.5), overallMotionPixelCount));
    MotionCountOne = (rects.Length * 0.021) * 1.5;
}
/// <summary>
/// Frame handler with frame skipping: every allfpsc-th frame it runs MOG2
/// background subtraction and the motion-history update, collects motion
/// rectangles inside the configured Zone, optionally runs pedestrian
/// detection (CUDA when available), and finally raises OnMD with the motion
/// pixel count for the zone.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event args (unused).</param>
private void ProcessFrame(object sender, EventArgs e)
{
    List <Rectangle> rt = new List <Rectangle>();                    // motion rects contained in Zone
    double overallAngle = default(double), overallMotionPixelCount = default(double);
    Mat image = new Mat();

    _capture.Retrieve(image);
    // Only every allfpsc-th frame gets the full detection pass.
    if (fpsc++ >= allfpsc)
    {
        fpsc = 0;
        // Lazily create the background subtractor on the first processed frame.
        if (_forgroundDetector == null)
        {
            _forgroundDetector = new BackgroundSubtractorMOG2();
        }

        _forgroundDetector.Apply(image, _forgroundMask);

        //update the motion history
        _motionHistory.Update(_forgroundMask);

        #region get a copy of the motion mask and enhance its color

        double[] minValues, maxValues;
        Point[] minLoc, maxLoc;
        _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
        // NOTE(review): 255.0 / maxValues[0] divides by zero on a motion-free mask.
        Mat motionMask = new Mat();
        using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
            CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
        //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
        #endregion

        //create the motion image
        Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
        motionImage.SetTo(new MCvScalar(0));

        //display the motion pixels in blue (first channel)
        //motionImage[0] = motionMask;
        CvInvoke.InsertChannel(motionMask, motionImage, 0);

        //Threshold to define a motion area, reduce the value to detect smaller motion
        double minArea = 100;

        //storage.Clear(); //clear the storage
        Rectangle[] rects;

        using (VectorOfRect boundingRect = new VectorOfRect())
        {
            _motionHistory.GetMotionComponents(_segMask, boundingRect);
            rects = boundingRect.ToArray();
        }

        //iterate through each of the motion component
        foreach (Rectangle comp in rects)
        {
            // Collect components fully contained in the zone of interest.
            if (Zone.Contains(comp))
            {
                rt.Add(comp);
            }
            int area = comp.Width * comp.Height;
            //reject the components that have small area;
            if (area < minArea)
            {
                continue;
            }

            // find the angle and motion pixel count of the specific area
            // NOTE(review): angle/motionPixelCount are computed but unused in
            // this loop (drawing is commented out) — confirm this is intended.
            double angle, motionPixelCount;
            _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

            //reject the area that contains too few motion
            if (motionPixelCount < area * 0.05)
            {
                continue;
            }

            //Draw each individual motion in red
            //DrawMotion(motionImage, comp, angle, new Bgr(Color.Blue));
        }

        //rects = rt.ToArray();
        // find and draw the overall motion angle
        // Fall back to a tiny default zone when the zone is degenerate.
        // NOTE(review): non-short-circuit '&' requires ALL four conditions —
        // confirm '||' (any dimension invalid) was not intended instead.
        if (Zone.Width <= 0 & Zone.Height <= 0 & Zone.X < 0 & Zone.Y < 0)
        {
            Zone = new Rectangle(0, 0, 10, 10);
        }

        _motionHistory.MotionInfo(_forgroundMask, Zone /*new Rectangle(Point.Empty, motionMask.Size)*/, out overallAngle, out overallMotionPixelCount);
        //DrawMotion(motionImage, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));

        // NOTE(review): grayImage is created but never used — confirm leftover.
        Image <Bgr, Byte> grayImage = image.ToImage <Bgr, Byte>();

        // Optional pedestrian detection, GPU-accelerated when CUDA is present.
        if (DetectPed)
        {
            long processingTime;
            Rectangle[] results;
            if (CudaInvoke.HasCuda)
            {
                using (GpuMat gpuMat = new GpuMat(image))
                    results = FindPedestrian.Find(gpuMat, out processingTime);
            }
            else
            {
                using (UMat uImage = image.GetUMat(AccessType.ReadWrite))
                    results = FindPedestrian.Find(uImage, out processingTime);
            }
            foreach (Rectangle rect in results)
            {
                CvInvoke.Rectangle(image, rect, new Bgr(Color.Red).MCvScalar);
            }
        }
    }

    // Raise the motion event; report real values only after skipfpsc frames.
    if (maxfpsc++ > skipfpsc)
    {
        OnMD?.Invoke((long)overallMotionPixelCount, image, rt.Count);
    }
    else
    {
        OnMD?.Invoke(default(long), image, default(int));
    }
}
/// <summary>
/// Processes a single frame through the background subtractor and motion history,
/// returning the intermediate images for display.
/// </summary>
/// <param name="original">The input frame.</param>
/// <returns>
/// A list containing, in order: the foreground blob mask, the motion image
/// (motion pixels in blue, per-component motion in red, overall motion in green),
/// and the segmentation mask. Callers own (and should dispose) the returned images.
/// </returns>
public List<IImage> ProcessFrame(IImage original)
{
    List<IImage> processedImages = new List<IImage>();
    Mat foregroundBlobs = new Mat();
    Mat segmentMask = new Mat();

    // Threshold defining the minimum motion area (in pixels).
    double minArea = AdjustableParameters["MinMotionArea"].CurrentValue;
    // Threshold defining the minimum motion as a fraction of the bounding blob's area.
    double minMotion = AdjustableParameters["MinMotionDistance"].CurrentValue;
    Rectangle[] motionComponents;

    _backgroundSubtractor.Apply(original, foregroundBlobs);

    // Update the motion history with the new foreground mask.
    _motionHistory.Update(foregroundBlobs);

    // Get a copy of the mask and stretch it to the full 0-255 range.
    double[] minValues, maxValues;
    Point[] minLocation, maxLocation;
    _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLocation, out maxLocation);

    Mat motionImage;
    // FIX: motionMask is a scratch Mat whose data is copied into motionImage by
    // InsertChannel; it was never disposed and leaked native memory on every call.
    using (Mat motionMask = new Mat())
    {
        using (ScalarArray myScalar = new ScalarArray(255.0 / maxValues[0]))
            CvInvoke.Multiply(_motionHistory.Mask, myScalar, motionMask, 1, Emgu.CV.CvEnum.DepthType.Cv8U);

        // Create the motion image and insert the motion mask into its blue channel.
        motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 3);
        motionImage.SetTo(new MCvScalar(0));
        CvInvoke.InsertChannel(motionMask, motionImage, 0);
    }

    // Get the bounding rectangles of the motion components.
    using (VectorOfRect boundingRect = new VectorOfRect())
    {
        _motionHistory.GetMotionComponents(segmentMask, boundingRect);
        motionComponents = boundingRect.ToArray();
    }

    // Loop through the motion components.
    foreach (Rectangle component in motionComponents)
    {
        int area = component.Width * component.Height;

        // Reject components smaller than the area threshold.
        if (area < minArea)
        {
            continue;
        }

        // Find the angle and motion pixel count for this component.
        double angle, pixelCount;
        _motionHistory.MotionInfo(foregroundBlobs, component, out angle, out pixelCount);

        // Reject components whose motion pixel count is below the minimum fraction.
        if (pixelCount < area * minMotion)
        {
            continue;
        }

        // Draw each individual motion in red.
        DrawMotion(motionImage, component, angle, new Bgr(Color.Red));
    }

    // Find and draw the overall motion angle in green.
    // (motionImage has the same dimensions the scratch mask had.)
    double overallAngle, overallMotionPixelCount;
    _motionHistory.MotionInfo(foregroundBlobs, new Rectangle(Point.Empty, motionImage.Size), out overallAngle, out overallMotionPixelCount);
    DrawMotion(motionImage, new Rectangle(Point.Empty, motionImage.Size), overallAngle, new Bgr(Color.Green));

    processedImages.Add(foregroundBlobs);
    processedImages.Add(motionImage);
    processedImages.Add(segmentMask);
    return processedImages;
}
private void ProcessFrame(object sender, EventArgs e) { Mat image = new Mat(); _capture.Retrieve(image); if (_forgroundDetector == null) { _forgroundDetector = new BackgroundSubtractorMOG2(); } _forgroundDetector.Apply(image, _forgroundMask); capturedImageBox.Image = image; //update the motion history _motionHistory.Update(_forgroundMask); foreground.Image = _forgroundMask; #region get a copy of the motion mask and enhance its color double[] minValues, maxValues; Point[] minLoc, maxLoc; _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc); Mat motionMask = new Mat(); using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0])) CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U); //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]); #endregion //create the motion image // Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size); //display the motion pixels in blue (first channel) //motionImage[0] = motionMask; // CvInvoke.InsertChannel(motionMask, motionImage, 0); //Threshold to define a motion area, reduce the value to detect smaller motion // double minArea = 100; //storage.Clear(); //clear the storage Rectangle[] rects; using (VectorOfRect boundingRect = new VectorOfRect()) { _motionHistory.GetMotionComponents(_segMask, boundingRect); rects = boundingRect.ToArray(); } //iterate through each of the motion component foreach (Rectangle comp in rects) { time.Start(); // find the angle and motion pixel count of the specific area double angle, motionPixelCount; _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount); if (Main.security.Text == "SECURITY MODE ON") { long x = time.ElapsedMilliseconds; if (x > ellapsed_time) { if (motionPixelCount > pixel_count) { //MessageBox.Show("My message here"); Console.Beep(5000, 1000); if (Main.connected == true) { chat.send(Encoding.ASCII.GetBytes("Someone is in the room")); } break; time.Stop(); } } } } }
/// <summary>
/// Frame handler: runs MOG2 background subtraction and motion-history analysis,
/// starts a background task that detects faces (saving face crops and the full
/// frame under G:\motion1), and for each qualifying motion component starts a
/// task that crops the moving region for display while drawing the motion in red.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event data (unused).</param>
private void ProcessFrame(object sender, EventArgs e)
{
    Mat image = new Mat();
    _capture.Retrieve(image);

    // Lazily create the background subtractor on first use.
    if (_forgroundDetector == null)
    {
        _forgroundDetector = new BackgroundSubtractorMOG2();
    }

    _forgroundDetector.Apply(image, _forgroundMask);

    //update the motion history
    _motionHistory.Update(_forgroundMask);

    #region get a copy of the motion mask and enhance its color
    double[] minValues, maxValues;
    Point[] minLoc, maxLoc;
    _motionHistory.Mask.MinMax(out minValues, out maxValues, out minLoc, out maxLoc);
    // NOTE(review): motionMask and motionImage are native Mats that are never
    // disposed in this handler — they leak unmanaged memory on every frame.
    Mat motionMask = new Mat();
    using (ScalarArray sa = new ScalarArray(255.0 / maxValues[0]))
        CvInvoke.Multiply(_motionHistory.Mask, sa, motionMask, 1, DepthType.Cv8U);
    //Image<Gray, Byte> motionMask = _motionHistory.Mask.Mul(255.0 / maxValues[0]);
    #endregion

    //DetectFace.Detect(image, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml", faces, eyes, tryUseCuda, out detectionTime);

    //create the motion image
    // NOTE(review): unlike the sibling handlers, motionImage is NOT cleared with
    // SetTo(0) before InsertChannel, so channels 1 and 2 may hold uninitialized data.
    Mat motionImage = new Mat(motionMask.Size.Height, motionMask.Size.Width, DepthType.Cv8U, 3);
    //display the motion pixels in blue (first channel)
    //motionImage[0] = motionMask;
    CvInvoke.InsertChannel(motionMask, motionImage, 0);

    //Threshold to define a motion area, reduce the value to detect smaller motion
    double minArea = 100;

    //storage.Clear(); //clear the storage
    Rectangle[] rects;
    using (VectorOfRect boundingRect = new VectorOfRect())
    {
        _motionHistory.GetMotionComponents(_segMask, boundingRect);
        rects = boundingRect.ToArray();
    }

    // Pre-filter: keep only components that are large enough and contain enough motion.
    List<Rectangle> Availablerects = new List<Rectangle>();
    foreach (Rectangle comp in rects)
    {
        int area = comp.Width * comp.Height;
        //reject the components that have small area;
        if (area < minArea)
        {
            continue;
        }

        double angle, motionPixelCount;
        _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

        //reject the area that contains too few motion
        if (motionPixelCount < area * size)
        {
            continue;
        }
        else
        {
            Availablerects.Add(comp);
        }
    }

    //iterate through each of the motion component
    List<Rectangle> faces = new List<Rectangle>();
    List<Rectangle> eyes = new List<Rectangle>();

    // Background face detection: saves face crops plus the original frame to disk.
    // NOTE(review): this task writes UI controls (label1, faceimage) from a worker
    // thread and shares `image` with the UI thread without synchronization —
    // confirm thread-safety / marshal via Invoke.
    Task task2 = new Task(() =>
    {
        Mat Detectmat = new Mat();
        Detectmat = image;
        DetectFace.Detect(Detectmat, "haarcascade_frontalface_default.xml", "haarcascade_eye.xml", faces, eyes, tryUseCuda, out detectionTime);
        if (faces.Count > 0)
        {
            label1.Text = "detectionTime:" + detectionTime.ToString();
            for (int i = 0; i < faces.Count; i++)
            {
                Bitmap bt2 = DetectFace.Cutbitmap(Detectmat.Bitmap, faces[i].X, faces[i].Y, faces[i].Width, faces[i].Height);
                Emgu.CV.Image<Bgr, Byte> currentFrame1 = new Emgu.CV.Image<Bgr, Byte>(bt2); // this is the only way to convert
                Mat invert1 = new Mat();
                CvInvoke.BitwiseAnd(currentFrame1, currentFrame1, invert1); // method from the official site, adapted to be generic; no direct conversion API was found
                faceimage.Image = invert1;
                string filePath = "G:\\motion1\\" + DateTime.Now.ToString("人脸-yyyy年MM月dd日HH点mm分ss秒") + i.ToString() + "-" + faces.Count.ToString() + ".jpg";
                bt2.Save(filePath);
                System.Media.SystemSounds.Beep.Play();
            }
            Bitmap bt1 = Detectmat.Bitmap;
            string filePath2 = "G:\\motion1\\" + DateTime.Now.ToString("原图-yyyy年MM月dd日HH点mm分ss秒") + ".jpg";
            //System.Diagnostics.Debug.WriteLine("准备保存原图" + detectionTime.ToString());
            bt1.Save(filePath2);
        }
    });
    task2.Start();

    foreach (Rectangle comp in Availablerects)
    {
        int area = comp.Width * comp.Height;
        //reject the components that have small area;
        // NOTE(review): redundant — Availablerects was already filtered by exactly
        // these two tests above.
        if (area < minArea)
        {
            continue;
        }

        // find the angle and motion pixel count of the specific area
        double angle, motionPixelCount;
        _motionHistory.MotionInfo(_forgroundMask, comp, out angle, out motionPixelCount);

        //reject the area that contains too few motion
        if (motionPixelCount < area * size)
        {
            continue;
        }

        //Draw each individual motion in red
        //================= convert the Mat to a Bitmap and crop it ===========================
        // NOTE(review): worker thread touches the UI control `moveimage` and the
        // captured loop variable `comp`; confirm thread-safety.
        Task task = new Task(() =>
        {
            Bitmap bt = DetectFace.Cutbitmap(image.Bitmap, comp.X, comp.Y, comp.Width, comp.Height);
            Emgu.CV.Image<Bgr, Byte> currentFrame = new Emgu.CV.Image<Bgr, Byte>(bt); // this is the only way to convert
            Mat invert = new Mat();
            CvInvoke.BitwiseAnd(currentFrame, currentFrame, invert); // method from the official site, adapted to be generic; no direct conversion API was found
            moveimage.Image = invert;
        });
        task.Start();

        // NOTE(review): the empty catch silently swallows any DrawMotion failure.
        try
        {
            DrawMotion(motionImage, comp, angle, new Bgr(Color.Red));
            DrawMotion(capturedImageBox.Image, comp, angle, new Bgr(Color.Red));
        }
        catch (Exception a)
        {
        }

        #region//area
        /*
         * bool time = false;
         * if ((comp.X > 1770 && comp.X < 1830) && (comp.Y > 2 && comp.Y < 40))
         * {
         * time = true;
         * }
         * if (youxiaorects.Count < 50&&!time)
         * {
         * if (capturedImageBox.Image != null)
         * {
         * Random rd = new Random();
         * Bitmap bt = new Bitmap(capturedImageBox.Image.Bitmap);
         * // string filePath = "G:\\motion\\" + DateTime.Now.ToString("yyyy年MM月dd日HH点mm分ss秒") + ".jpg";
         * // image.Save(filePath);
         *
         * }
         * }
         */
        #endregion
    }

    #region // junk pile (disabled experiments)
    //================= when an image change is detected and the changed region's position and size are known, try saving the region and passing it to the face-recognition function for analysis =====================
    //=============== dynamically create threads based on the number of changed regions, to improve efficiency ======================
    /* Thread[] downloadThread;
     * Thread face=new Thread(confirmface);
     * face.Start();*/
    /* int areacount = Availablerects.Count;
     * // declare the download threads — a C# advantage: an array's length need not be given at declaration, only when it is used
     *
     * // this declaration should be class-level so other methods/controls can reach the threads
     *
     * ThreadStart startDownload = new ThreadStart(confirmface);
     * // thread start setup: every thread executes DownLoad()
     * downloadThread = new Thread[areacount];// allocate thread resources and fix the total thread count
     * for (int k = 0; k < areacount; k++)// start the specified number of threads
     * {
     * downloadThread[k] = new Thread(startDownload);// assign the thread start delegate
     * downloadThread[k].Start();// start the threads one by one
     * }*/
    #endregion

    #region//_forgroundMask
    /*
     * // find and draw the overall motion angle
     * double overallAngle, overallMotionPixelCount;
     *
     * _motionHistory.MotionInfo(_forgroundMask, new Rectangle(Point.Empty, motionMask.Size), out overallAngle, out overallMotionPixelCount);
     * // DrawMotion(motionImage, new Rectangle(Point.Empty, motionMask.Size), overallAngle, new Bgr(Color.Green));
     * // DrawMotion(image, new Rectangle(Point.Empty, image.Size), overallAngle, new Bgr(Color.Green));
     * if (this.Disposing || this.IsDisposed)
     * return;
     */
    /* foreach (Rectangle face in faces)
     * CvInvoke.Rectangle(image, face, new Bgr(Color.Red).MCvScalar, 2);
     * foreach (Rectangle eye in eyes)
     * CvInvoke.Rectangle(image, eye, new Bgr(Color.Blue).MCvScalar, 2);*/
    capturedImageBox.Image = image;
    // forgroundImageBox.Image = _forgroundMask;

    //Display the amount of motions found on the current image
    //UpdateText(String.Format("Total Motions found: {0}; Motion Pixel count: {1} detectionTime:{2} ", rects.Length, overallMotionPixelCount, detectionTime));

    //Display the image of the motion
    // motionImageBox.Image = motionImage;
    #endregion
}