/*
public void TestCodeBookBGModel()
{
    using (Capture capture = new Capture())
    using (BGCodeBookModel<Bgr> model = new BGCodeBookModel<Bgr>())
    {
        ImageViewer viewer = new ImageViewer();
        Image<Gray, byte> fgMask = capture.QueryFrame().Convert<Gray, Byte>();

        Application.Idle += delegate(Object sender, EventArgs args)
        {
            Mat frame = capture.QueryFrame();
            model.Apply(frame);
            viewer.Image = model.ForegroundMask;
        };
        viewer.ShowDialog();
    }
}

public void TestBlobTracking()
{
    MCvFGDStatModelParams fgparam = new MCvFGDStatModelParams();
    fgparam.alpha1 = 0.1f;
    fgparam.alpha2 = 0.005f;
    fgparam.alpha3 = 0.1f;
    fgparam.delta = 2;
    fgparam.is_obj_without_holes = 1;
    fgparam.Lc = 32;
    fgparam.Lcc = 16;
    fgparam.minArea = 15;
    fgparam.N1c = 15;
    fgparam.N1cc = 25;
    fgparam.N2c = 25;
    fgparam.N2cc = 35;
    fgparam.perform_morphing = 0;
    fgparam.T = 0.9f;

    BlobTrackerAutoParam<Bgr> param = new BlobTrackerAutoParam<Bgr>();
    param.BlobDetector = new BlobDetector(Emgu.CV.CvEnum.BlobDetectorType.CC);
    param.FGDetector = new FGDetector<Bgr>(Emgu.CV.CvEnum.ForgroundDetectorType.Fgd, fgparam);
    param.BlobTracker = new BlobTracker(Emgu.CV.CvEnum.BLOBTRACKER_TYPE.MSFG);
    param.FGTrainFrames = 10;
    BlobTrackerAuto<Bgr> tracker = new BlobTrackerAuto<Bgr>(param);

    //MCvFont font = new MCvFont(Emgu.CV.CvEnum.FontFace.HersheySimplex, 1.0, 1.0);

    using (ImageViewer viewer = new ImageViewer())
    using (Capture capture = new Capture())
    {
        capture.ImageGrabbed += delegate(object sender, EventArgs e)
        {
            tracker.Process(capture.RetrieveBgrFrame());
            //Image<Bgr, Byte> img = capture.RetrieveBgrFrame();
            Image<Bgr, Byte> img = tracker.ForegroundMask.Convert<Bgr, Byte>();
            foreach (MCvBlob blob in tracker)
            {
                img.Draw((Rectangle)blob, new Bgr(255.0, 255.0, 255.0), 2);
                img.Draw(blob.ID.ToString(), Point.Round(blob.Center), CvEnum.FontFace.HersheySimplex, 1.0, new Bgr(255.0, 255.0, 255.0));
            }
            viewer.Image = img;
        };
        capture.Start();
        viewer.ShowDialog();
    }
}
*/

public void TestCvBlob()
{
    //MCvFont font = new MCvFont(Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, 0.5);
    using (CvTracks tracks = new CvTracks())
    using (ImageViewer viewer = new ImageViewer())
    using (Capture capture = new Capture())
    using (Mat fgMask = new Mat())
    {
        //BGStatModel<Bgr> bgModel = new BGStatModel<Bgr>(capture.QueryFrame(), Emgu.CV.CvEnum.BG_STAT_TYPE.GAUSSIAN_BG_MODEL);
        BackgroundSubtractorMOG2 bgModel = new BackgroundSubtractorMOG2(0, 0, true);
        //BackgroundSubstractorMOG bgModel = new BackgroundSubstractorMOG(0, 0, 0, 0);

        capture.ImageGrabbed += delegate(object sender, EventArgs e)
        {
            Mat frame = new Mat();
            capture.Retrieve(frame);
            bgModel.Apply(frame, fgMask);

            using (CvBlobDetector detector = new CvBlobDetector())
            using (CvBlobs blobs = new CvBlobs())
            {
                // Detect blobs in the foreground mask, drop small ones and update the tracks.
                detector.Detect(fgMask.ToImage<Gray, Byte>(), blobs);
                blobs.FilterByArea(100, int.MaxValue);
                tracks.Update(blobs, 20.0, 10, 0);

                Image<Bgr, Byte> result = new Image<Bgr, byte>(frame.Size);
                using (Image<Gray, Byte> blobMask = detector.DrawBlobsMask(blobs))
                {
                    frame.CopyTo(result, blobMask);
                }
                //CvInvoke.cvCopy(frame, result, blobMask);

                foreach (KeyValuePair<uint, CvTrack> pair in tracks)
                {
                    if (pair.Value.Inactive == 0) // Only draw the active tracks.
                    {
                        CvBlob b = blobs[pair.Value.BlobLabel];
                        Bgr color = detector.MeanColor(b, frame.ToImage<Bgr, Byte>());
                        result.Draw(pair.Key.ToString(), pair.Value.BoundingBox.Location, CvEnum.FontFace.HersheySimplex, 0.5, color);
                        result.Draw(pair.Value.BoundingBox, color, 2);
                        Point[] contour = b.GetContour();
                        result.Draw(contour, new Bgr(0, 0, 255), 1);
                    }
                }

                // Show the original frame on top, the foreground mask and the annotated result below.
                viewer.Image = frame.ToImage<Bgr, Byte>().ConcateVertical(fgMask.ToImage<Bgr, Byte>().ConcateHorizontal(result));
            }
        };
        capture.Start();
        viewer.ShowDialog();
    }
}
private void videoPredictToolStripMenuItem_Click(object sender, EventArgs e)
{
    try
    {
        _cameraCapture = new VideoCapture(1);
        _tracker = new CvTracks();

        //Load haarcascades for face detection
        face = new CascadeClassifier("haarcascade_frontalface_default.xml");

        //if (File.Exists("svm.txt"))
        //{
        //    svm = new SVM();
        //    FileStorage file = new FileStorage("svm.txt", FileStorage.Mode.Read);
        //    svm.Read(file.GetNode("opencv_ml_svm"));
        //}
        //if (svm == null)
        //{
        //    return;
        //}
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        return;
    }

    Application.Idle += new EventHandler(ProcessFrame);
}
void Run()
{
    try
    {
        // Touching BuildInformation forces Emgu's native OpenCV binaries to load.
        var trt = CvInvoke.BuildInformation;

        // Ask OpenCV's FFMPEG backend to use UDP as the RTSP transport;
        // this must be set before the capture is created.
        _putenv_s("OPENCV_FFMPEG_CAPTURE_OPTIONS", "rtsp_transport;udp");
        // _putenv_s("OPENCV_FFMPEG_CAPTURE_OPTIONS", "");

        cameraCapture = new VideoCapture("rtsp://*****:*****@192.168.5.49:554/onvif1", VideoCapture.API.Ffmpeg);
        //_cameraCapture = new VideoCapture("http://192.168.1.90:81/stream?x.mjpeg", VideoCapture.API.Any);
        //_cameraCapture = new VideoCapture("rtsp://192.168.1.90:8554", VideoCapture.API.Ffmpeg);
        //_cameraCapture = new VideoCapture("http://192.168.1.90/?x.mjpeg", VideoCapture.API.Ffmpeg);
        //_cameraCapture = new VideoCapture("http://192.168.1.90", VideoCapture.API.Any);

        // Mat _frame = new Mat();
        // Mat _frameCopy = new Mat();
        // _cameraCapture.Read(_frame);
        //// _cameraCapture.Retrieve(_frame, 0);
        // _frame.CopyTo(_frameCopy);
    }
    catch (Exception e)
    {
        MessageBox.Show(e.Message);
        return;
    }

    fgDetector = new BackgroundSubtractorMOG2();
    blobDetector = new CvBlobDetector();
    tracker = new CvTracks();

    Application.Idle += ProcessFrame;
}
public ObjectTracker(TrackerSettings settings)
{
    _settings = settings;
    _foregroundDetector = new BackgroundSubtractorMOG2(
        _settings.BackgroundSubtractorHistory.Value,
        _settings.BackgroundSubtractorMaxComponents.Value,
        false);
    _blobDetector = new CvBlobDetector();
    _blobs = new CvBlobs();
    _tracks = new CvTracks();
    _trackedObjectIdentities = new Dictionary<uint, TrackedObject>();
}
private static void Track()
{
    using (var video = new CvCapture("data/bach.mp4"))
    {
        IplImage frame = null;
        IplImage gray = null;
        IplImage binary = null;
        IplImage render = null;
        IplImage renderTracks = null;
        CvTracks tracks = new CvTracks();
        CvWindow window = new CvWindow("render");
        CvWindow windowTracks = new CvWindow("tracks");

        for (int i = 0; ; i++)
        {
            frame = video.QueryFrame();
            //if (frame == null)
            //    frame = new IplImage("data/shapes.png");
            if (frame == null)
                break; // stop at end of video instead of passing null to CvtColor

            if (gray == null)
            {
                gray = new IplImage(frame.Size, BitDepth.U8, 1);
                binary = new IplImage(frame.Size, BitDepth.U8, 1);
                render = new IplImage(frame.Size, BitDepth.U8, 3);
                renderTracks = new IplImage(frame.Size, BitDepth.U8, 3);
            }
            render.Zero();
            renderTracks.Zero();

            // Binarize the frame and label connected components as blobs.
            Cv.CvtColor(frame, gray, ColorConversion.BgrToGray);
            Cv.Threshold(gray, binary, 0, 255, ThresholdType.Otsu);
            CvBlobs blobs = new CvBlobs(binary);

            // Keep only the 200 largest blobs.
            CvBlobs newBlobs = new CvBlobs(blobs
                .OrderByDescending(pair => pair.Value.Area)
                .Take(200)
                .ToDictionary(pair => pair.Key, pair => pair.Value), blobs.Labels);
            newBlobs.RenderBlobs(binary, render);
            window.ShowImage(render);

            // thInactive = Int32.MaxValue: tracks are never expired.
            newBlobs.UpdateTracks(tracks, 10.0, Int32.MaxValue);
            tracks.Render(binary, renderTracks);
            windowTracks.ShowImage(renderTracks);

            Cv.WaitKey(200);
            Console.WriteLine(i);
        }
    }
}
public void Dispose()
{
    if (_foregroundDetector == null)
        return;

    try
    {
        _blobDetector.Dispose();
        _blobs.Dispose();
        _tracks.Dispose();
        ((IDisposable)_foregroundDetector).Dispose();
    }
    catch (Exception ex)
    {
        Log.Error("Exception disposing foreground detector", ex);
    }

    _blobDetector = null;
    _blobs = null;
    _tracks = null;
    _foregroundDetector = null;
}
/// <summary>
/// Updates the list of tracks based on the current blobs.
/// </summary>
/// <param name="b">List of blobs.</param>
/// <param name="t">List of tracks.</param>
/// <param name="thDistance">Max distance to determine when a track and a blob match.</param>
/// <param name="thInactive">Max number of frames a track can be inactive.</param>
/// <remarks>
/// Tracking based on:
/// A. Senior, A. Hampapur, Y-L Tian, L. Brown, S. Pankanti, R. Bolle. Appearance Models for
/// Occlusion Handling. Second International Workshop on Performance Evaluation of Tracking and
/// Surveillance Systems &amp; CVPR'01. December, 2001.
/// (http://www.research.ibm.com/peoplevision/PETS2001.pdf)
/// </remarks>
public static void UpdateTracks(CvBlobs b, CvTracks t, double thDistance, uint thInactive)
{
    if (b == null)
        throw new ArgumentNullException("b");
    if (t == null)
        throw new ArgumentNullException("t");
    CvBlobInvoke.cvb_cvUpdateTracks(b.CvPtr, t.CvPtr, thDistance, thInactive);
}
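// Usage sketch (not from the source): a single-frame update with this overload,
// called unqualified as if from the declaring class. Assumes an OpenCvSharp-style
// pipeline as in Track() above; "binary" stands in for an 8-bit single-channel
// foreground mask produced elsewhere, and the thresholds are illustrative.
CvTracks tracks = new CvTracks();
CvBlobs blobs = new CvBlobs(binary);   // label connected components in the mask
// Blobs farther than 10 px from every existing track start new tracks;
// tracks left unmatched for more than 5 frames are dropped.
UpdateTracks(blobs, tracks, 10.0, 5);
foreach (KeyValuePair<uint, CvTrack> pair in tracks)
    Console.WriteLine("track {0}, inactive for {1} frames", pair.Key, pair.Value.Inactive);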
/// <summary>
/// Prints tracks information.
/// </summary>
/// <param name="tracks">List of tracks.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="mode">Render mode. By default is CV_TRACK_RENDER_ID.</param>
/// <param name="font">OpenCV font for print on the image.</param>
public static void RenderTracks(CvTracks tracks, IplImage imgSource, IplImage imgDest, RenderTracksMode mode, CvFont font)
{
    if (tracks == null)
        throw new ArgumentNullException("tracks");
    if (imgSource == null)
        throw new ArgumentNullException("imgSource");
    if (imgDest == null)
        throw new ArgumentNullException("imgDest");

    IntPtr fontPtr = (font == null) ? IntPtr.Zero : font.CvPtr;
    CvBlobInvoke.cvb_cvRenderTracks(tracks.CvPtr, imgSource.CvPtr, imgDest.CvPtr, mode, fontPtr);
}
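// Usage sketch (not from the source): rendering with an explicit font, called
// unqualified as if from the declaring class. The CvFont constructor signature
// (face, hscale, vscale) and the RenderTracksMode.Id member name (mirroring
// cvblob's CV_TRACK_RENDER_ID) are assumptions; "frame" and "tracks" come from
// a loop like Track() above.
IplImage overlay = new IplImage(frame.Size, BitDepth.U8, 3);
overlay.Zero();
CvFont font = new CvFont(FontFace.HersheySimplex, 0.5, 0.5);
RenderTracks(tracks, frame, overlay, RenderTracksMode.Id, font);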
/// <summary>
/// Prints tracks information.
/// </summary>
/// <param name="tracks">List of tracks.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="mode">Render mode. By default is CV_TRACK_RENDER_ID.</param>
public static void RenderTracks(CvTracks tracks, IplImage imgSource, IplImage imgDest, RenderTracksMode mode)
{
    RenderTracks(tracks, imgSource, imgDest, mode, null);
}
/// <summary>
/// Updates the list of tracks based on the current blobs.
/// </summary>
/// <param name="blobs">List of blobs.</param>
/// <param name="tracks">List of tracks.</param>
/// <param name="thDistance">Max distance to determine when a track and a blob match.</param>
/// <param name="thInactive">Max number of frames a track can be inactive.</param>
/// <param name="thActive">If a track becomes inactive but has been active for fewer than thActive frames, it will be deleted.</param>
/// <remarks>
/// Tracking based on:
/// A. Senior, A. Hampapur, Y-L Tian, L. Brown, S. Pankanti, R. Bolle. Appearance Models for
/// Occlusion Handling. Second International Workshop on Performance Evaluation of Tracking and
/// Surveillance Systems &amp; CVPR'01. December, 2001.
/// (http://www.research.ibm.com/peoplevision/PETS2001.pdf)
/// </remarks>
public static void UpdateTracks(CvBlobs blobs, CvTracks tracks, double thDistance, int thInactive, int thActive)
{
    if (blobs == null)
        throw new ArgumentNullException(nameof(blobs));
    blobs.UpdateTracks(tracks, thDistance, thInactive, thActive);
}
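// Usage sketch (not from the source): illustrative threshold values for this overload.
// - a blob within 15 px of a track is matched to it,
// - a track unmatched for more than 10 frames is removed,
// - a track that goes inactive after fewer than 5 active frames is treated as noise and deleted.
blobs.UpdateTracks(tracks, 15.0, 10, 5);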
/// <summary>
/// Prints tracks information.
/// </summary>
/// <param name="tracks">List of tracks.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="mode">Render mode. By default is CV_TRACK_RENDER_ID.</param>
/// <param name="textColor">Color of the rendered track labels.</param>
/// <param name="fontFace">Font face used for the labels.</param>
/// <param name="fontScale">Font scale factor.</param>
/// <param name="thickness">Thickness of the label strokes.</param>
public static void RenderTracks(CvTracks tracks, Mat imgSource, Mat imgDest, RenderTracksMode mode,
    Scalar textColor, HersheyFonts fontFace = HersheyFonts.HersheySimplex, double fontScale = 1d, int thickness = 1)
{
    if (tracks == null)
        throw new ArgumentNullException(nameof(tracks));
    tracks.Render(imgSource, imgDest, mode, textColor, fontFace, fontScale, thickness);
}
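// Usage sketch (not from the source): overlay track IDs on a copy of the current
// frame via the instance method this overload delegates to. RenderTracksMode.Id
// is assumed to mirror cvblob's CV_TRACK_RENDER_ID.
Mat annotated = frame.Clone();
tracks.Render(frame, annotated, RenderTracksMode.Id,
    new Scalar(0, 255, 0),                  // green labels
    HersheyFonts.HersheySimplex, 0.6, 1);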
/// <summary>
/// Prints tracks information.
/// </summary>
/// <param name="tracks">List of tracks.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="mode">Render mode. By default is CV_TRACK_RENDER_ID.</param>
public static void RenderTracks(CvTracks tracks, Mat imgSource, Mat imgDest, RenderTracksMode mode)
{
    if (tracks == null)
        throw new ArgumentNullException(nameof(tracks));
    tracks.Render(imgSource, imgDest, mode);
}
/// <summary>
/// Prints tracks information.
/// </summary>
/// <param name="tracks">List of tracks.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="mode">Render mode. By default is CV_TRACK_RENDER_ID.</param>
/// <param name="font">OpenCV font for print on the image.</param>
public static void RenderTracks(CvTracks tracks, IplImage imgSource, IplImage imgDest, RenderTracksMode mode, CvFont font)
{
    if (tracks == null)
        throw new ArgumentNullException("tracks");
    tracks.Render(imgSource, imgDest, mode, font);
}
/// <summary>
/// Prints tracks information.
/// </summary>
/// <param name="tracks">List of tracks.</param>
/// <param name="imgSource">Input image (depth=IPL_DEPTH_8U and num. channels=3).</param>
/// <param name="imgDest">Output image (depth=IPL_DEPTH_8U and num. channels=3).</param>
public static void RenderTracks(CvTracks tracks, Mat imgSource, Mat imgDest)
{
    if (tracks == null)
        throw new ArgumentNullException("tracks");
    tracks.Render(imgSource, imgDest);
}