Code example #1
        static void TestHaar()
        {
            if (Environment.Is64BitProcess)
            {
                throw new Exception("This example must be run as a 32-bit process.");
            }

            // Let's test the detector using a sample video from
            // the collection of test videos in the framework:
            TestVideos ds       = new TestVideos();
            string     fileName = ds["crowd.mp4"];

            // In this example, we will be creating a cascade for a Face detector:
            var cascade = new Accord.Vision.Detection.Cascades.FaceHaarCascade();

            // Now, create a new Haar object detector with the cascade:
            var detector = new HaarObjectDetector(cascade, minSize: 25,
                                                  searchMode: ObjectDetectorSearchMode.Average,
                                                  scalingMode: ObjectDetectorScalingMode.SmallerToGreater,
                                                  scaleFactor: 1.1f)
            {
                Suppression = 5 // Only report a region as a face if it has been
                                // detected at least 5 times across different cascade scales.
            };

            // Now, let's open the video using FFMPEG:
            var video = new VideoFileReader();

            video.Open(fileName);

            Stopwatch sw = Stopwatch.StartNew();

            // Now, for each frame of the video
            for (int frameIndex = 0; frameIndex < video.FrameCount; frameIndex++)
            {
                // Read the current frame into the bitmap data
                Bitmap bmp = video.ReadVideoFrame(frameIndex);

                // Feed the frame to the tracker
                Rectangle[] faces = detector.ProcessFrame(bmp);

                // Print the number of faces detected in this frame
                Console.WriteLine(faces.Length);
                Console.WriteLine(bmp.Flags);

                // Dispose the frame so we do not leak one Bitmap per iteration
                bmp.Dispose();
            }

            sw.Stop();

            Console.WriteLine(sw.Elapsed);

            video.Close();
        }
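
These examples omit their using directives. A minimal sketch of the namespaces they appear to rely on is given below; this is an assumption inferred from the types used above (TestVideos, HaarObjectDetector, MatchingTracker, VideoFileReader/Writer, UnmanagedImage, RectanglesMarker), and the exact namespaces may differ between Accord.NET versions.

        // Sketch only: place these at the top of the source file.
        using System;
        using System.Diagnostics;
        using System.Drawing;
        using System.Drawing.Imaging;
        using System.IO;
        using Accord;                      // string .Format(...) extension used below (assumed)
        using Accord.DataSets;             // TestVideos
        using Accord.Imaging;              // UnmanagedImage
        using Accord.Imaging.Filters;      // RectanglesMarker (assumed namespace)
        using Accord.Video.FFMPEG;         // VideoFileReader, VideoFileWriter
        using Accord.Vision.Detection;     // HaarObjectDetector and its enums
        using Accord.Vision.Tracking;      // MatchingTracker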
Code example #2
        public void ProcessVideo()
        {
            string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "detector");

            #region doc_video
            // Let's test the detector using a sample video from
            // the collection of test videos in the framework:
            TestVideos ds       = new TestVideos(basePath);
            string     fileName = ds["crowd.mp4"];

            // In this example, we will be creating a cascade for a Face detector:
            var cascade = new Accord.Vision.Detection.Cascades.FaceHaarCascade();

            // Now, create a new Haar object detector with the cascade:
            var detector = new HaarObjectDetector(cascade, minSize: 25,
                                                  searchMode: ObjectDetectorSearchMode.Average,
                                                  scalingMode: ObjectDetectorScalingMode.SmallerToGreater,
                                                  scaleFactor: 1.1f)
            {
                Suppression = 5 // Only report a region as a face if it has been
                                // detected at least 5 times across different cascade scales.
            };

            // Now, let's open the video using FFMPEG:
            var video = new VideoFileReader();
            video.Open(fileName);

            // And then check the contents of one of the frames:
            Bitmap frame = video.ReadVideoFrame(frameIndex: 0);

            // Creating bitmaps and locking them is an expensive
            // operation. Instead, let's allocate once and reuse
            BitmapData     bitmapData     = frame.LockBits(ImageLockMode.ReadWrite);
            UnmanagedImage unmanagedImage = new UnmanagedImage(bitmapData);

            // We will create a color marker to show the faces
            var objectMarker = new RectanglesMarker(Color.Red);

            // This example shows two different ways to save results to disk: the first
            // saves each individual frame as a separate .png file, and the second saves
            // all frames together as a video in .mp4 format.

            // To save results as a movie clip in mp4 format, you can use:
            VideoFileWriter writer = new VideoFileWriter();
            writer.Open(Path.Combine(basePath, "detected_faces.mp4"), frame.Width, frame.Height);

            // Now, for each frame of the video
            for (int frameIndex = 0; frameIndex < video.FrameCount; frameIndex++)
            {
                // Read the current frame into the bitmap data
                video.ReadVideoFrame(frameIndex, bitmapData);

                // Feed the frame to the tracker
                Rectangle[] faces = detector.ProcessFrame(unmanagedImage);

                // Mark the locations of the detected faces in red
                objectMarker.Rectangles = faces;
                objectMarker.ApplyInPlace(unmanagedImage); // overwrite the frame

                // Save it to disk: first saving each frame separately:
                frame.Save(Path.Combine(basePath, "frame_{0}.png".Format(frameIndex)));

                // And then, saving as a .mp4 file:
                writer.WriteVideoFrame(bitmapData);
            }

            // The generated video can be seen at https://1drv.ms/v/s!AoiTwBxoR4OAoLJhPozzixD25XcbiQ
            // Release the locked bitmap and close the input and output videos:
            frame.UnlockBits(bitmapData);
            video.Close();
            writer.Close();
            #endregion
        }
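
If the locked BitmapData / UnmanagedImage reuse above is not needed, the frame loop can also work with Bitmap objects directly, as in code example #1. The sketch below is only an illustration under the assumption that the Bitmap overloads of ReadVideoFrame, ProcessFrame, ApplyInPlace and WriteVideoFrame used or implied in these examples are available; it trades speed for simplicity, since each iteration allocates a new Bitmap.

            // Sketch only: Bitmap-based variant of the detection loop above.
            for (int frameIndex = 0; frameIndex < video.FrameCount; frameIndex++)
            {
                using (Bitmap bmp = video.ReadVideoFrame(frameIndex))
                {
                    // Detect faces and draw the detections directly on the Bitmap
                    objectMarker.Rectangles = detector.ProcessFrame(bmp);
                    objectMarker.ApplyInPlace(bmp);

                    // Append the marked frame to the output video
                    writer.WriteVideoFrame(bmp);
                }
            }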
Code example #3
        public void ProcessFrame()
        {
            string basePath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "matching-tracker");

            #region doc_track
            // Let's test the tracker using a sample video from
            // the collection of test videos in the framework:
            TestVideos ds       = new TestVideos(basePath);
            string     fileName = ds["walking.mp4"];

            // Now, let's open the video using FFMPEG:
            var video = new VideoFileReader();
            video.Open(fileName);

            // And then check the contents of one of the frames:
            Bitmap frame = video.ReadVideoFrame(frameIndex: 150);
            frame.Save(Path.Combine(basePath, "walking_frame.png"));

            // Let's register a template for the bike rider in the gray shirt
            Rectangle roi = new Rectangle(x: 70, y: 105, width: 28, height: 54);

            // Initialize the template-matching tracker:
            var tracker = new MatchingTracker()
            {
                SearchWindow          = roi,
                Threshold             = 0.0, // never reset the tracker in case it gets lost
                RegistrationThreshold = 0.95 // re-register the template if we are 95%
                                             // confident that the tracked object is indeed the object we want to follow
            };

            // Creating bitmaps and locking them is an expensive
            // operation. Instead, let's allocate once and reuse
            BitmapData     bitmapData     = frame.LockBits(ImageLockMode.ReadWrite);
            UnmanagedImage unmanagedImage = new UnmanagedImage(bitmapData);

            // We will create two color markers: one to show the location of the
            // tracked object (red) and another one to show the regions of the image
            // that the tracker is looking at (white).
            RectanglesMarker objectMarker = new RectanglesMarker(Color.Red);
            RectanglesMarker windowMarker = new RectanglesMarker(Color.White);

            // Now, for each frame of the video
            for (int frameIndex = 0; frameIndex < video.FrameCount; frameIndex++)
            {
                // Read the current frame into the bitmap data
                video.ReadVideoFrame(frameIndex, bitmapData);

                if (frameIndex > 150) // wait until the bike rider enters the scene
                {
                    // Feed the frame to the tracker
                    tracker.ProcessFrame(unmanagedImage);

                    // Mark the location of the tracked object in red
                    objectMarker.SingleRectangle = tracker.TrackingObject.Rectangle;
                    objectMarker.ApplyInPlace(unmanagedImage); // overwrite the frame

                    // Also mark the region the tracker is currently searching in white
                    windowMarker.SingleRectangle = tracker.SearchWindow;
                    windowMarker.ApplyInPlace(unmanagedImage); // overwrite the frame
                }

                // Save it to disk
                frame.Save(Path.Combine(basePath, "frame_{0}.png".Format(frameIndex)));
            }

            frame.UnlockBits(bitmapData);
            video.Close();
            #endregion
        }
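
As a follow-up, if you also want to save just the region being followed (for example, to inspect what the template matcher is locked onto), one option is to crop the tracked rectangle out of each frame. The sketch below assumes the Crop filter from Accord.Imaging.Filters and would sit inside the if (frameIndex > 150) block above, after the tracker has processed the frame.

                    // Sketch only: cut out the currently tracked region and save it.
                    // (Assumes Accord.Imaging.Filters.Crop is available.)
                    if (!tracker.TrackingObject.Rectangle.IsEmpty)
                    {
                        var crop = new Crop(tracker.TrackingObject.Rectangle);
                        using (UnmanagedImage patch = crop.Apply(unmanagedImage))
                        using (Bitmap patchBitmap = patch.ToManagedImage())
                        {
                            patchBitmap.Save(Path.Combine(basePath, "patch_{0}.png".Format(frameIndex)));
                        }
                    }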