Code example #1
File: ImageViewer.cs  Project: hitswa/winforms
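This Windows Forms handler shows an OpenFileDialog filtered to the supported image formats, loads the selected file into a Bitmap (reporting unrecognized formats in a message box), and opens it in a new ImageWindow MDI child.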
	private void OpenClicked(Object sender, EventArgs e)
	{
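		// Ask the user to pick an image file, filtering on the formats this viewer supports.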
		OpenFileDialog dialog = new OpenFileDialog();
		dialog.Filter =
			"All image files (*.bmp, *.jpg, *.png, *.gif, *.ico, *.cur)" +
			"|*.bmp;*.jpg;*.png;*.gif;*.ico;*.cur" +
			"|BMP files (*.bmp)|*.bmp" +
			"|JPEG files (*.jpg)|*.jpg" +
			"|PNG files (*.png)|*.png" +
			"|GIF files (*.gif)|*.gif" +
			"|Icon files (*.ico)|*.ico" +
			"|Cursor files (*.cur)|*.cur" +
			"|All files (*.*)|*.*";
		if(dialog.ShowDialog(this) == DialogResult.OK)
		{
			Bitmap image;
			try
			{
				image = new Bitmap(dialog.FileName);
			}
			catch(Exception)
			{
				MessageBox.Show
					(String.Format("Unknown image format for \"{0}\"",
								   dialog.FileName),
					 "Error", MessageBoxButtons.OK, MessageBoxIcon.Hand);
				image = null;
			}
			if(image != null)
			{
				ImageWindow window = new ImageWindow
					(dialog.FileName, image);
				window.MdiParent = this;
				window.Visible = true;
			}
		}
	}
Code example #2
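RunViewer opens an attachment in the matching viewer: PDF attachments are saved to a temporary file and shown in a ReaderWindow2, graphic attachments are loaded into an ImageWindow (if one is not already open), and any other (office) file is written to disk and opened with its default application.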
        public static object RunViewer(Attachment a, bool localRequest)
        {
            if (a == null)
            {
                return(null);
            }

            if (a.Format == (int)AttachmentFormat.Pdf && a.MediaData != null)
            {
                string pdfPathName = Utils.RandomFilePath(".pdf");
                try
                {
                    using (var fs = new FileStream(pdfPathName, FileMode.Create))
                    {
                        fs.Write(a.MediaData.Data, 0, a.MediaData.Data.Length);
                    }
                    //Process.Start(pdfPathName);
                    Utils.ReportMediaOpened(StEvent.PdfOpened, a);
                    var pdfReader = ReaderWindow2.Instance(pdfPathName, a.Id, a.ArgPoint != null ? (int?)a.ArgPoint.Topic.Id : null, localRequest);
                    pdfReader.Show();
                    return(pdfReader);
                }
                catch
                {
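                    // Intentionally ignored: if the PDF cannot be written or opened, fall through and return null.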
                }
            }
            else if (MiniAttachmentManager.IsGraphicFormat(a))
            {
                if (a.Format == (int)AttachmentFormat.PngScreenshot)
                {
                    Utils.ReportMediaOpened(StEvent.ScreenshotOpened, a);
                }
                else
                {
                    Utils.ReportMediaOpened(StEvent.ImageOpened, a);
                }

                if (a.MediaData.Data != null)
                {
                    if (!ExplanationModeMediator.Inst.ImageViewerOpen)
                    {
                        var wnd = ImageWindow.Instance(a.Id, a.ArgPoint != null ? a.ArgPoint.Topic.Id : -1, localRequest);
                        wnd.img.Source = LoadImageFromBlob(a.MediaData.Data);
                        wnd.Show();
                        wnd.Activate();
                        return(wnd);
                    }
                }
            }
            else
            {
                //office file
                var    ext      = Path.GetExtension(a.Link).ToLower();
                string pathName = Utils.RandomFilePath(ext);
                try
                {
                    using (var fs = new FileStream(pathName, FileMode.Create))
                    {
                        fs.Write(a.MediaData.Data, 0, a.MediaData.Data.Length);
                    }
                    Process.Start(pathName);
                }
                catch (Exception e)
                {
                    MessageDlg.Show(e.ToString(), "Error");
                }
            }

            return(null);
        }
Code example #3
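This DlibDotNet sample loads an image dataset, configures a RandomCropper (crop size, object size limits, random flips and rotation, background crop fraction), generates 1000 random crops, and steps through them in an ImageWindow with the mapped object boxes drawn as overlays.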
        private static void Main(string[] args)
        {
            try
            {
                if (args.Length == 0)
                {
                    Console.WriteLine("Give an image dataset XML file to run this program.");
                    Console.WriteLine("For example, if you are running from the examples folder then run this program by typing");
                    Console.WriteLine("   ./RandomCropper faces/training.xml");
                    return;
                }

                // First lets load a dataset
                IEnumerable <Matrix <RgbPixel> >      images;
                IEnumerable <IEnumerable <MModRect> > boxes;
                Dlib.LoadImageDataset(args[0], out images, out boxes);

                // Here we make our random_cropper.  It has a number of options.
                var cropper = new DlibDotNet.ImageTransforms.RandomCropper();
                // We can tell it how big we want the cropped images to be.
                cropper.ChipDims = new ChipDims(400, 400);
                // Also, when doing cropping, it will map the object annotations from the
                // dataset to the cropped image as well as perform random scale jittering.
                // You can tell it how much scale jittering you would like by saying "please
                // make the objects in the crops have a min and max size of such and such".
                // You do that by calling these two functions.  Here we are saying we want the
                // objects in our crops to be no more than 0.8*400 pixels in height and width.
                cropper.MaxObjectSize = 0.8;
                // And also that they shouldn't be too small. Specifically, each object's smallest
                // dimension (i.e. height or width) should be at least 60 pixels and at least one of
                // the dimensions must be at least 80 pixels.  So the smallest objects the cropper will
                // output will be either 80x60 or 60x80.
                cropper.MinObjectLengthLongDim  = 80;
                cropper.MinObjectLengthShortDim = 60;
                // The cropper can also randomly mirror and rotate crops, which we ask it to
                // perform as well.
                cropper.RandomlyFlip       = true;
                cropper.MaxRotationDegrees = 50;
                // This fraction of crops are from random parts of images, rather than being centered
                // on some object.
                cropper.BackgroundCropsFraction = 0.2;

                // Now ask the cropper to generate a bunch of crops.  The output is stored in
                // crops and crop_boxes.
                IEnumerable <Matrix <RgbPixel> >      crops;
                IEnumerable <IEnumerable <MModRect> > cropBoxes;
                // Make 1000 crops.
                cropper.Operator(1000, images, boxes, out crops, out cropBoxes);

                // Finally, lets look at the results
                var cropList      = crops?.ToArray() ?? new Matrix <RgbPixel> [0];
                var cropBoxesList = cropBoxes?.ToArray() ?? new IEnumerable <MModRect> [0];
                using (var win = new ImageWindow())
                    for (var i = 0; i < cropList.Count(); ++i)
                    {
                        win.ClearOverlay();
                        win.SetImage(cropList[i]);
                        foreach (var b in cropBoxesList[i])
                        {
                            // Note that mmod_rect has an ignore field.  If an object was labeled
                            // ignore in boxes then it will still be labeled as ignore in
                            // crop_boxes.  Moreover, objects that are not well contained within
                            // the crop are also set to ignore.
                            var rect = b.Rect;
                            if (b.Ignore)
                            {
                                win.AddOverlay(rect, new RgbPixel {
                                    Red = 255, Blue = 255
                                });  // draw ignored boxes in magenta
                            }
                            else
                            {
                                win.AddOverlay(rect, new RgbPixel {
                                    Red = 255
                                });                                                 // draw other boxes as red
                            }
                        }

                        Console.WriteLine("Hit enter to view the next random crop.");
                        Console.ReadKey();
                    }
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
Code example #4
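This camera sample (the PLCamera and IGrabResult types suggest the Basler pylon .NET API) maximizes the image AOI, configures the auto function ROIs so that only ROI 2 feeds the white balance statistics, runs 'BalanceWhiteAuto = Once' while displaying grabbed frames with ImageWindow.DisplayImage, and prints the RGB balance ratios before and after.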
        static void AutoWhiteBalance(Camera camera)
        {
            // Check whether the Balance White Auto feature is available.
            if (!camera.Parameters[PLCamera.BalanceWhiteAuto].IsWritable)
            {
                Console.WriteLine("The Camera does not support balance white auto.");
                return;
            }

            // Maximize the grabbed area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetValue(camera.Parameters[PLCamera.OffsetX].GetMinimum());
            camera.Parameters[PLCamera.OffsetY].TrySetValue(camera.Parameters[PLCamera.OffsetY].GetMinimum());
            camera.Parameters[PLCamera.Width].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
            camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

            // Set the Auto Function ROI for white balance statistics.
            // We want to use ROI2 for gathering the statistics.
            if (camera.Parameters[regionSelector].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue(regionSelectorValue1);
                camera.Parameters[autoFunctionAOIROIUseWhiteBalance].SetValue(false); // ROI 1 is not used for white balance control
                camera.Parameters[regionSelector].SetValue(regionSelectorValue2);
                camera.Parameters[autoFunctionAOIROIUseWhiteBalance].SetValue(true);  // ROI 2 is used for white balance control
            }
            camera.Parameters[regionSelector].SetValue(regionSelectorValue2);
            camera.Parameters[regionSelectorOffsetX].SetValue(camera.Parameters[PLCamera.OffsetX].GetMinimum());
            camera.Parameters[regionSelectorOffsetY].SetValue(camera.Parameters[PLCamera.OffsetY].GetMinimum());
            camera.Parameters[regionSelectorWidth].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
            camera.Parameters[regionSelectorHeight].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

            Console.WriteLine("Trying 'BalanceWhiteAuto = Once'.");
            Console.WriteLine("Initial balance ratio:");
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue(PLCamera.BalanceRatioSelector.Red);
            Console.Write("R = {0}  ", camera.Parameters[balanceRatio].GetValue());
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue(PLCamera.BalanceRatioSelector.Green);
            Console.Write("G = {0}  ", camera.Parameters[balanceRatio].GetValue());
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue(PLCamera.BalanceRatioSelector.Blue);
            Console.Write("B = {0}  ", camera.Parameters[balanceRatio].GetValue());
            camera.Parameters[PLCamera.BalanceWhiteAuto].SetValue(PLCamera.BalanceWhiteAuto.Once);

            // When the "once" mode of operation is selected,
            // the parameter values are automatically adjusted until the related image property
            // reaches the target value. After the automatic parameter value adjustment is complete, the auto
            // function will automatically be set to "off" and the new parameter value will be applied to the
            // subsequently grabbed images.
            int n = 0;

            while (camera.Parameters[PLCamera.BalanceWhiteAuto].GetValue() != PLCamera.BalanceWhiteAuto.Off)
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage(1, result);
                    }
                }
                n++;

                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep(100);

                //Make sure the loop is exited.
                if (n > 100)
                {
                    throw new TimeoutException("The adjustment of auto white balance did not finish.");
                }
            }
            Console.WriteLine("BalanceWhiteAuto went back to 'Off' after {0} Frames", n);
            Console.WriteLine("Final balance ratio: ");
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue(PLCamera.BalanceRatioSelector.Red);
            Console.Write("R = {0}  ", camera.Parameters[balanceRatio].GetValue());
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue(PLCamera.BalanceRatioSelector.Green);
            Console.Write("G = {0}  ", camera.Parameters[balanceRatio].GetValue());
            camera.Parameters[PLCamera.BalanceRatioSelector].SetValue(PLCamera.BalanceRatioSelector.Blue);
            Console.Write("B = {0}  ", camera.Parameters[balanceRatio].GetValue());
        }
Code example #5
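This DlibDotNet sample loads an image, extracts FHOG features from it, displays both the image and a rendering of the HOG features in ImageWindows, and maps double-clicked pixels back to their corresponding HOG cells.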
        private static void Main(string[] args)
        {
            try
            {
                // Make sure the user entered an argument to this program.  It should be the
                // filename for an image.
                if (args.Length != 1)
                {
                    Console.WriteLine("error, you have to enter a BMP file as an argument to this program.");
                    return;
                }

                // Here we declare an image object that can store color rgb_pixels.

                // Now load the image file into our image.  If something is wrong then
                // load_image() will throw an exception.  Also, if you linked with libpng
                // and libjpeg then load_image() can load PNG and JPEG files in addition
                // to BMP files.
                using (var img = Dlib.LoadImage <RgbPixel>(args[0]))
                {
                    // Now convert the image into a FHOG feature image.  The output, hog, is a 2D array
                    // of 31 dimensional vectors.
                    using (var hog = Dlib.ExtracFHogFeatures <float>(img))
                    {
                        Console.WriteLine($"hog image has {hog.Rows} rows and {hog.Columns} columns.");

                        // Let's see what the image and FHOG features look like.
                        using (var win = new ImageWindow(img))
                            using (var drawhog = Dlib.DrawHog(hog))
                                using (var winhog = new ImageWindow(drawhog))
                                {
                                    // Another thing you might want to do is map between the pixels in img and the
                                    // cells in the hog image.  dlib provides the image_to_fhog() and fhog_to_image()
                                    // routines for this.  Their use is demonstrated in the following loop which
                                    // responds to the user clicking on pixels in the image img.
                                    Point p; // A 2D point, used to represent pixel locations.
                                    while (win.GetNextDoubleClick(out p))
                                    {
                                        using (var hp = Dlib.ImgaeToFHog(p))
                                        {
                                            Console.WriteLine($"The point {p} in the input image corresponds to {hp} in hog space.");
                                            var row    = hog[hp.Y];
                                            var column = row[hp.X];
                                            var t      = Dlib.Trans(column);
                                            Console.WriteLine($"FHOG features at this point: {t}");
                                        }
                                    }

                                    // Finally, sometimes you want to get a planar representation of the HOG features
                                    // rather than the explicit vector (i.e. interlaced) representation used above.
                                    var planar_hog = Dlib.ExtracFHogFeaturesArray <float>(img);
                                    // Now we have an array of 31 float valued image planes, each representing one of
                                    // the dimensions of the HOG feature vector.
                                }
                    }
                }
            }
            catch (Exception e)
            {
                Console.WriteLine($"exception thrown: {e}");
            }
        }
Code example #6
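This pylon sample configures the camera's sequencer feature (with separate code paths for pre-SFNC 2.0 and SFNC 2.0 cameras) to cycle through three sequence sets of increasing image height, grabs software-triggered images, and displays each result with ImageWindow.DisplayImage.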
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object selecting the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Change default configuration to enable software triggering.
                    camera.CameraOpened += Configuration.SoftwareTrigger;

                    // Open the camera.
                    camera.Open();

                    // Register image grabbed event to print frame info
                    camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;

                    // DeviceVendorName, DeviceModelName, and DeviceFirmwareVersion are string parameters.
                    Console.WriteLine("Camera Device Information");
                    Console.WriteLine("=========================");
                    Console.WriteLine("Vendor           : {0}", camera.Parameters[PLCamera.DeviceVendorName].GetValue());
                    Console.WriteLine("Model            : {0}", camera.Parameters[PLCamera.DeviceModelName].GetValue());
                    Console.WriteLine("Firmware version : {0}", camera.Parameters[PLCamera.DeviceFirmwareVersion].GetValue());
                    Console.WriteLine("");
                    Console.WriteLine("Camera Device Settings");
                    Console.WriteLine("======================");

                    // Can the camera device be queried whether it is ready to accept the next frame trigger?
                    if (camera.CanWaitForFrameTriggerReady)
                    {
                        // bool for testing if sequencer is available or not
                        bool sequencerAvailable = false;

                        if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                        {
                            if (camera.Parameters[PLCamera.SequenceEnable].IsWritable)
                            {
                                sequencerAvailable = true; // The sequencer feature is available.

                                // Disable the sequencer before changing parameters.
                                // The parameters under control of the sequencer are locked
                                // when the sequencer is enabled. For a list of parameters
                                // controlled by the sequencer, see the camera User's Manual.
                                camera.Parameters[PLCamera.SequenceEnable].SetValue(false);

                                // Turn configuration mode on
                                if (camera.Parameters[PLCamera.SequenceConfigurationMode].IsWritable)
                                {
                                    camera.Parameters[PLCamera.SequenceConfigurationMode].SetValue(PLCamera.SequenceConfigurationMode.On);
                                }

                                // Maximize the image area of interest (Image AOI).
                                camera.Parameters[PLCamera.OffsetX].TrySetValue(camera.Parameters[PLCamera.OffsetX].GetMinimum());
                                camera.Parameters[PLCamera.OffsetY].TrySetValue(camera.Parameters[PLCamera.OffsetY].GetMinimum());
                                camera.Parameters[PLCamera.Width].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

                                // Set the pixel data format.
                                camera.Parameters[PLCamera.PixelFormat].SetValue(PLCamera.PixelFormat.Mono8);

                                // Set up sequence sets.

                                // Configure how the sequence will advance.
                                // 'Auto' refers to the auto sequence advance mode.
                                // The advance from one sequence set to the next will occur automatically with each image acquired.
                                // After the end of the sequence set cycle was reached a new sequence set cycle will start.
                                camera.Parameters[PLCamera.SequenceAdvanceMode].SetValue(PLCamera.SequenceAdvanceMode.Auto);

                                // Our sequence sets relate to three steps (0..2).
                                // In each step we will increase the height of the Image AOI by one increment.
                                camera.Parameters[PLCamera.SequenceSetTotalNumber].SetValue(3);

                                long increments = (camera.Parameters[PLCamera.Height].GetMaximum() - camera.Parameters[PLCamera.Height].GetMinimum()) / camera.Parameters[PLCamera.Height].GetIncrement();

                                // Set the parameters for step 0; quarter height image.
                                camera.Parameters[PLCamera.SequenceSetIndex].SetValue(0);
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 4) + camera.Parameters[PLCamera.Height].GetMinimum());
                                camera.Parameters[PLCamera.SequenceSetStore].Execute();

                                // Set the parameters for step 1; half height image.
                                camera.Parameters[PLCamera.SequenceSetIndex].SetValue(1);
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 2) + camera.Parameters[PLCamera.Height].GetMinimum());
                                camera.Parameters[PLCamera.SequenceSetStore].Execute();

                                // Set the parameters for step 2; full height image.
                                camera.Parameters[PLCamera.SequenceSetIndex].SetValue(2);
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetIncrement() * (increments) + camera.Parameters[PLCamera.Height].GetMinimum());
                                camera.Parameters[PLCamera.SequenceSetStore].Execute();

                                // Finish configuration
                                if (camera.Parameters[PLCamera.SequenceConfigurationMode].IsWritable)
                                {
                                    camera.Parameters[PLCamera.SequenceConfigurationMode].SetValue(PLCamera.SequenceConfigurationMode.Off);
                                }

                                // Enable the sequencer feature.
                                // From here on you cannot change the sequencer settings anymore.
                                camera.Parameters[PLCamera.SequenceEnable].SetValue(true);

                                // Start the grabbing of countOfImagesToGrab images.
                                camera.StreamGrabber.Start(countOfImagesToGrab);
                            }
                            else
                            {
                                sequencerAvailable = false; // Sequencer not available
                            }
                        }
                        else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                        {
                            if (camera.Parameters[PLCamera.SequencerMode].IsWritable)
                            {
                                sequencerAvailable = true;

                                // Disable the sequencer before changing parameters.
                                // The parameters under control of the sequencer are locked
                                // when the sequencer is enabled. For a list of parameters
                                // controlled by the sequencer, see the camera User's Manual.
                                camera.Parameters[PLCamera.SequencerMode].SetValue(PLCamera.SequencerMode.Off);

                                // Maximize the image area of interest (Image AOI).
                                camera.Parameters[PLCamera.OffsetX].TrySetValue(camera.Parameters[PLCamera.OffsetX].GetMinimum());
                                camera.Parameters[PLCamera.OffsetY].TrySetValue(camera.Parameters[PLCamera.OffsetY].GetMinimum());
                                camera.Parameters[PLCamera.Width].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

                                // Set the pixel data format.
                                // This parameter may be locked when the sequencer is enabled.
                                camera.Parameters[PLCamera.PixelFormat].SetValue(PLCamera.PixelFormat.Mono8);

                                // Set up sequence sets and turn sequencer configuration mode on.
                                camera.Parameters[PLCamera.SequencerConfigurationMode].SetValue(PLCamera.SequencerConfigurationMode.On);

                                // Configure how the sequence will advance.

                                // The sequence sets relate to three steps (0..2).
                                // In each step, the height of the Image AOI is doubled.

                                long increments = (camera.Parameters[PLCamera.Height].GetMaximum() - camera.Parameters[PLCamera.Height].GetMinimum()) / camera.Parameters[PLCamera.Height].GetIncrement();

                                long initialSet = camera.Parameters[PLCamera.SequencerSetSelector].GetMinimum();
                                long incSet     = camera.Parameters[PLCamera.SequencerSetSelector].GetIncrement();
                                long curSet     = initialSet;

                                // Set the parameters for step 0; quarter height image.
                                camera.Parameters[PLCamera.SequencerSetSelector].SetValue(initialSet);
                                {
                                    // valid for all sets
                                    // reset on software signal 1;
                                    camera.Parameters[PLCamera.SequencerPathSelector].SetValue(0);
                                    camera.Parameters[PLCamera.SequencerSetNext].SetValue(initialSet);
                                    camera.Parameters[PLCamera.SequencerTriggerSource].SetValue(PLCamera.SequencerTriggerSource.SoftwareSignal1);
                                    // advance on Frame Start
                                    camera.Parameters[PLCamera.SequencerPathSelector].SetValue(1);
                                    camera.Parameters[PLCamera.SequencerTriggerSource].SetValue(PLCamera.SequencerTriggerSource.FrameStart);
                                }
                                camera.Parameters[PLCamera.SequencerSetNext].SetValue(curSet + incSet);

                                // Set the parameters for step 0; quarter height image.
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 4) + camera.Parameters[PLCamera.Height].GetMinimum());
                                camera.Parameters[PLCamera.SequencerSetSave].Execute();

                                // Set the parameters for step 1; half height image.
                                curSet += incSet;
                                camera.Parameters[PLCamera.SequencerSetSelector].SetValue(curSet);
                                // advance on Frame Start to next set
                                camera.Parameters[PLCamera.SequencerSetNext].SetValue(curSet + incSet);
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetIncrement() * (increments / 2) + camera.Parameters[PLCamera.Height].GetMinimum());
                                camera.Parameters[PLCamera.SequencerSetSave].Execute();

                                // Set the parameters for step 2; full height image.
                                curSet += incSet;
                                camera.Parameters[PLCamera.SequencerSetSelector].SetValue(curSet);
                                // Advance on Frame End back to the initial set; this terminates the sequence definition.
                                camera.Parameters[PLCamera.SequencerSetNext].SetValue(initialSet);
                                // Full height image.
                                camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetIncrement() * increments + camera.Parameters[PLCamera.Height].GetMinimum());
                                camera.Parameters[PLCamera.SequencerSetSave].Execute();

                                // Enable the sequencer feature.
                                // From here on you cannot change the sequencer settings anymore.
                                camera.Parameters[PLCamera.SequencerConfigurationMode].SetValue(PLCamera.SequencerConfigurationMode.Off);
                                camera.Parameters[PLCamera.SequencerMode].SetValue(PLCamera.SequencerMode.On);

                                // Start the grabbing of countOfImagesToGrab images.
                                camera.StreamGrabber.Start(countOfImagesToGrab);
                            }
                            else
                            {
                                sequencerAvailable = false; // Sequencer not available
                            }
                        }

                        if (sequencerAvailable)
                        {
                            IGrabResult result;
                            // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
                            // when countOfImagesToGrab images have been retrieved.
                            while (camera.StreamGrabber.IsGrabbing)
                            {
                                // Execute the software trigger. Wait up to 1000 ms for the camera to be ready for trigger.
                                if (camera.WaitForFrameTriggerReady(1000, TimeoutHandling.ThrowException))
                                {
                                    camera.ExecuteSoftwareTrigger();

                                    // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                                    result = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);

                                    using (result)
                                    {
                                        // Image grabbed successfully?
                                        if (result.GrabSucceeded)
                                        {
                                            // Display the grabbed image.
                                            ImageWindow.DisplayImage(1, result);
                                        }
                                        else
                                        {
                                            Console.WriteLine("Error code:{0} Error description:{1}", result.ErrorCode, result.ErrorDescription);
                                        }
                                    }
                                }

                                // Wait for user input.
                                Console.WriteLine("Press Enter to continue.");
                                while (camera.StreamGrabber.IsGrabbing && Console.ReadKey().Key != ConsoleKey.Enter)
                                {
                                    ;
                                }
                            }

                            // Disable the sequencer.
                            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
                            {
                                camera.Parameters[PLCamera.SequenceEnable].SetValue(false);
                            }
                            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
                            {
                                camera.Parameters[PLCamera.SequencerMode].SetValue(PLCamera.SequencerMode.Off);
                            }
                        }
                        else
                        {
                            Console.WriteLine("The sequencer feature is not available for this camera.");
                        }
                    }
                    else
                    {
                        Console.WriteLine("This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger.");
                    }

                    // Close the camera.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling.
                Console.Error.WriteLine("Exception: {0}", e.Message);
                exitCode = 1;
            }

            // Comment the following two lines to disable waiting on exit.
            Console.Error.WriteLine("\nPress enter to exit.");
            Console.ReadLine();

            Environment.Exit(exitCode);
        }
Code example #7
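This sample grabs frames from a camera with OpenCvSharp, lets the user position a tracking rectangle from the console keyboard, then follows the selected region with DlibDotNet's CorrelationTracker, drawing the tracked box in an ImageWindow.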
        private static void Main(string[] args)
        {
            if (args.Length != 1)
            {
                Console.WriteLine("Call this program like this: ");
                Console.WriteLine("VideoTracking.exe <path of video_frames directory>");
                return;
            }

            var path  = args[0];
            var files = new DirectoryInfo(path).GetFiles("*.jpg").Select(info => info.FullName).ToList();



            files.Sort();

            if (files.Count == 0)
            {
                Console.WriteLine($"No images found in {path}");
                return;
            }
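            // Note: the image files listed above are not used below; frames are grabbed live from the camera instead.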


            // Capture images from the camera. Note: on Windows, VideoCaptureAPIs.DSHOW must be selected.
            var cap = new VideoCapture(0, VideoCaptureAPIs.DSHOW);

            // Alternatively, capture from a video file instead of the camera:
            //var cap = new VideoCapture("video.webm");

            // Check whether the capture device was opened.
            if (!cap.IsOpened())
            {
                Console.WriteLine("Unable to connect to camera");
                return;
            }

            Mat temp    = null;
            var tracker = new CorrelationTracker();

            int init = 0;

            // Create the display window.
            using (var win = new ImageWindow())
            {
                Console.WriteLine("对象追踪程序启动");
                Console.WriteLine("选择命令行为当前窗口,通过按键选择需要追踪的区域Width: [A,Z] Height:[S,X] X:[right,left] Y:[up,down] ,点击Enter开始追踪");
                Console.WriteLine("注意:切换命令行窗口输入法为英文输入状态");
                // Select the object to track.
                while (!win.IsClosed())
                {
                    // Grab one frame.
                    temp = cap.RetrieveMat();// new Mat();


                    if (temp == null)
                    {
                        Console.WriteLine("图像获取错误!");
                        return;
                    }

                    var array = new byte[temp.Width * temp.Height * temp.ElemSize()];
                    Marshal.Copy(temp.Data, array, 0, array.Length);
                    using (var cimg = Dlib.LoadImageData <BgrPixel>(array, (uint)temp.Height, (uint)temp.Width, (uint)(temp.Width * temp.ElemSize())))
                    {
                        init++;
                        if (init > 1)
                        {
                            var KK = Console.ReadKey();
                            if (KK.Key == ConsoleKey.Enter)
                            {
                                Console.WriteLine("开始追踪目标!");

                                // Define the tracking rectangle.
                                var rect2 = DRectangle.CenteredRect(a_X, a_Y, a_W, a_H);
                                // Start tracking.
                                tracker.StartTrack(cimg, rect2);
                                win.SetImage(cimg);
                                win.ClearOverlay();
                                win.AddOverlay(rect2);
                                break;
                            }

                            // Adjust the tracking region.
                            if (KK.Key == ConsoleKey.RightArrow || KK.Key == ConsoleKey.LeftArrow || KK.Key == ConsoleKey.UpArrow || KK.Key == ConsoleKey.DownArrow || KK.Key == ConsoleKey.A || KK.Key == ConsoleKey.Z || KK.Key == ConsoleKey.S || KK.Key == ConsoleKey.X)
                            {
                                if (KK.Key == ConsoleKey.RightArrow)
                                {
                                    a_X++;
                                    if (a_X > cimg.Rect.Width - a_W)
                                    {
                                        a_X = cimg.Rect.Width - a_W;
                                    }
                                }
                                if (KK.Key == ConsoleKey.LeftArrow)
                                {
                                    a_X--;
                                    if (a_X < 0)
                                    {
                                        a_X = 0;
                                    }
                                }

                                if (KK.Key == ConsoleKey.UpArrow)
                                {
                                    a_Y--;
                                    if (a_Y < 0)
                                    {
                                        a_Y = 0;
                                    }
                                }
                                if (KK.Key == ConsoleKey.DownArrow)
                                {
                                    a_Y++;
                                    if (a_Y > cimg.Rect.Height - a_H)
                                    {
                                        a_Y = cimg.Rect.Height - a_H;
                                    }
                                }

                                if (KK.Key == ConsoleKey.A)
                                {
                                    a_W++;
                                    if (a_W >= cimg.Rect.Width - a_X)
                                    {
                                        a_W = cimg.Rect.Width - a_X;
                                    }
                                }
                                if (KK.Key == ConsoleKey.Z)
                                {
                                    a_W--;
                                    if (a_W < 10)
                                    {
                                        a_W = 10;
                                    }
                                }
                                if (KK.Key == ConsoleKey.S)
                                {
                                    a_H++;
                                    if (a_H > cimg.Rect.Height - a_Y)
                                    {
                                        a_H = cimg.Rect.Height - a_Y;
                                    }
                                }
                                if (KK.Key == ConsoleKey.X)
                                {
                                    a_H--;
                                    if (a_H < 10)
                                    {
                                        a_H = 10;
                                    }
                                }
                            }
                        }

                        var rect = DRectangle.CenteredRect(a_X, a_Y, a_W, a_H);

                        Console.WriteLine("Set RECT:" + a_X + " " + a_Y + " " + a_W + " " + a_H);

                        // Display the image.
                        win.SetImage(cimg);
                        win.ClearOverlay();
                        // Display the rectangle.
                        win.AddOverlay(rect);
                    }
                }

                // Track the selected object.
                while (!win.IsClosed())
                {
                    // Grab one frame.
                    temp = cap.RetrieveMat();// new Mat();


                    if (temp == null)
                    {
                        Console.WriteLine("图像获取错误!");
                        return;
                    }

                    var array = new byte[temp.Width * temp.Height * temp.ElemSize()];
                    Marshal.Copy(temp.Data, array, 0, array.Length);
                    using (var cimg = Dlib.LoadImageData <BgrPixel>(array, (uint)temp.Height, (uint)temp.Width, (uint)(temp.Width * temp.ElemSize())))
                    {
                        // Update the tracker with the new frame.
                        tracker.Update(cimg);

                        win.SetImage(cimg);
                        win.ClearOverlay();

                        // Get the tracked target position.
                        DRectangle rect = tracker.GetPosition();
                        win.AddOverlay(rect);


                        Console.WriteLine("OBJ RECT:" + (int)rect.Left + " " + (int)rect.Top + " " + (int)rect.Width + " " + (int)rect.Height);

                        System.Threading.Thread.Sleep(100);
                    }
                }
            }



            Console.WriteLine("任意键退出");
            Console.ReadKey();
        }
Code example #8
File: Program.cs  Project: zhuxb711/DlibDotNet
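This port of dlib's fhog_object_detector_ex trains a HOG-based face detector on the examples/faces dataset, evaluates it on the training and testing images, visualizes the learned detector and its detections in ImageWindows, and demonstrates serialization plus several optional trainer features.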
        private static void Main(string[] args)
        {
            try
            {
                // In this example we are going to train a face detector based on the
                // small faces dataset in the examples/faces directory.  So the first
                // thing we do is load that dataset.  This means you need to supply the
                // path to this faces folder as a command line argument so we will know
                // where it is.
                if (args.Length != 1)
                {
                    Console.WriteLine("Give the path to the examples/faces directory as the argument to this");
                    Console.WriteLine("program.  For example, if you are in the examples folder then execute ");
                    Console.WriteLine("this program by running: ");
                    Console.WriteLine("   ./fhog_object_detector_ex faces");
                    Console.WriteLine();
                    return;
                }

                var facesDirectory = args[0];
                // The faces directory contains a training dataset and a separate
                // testing dataset.  The training data consists of 4 images, each
                // annotated with rectangles that bound each human face.  The idea is
                // to use this training data to learn to identify human faces in new
                // images.
                //
                // Once you have trained an object detector it is always important to
                // test it on data it wasn't trained on.  Therefore, we will also load
                // a separate testing set of 5 images.  Once we have a face detector
                // created from the training data we will see how well it works by
                // running it on the testing images.
                //
                // So here we create the variables that will hold our dataset.
                // images_train will hold the 4 training images and face_boxes_train
                // holds the locations of the faces in the training images.  So for
                // example, the image images_train[0] has the faces given by the
                // rectangles in face_boxes_train[0].
                IList <Matrix <byte> >     tmpImagesTrain;
                IList <Matrix <byte> >     tmpImagesTest;
                IList <IList <Rectangle> > tmpFaceBoxesTrain;
                IList <IList <Rectangle> > tmpFaceBoxesTest;

                // Now we load the data.  These XML files list the images in each
                // dataset and also contain the positions of the face boxes.  Obviously
                // you can use any kind of input format you like so long as you store
                // the data into images_train and face_boxes_train.  But for convenience
                // dlib comes with tools for creating and loading XML image dataset
                // files.  Here you see how to load the data.  To create the XML files
                // you can use the imglab tool which can be found in the tools/imglab
                // folder.  It is a simple graphical tool for labeling objects in images
                // with boxes.  To see how to use it read the tools/imglab/README.txt
                // file.
                Dlib.LoadImageDataset(Path.Combine(facesDirectory, "training.xml"), out tmpImagesTrain, out tmpFaceBoxesTrain);
                Dlib.LoadImageDataset(Path.Combine(facesDirectory, "testing.xml"), out tmpImagesTest, out tmpFaceBoxesTest);

                // Now we do a little bit of pre-processing.  This is optional but for
                // this training data it improves the results.  The first thing we do is
                // increase the size of the images by a factor of two.  We do this
                // because it will allow us to detect smaller faces than otherwise would
                // be practical (since the faces are all now twice as big).  Note that,
                // in addition to resizing the images, these functions also make the
                // appropriate adjustments to the face boxes so that they still fall on
                // top of the faces after the images are resized.
                var imageTrain     = new List <Matrix <byte> >(tmpImagesTrain);
                var faceBoxesTrain = new List <IList <Rectangle> >(tmpFaceBoxesTrain);
                Dlib.UpsampleImageDataset(2, imageTrain, faceBoxesTrain);
                var imageTest     = new List <Matrix <byte> >(tmpImagesTest);
                var faceBoxesTest = new List <IList <Rectangle> >(tmpFaceBoxesTest);
                Dlib.UpsampleImageDataset(2, imageTest, faceBoxesTest);

                // Since human faces are generally left-right symmetric we can increase
                // our training dataset by adding mirrored versions of each image back
                // into images_train.  So this next step doubles the size of our
                // training dataset.  Again, this is obviously optional but is useful in
                // many object detection tasks.
                Dlib.AddImageLeftRightFlips(imageTrain, faceBoxesTrain);
                Console.WriteLine($"num training images: {imageTrain.Count()}");
                Console.WriteLine($"num testing images:  {imageTest.Count()}");


                // Finally we get to the training code.  dlib contains a number of
                // object detectors.  This typedef tells it that you want to use the one
                // based on Felzenszwalb's version of the Histogram of Oriented
                // Gradients (commonly called HOG) detector.  The 6 means that you want
                // it to use an image pyramid that downsamples the image at a ratio of
                // 5/6.  Recall that HOG detectors work by creating an image pyramid and
                // then running the detector over each pyramid level in a sliding window
                // fashion.
                using (var scanner = new ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor>(6))
                {
                    // The sliding window detector will be 80 pixels wide and 80 pixels tall.
                    scanner.SetDetectionWindowSize(80, 80);

                    using (var trainer = new StructuralObjectDetectionTrainer <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> >(scanner))
                    {
                        // Set this to the number of processing cores on your machine.
                        trainer.SetNumThreads(4);
                        // The trainer is a kind of support vector machine and therefore has the usual SVM
                        // C parameter.  In general, a bigger C encourages it to fit the training data
                        // better but might lead to overfitting.  You must find the best C value
                        // empirically by checking how well the trained detector works on a test set of
                        // images you haven't trained on.  Don't just leave the value set at 1.  Try a few
                        // different C values and see what works best for your data.
                        trainer.SetC(1);
                        // We can tell the trainer to print its progress to the console if we want.
                        trainer.BeVerbose();
                        // The trainer will run until the "risk gap" is less than 0.01.  Smaller values
                        // make the trainer solve the SVM optimization problem more accurately but will
                        // take longer to train.  For most problems a value in the range of 0.1 to 0.01 is
                        // plenty accurate.  Also, when in verbose mode the risk gap is printed on each
                        // iteration so you can see how close it is to finishing the training.
                        trainer.SetEpsilon(0.01);


                        // Now we run the trainer.  For this example, it should take on the order of 10
                        // seconds to train.
                        var detector = trainer.Train(imageTrain, faceBoxesTrain);

                        // Now that we have a face detector we can test it.  The first statement tests it
                        // on the training data.  It will print the precision, recall, and then average precision.
                        using (var matrix = Dlib.TestObjectDetectionFunction(detector, imageTrain, faceBoxesTrain))
                            Console.WriteLine($"training results: {matrix}");
                        // However, to get an idea if it really worked without overfitting we need to run
                        // it on images it wasn't trained on.  The next line does this.  Happily, we see
                        // that the object detector works perfectly on the testing images.
                        using (var matrix = Dlib.TestObjectDetectionFunction(detector, imageTest, faceBoxesTest))
                            Console.WriteLine($"testing results: {matrix}");

                        // If you have read any papers that use HOG you have probably seen the nice looking
                        // "sticks" visualization of a learned HOG detector.  This next line creates a
                        // window with such a visualization of our detector.  It should look somewhat like
                        // a face.
                        using (var fhog = Dlib.DrawFHog(detector))
                            using (var hogwin = new ImageWindow(fhog, "Learned fHOG detector"))
                            {
                                // Now for the really fun part.  Let's display the testing images on the screen and
                                // show the output of the face detector overlaid on each image.  You will see that
                                // it finds all the faces without false alarming on any non-faces.
                                using (var win = new ImageWindow())
                                    for (var i = 0; i < imageTest.Count; ++i)
                                    {
                                        // Run the detector and get the face detections.
                                        var dets = detector.Operator(imageTest[i]);
                                        win.ClearOverlay();
                                        win.SetImage(imageTest[i]);
                                        win.AddOverlay(dets, new RgbPixel(255, 0, 0));
                                        Console.WriteLine("Hit enter to process the next image...");
                                        Console.ReadKey();
                                        Console.WriteLine("");
                                    }
                            }


                        // Like everything in dlib, you can save your detector to disk using the
                        // serialize() function.
                        detector.Serialize("face_detector.svm");

                        // Then you can recall it using the deserialize() function.
                        using (var tmp = new ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor>(6))
                            using (var detector2 = new ObjectDetector <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> >(tmp))
                                detector2.Deserialize("face_detector.svm");



                        // Now let's talk about some optional features of this training tool as well as some
                        // important points you should understand.
                        //
                        // The first thing that should be pointed out is that, since this is a sliding
                        // window classifier, it can't output an arbitrary rectangle as a detection.  In
                        // this example our sliding window is 80 by 80 pixels and is run over an image
                        // pyramid.  This means that it can only output detections that are at least 80 by
                        // 80 pixels in size (recall that this is why we upsampled the images after loading
                        // them).  It also means that the aspect ratio of the outputs is 1.  So if,
                        // for example, you had a box in your training data that was 200 pixels by 10
                        // pixels then it would simply be impossible for the detector to learn to detect
                        // it.  Similarly, if you had a really small box it would be unable to learn to
                        // detect it.
                        //
                        // So the training code performs an input validation check on the training data and
                        // will throw an exception if it detects any boxes that are impossible to detect
                        // given your setting of scanning window size and image pyramid resolution.  You
                        // can use a statement like:
                        //   remove_unobtainable_rectangles(trainer, images_train, face_boxes_train)
                        // to automatically discard these impossible boxes from your training dataset
                        // before running the trainer.  This will avoid getting the "impossible box"
                        // exception.  However, I would recommend you be careful that you are not throwing
                        // away truth boxes you really care about.  The remove_unobtainable_rectangles()
                        // will return the set of removed rectangles so you can visually inspect them and
                        // make sure you are OK that they are being removed.
                        //
                        // Next, note that any location in the images not marked with a truth box is
                        // implicitly treated as a negative example.  This means that when creating
                        // training data it is critical that you label all the objects you want to detect.
                        // So for example, if you are making a face detector then you must mark all the
                        // faces in each image.  However, sometimes there are objects in images you are
                        // unsure about or simply don't care if the detector identifies or not.  For these
                        // objects you can pass in a set of "ignore boxes" as a third argument to the
                        // trainer.train() function.  The trainer will simply disregard any detections that
                        // happen to hit these boxes.
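                        // For illustration only (hypothetical names, not code from this sample): in
                        // dlib's C++ API the ignore boxes are simply that third argument, e.g.
                        //   detector = trainer.train(images_train, face_boxes_train, ignore_boxes_train);
                        // If the DlibDotNet binding you use exposes a matching Train() overload, the
                        // ignore boxes go in the same position; check your binding before relying on it.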
                        //
                        // Another useful thing you can do is evaluate multiple HOG detectors together. The
                        // benefit of this is increased testing speed since it avoids recomputing the HOG
                        // features for each run of the detector.  You do this by storing your detectors
                        // into a std::vector and then invoking evaluate_detectors() like so:
                        var myDetectors = new List <ObjectDetector <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> > >();
                        myDetectors.Add(detector);
                        var dect2 = Dlib.EvaluateDetectors(myDetectors, imageTrain[0]);
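                        // dect2 now holds the detections produced by every detector in myDetectors
                        // for that one image, computed from a single shared set of HOG features.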
                        //
                        //
                        // Finally, you can add a nuclear norm regularizer to the SVM trainer.  Doing so has
                        // two benefits.  First, it can cause the learned HOG detector to be composed of
                        // separable filters and therefore makes it execute faster when detecting objects.
                        // It can also help with generalization since it tends to make the learned HOG
                        // filters smoother.  To enable this option you call the following function before
                        // you create the trainer object:
                        //    scanner.set_nuclear_norm_regularization_strength(1.0);
                        // The argument determines how important it is to have a small nuclear norm.  A
                        // bigger regularization strength means it is more important.  The smaller the
                        // nuclear norm the smoother and faster the learned HOG filters will be, but if the
                        // regularization strength value is too large then the SVM will not fit the data
                        // well.  This is analogous to giving a C value that is too small.
                        //
                        // You can see how many separable filters are inside your detector like so:
                        Console.WriteLine($"num filters: {Dlib.NumSeparableFilters(detector)}");
                        // You can also control how many filters there are by explicitly thresholding the
                        // singular values of the filters like this:
                        using (var newDetector = Dlib.ThresholdFilterSingularValues(detector, 0.1))
                        {
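                            // newDetector is the thresholded copy; a real application would use or
                            // serialize it here instead of discarding it immediately as this example does.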
                        }
                        // That removes filter components with singular values less than 0.1.  The bigger
                        // this number the fewer separable filters you will have and the faster the
                        // detector will run.  However, a large enough threshold will hurt detection
                        // accuracy.
                    }
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
コード例 #9
        internal static void Main()
        {
            // Time to wait for the user to disconnect the camera device.
            const int cTimeOutMs = 60000;

            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine("Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName]);

                    // Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    // For demonstration purposes, only add an event handler for connection loss.
                    camera.ConnectionLost += OnConnectionLost;

                    // Open the connection to the camera device.
                    camera.Open();

                    ///////////////// Don't single step beyond this line when using GigE cameras (see comments above) ///////////////////////////////
                    // Before testing the callbacks, we manually set the heartbeat timeout to a short value when using GigE cameras.
                    // For debug versions, the heartbeat timeout has been set to 5 minutes, so it would take up to 5 minutes
                    // until device removal is detected.
                    camera.Parameters[PLTransportLayer.HeartbeatTimeout].TrySetValue(1000, IntegerValueCorrection.Nearest); // 1000 ms timeout

                    // Start the grabbing.
                    camera.StreamGrabber.Start();

                    // Start the timeout timer.
                    Console.WriteLine("Please disconnect the device. (Timeout {0}s)", cTimeOutMs / 1000.0);
                    Stopwatch stopWatch = new Stopwatch();
                    stopWatch.Start();

                    // Grab and display images until timeout.
                    while (camera.StreamGrabber.IsGrabbing && stopWatch.ElapsedMilliseconds < cTimeOutMs)
                    {
                        try
                        {
                            // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                            IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                            using (grabResult)
                            {
                                // Image grabbed successfully?
                                if (grabResult.GrabSucceeded)
                                {
                                    // Display the grabbed image.
                                    ImageWindow.DisplayImage(0, grabResult);
                                }
                            }
                        }
                        catch (Exception)
                        {
                            // An exception occurred. Is it because the camera device has been physically removed?

                            // Known issue: Wait until the system safely detects a possible removal.
                            System.Threading.Thread.Sleep(1000);

                            if (!camera.IsConnected)
                            {
                                // Yes, the camera device has been physically removed.
                                Console.WriteLine("The camera device has been removed. Please reconnect. (Timeout {0}s)", cTimeOutMs / 1000.0);

                                // Close the camera object to close underlying resources used for the previous connection.
                                camera.Close();

                                // Try to re-establish a connection to the camera device until timeout.
                                // Reopening the camera triggers the Configuration.AcquireContinuous handler registered above.
                                // Therefore, the camera is parameterized correctly again.
                                camera.Open(cTimeOutMs, TimeoutHandling.ThrowException);

                                // Due to unplugging the camera, settings have changed, e.g. the heartbeat timeout value for GigE cameras.
                                // After the camera has been reconnected, all settings must be restored. This can be done in the CameraOpened
                                // event as shown for the Configuration.AcquireContinuous handler.
                                camera.Parameters[PLTransportLayer.HeartbeatTimeout].TrySetValue(1000, IntegerValueCorrection.Nearest);

                                // Restart grabbing.
                                camera.StreamGrabber.Start();

                                // Restart the timeout timer.
                                Console.WriteLine("Camera reconnected. You may disconnect the camera device again (Timeout {0}s)", cTimeOutMs / 1000.0);
                                stopWatch.Restart();
                            }
                            else
                            {
                                throw;
                            }
                        }
                    }
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine("Exception: {0}", e.Message);
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine("\nPress enter to exit.");
                Console.ReadLine();
            }

            Environment.Exit(exitCode);
        }
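
        // Note: the OnConnectionLost handler registered above is defined elsewhere in the
        // original sample and is not part of this excerpt.  A minimal sketch (assuming the
        // event follows the standard EventHandler<EventArgs> pattern) could look like this:
        private static void OnConnectionLost(object sender, EventArgs e)
        {
            // Just report the loss here; the grab loop above detects the removal and
            // performs the actual reconnect.
            Console.WriteLine("OnConnectionLost has been fired.");
        }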
コード例 #10
        private void PositionDropWindows(Point pt)
        {
            //
            // Find the before and after sections from the point given
            HeaderColumnSection before;
            HeaderColumnSection after;

            GetDropBoundsSections(pt, out before, out after);

            //
            // Calculate the x position of the insertion arrows
            int xPos;

            if (before != null && after != null)
            {
                xPos = before.HostBasedRectangle.Right + (after.HostBasedRectangle.Left - before.HostBasedRectangle.Right) / 2;
            }
            else if (before == null)
            {
                xPos = HostBasedRectangle.Left;
                if (after != null)
                {
                    xPos += (after.HostBasedRectangle.Left - HostBasedRectangle.Left) / 2;
                }
            }
            else
            {
                xPos = before.HostBasedRectangle.Right;
            }

            //
            // Calculate the y position of the insertion arrows
            int top;
            int bottom;

            if (after != null)
            {
                top    = after.Rectangle.Top;
                bottom = after.Rectangle.Bottom;
            }
            else if (before != null)
            {
                top    = before.Rectangle.Top;
                bottom = before.Rectangle.Bottom;
            }
            else
            {
                top    = Rectangle.Top;
                bottom = Rectangle.Bottom;
            }

            //
            // Now that we have all the info, actually position them.
            ImageWindow downArrowWindow = _insertionArrows.DownArrowWindow;
            ImageWindow upArrowWindow   = _insertionArrows.UpArrowWindow;
            Point       downArrowPoint  = new Point(xPos - downArrowWindow.Width / 2, top - downArrowWindow.Height);

            if (LayoutController != null)
            {
                if (xPos < 0)
                {
                    LayoutController.CurrentHorizontalScrollPosition = xPos + GetAbsoluteScrollCoordinates().X;
                }
            }

            downArrowWindow.Location = Host.PointToScreen(
                downArrowPoint
                );

            Point upArrowPoint = new Point(xPos - upArrowWindow.Width / 2, bottom);

            upArrowWindow.Location = Host.PointToScreen(upArrowPoint);

            //
            // Finally, make the arrow windows visible if they are not already.
            if (!upArrowWindow.Visible)
            {
                upArrowWindow.Visible = true;
            }
            if (!downArrowWindow.Visible)
            {
                downArrowWindow.Visible = true;
            }
        }
コード例 #11
ファイル: Program.cs プロジェクト: zhuxb711/DlibDotNet
        private static void Main(string[] args)
        {
            try
            {
                if (args.Length != 2)
                {
                    Console.WriteLine("Call this program like this:");
                    Console.WriteLine("./dnn_mmod_dog_hipsterizer mmod_dog_hipsterizer.dat faces/dogs.jpg");
                    Console.WriteLine("You can get the mmod_dog_hipsterizer.dat file from:");
                    Console.WriteLine("http://dlib.net/files/mmod_dog_hipsterizer.dat.bz2");
                    return;
                }

                // load the models as well as glasses and mustache.
                using (var deserialize = new ProxyDeserialize(args[0]))
                    using (var net = LossMmod.Deserialize(deserialize))
                        using (var sp = ShapePredictor.Deserialize(deserialize))
                            using (var glasses = Matrix <RgbAlphaPixel> .Deserialize(deserialize))
                                using (var mustache = Matrix <RgbAlphaPixel> .Deserialize(deserialize))
                                {
                                    Dlib.PyramidUp(glasses);
                                    Dlib.PyramidUp(mustache);

                                    using (var win1 = new ImageWindow(glasses))
                                        using (var win2 = new ImageWindow(mustache))
                                            using (var winWireframe = new ImageWindow())
                                                using (var winHipster = new ImageWindow())
                                                {
                                                    // Now process each image, find dogs, and hipsterize them by drawing glasses and a
                                                    // mustache on each dog :)
                                                    for (var i = 1; i < args.Length; ++i)
                                                    {
                                                        using (var img = Dlib.LoadImageAsMatrix <RgbPixel>(args[i]))
                                                        {
                                                            // Upsampling the image will allow us to find smaller dog faces but will use more
                                                            // computational resources.
                                                            //pyramid_up(img);
                                                            var dets = net.Operator(img).First();
                                                            winWireframe.ClearOverlay();
                                                            winWireframe.SetImage(img);

                                                            // We will also draw a wireframe on each dog's face so you can see where the
                                                            // shape_predictor is identifying face landmarks.
                                                            var lines = new List <ImageWindow.OverlayLine>();
                                                            foreach (var d in dets)
                                                            {
                                                                // get the landmarks for this dog's face
                                                                var shape = sp.Detect(img, d.Rect);

                                                                var color    = new RgbPixel(0, 255, 0);
                                                                var top      = shape.GetPart(0);
                                                                var leftEar  = shape.GetPart(1);
                                                                var leftEye  = shape.GetPart(2);
                                                                var nose     = shape.GetPart(3);
                                                                var rightEar = shape.GetPart(4);
                                                                var rightEye = shape.GetPart(5);

                                                                // The locations of the left and right ends of the mustache.
                                                                var leftMustache  = 1.3 * (leftEye - rightEye) / 2 + nose;
                                                                var rightMustache = 1.3 * (rightEye - leftEye) / 2 + nose;

                                                                // Draw the glasses onto the image.
                                                                var from = new[]
                                                                {
                                                                    2 * new Point(176, 36), 2 * new Point(59, 35)
                                                                };
                                                                var to = new[]
                                                                {
                                                                    leftEye, rightEye
                                                                };
                                                                using (var transform = Dlib.FindSimilarityTransform(from, to))
                                                                    for (uint r = 0, nr = (uint)glasses.Rows; r < nr; ++r)
                                                                    {
                                                                        for (uint c = 0, nc = (uint)glasses.Columns; c < nc; ++c)
                                                                        {
                                                                            var p = (Point)transform.Operator(new DPoint(c, r));
                                                                            if (Dlib.GetRect(img).Contains(p))
                                                                            {
                                                                                var rgb = img[p.Y, p.X];
                                                                                Dlib.AssignPixel(ref rgb, glasses[(int)r, (int)c]);
                                                                                img[p.Y, p.X] = rgb;
                                                                            }
                                                                        }
                                                                    }

                                                                // Draw the mustache onto the image right under the dog's nose.
                                                                var mustacheRect = Dlib.GetRect(mustache);
                                                                from = new[]
                                                                {
                                                                    mustacheRect.TopLeft, mustacheRect.TopRight
                                                                };
                                                                to = new[]
                                                                {
                                                                    rightMustache, leftMustache
                                                                };
                                                                using (var transform = Dlib.FindSimilarityTransform(from, to))
                                                                    for (uint r = 0, nr = (uint)mustache.Rows; r < nr; ++r)
                                                                    {
                                                                        for (uint c = 0, nc = (uint)mustache.Columns; c < nc; ++c)
                                                                        {
                                                                            var p = (Point)transform.Operator(new DPoint(c, r));
                                                                            if (Dlib.GetRect(img).Contains(p))
                                                                            {
                                                                                var rgb = img[p.Y, p.X];
                                                                                Dlib.AssignPixel(ref rgb, mustache[(int)r, (int)c]);
                                                                                img[p.Y, p.X] = rgb;
                                                                            }
                                                                        }
                                                                    }

                                                                // Record the lines needed for the face wire frame.
                                                                lines.Add(new ImageWindow.OverlayLine(leftEye, nose, color));
                                                                lines.Add(new ImageWindow.OverlayLine(nose, rightEye, color));
                                                                lines.Add(new ImageWindow.OverlayLine(rightEye, leftEye, color));
                                                                lines.Add(new ImageWindow.OverlayLine(rightEye, rightEar, color));
                                                                lines.Add(new ImageWindow.OverlayLine(rightEar, top, color));
                                                                lines.Add(new ImageWindow.OverlayLine(top, leftEar, color));
                                                                lines.Add(new ImageWindow.OverlayLine(leftEar, leftEye, color));

                                                                winWireframe.AddOverlay(lines);
                                                                winHipster.SetImage(img);
                                                            }

                                                            Console.WriteLine("Hit enter to process the next image.");
                                                            Console.ReadKey();
                                                        }
                                                    }
                                                }
                                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
コード例 #12
        public void Jet2()
        {
            var path = this.GetDataFile($"{LoadTarget}.bmp");

            var tests = new[]
            {
                new { Type = ImageTypes.RgbPixel, ExpectResult = false, Max = 255, Min = 0 },
                new { Type = ImageTypes.RgbAlphaPixel, ExpectResult = false, Max = 255, Min = 0 },
                new { Type = ImageTypes.UInt8, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.UInt16, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.UInt32, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.Int8, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.Int16, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.Int32, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.HsiPixel, ExpectResult = false, Max = 255, Min = 0 },
                new { Type = ImageTypes.Float, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.Double, ExpectResult = true, Max = 255, Min = 0 },
                new { Type = ImageTypes.RgbPixel, ExpectResult = false, Max = 75, Min = 50 },
                new { Type = ImageTypes.RgbAlphaPixel, ExpectResult = false, Max = 75, Min = 50 },
                new { Type = ImageTypes.UInt8, ExpectResult = true, Max = 75, Min = 50 },
                new { Type = ImageTypes.UInt16, ExpectResult = true, Max = 75, Min = 50 },
                new { Type = ImageTypes.UInt32, ExpectResult = true, Max = 75, Min = 50 },
                new { Type = ImageTypes.Int8, ExpectResult = true, Max = 75, Min = 50 },
                new { Type = ImageTypes.Int16, ExpectResult = true, Max = 75, Min = 50 },
                new { Type = ImageTypes.Int32, ExpectResult = true, Max = 75, Min = 50 },
                new { Type = ImageTypes.HsiPixel, ExpectResult = false, Max = 75, Min = 50 },
                new { Type = ImageTypes.Float, ExpectResult = true, Max = 75, Min = 50 },
                new { Type = ImageTypes.Double, ExpectResult = true, Max = 75, Min = 50 }
            };

            var type = this.GetType().Name;

            foreach (var test in tests)
            {
                Array2DBase imageObj       = null;
                Array2DBase horzObj        = null;
                Array2DBase vertObj        = null;
                Array2DBase outputImageObj = null;
                MatrixOp    matrix         = null;
                DlibObject  windowObj      = null;

                try
                {
                    const ImageTypes inputType = ImageTypes.Float;

                    var image = DlibTest.LoadImage(test.Type, path);
                    imageObj = image;
                    var horz = Array2DTest.CreateArray2D(inputType);
                    horzObj = horz;
                    var vert = Array2DTest.CreateArray2D(inputType);
                    vertObj = vert;
                    var outputImage = Array2DTest.CreateArray2D(test.Type);
                    outputImageObj = outputImage;

                    Dlib.SobelEdgeDetector(image, horz, vert);
                    Dlib.SuppressNonMaximumEdges(horz, vert, outputImage);

                    try
                    {
                        matrix = Jet(test.Type, outputImage, test.Max, test.Min);

                        if (test.ExpectResult)
                        {
                            if (this.CanGuiDebug)
                            {
                                var window = new ImageWindow(matrix, $"{test.Type} - Max: {test.Max}, Min : {test.Min}");
                                windowObj = window;
                            }

                            Dlib.SaveBmp(image, $"{Path.Combine(this.GetOutDir(type, "Jet2"), $"{LoadTarget}_{test.Type}_{test.Max}_{test.Min}.bmp")}");
                        }
                        else
                        {
                            Assert.Fail($"Failed to execute Jet2 to Type: {test.Type}");
                        }
                    }
                    catch (Exception)
                    {
                        if (!test.ExpectResult)
                        {
                            Console.WriteLine("OK");
                        }
                        else
                        {
                            throw;
                        }
                    }
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.StackTrace);
                    Console.WriteLine($"Failed to execute Jet2 to Type: {test.Type}");
                    throw;
                }
                finally
                {
                    if (outputImageObj != null)
                    {
                        this.DisposeAndCheckDisposedState(outputImageObj);
                    }
                    if (vertObj != null)
                    {
                        this.DisposeAndCheckDisposedState(vertObj);
                    }
                    if (horzObj != null)
                    {
                        this.DisposeAndCheckDisposedState(horzObj);
                    }
                    if (windowObj != null)
                    {
                        this.DisposeAndCheckDisposedState(windowObj);
                    }
                    if (matrix != null)
                    {
                        this.DisposeAndCheckDisposedState(matrix);
                    }
                    if (imageObj != null)
                    {
                        this.DisposeAndCheckDisposedState(imageObj);
                    }
                }
            }
        }
コード例 #13
        private static void Main()
        {
            try
            {
                var cap = new VideoCapture(0);
                //var cap = new VideoCapture("20090124_WeeklyAddress.ogv.360p.webm");
                if (!cap.IsOpened())
                {
                    Console.WriteLine("Unable to connect to camera");
                    return;
                }

                using (var win = new ImageWindow())
                {
                    // Load face detection and pose estimation models.
                    using (var detector = Dlib.GetFrontalFaceDetector())
                        using (var poseModel = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                        {
                            // Grab and process frames until the main window is closed by the user.
                            while (!win.IsClosed())
                            {
                                // Grab a frame
                                var temp = new Mat();
                                if (!cap.Read(temp))
                                {
                                    break;
                                }

                                // Turn OpenCV's Mat into something dlib can deal with.  Note that this just
                                // wraps the Mat object, it doesn't copy anything.  So cimg is only valid as
                                // long as temp is valid.  Also don't do anything to temp that would cause it
                                // to reallocate the memory which stores the image as that will make cimg
                                // contain dangling pointers.  This basically means you shouldn't modify temp
                                // while using cimg.
                                var array = new byte[temp.Width * temp.Height * temp.ElemSize()];
                                Marshal.Copy(temp.Data, array, 0, array.Length);
                                using (var cimg = Dlib.LoadImageData <RgbPixel>(array, (uint)temp.Height, (uint)temp.Width, (uint)(temp.Width * temp.ElemSize())))
                                {
                                    // Detect faces
                                    var faces = detector.Detect(cimg);
                                    // Find the pose of each face.
                                    var shapes = new List <FullObjectDetection>();
                                    for (var i = 0; i < faces.Length; ++i)
                                    {
                                        var det = poseModel.Detect(cimg, faces[i]);
                                        shapes.Add(det);
                                    }

                                    // Display it all on the screen
                                    win.ClearOverlay();
                                    win.SetImage(cimg);
                                    var lines = Dlib.RenderFaceDetections(shapes);
                                    win.AddOverlay(lines);

                                    foreach (var line in lines)
                                    {
                                        line.Dispose();
                                    }
                                }
                            }
                        }
                }
            }
            //catch (serialization_error&e)
            //{
            //    cout << "You need dlib's default face landmarking model file to run this example." << endl;
            //    cout << "You can get it from the following URL: " << endl;
            //    cout << "   http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2" << endl;
            //    cout << endl << e.what() << endl;
            //}
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
コード例 #14
        public void RawApiFrontalFaceDetectorUsingMemoryInput()
        {
            const string imagePath  = "images\\lenna.bmp";
            var          imageBytes = File.ReadAllBytes(imagePath);

            IntPtr detector = IntPtr.Zero;
            IntPtr dets     = IntPtr.Zero;

            try
            {
                using (var window = new ImageWindow())
                    using (var image = new Array2dUchar())
                    {
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        Trace.Assert(image.Width == 0 && image.Height == 0);
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        NativeMethods.dlib_load_bmp_array2d_uchar(image.DlibArray2dUchar, imageBytes, new IntPtr(imageBytes.Length));
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        Trace.Assert(image.Width == 512 && image.Height == 480);
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        image.SetBitmap(new System.Drawing.Bitmap(imagePath));
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        Trace.Assert(image.Width == 512 && image.Height == 480);
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        image.PyramidUp();
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        detector = NativeMethods.dlib_get_frontal_face_detector();
                        dets     = NativeMethods.vector_Rect_new1();
                        NativeMethods.dlib_frontal_face_detector_operator(detector, image.DlibArray2dUchar, -0.5, dets);
                        unsafe
                        {
                            Rect *rectangles = (Rect *)NativeMethods.vector_Rect_getPointer(dets).ToPointer();
                            long  count      = NativeMethods.vector_Rect_getSize(dets).ToInt64();
                            for (int i = 0; i < count; i++)
                            {
                                Console.WriteLine(rectangles[i]);
                            }
                        }
                    }
            }
            finally
            {
                if (detector != IntPtr.Zero)
                {
                    NativeMethods.dlib_frontal_face_detector_delete(detector);
                }
                if (dets != IntPtr.Zero)
                {
                    NativeMethods.vector_Rect_delete(dets);
                }
            }
        }
コード例 #15
        public void RawApiFaceLandmarkDetectionUsingMemoryInput()
        {
            const string imagePath  = "images\\lenna.bmp";
            var          imageBytes = File.ReadAllBytes(imagePath);

            IntPtr detector = IntPtr.Zero;
            IntPtr dets     = IntPtr.Zero;

            try
            {
                using (var window = new ImageWindow())
                    using (var image = new Array2dRgbPixel())
                    {
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        Trace.Assert(image.Width == 0 && image.Height == 0);
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        NativeMethods.dlib_load_bmp_array2d_rgbpixel(image.DlibArray2dRgbPixel, imageBytes, new IntPtr(imageBytes.Length));
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        Trace.Assert(image.Width == 512 && image.Height == 480);
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        image.SetBitmap(new System.Drawing.Bitmap(imagePath));
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        Trace.Assert(image.Width == 512 && image.Height == 480);
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        image.PyramidUp();
                        Console.WriteLine($"Size: ({image.Width},{image.Height})");
                        window.SetImage(image);
                        OpenCvSharp.Cv2.WaitKey(Cv2WaitKeyDelay);

                        detector = NativeMethods.dlib_get_face_landmark_detection("D:/Data/Dlib/shape_predictor_68_face_landmarks.dat");
                        dets     = NativeMethods.vector_FaceLandmarkInternal_new1();
                        NativeMethods.dlib_face_landmark_detection_operator(detector, image.DlibArray2dRgbPixel, -0.5, dets);
                        Trace.Assert(dets != IntPtr.Zero);
                        long count = NativeMethods.vector_FaceLandmarkInternal_getSize(dets).ToInt64();
                        // Only read the results when at least one face was found; dereferencing the pointer otherwise causes an exception.
                        if (count > 0)
                        {
                            unsafe
                            {
                                FaceLandmarkInternal *faceLandmarkInternals = (FaceLandmarkInternal *)NativeMethods.vector_FaceLandmarkInternal_getPointer(dets).ToPointer();
                                for (int i = 0; i < count; i++)
                                {
                                    Console.WriteLine(new FaceLandmark(faceLandmarkInternals[i]));
                                }
                            }
                        }
                    }
            }
            finally
            {
                if (detector != IntPtr.Zero)
                {
                    NativeMethods.dlib_face_landmark_detection_delete(detector);
                }
                if (dets != IntPtr.Zero)
                {
                    NativeMethods.vector_FaceLandmarkInternal_delete(dets);
                }
            }
        }
コード例 #16
ファイル: Program.cs プロジェクト: TrojanOlx/AI
        private static void Main()
        {
            try
            {
                //var cap = new VideoCapture(0);
                //var cap = new VideoCapture("https://js.live-play.acgvideo.com/live-js/890069/live_30947419_1716018.flv?wsSecret=2cee8a379a871fa8dbf714ba9d16e8a4&wsTime=1548240723&trid=4f64a0ae5e2444938cfdd109a54c6e1c&sig=no&platform=web&pSession=yR3bsQk1-SCY4-4QGi-K7EG-AsbTiwbX7tZF");
                var cap = new VideoCapture(0);
                if (!cap.IsOpened())
                {
                    Console.WriteLine("Unable to connect to camera");
                    return;
                }

                using (var win = new ImageWindow())
                {
                    // Load face detection and pose estimation models.
                    using (var detector = Dlib.GetFrontalFaceDetector())
                        using (var poseModel = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                        {
                            // Grab and process frames until the user closes the main window.
                            while (!win.IsClosed())
                            {
                                // Grab a frame
                                var temp = new Mat();
                                if (!cap.Read(temp))
                                {
                                    break;
                                }

                                // Turn OpenCV's Mat into something dlib can deal with.  Note that this just
                                // wraps the Mat object, it doesn't copy anything.  So cimg is only valid as
                                // long as temp is valid.  Also don't do anything to temp that would cause it
                                // to reallocate the memory which stores the image as that will make cimg
                                // contain dangling pointers.  This basically means you shouldn't modify temp
                                // while using cimg.
                                var array = new byte[temp.Width * temp.Height * temp.ElemSize()];
                                Marshal.Copy(temp.Data, array, 0, array.Length);
                                using (var cimg = Dlib.LoadImageData <RgbPixel>(array, (uint)temp.Height, (uint)temp.Width, (uint)(temp.Width * temp.ElemSize())))
                                {
                                    // Detect faces
                                    var faces = detector.Operator(cimg);
                                    // Find the pose of each face.
                                    var shapes = new List <FullObjectDetection>();
                                    for (var i = 0; i < faces.Length; ++i)
                                    {
                                        var det = poseModel.Detect(cimg, faces[i]);
                                        Console.WriteLine(faces[i].Left);
                                        shapes.Add(det);
                                    }

                                    // Display it all on the screen
                                    win.ClearOverlay();
                                    win.SetImage(cimg);
                                    var lines = Dlib.RenderFaceDetections(shapes);
                                    win.AddOverlay(faces, new RgbPixel {
                                        Red = 255
                                    });
                                    win.AddOverlay(lines);
                                    foreach (var line in lines)
                                    {
                                        line.Dispose();
                                    }
                                }
                            }
                        }
                }
            }
            //catch (serialization_error&e)
            //{
            //    cout << "You need dlib's default face landmarking model file to run this example." << endl;
            //    cout << "You can get it from the following URL: " << endl;
            //    cout << "   http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2" << endl;
            //    cout << endl << e.what() << endl;
            //}
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
コード例 #17
ファイル: App.xaml.cs プロジェクト: shamee786/ClipFlair
        private bool ParseUrlParameters(ActivityWindow activityWindow)
        {
            if (IsRunningOutOfBrowser)
            {
                return(false);
            }

            bool foundParam = false;

            if (queryString.ContainsKey(PARAMETER_ACTIVITY))
            {
                activityWindow.LoadOptions(makeActivityUri(queryString[PARAMETER_ACTIVITY]));
                foundParam = true;
            }

            if (queryString.ContainsKey(PARAMETER_MEDIA))
            {
                WaitTillNotBusy(activityWindow); //TODO: doesn't work (should wait for any activity to load first)
                MediaPlayerWindow w = new MediaPlayerWindow();
                w.Width  = activityWindow.Width;
                w.Height = activityWindow.Height - (activityWindow.ActivityView.ToolbarVisible ? 80 : 0); //TODO: should change this if/when activity toolbar is made vertical (have option to get ActualWidth/ActualHeight of activity toolbar)
                activityWindow.Container.AddWindowInViewCenter(w);
                w.MediaPlayerView.Source = makeClipUri(CLIPFLAIR_GALLERY_VIDEO, queryString[PARAMETER_MEDIA]);
                foundParam = true;
            }

            if (queryString.ContainsKey(PARAMETER_VIDEO))
            {
                WaitTillNotBusy(activityWindow); //TODO: doesn't work (should wait for any activity to load first)
                MediaPlayerWindow w = new MediaPlayerWindow();
                w.Width  = activityWindow.Width;
                w.Height = activityWindow.Height - (activityWindow.ActivityView.ToolbarVisible ? 80 : 0); //TODO: should change this if/when activity toolbar is made vertical (have option to get ActualWidth/ActualHeight of activity toolbar)
                activityWindow.Container.AddWindowInViewCenter(w);
                w.MediaPlayerView.Source = makeClipUri(CLIPFLAIR_GALLERY_VIDEO, queryString[PARAMETER_VIDEO]);
                foundParam = true;
            }

            if (queryString.ContainsKey(PARAMETER_AUDIO))
            {
                WaitTillNotBusy(activityWindow); //TODO: doesn't work (should wait for any activity to load first)
                MediaPlayerWindow w = new MediaPlayerWindow();
                w.Width  = activityWindow.Width;
                w.Height = 69; //Using small height since there's only audio //NOTE: if SMF skin changes this may not look ok
                activityWindow.Container.AddWindowInViewCenter(w);
                w.MediaPlayerView.VideoVisible = false;
                w.MediaPlayerView.Source       = new Uri(new Uri(CLIPFLAIR_GALLERY_AUDIO), queryString[PARAMETER_AUDIO]);
                foundParam = true;
            }

            if (queryString.ContainsKey(PARAMETER_IMAGE))
            {
                WaitTillNotBusy(activityWindow); //TODO: doesn't work (should wait for any activity to load first)
                ImageWindow w = new ImageWindow();
                w.Width  = activityWindow.Width;
                w.Height = activityWindow.Height - (activityWindow.ActivityView.ToolbarVisible ? 80 : 0); //TODO: should change this if/when activity toolbar is made vertical (have option to get ActualWidth/ActualHeight of activity toolbar)
                activityWindow.Container.AddWindowInViewCenter(w);
                w.ImageView.Source = new Uri(new Uri(CLIPFLAIR_GALLERY_IMAGE), queryString[PARAMETER_IMAGE]);
                foundParam         = true;
            }

            if (queryString.ContainsKey(PARAMETER_GALLERY))
            {
                WaitTillNotBusy(activityWindow); //TODO: doesn't work (should wait for any activity to load first)
                GalleryWindow w = new GalleryWindow();
                w.Width  = activityWindow.Width;
                w.Height = activityWindow.Height - (activityWindow.ActivityView.ToolbarVisible ? 80 : 0); //TODO: should change this if/when activity toolbar is made vertical (have option to get ActualWidth/ActualHeight of activity toolbar)
                activityWindow.Container.AddWindowInViewCenter(w);
                w.GalleryView.Source = makeGalleryUri(queryString[PARAMETER_GALLERY]);
                foundParam           = true;
            }

            if (queryString.ContainsKey(PARAMETER_COLLECTION))
            {
                WaitTillNotBusy(activityWindow); //TODO: doesn't work (should wait for any activity to load first)
                GalleryWindow w = new GalleryWindow();
                w.Width  = activityWindow.Width;
                w.Height = activityWindow.Height - (activityWindow.ActivityView.ToolbarVisible ? 80 : 0); //TODO: should change this if/when activity toolbar is made vertical (have option to get ActualWidth/ActualHeight of activity toolbar)
                activityWindow.Container.AddWindowInViewCenter(w);
                w.GalleryView.Source = makeGalleryUri(queryString[PARAMETER_COLLECTION]);
                foundParam           = true;
            }

            //TODO: add ...PARAMETER_CAPTIONS, PARAMETER_COMPONENT, TEXT, MAP etc.

            return(foundParam);
        } //TODO: add CAPTIONS parameter to load .SRT/.TTS/.SSA and show it
コード例 #18
ファイル: DlibTest.cs プロジェクト: tnw513/DlibDotNet
        public void LoadImageData()
        {
            const int cols = 512;
            const int rows = 512;
            var       path = this.GetDataFile($"lena_gray.raw");
            var       data = File.ReadAllBytes(path.FullName);

            var tests = new[]
            {
                new { Type = ImageTypes.UInt8, ExpectResult = true },
                //new { Type = ImageTypes.RgbPixel,      ExpectResult = true},
                //new { Type = ImageTypes.RgbAlphaPixel, ExpectResult = true},
                //new { Type = ImageTypes.UInt16,        ExpectResult = true},
                //new { Type = ImageTypes.HsiPixel,      ExpectResult = true},
                //new { Type = ImageTypes.Float,         ExpectResult = true},
                //new { Type = ImageTypes.Double,        ExpectResult = true}
            };

            foreach (var test in tests)
            {
                TwoDimentionObjectBase image;
                using (var win = new ImageWindow())
                {
                    switch (test.Type)
                    {
                    case ImageTypes.UInt8:
                        image = Dlib.LoadImageData <byte>(data, 512, 512, 512);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <byte>)image);
                            win.WaitUntilClosed();
                        }
                        break;

                    //case ImageTypes.UInt16:
                    //    image = Dlib.LoadBmp<ushort>(path.FullName);
                    //    break;
                    //case ImageTypes.HsiPixel:
                    //    image = Dlib.LoadBmp<HsiPixel>(path.FullName);
                    //    break;
                    //case ImageTypes.Float:
                    //    image = Dlib.LoadBmp<float>(path.FullName);
                    //    break;
                    //case ImageTypes.Double:
                    //    image = Dlib.LoadBmp<double>(path.FullName);
                    //    break;
                    //case ImageTypes.RgbPixel:
                    //    image = Dlib.LoadBmp<RgbPixel>(path.FullName);
                    //    break;
                    //case ImageTypes.RgbAlphaPixel:
                    //    image = Dlib.LoadBmp<RgbAlphaPixel>(path.FullName);
                    //    break;
                    default:
                        throw new ArgumentOutOfRangeException();
                    }
                }

                Assert.AreEqual(image.Columns, cols, $"Failed to load {test.Type}.");
                Assert.AreEqual(image.Rows, rows, $"Failed to load {test.Type}.");

                this.DisposeAndCheckDisposedState(image);
            }
        }
コード例 #19
ファイル: FaceLoginController.cs プロジェクト: TrojanOlx/AI
        public async Task <ActionResult> Login([FromBody] InputFaceModel model)
        {
            RequestFaceModel request = new RequestFaceModel()
            {
                Status  = 500,
                Message = null
            };
            var filePath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "FaceImages", model.user_name);

            if (!Directory.Exists(filePath))
            {
                request.Enum = RequestEnum.Failed;
                Console.WriteLine(request.Message);
                Thread.Sleep(5000);
                return(Ok(request));
            }
            FaceContrast faceContrast = new FaceContrast(filePath);

            VideoCapture cap = null;

            try
            {
                if (model.rmtp_url == "0")
                {
                    cap = new VideoCapture(0);
                }
                else
                {
                    cap = new VideoCapture(model.rmtp_url);
                }


                var flag     = false;
                var faceFlag = false;

                var bioFlag = false;

                QueueFixedLength <double> leftEarQueue  = new QueueFixedLength <double>(10);
                QueueFixedLength <double> rightEarQueue = new QueueFixedLength <double>(10);
                QueueFixedLength <double> mouthQueue    = new QueueFixedLength <double>(20);
                bool leftEarFlag  = false;
                bool rightEarFlag = false;
                bool mouthFlag    = false;
                using (var sp = ShapePredictor.Deserialize(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "ShapeModel", "shape_predictor_5_face_landmarks.dat")))
                    using (var win = new ImageWindow())
                    {
                        // Load face detection and pose estimation models.
                        using (var detector = Dlib.GetFrontalFaceDetector())
                            using (var net = LossMetric.Deserialize(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "ShapeModel", "dlib_face_recognition_resnet_model_v1.dat")))
                                using (var poseModel = ShapePredictor.Deserialize(Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "ShapeModel", "shape_predictor_68_face_landmarks.dat")))
                                {
                                    var ti = true;

                                    System.Timers.Timer t = new System.Timers.Timer(30000);
                                    t.Elapsed += new System.Timers.ElapsedEventHandler((object source, System.Timers.ElapsedEventArgs e) =>
                                    {
                                        ti = false;
                                    });

                                    t.AutoReset = false;
                                    t.Enabled   = true;

                                    // Grab and process frames until the login attempt times out (see the 30-second timer above).
                                    while (/*!win.IsClosed() &&*/ ti)
                                    {
                                        try
                                        {
                                            // Grab a frame
                                            var temp = new Mat();
                                            if (!cap.Read(temp))
                                            {
                                                break;
                                            }

                                            // Turn OpenCV's Mat into something dlib can deal with.  Note that this just
                                            // wraps the Mat object, it doesn't copy anything.  So cimg is only valid as
                                            // long as temp is valid.  Also don't do anything to temp that would cause it
                                            // to reallocate the memory which stores the image as that will make cimg
                                            // contain dangling pointers.  This basically means you shouldn't modify temp
                                            // while using cimg.
                                            var array = new byte[temp.Width * temp.Height * temp.ElemSize()];
                                            Marshal.Copy(temp.Data, array, 0, array.Length);
                                            using (var cimg = Dlib.LoadImageData <RgbPixel>(array, (uint)temp.Height, (uint)temp.Width, (uint)(temp.Width * temp.ElemSize())))
                                            {
                                                // Detect faces
                                                var faces = detector.Operator(cimg);
                                                // Find the pose of each face.
                                                var shapes = new List <FullObjectDetection>();
                                                for (var i = 0; i < faces.Length; ++i)
                                                {
                                                    var det = poseModel.Detect(cimg, faces[i]);
                                                    shapes.Add(det);
                                                }

                                                if (shapes.Count > 0)
                                                {
                                                    // Liveness detection

                                                    if (!bioFlag)
                                                    {
                                                        bioFlag = BioAssay(shapes[0], ref leftEarQueue, ref rightEarQueue, ref mouthQueue, ref leftEarFlag, ref rightEarFlag, ref mouthFlag);
                                                    }
                                                }


                                                if (!faceFlag)
                                                {
                                                    foreach (var face in faces)
                                                    {
                                                        var shape                   = sp.Detect(cimg, face);
                                                        var faceChipDetail          = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                                        Matrix <RgbPixel> rgbPixels = new Matrix <RgbPixel>(cimg);
                                                        var faceChip                = Dlib.ExtractImageChip <RgbPixel>(rgbPixels, faceChipDetail);
                                                        var faceDescriptors         = net.Operator(faceChip);
                                                        faceFlag = faceContrast.Contrast(faceDescriptors);
                                                    }
                                                }
                                                Console.WriteLine(model.user_name + ":" + faceFlag);
                                                if (bioFlag && faceFlag)
                                                {
                                                    flag = true;
                                                    break;
                                                }

                                                // Display it on the screen
                                                win.ClearOverlay();
                                                win.SetImage(cimg);
                                                var lines = Dlib.RenderFaceDetections(shapes);
                                                win.AddOverlay(faces, new RgbPixel {
                                                    Red = 72, Green = 118, Blue = 255
                                                });
                                                win.AddOverlay(lines);
                                                foreach (var line in lines)
                                                {
                                                    line.Dispose();
                                                }
                                            }
                                        }
                                        catch (Exception ex)
                                        {
                                            request.Message = ex.ToString();
                                            break;
                                        }
                                    }
                                }
                    }

                if (flag)
                {
                    request.Enum = RequestEnum.Succeed;
                }
                else
                {
                    request.Enum = RequestEnum.Failed;
                }
            }
            catch (Exception ex)
            {
                request.Message = ex.ToString();
            }
            finally
            {
                if (cap != null)
                {
                    cap.Dispose();
                }
            }
            Console.WriteLine(request.Message);
            return(Ok(request));
        }
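The controller above calls two helpers that are outside this excerpt: BioAssay (the liveness check driven by leftEarQueue, rightEarQueue and mouthQueue) and faceContrast.Contrast (the face descriptor match). The class below is only a hedged sketch of what such a descriptor comparison could look like; it assumes the enrolled descriptors come from the same LossMetric network and reuses the 0.6 Euclidean-distance convention from dlib's face recognition examples. The class and member names are illustrative, not this project's actual code.

using System.Collections.Generic;
using DlibDotNet;
using DlibDotNet.Dnn;

// Hypothetical sketch of a descriptor-comparison helper like "faceContrast" above.
public class FaceContrast
{
    // Descriptors of the enrolled user, computed once from reference photos.
    private readonly IList<Matrix<float>> _enrolled;

    public FaceContrast(IList<Matrix<float>> enrolledDescriptors)
    {
        _enrolled = enrolledDescriptors;
    }

    // Returns true if any candidate descriptor is close enough to an enrolled one.
    public bool Contrast(OutputLabels<Matrix<float>> candidates)
    {
        for (uint i = 0; i < candidates.Count; ++i)
        {
            foreach (var known in _enrolled)
            {
                // Same convention as the dlib examples: distance < 0.6 means "same person".
                var diff = known - candidates[i];
                if (Dlib.Length(diff) < 0.6)
                {
                    return true;
                }
            }
        }

        return false;
    }
}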
コード例 #20
0
ファイル: DlibTest.cs プロジェクト: tnw513/DlibDotNet
        public void LoadImageData2()
        {
            const int cols  = 512;
            const int rows  = 512;
            const int steps = 512;

            var tests = new[]
            {
                new { Type = ImageTypes.UInt8, ExpectResult = true },
                new { Type = ImageTypes.UInt16, ExpectResult = true },
                new { Type = ImageTypes.Int16, ExpectResult = true },
                new { Type = ImageTypes.Int32, ExpectResult = true },
                new { Type = ImageTypes.HsiPixel, ExpectResult = true },
                new { Type = ImageTypes.RgbPixel, ExpectResult = true },
                new { Type = ImageTypes.RgbAlphaPixel, ExpectResult = true },
                new { Type = ImageTypes.Float, ExpectResult = true },
                new { Type = ImageTypes.Double, ExpectResult = true }
            };

            var random = new Random(0);

            foreach (var test in tests)
            {
                TwoDimentionObjectBase image;
                using (var win = new ImageWindow())
                {
                    switch (test.Type)
                    {
                    case ImageTypes.UInt8:
                    {
                        var data = new byte[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = (byte)random.Next(0, 255);
                            }
                        }

                        image = Dlib.LoadImageData <byte>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <byte>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.UInt16:
                    {
                        var data = new ushort[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = (ushort)random.Next(0, 255);
                            }
                        }

                        image = Dlib.LoadImageData <ushort>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <ushort>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.Int16:
                    {
                        var data = new short[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = (short)random.Next(0, 255);
                            }
                        }

                        image = Dlib.LoadImageData <short>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <short>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.Int32:
                    {
                        var data = new int[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = random.Next(0, 255);
                            }
                        }

                        image = Dlib.LoadImageData <int>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <int>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.Float:
                    {
                        var data = new float[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = (float)random.NextDouble();
                            }
                        }

                        image = Dlib.LoadImageData <float>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <float>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.Double:
                    {
                        var data = new double[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = (double)random.NextDouble();
                            }
                        }

                        image = Dlib.LoadImageData <double>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <double>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.HsiPixel:
                    {
                        var data = new HsiPixel[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = new HsiPixel
                                {
                                    H = (byte)random.Next(0, 255),
                                    S = (byte)random.Next(0, 255),
                                    I = (byte)random.Next(0, 255)
                                };
                            }
                        }

                        image = Dlib.LoadImageData <HsiPixel>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <HsiPixel>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.RgbPixel:
                    {
                        var data = new RgbPixel[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = new RgbPixel
                                {
                                    Red   = (byte)random.Next(0, 255),
                                    Green = (byte)random.Next(0, 255),
                                    Blue  = (byte)random.Next(0, 255)
                                };
                            }
                        }

                        image = Dlib.LoadImageData <RgbPixel>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <RgbPixel>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    case ImageTypes.RgbAlphaPixel:
                    {
                        var data = new RgbAlphaPixel[rows * cols];
                        for (var r = 0; r < rows; r++)
                        {
                            for (var c = 0; c < cols; c++)
                            {
                                data[steps * r + c] = new RgbAlphaPixel
                                {
                                    Red   = (byte)random.Next(0, 255),
                                    Green = (byte)random.Next(0, 255),
                                    Blue  = (byte)random.Next(0, 255),
                                    Alpha = (byte)random.Next(0, 255)
                                };
                            }
                        }

                        image = Dlib.LoadImageData <RgbAlphaPixel>(data, rows, cols, steps);

                        if (this.CanGuiDebug)
                        {
                            win.SetImage((Array2D <RgbAlphaPixel>)image);
                            win.WaitUntilClosed();
                        }
                    }
                    break;

                    default:
                        throw new ArgumentOutOfRangeException();
                    }
                }

                Assert.AreEqual(image.Columns, cols, $"Failed to load {test.Type}.");
                Assert.AreEqual(image.Rows, rows, $"Failed to load {test.Type}.");

                this.DisposeAndCheckDisposedState(image);
            }
        }
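In LoadImageData2 above, steps is the row stride of the flat buffer measured in elements, so the element at row r and column c lives at index steps * r + c; because steps equals cols here, the buffer is densely packed. The snippet below is a minimal stand-alone sketch of the same call pattern (the concrete sizes and values are made up for illustration):

using System;
using DlibDotNet;

internal static class LoadImageDataSketch
{
    private static void Main()
    {
        const int rows = 2;
        const int cols = 3;
        const int steps = 3; // densely packed: each row is exactly `cols` elements wide

        // Row-major 2x3 gradient; the element for (r, c) sits at index steps * r + c.
        var data = new byte[rows * cols];
        for (var r = 0; r < rows; r++)
            for (var c = 0; c < cols; c++)
                data[steps * r + c] = (byte)(r * cols + c);

        using (var image = Dlib.LoadImageData<byte>(data, rows, cols, steps))
        {
            Console.WriteLine($"{image.Rows}x{image.Columns}"); // prints "2x3"
        }
    }
}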
コード例 #21
0
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                // For multicast only look for GigE cameras here.
                using (Camera camera = new Camera(DeviceType.GigE, CameraSelectionStrategy.FirstFound))
                {
                    // Print the model name of the camera.
                    Console.WriteLine("Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName]);
                    String deviceType = camera.CameraInfo[CameraInfoKey.DeviceType];

                    Console.WriteLine("==========");
                    Console.WriteLine("{0} Camera", deviceType);
                    Console.WriteLine("==========");
                    camera.StreamGrabber.ImageGrabbed += OnImageGrabbed;
                    camera.StreamGrabber.ImageGrabbed += OnImageSkipped;
                    // Get the Key from the user for selecting the mode

                    Console.Write("Start multicast sample in (c)ontrol or in (m)onitor mode? (c/m) ");
                    ConsoleKeyInfo keyPressed = Console.ReadKey();
                    switch (keyPressed.KeyChar)
                    {
                    // The default configuration must be removed when monitor mode is selected
                    // because the monitoring application is not allowed to modify any parameter settings.
                    case 'm':
                    case 'M':
                        // Monitor mode selected.
                        Console.WriteLine("\nIn Monitor mode");

                        // Set MonitorModeActive to true to act as monitor
                        camera.Parameters [PLCameraInstance.MonitorModeActive].SetValue(true);      // Set monitor mode

                        // Open the camera.
                        camera.Open();

                        // Select transmission type. If the camera is already controlled by another application
                        // and configured for multicast, the active camera configuration can be used
                        // (IP Address and Port will be set automatically).
                        camera.Parameters[PLGigEStream.TransmissionType].TrySetValue(PLGigEStream.TransmissionType.UseCameraConfig);

                        // Alternatively, the stream grabber could be explicitly set to "multicast"...
                        // In this case, the IP Address and the IP port must also be set.
                        //
                        //camera.Parameters[PLGigEStream.TransmissionType].SetValue(PLGigEStream.TransmissionType.Multicast);
                        //camera.Parameters[PLGigEStream.DestinationAddr].SetValue("239.0.0.1");
                        //camera.Parameters[PLGigEStream.DestinationPort].SetValue(49152);

                        if ((camera.Parameters[PLGigEStream.DestinationAddr].GetValue() != "0.0.0.0") &&
                            (camera.Parameters[PLGigEStream.DestinationPort].GetValue() != 0))
                        {
                            camera.StreamGrabber.Start(countOfImagesToGrab);
                        }
                        else
                        {
                            throw new Exception("Failed to open stream grabber (monitor mode): The acquisition is not yet started by the controlling application. Start the controlling application before starting the monitor application.");
                        }
                        break;

                    case 'c':
                    case 'C':
                        // Controlling mode selected.
                        Console.WriteLine("\nIn Control mode");

                        // Open the camera.
                        camera.Open();

                        // Set transmission type to "multicast"...
                        // In this case, the IP Address and the IP port must also be set.
                        camera.Parameters[PLGigEStream.TransmissionType].SetValue(PLGigEStream.TransmissionType.Multicast);
                        //camera.Parameters[PLGigEStream.DestinationAddr].SetValue("239.0.0.1");
                        //camera.Parameters[PLGigEStream.DestinationPort].SetValue(49152);

                        // Maximize the image area of interest (Image AOI).
                        camera.Parameters[PLGigECamera.OffsetX].TrySetValue(camera.Parameters[PLGigECamera.OffsetX].GetMinimum());
                        camera.Parameters[PLGigECamera.OffsetY].TrySetValue(camera.Parameters[PLGigECamera.OffsetY].GetMinimum());
                        camera.Parameters[PLGigECamera.Width].SetValue(camera.Parameters[PLGigECamera.Width].GetMaximum());
                        camera.Parameters[PLGigECamera.Height].SetValue(camera.Parameters[PLGigECamera.Height].GetMaximum());

                        // Set the pixel data format.
                        camera.Parameters[PLGigECamera.PixelFormat].SetValue(PLGigECamera.PixelFormat.Mono8);

                        camera.StreamGrabber.Start();
                        break;

                    default:
                        throw new NotSupportedException("Invalid mode selected.");
                    }

                    IGrabResult grabResult;

                    // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
                    // when countOfImagesToGrab images have been retrieved in monitor mode
                    // or when a key is pressed and the camera object is destroyed.
                    Console.WriteLine("Press any key to quit FrameGrabber...");

                    while (!Console.KeyAvailable && camera.StreamGrabber.IsGrabbing)
                    {
                        grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                        using (grabResult)
                        {
                            // Image grabbed successfully?
                            if (grabResult.GrabSucceeded)
                            {
                                // Display the image
                                ImageWindow.DisplayImage(1, grabResult);

                                // The grab result could now be processed here.
                            }
                            else
                            {
                                Console.WriteLine("Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription);
                            }
                        }
                    }

                    camera.Close();
                }
            }
            catch (Exception e)
            {
                // Error handling
                Console.Error.WriteLine("\nException: {0}", e.Message);
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine("\nPress enter to exit.");
                Console.ReadLine();
            }

            Environment.Exit(exitCode);
        }
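The multicast sample wires camera.StreamGrabber.ImageGrabbed to OnImageGrabbed and OnImageSkipped, but those handlers are outside this excerpt. The sketch below shows what such handlers typically look like in the pylon .NET samples; the bodies are an assumption, not this sample's exact code.

// Hypothetical event handlers for the multicast sample above.
private static void OnImageGrabbed(object sender, ImageGrabbedEventArgs e)
{
    IGrabResult grabResult = e.GrabResult;
    if (grabResult.GrabSucceeded)
    {
        Console.WriteLine("Grabbed image: {0} x {1}", grabResult.Width, grabResult.Height);
    }
    else
    {
        Console.WriteLine("Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription);
    }
}

private static void OnImageSkipped(object sender, ImageGrabbedEventArgs e)
{
    // Reports how many images the stream grabber had to drop before this one.
    Console.WriteLine("OnImageSkipped: {0} image(s) skipped.", e.GrabResult.SkippedImageCount);
}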
コード例 #22
0
        private static void Main(string[] args)
        {
            //if (args.Length != 1)
            //{
            //    Console.WriteLine("Run this example by invoking it like this: ");
            //    Console.WriteLine("   ./DnnFaceRecognition faces/bald_guys.jpg");
            //    Console.WriteLine("You will also need to get the face landmarking model file as well as ");
            //    Console.WriteLine("the face recognition model file.  Download and then decompress these files from: ");
            //    Console.WriteLine("http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2");
            //    Console.WriteLine("http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2");
            //    return;
            //}

            // The first thing we are going to do is load all our models.  First, since we need to
            // find faces in the image we will need a face detector:
            using (var detector = Dlib.GetFrontalFaceDetector())
                // We will also use a face landmarking model to align faces to a standard pose:  (see face_landmark_detection_ex.cpp for an introduction)
                using (var sp = ShapePredictor.Deserialize("shape_predictor_5_face_landmarks.dat"))
                    // And finally we load the DNN responsible for face recognition.
                    using (var net = DlibDotNet.Dnn.LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat"))

                        using (var img = Dlib.LoadImage <RgbPixel>(@"C:\Users\Chris\Desktop\test3\me pics\IMG_20170520_202221.jpg"))
                            using (var mat = new Matrix <RgbPixel>(img))

                                // Display the raw image on the screen
                                using (var win = new ImageWindow(img))
                                {
                                    // Run the face detector on the image of our action heroes, and for each face extract a
                                    // copy that has been normalized to 150x150 pixels in size and appropriately rotated
                                    // and centered.
                                    var faces = new List <Matrix <RgbPixel> >();
                                    foreach (var face in detector.Detect(img))
                                    {
                                        var shape          = sp.Detect(img, face);
                                        var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                        var faceChip       = Dlib.ExtractImageChip <RgbPixel>(mat, faceChipDetail);

                                        //faces.Add(move(face_chip));
                                        faces.Add(faceChip);

                                        // Also put some boxes on the faces so we can see that the detector is finding
                                        // them.
                                        win.AddOverlay(face);
                                    }

                                    if (!faces.Any())
                                    {
                                        Console.WriteLine("No faces found in image!");
                                        return;
                                    }

                                    // This call asks the DNN to convert each face image in faces into a 128D vector.
                                    // In this 128D vector space, images from the same person will be close to each other
                                    // but vectors from different people will be far apart.  So we can use these vectors to
                                    // identify if a pair of images are from the same person or from different people.
                                    var faceDescriptors = net.Operator(faces);

                                    // In particular, one simple thing we can do is face clustering.  This next bit of code
                                    // creates a graph of connected faces and then uses the Chinese whispers graph clustering
                                    // algorithm to identify how many people there are and which faces belong to whom.
                                    var edges = new List <SamplePair>();
                                    for (uint i = 0; i < faceDescriptors.Count; ++i)
                                    {
                                        for (var j = i; j < faceDescriptors.Count; ++j)
                                        {
                                            // Faces are connected in the graph if they are close enough.  Here we check if
                                            // the distance between two face descriptors is less than 0.6, which is the
                                            // decision threshold the network was trained to use.  Although you can
                                            // certainly use any other threshold you find useful.
                                            var diff = faceDescriptors[i] - faceDescriptors[j];
                                            if (Dlib.Length(diff) < 0.6)
                                            {
                                                edges.Add(new SamplePair(i, j));
                                            }
                                        }
                                    }

                                    Dlib.ChineseWhispers(edges, 100, out var numClusters, out var labels);

                                    // This will correctly indicate that there are 4 people in the image.
                                    Console.WriteLine($"number of people found in the image: {numClusters}");

                                    // Now let's display the face clustering results on the screen.  You will see that it
                                    // correctly grouped all the faces.
                                    var winClusters = new List <ImageWindow>();
                                    for (var i = 0; i < numClusters; i++)
                                    {
                                        winClusters.Add(new ImageWindow());
                                    }
                                    var tileImages = new List <Matrix <RgbPixel> >();
                                    for (var clusterId = 0ul; clusterId < numClusters; ++clusterId)
                                    {
                                        var temp = new List <Matrix <RgbPixel> >();
                                        for (var j = 0; j < labels.Length; ++j)
                                        {
                                            if (clusterId == labels[j])
                                            {
                                                temp.Add(faces[j]);
                                            }
                                        }

                                        winClusters[(int)clusterId].Title = $"face cluster {clusterId}";
                                        var tileImage = Dlib.TileImages(temp);
                                        tileImages.Add(tileImage);
                                        winClusters[(int)clusterId].SetImage(tileImage);
                                    }

                                    // Finally, let's print one of the face descriptors to the screen.
                                    using (var trans = Dlib.Trans(faceDescriptors[0]))
                                    {
                                        Console.WriteLine($"face descriptor for one face: {trans}");

                                        // It should also be noted that face recognition accuracy can be improved if jittering
                                        // is used when creating face descriptors.  In particular, to get 99.38% on the LFW
                                        // benchmark you need to use the jitter_image() routine to compute the descriptors,
                                        // like so:
                                        var jitterImages = JitterImage(faces[0]).ToArray();
                                        var ret          = net.Operator(jitterImages);
                                        using (var m = Dlib.Mat(ret))
                                            using (var faceDescriptor = Dlib.Mean <float>(m))
                                                using (var t = Dlib.Trans(faceDescriptor))
                                                {
                                                    Console.WriteLine($"jittered face descriptor for one face: {t}");

                                                    // If you use the model without jittering, as we did when clustering the bald guys, it
                                                    // gets an accuracy of 99.13% on the LFW benchmark.  So jittering makes the whole
                                                    // procedure a little more accurate but makes face descriptor calculation slower.

                                                    Console.WriteLine("hit enter to terminate");
                                                    Console.ReadKey();

                                                    foreach (var jitterImage in jitterImages)
                                                    {
                                                        jitterImage.Dispose();
                                                    }

                                                    foreach (var tileImage in tileImages)
                                                    {
                                                        tileImage.Dispose();
                                                    }

                                                    foreach (var edge in edges)
                                                    {
                                                        edge.Dispose();
                                                    }

                                                    foreach (var descriptor in faceDescriptors)
                                                    {
                                                        descriptor.Dispose();
                                                    }

                                                    foreach (var face in faces)
                                                    {
                                                        face.Dispose();
                                                    }
                                                }
                                    }
                                }
        }
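The JitterImage helper called near the end of this example is not shown in the excerpt. In the DlibDotNet face recognition sample it simply produces many slightly perturbed copies of one face chip; the sketch below follows that pattern and assumes the Dlib.JitterImage(Matrix<RgbPixel>, Rand) overload (the same helper is used again in the Program.cs example further down). It belongs in the same class as Main and needs the usual DlibDotNet and System.Collections.Generic using directives.

// Sketch of the JitterImage helper: make 100 copies of the face chip, each randomly
// zoomed, rotated and translated a little, and sometimes mirrored left to right.
private static IEnumerable<Matrix<RgbPixel>> JitterImage(Matrix<RgbPixel> img)
{
    var rnd = new Rand();

    var crops = new List<Matrix<RgbPixel>>();
    for (var i = 0; i < 100; ++i)
    {
        crops.Add(Dlib.JitterImage(img, rnd));
    }

    return crops;
}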
コード例 #23
0
ファイル: Program.cs プロジェクト: zhangbo27/DlibDotNet
        private static void Main(string[] args)
        {
            if (args.Length != 1)
            {
                Console.WriteLine("You call this program like this: ");
                Console.WriteLine("./dnn_instance_segmentation_train_ex /path/to/images");
                Console.WriteLine();
                Console.WriteLine($"You will also need a trained '{InstanceSegmentationNetFilename}' file.");
                Console.WriteLine("You can either train it yourself (see example program");
                Console.WriteLine("dnn_instance_segmentation_train_ex), or download a");
                Console.WriteLine($"copy from here: http://dlib.net/files/{InstanceSegmentationNetFilename}");
                return;
            }

            try
            {
                // Read the file containing the trained network from the working directory.
                using (var deserialize = new ProxyDeserialize(InstanceSegmentationNetFilename))
                    using (var detNet = LossMmod.Deserialize(deserialize, 4))
                    {
                        var segNetsByClass = new Dictionary <string, LossMulticlassLogPerPixel>();
                        segNetsByClass.Deserialize(deserialize, 4);

                        // Show inference results in a window.
                        using (var win = new ImageWindow())
                        {
                            // Find supported image files.
                            var files = Directory.GetFiles(args[0])
                                        .Where(s => s.EndsWith(".jpeg") || s.EndsWith(".jpg") || s.EndsWith(".png")).ToArray();

                            using (var rnd = new Rand())
                            {
                                Console.WriteLine($"Found {files.Length} images, processing...");
                                foreach (var file in files.Select(s => new FileInfo(s)))
                                {
                                    // Load the input image.
                                    using (var inputImage = Dlib.LoadImageAsMatrix <RgbPixel>(file.FullName))
                                    {
                                        // Create predictions for each pixel. At this point, the type of each prediction
                                        // is an index (a value between 0 and 20). Note that the net may return an image
                                        // that is not exactly the same size as the input.
                                        using (var output = detNet.Operator(inputImage))
                                        {
                                            var instances = output.First().ToList();
                                            instances.Sort((lhs, rhs) => (int)lhs.Rect.Area - (int)rhs.Rect.Area);

                                            using (var rgbLabelImage = new Matrix <RgbPixel>())
                                            {
                                                rgbLabelImage.SetSize(inputImage.Rows, inputImage.Columns);
                                                rgbLabelImage.Assign(Enumerable.Range(0, rgbLabelImage.Size).Select(i => new RgbPixel(0, 0, 0)).ToArray());

                                                var foundSomething = false;
                                                foreach (var instance in instances)
                                                {
                                                    if (!foundSomething)
                                                    {
                                                        Console.Write("Found ");
                                                        foundSomething = true;
                                                    }
                                                    else
                                                    {
                                                        Console.Write(", ");
                                                    }

                                                    Console.Write(instance.Label);

                                                    var croppingRect = GetCroppingRect(instance.Rect);
                                                    using (var dims = new ChipDims(SegDim, SegDim))
                                                        using (var chipDetails = new ChipDetails(croppingRect, dims))
                                                            using (var inputChip = Dlib.ExtractImageChip <RgbPixel>(inputImage, chipDetails, InterpolationTypes.Bilinear))
                                                            {
                                                                if (!segNetsByClass.TryGetValue(instance.Label, out var i))
                                                                {
                                                                    // per-class segmentation net not found, so we must be using the same net for all classes
                                                                    // (see bool separate_seg_net_for_each_class in dnn_instance_segmentation_train_ex.cpp)
                                                                    if (segNetsByClass.Count != 1)
                                                                    {
                                                                        throw new ApplicationException();
                                                                    }
                                                                    if (!string.IsNullOrEmpty(segNetsByClass.First().Key))
                                                                    {
                                                                        throw new ApplicationException();
                                                                    }
                                                                }

                                                                var segNet = i != null
                                                               ? i                             // use the segmentation net trained for this class
                                                               : segNetsByClass.First().Value; // use the same segmentation net for all classes

                                                                using (var mask = segNet.Operator(inputChip))
                                                                {
                                                                    var randomColor = new RgbPixel(
                                                                        rnd.GetRandom8BitNumber(),
                                                                        rnd.GetRandom8BitNumber(),
                                                                        rnd.GetRandom8BitNumber()
                                                                        );

                                                                    using (var resizedMask = new Matrix <ushort>((int)chipDetails.Rect.Height, (int)chipDetails.Rect.Width))
                                                                    {
                                                                        Dlib.ResizeImage(mask.First(), resizedMask);

                                                                        for (int r = 0, nr = resizedMask.Rows; r < nr; ++r)
                                                                        {
                                                                            for (int c = 0, nc = resizedMask.Columns; c < nc; ++c)
                                                                            {
                                                                                if (resizedMask[r, c] != 0)
                                                                                {
                                                                                    var y = (int)(chipDetails.Rect.Top + r);
                                                                                    var x = (int)(chipDetails.Rect.Left + c);
                                                                                    if (y >= 0 && y < rgbLabelImage.Rows && x >= 0 && x < rgbLabelImage.Columns)
                                                                                    {
                                                                                        rgbLabelImage[y, x] = randomColor;
                                                                                    }
                                                                                }
                                                                            }
                                                                        }
                                                                    }

                                                                    var voc2012Class = PascalVOC2012.FindVoc2012Class(instance.Label);
                                                                    Dlib.DrawRectangle(rgbLabelImage, instance.Rect, voc2012Class.RgbLabel, 1u);
                                                                }
                                                            }
                                                }

                                                instances.DisposeElement();

                                                using (var tmp = Dlib.JoinRows(inputImage, rgbLabelImage))
                                                {
                                                    // Show the input image on the left, and the predicted RGB labels on the right.
                                                    win.SetImage(tmp);

                                                    if (instances.Any())
                                                    {
                                                        Console.Write($" in {file.Name} - hit enter to process the next image");
                                                        Console.ReadKey();
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }

                        foreach (var kvp in segNetsByClass)
                        {
                            kvp.Value.Dispose();
                        }
                    }
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
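GetCroppingRect, SegDim and InstanceSegmentationNetFilename are defined elsewhere in this program. In the dlib instance segmentation example the cropping rectangle is the detection box made square, centered, and enlarged by roughly a factor of 1.25 so the segmentation net sees some context. The helper below is a hedged sketch of that idea; the exact DlibDotNet Rectangle constructor and property types used here are assumptions.

// Hypothetical sketch of GetCroppingRect: a square box around the detection with ~25% margin.
private static Rectangle GetCroppingRect(Rectangle rect)
{
    const double zoomFactor = 1.25;

    var centerX = (rect.Left + rect.Right) / 2;
    var centerY = (rect.Top + rect.Bottom) / 2;
    var half = (int)Math.Round(Math.Max((int)rect.Width, (int)rect.Height) * zoomFactor / 2.0);

    // left, top, right, bottom
    return new Rectangle(centerX - half, centerY - half, centerX + half, centerY + half);
}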
コード例 #24
0
        private static void Main()
        {
            using (var img = new Array2D <byte>(400, 400))
                using (var ht = new DlibDotNet.HoughTransform(300))
                    using (var win = new ImageWindow())
                        using (var win2 = new ImageWindow())
                        {
                            var angle1 = 0d;
                            var angle2 = 0d;

                            while (true)
                            {
                                angle1 += Math.PI / 130;
                                angle2 += Math.PI / 400;

                                var rect = img.Rect;
                                var cent = rect.Center;
                                var arc  = Point.Rotate(cent, cent + new Point(90, 0), angle1 * 180 / Math.PI);
                                var tmp2 = arc + new Point(500, 0);
                                var tmp3 = arc - new Point(500, 0);
                                var l    = Point.Rotate(arc, tmp2, angle2 * 180 / Math.PI);
                                var r    = Point.Rotate(arc, tmp3, angle2 * 180 / Math.PI);

                                Dlib.AssignAllPixels(img, 0);
                                Dlib.DrawLine(img, l, r, 255);

                                using (var himg = new Array2D <int>())
                                {
                                    var offset = new Point(50, 50);
                                    var hrect  = Dlib.GetRect(ht);
                                    var box    = Rectangle.Translate(hrect, offset);

                                    // Now let's compute the hough transform for a subwindow in the image.  In
                                    // particular, we run it on the 300x300 subwindow with an upper left corner at the
                                    // pixel point(50,50).  The output is stored in himg.
                                    ht.Operator(img, box, himg);

                                    // Now that we have the transformed image, the Hough image pixel with the largest
                                    // value should indicate where the line is.  So we find the coordinates of the
                                    // largest pixel:
                                    using (var mat = Dlib.Mat(himg))
                                    {
                                        var p = Dlib.MaxPoint(mat);

                                        // And then ask the ht object for the line segment in the original image that
                                        // corresponds to this point in Hough transform space.
                                        var line = ht.GetLine(p);

                                        // Finally, let's display all these things on the screen.  We copy the original
                                        // input image into a color image and then draw the detected line on top in red.
                                        using (var temp = new Array2D <RgbPixel>())
                                        {
                                            Dlib.AssignImage(img, temp);

                                            var p1 = line.Item1 + offset;
                                            var p2 = line.Item2 + offset;

                                            Dlib.DrawLine(temp, p1, p2, new RgbPixel
                                            {
                                                Red = 255
                                            });
                                            win.ClearOverlay();
                                            win.SetImage(temp);

                                            // Also show the subwindow we ran the Hough transform on as a green box.  You will
                                            // see that the detected line is exactly contained within this box and also
                                            // overlaps the original line.
                                            win.AddOverlay(box, new RgbPixel
                                            {
                                                Green = 255
                                            });

                                            using (var jet = Dlib.Jet(himg))
                                                win2.SetImage(jet);
                                        }
                                    }
                                }
                            }
                        }
        }
コード例 #25
0
        static void AutoExposureContinuous(Camera camera)
        {
            // Check whether the Exposure Auto feature is available.
            if (!camera.Parameters[PLCamera.ExposureAuto].IsWritable)
            {
                Console.WriteLine("The camera does not support Exposure Auto.");
                return;
            }

            // Maximize the grabbed image area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetValue(camera.Parameters[PLCamera.OffsetX].GetMinimum());
            camera.Parameters[PLCamera.OffsetY].TrySetValue(camera.Parameters[PLCamera.OffsetY].GetMinimum());
            camera.Parameters[PLCamera.Width].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
            camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

            // Set the Auto Function ROI for luminance statistics.
            // We want to use ROI1 for gathering the statistics.
            if (camera.Parameters[autoFunctionAOIROIUseBrightness].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue(regionSelectorValue1);
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue(true);  // ROI 1 is used for brightness control
                camera.Parameters[regionSelector].SetValue(regionSelectorValue2);
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue(false); // ROI 2 is not used for brightness control
            }
            camera.Parameters[regionSelector].SetValue(regionSelectorValue1);
            camera.Parameters[regionSelectorOffsetX].SetValue(camera.Parameters [PLCamera.OffsetX].GetMinimum());
            camera.Parameters[regionSelectorOffsetY].SetValue(camera.Parameters [PLCamera.OffsetY].GetMinimum());
            camera.Parameters[regionSelectorWidth].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
            camera.Parameters[regionSelectorHeight].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                // Set the target value for luminance control. The value is always expressed
                // as an 8 bit value regardless of the current pixel data output format,
                // i.e., 0 -> black, 255 -> white.
                camera.Parameters[PLCamera.AutoTargetValue].SetValue(80);
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                // Set the target value for luminance control.
                // A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
                // A value of 0.4 means 40 % and so forth.
                camera.Parameters[PLCamera.AutoTargetBrightness].SetValue(0.3);
            }

            // Try ExposureAuto = Continuous.
            Console.WriteLine("Trying 'ExposureAuto = Continuous'.");
            Console.WriteLine("Initial Exposure time = {0} us", camera.Parameters[exposureTime].GetValue());
            camera.Parameters[PLCamera.ExposureAuto].SetValue(PLCamera.ExposureAuto.Continuous);

            // When "continuous" mode is selected, the parameter value is adjusted repeatedly while images are acquired.
            // Depending on the current frame rate, the automatic adjustments will usually be carried out for
            // every or every other image, unless the camera's microcontroller is kept busy by other tasks.
            // The repeated automatic adjustment will proceed until the "once" mode of operation is used or
            // until the auto function is set to "off", in which case the parameter value resulting from the latest
            // automatic adjustment will operate unless the value is manually adjusted.
            for (int n = 0; n < 20; n++)    // For demonstration purposes, we will use only 20 images.
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage(1, result);
                    }
                }

                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep(100);
            }
            camera.Parameters[PLCamera.ExposureAuto].SetValue(PLCamera.ExposureAuto.Off); // Switch off Exposure Auto.

            Console.WriteLine("Final Exposure Time = {0} us", camera.Parameters[exposureTime].GetValue());
        }
コード例 #26
0
ファイル: Program.cs プロジェクト: Feodoros/BelkaFaces
        static void Main(string[] args)
        {
            /// FaceDetectionWith_API
            Location[] coord = TestImage(fileName, Model.Hog);


            /// Face DetectionWith_DLIB
            using (var fd = Dlib.GetFrontalFaceDetector())
            {
                var img = Dlib.LoadImage <RgbPixel>(fileName);

                // find all faces in the image
                var faces = fd.Operator(img);
                foreach (var face in faces)
                {
                    // draw a rectangle for each face
                    Dlib.DrawRectangle(img, face, color: new RgbPixel(0, 255, 255), thickness: 4);
                }

                Dlib.SaveJpeg(img, outputName);
            }


            // The first thing we are going to do is load all our models.  First, since we need to
            // find faces in the image we will need a face detector:
            using (var detector = Dlib.GetFrontalFaceDetector())
                // We will also use a face landmarking model to align faces to a standard pose:  (see face_landmark_detection_ex.cpp for an introduction)
                using (var sp = ShapePredictor.Deserialize("shape_predictor_68_face_landmarks.dat"))
                    // And finally we load the DNN responsible for face recognition.
                    using (var net = DlibDotNet.Dnn.LossMetric.Deserialize("dlib_face_recognition_resnet_model_v1.dat"))

                        using (var img = Dlib.LoadImageAsMatrix <RgbPixel>(fileName))

                            using (var win = new ImageWindow(img))
                            {
                                var faces = new List <Matrix <RgbPixel> >();
                                foreach (var face in detector.Operator(img))
                                {
                                    var shape          = sp.Detect(img, face);
                                    var faceChipDetail = Dlib.GetFaceChipDetails(shape, 150, 0.25);
                                    var faceChip       = Dlib.ExtractImageChip <RgbPixel>(img, faceChipDetail);

                                    //faces.Add(move(face_chip));
                                    faces.Add(faceChip);

                                    win.AddOverlay(face);
                                }

                                if (!faces.Any())
                                {
                                    Console.WriteLine("No faces found in image!");
                                    return;
                                }

                                // This call asks the DNN to convert each face image in faces into a 128D vector.
                                // In this 128D vector space, images from the same person will be close to each other
                                // but vectors from different people will be far apart.  So we can use these vectors to
                                // identify if a pair of images are from the same person or from different people.
                                var faceDescriptors = net.Operator(faces);

                                // In particular, one simple thing we can do is face clustering.  This next bit of code
                                // creates a graph of connected faces and then uses the Chinese whispers graph clustering
                                // algorithm to identify how many people there are and which faces belong to whom.
                                var edges = new List <SamplePair>();
                                for (uint i = 0; i < faceDescriptors.Count; ++i)
                                {
                                    for (var j = i; j < faceDescriptors.Count; ++j)
                                    {
                                        // Faces are connected in the graph if they are close enough.  Here we check if
                                        // the distance between two face descriptors is less than 0.6, which is the
                                        // decision threshold the network was trained to use.  Although you can
                                        // certainly use any other threshold you find useful.
                                        var diff = faceDescriptors[i] - faceDescriptors[j];
                                        if (Dlib.Length(diff) < 0.6)
                                        {
                                            edges.Add(new SamplePair(i, j));
                                        }
                                    }
                                }

                                Dlib.ChineseWhispers(edges, 100, out var numClusters, out var labels);

                                // This will correctly indicate that there are 4 people in the image.
                                Console.WriteLine($"number of people found in the image: {numClusters}");


                                // Display the result in image windows (one per cluster)
                                var winClusters = new List <ImageWindow>();
                                for (var i = 0; i < numClusters; i++)
                                {
                                    winClusters.Add(new ImageWindow());
                                }
                                var tileImages = new List <Matrix <RgbPixel> >();
                                for (var clusterId = 0ul; clusterId < numClusters; ++clusterId)
                                {
                                    var temp = new List <Matrix <RgbPixel> >();
                                    for (var j = 0; j < labels.Length; ++j)
                                    {
                                        if (clusterId == labels[j])
                                        {
                                            temp.Add(faces[j]);
                                        }
                                    }

                                    winClusters[(int)clusterId].Title = $"face cluster {clusterId}";
                                    var tileImage = Dlib.TileImages(temp);
                                    tileImages.Add(tileImage);
                                    winClusters[(int)clusterId].SetImage(tileImage);
                                }


                                // Finally, let's print one of the face descriptors to the screen.
                                using (var trans = Dlib.Trans(faceDescriptors[0]))
                                {
                                    Console.WriteLine($"face descriptor for one face: {trans}");

                                    // It should also be noted that face recognition accuracy can be improved if jittering
                                    // is used when creating face descriptors.  In particular, to get 99.38% on the LFW
                                    // benchmark you need to use the jitter_image() routine to compute the descriptors,
                                    // like so:
                                    var jitterImages = JitterImage(faces[0]).ToArray();
                                    var ret          = net.Operator(jitterImages);
                                    using (var m = Dlib.Mat(ret))
                                        using (var faceDescriptor = Dlib.Mean <float>(m))
                                            using (var t = Dlib.Trans(faceDescriptor))
                                            {
                                                Console.WriteLine($"jittered face descriptor for one face: {t}");

                                                // If you use the model without jittering, as we did when clustering the bald guys, it
                                                // gets an accuracy of 99.13% on the LFW benchmark.  So jittering makes the whole
                                                // procedure a little more accurate but makes face descriptor calculation slower.

                                                Console.WriteLine("hit enter to terminate");
                                                Console.ReadKey();

                                                foreach (var jitterImage in jitterImages)
                                                {
                                                    jitterImage.Dispose();
                                                }

                                                foreach (var tileImage in tileImages)
                                                {
                                                    tileImage.Dispose();
                                                }

                                                foreach (var edge in edges)
                                                {
                                                    edge.Dispose();
                                                }

                                                foreach (var descriptor in faceDescriptors)
                                                {
                                                    descriptor.Dispose();
                                                }

                                                foreach (var face in faces)
                                                {
                                                    face.Dispose();
                                                }
                                            }
                                }
                            }

            System.Console.ReadLine();
        }
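
The JitterImage helper called above is not included in this excerpt. A minimal sketch of what it might look like, assuming DlibDotNet exposes the Dlib.JitterImage(Matrix<RgbPixel>, Rand) overload used by the upstream DnnFaceRecognition sample (the copy count of 100 is an assumption):

        // Hypothetical sketch: produce slightly zoomed, rotated, translated and mirrored
        // copies of one face chip so their averaged descriptor becomes more robust.
        // The caller is responsible for disposing the returned matrices.
        private static IEnumerable<Matrix<RgbPixel>> JitterImage(Matrix<RgbPixel> img)
        {
            var crops = new List<Matrix<RgbPixel>>();
            using (var rnd = new Rand())
            {
                for (var i = 0; i < 100; ++i)
                    crops.Add(Dlib.JitterImage(img, rnd));
            }
            return crops;
        }
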
コード例 #27
        static void AutoGainOnce(Camera camera)
        {
            // Check whether the gain auto function is available.
            if (!camera.Parameters[PLCamera.GainAuto].IsWritable)
            {
                Console.WriteLine("The camera does not support Gain Auto.");
                return;
            }

            // Maximize the grabbed image area of interest (Image AOI).
            camera.Parameters[PLCamera.OffsetX].TrySetValue(camera.Parameters[PLCamera.OffsetX].GetMinimum());
            camera.Parameters[PLCamera.OffsetY].TrySetValue(camera.Parameters[PLCamera.OffsetY].GetMinimum());
            camera.Parameters[PLCamera.Width].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
            camera.Parameters[PLCamera.Height].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

            // Set the Auto Function ROI for luminance statistics.
            // We want to use ROI1 for gathering the statistics.
            if (camera.Parameters[autoFunctionAOIROIUseBrightness].IsWritable)
            {
                camera.Parameters[regionSelector].SetValue(regionSelectorValue1);
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue(true);  // ROI 1 is used for brightness control
                camera.Parameters[regionSelector].SetValue(regionSelectorValue2);
                camera.Parameters[autoFunctionAOIROIUseBrightness].SetValue(false); // ROI 2 is not used for brightness control
            }
            camera.Parameters[regionSelector].SetValue(regionSelectorValue1);
            camera.Parameters[regionSelectorOffsetX].SetValue(camera.Parameters[PLCamera.OffsetX].GetMinimum());
            camera.Parameters[regionSelectorOffsetY].SetValue(camera.Parameters[PLCamera.OffsetY].GetMinimum());
            camera.Parameters[regionSelectorWidth].SetValue(camera.Parameters[PLCamera.Width].GetMaximum());
            camera.Parameters[regionSelectorHeight].SetValue(camera.Parameters[PLCamera.Height].GetMaximum());

            // We are going to try GainAuto = Once.
            Console.WriteLine("Trying 'GainAuto = Once'.");
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                // Set the target value for luminance control. The value is always expressed
                // as an 8 bit value regardless of the current pixel data output format,
                // i.e., 0 -> black, 255 -> white.
                camera.Parameters[PLCamera.AutoTargetValue].SetValue(80);

                Console.WriteLine("Initial Gain = {0}", camera.Parameters[PLCamera.GainRaw].GetValue());
                // Set the gain ranges for luminance control.
                camera.Parameters[PLCamera.AutoGainRawLowerLimit].SetValue(camera.Parameters[PLCamera.GainRaw].GetMinimum());
                camera.Parameters[PLCamera.AutoGainRawUpperLimit].SetValue(camera.Parameters[PLCamera.GainRaw].GetMaximum());
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                // Set the target value for luminance control.
                // A value of 0.3 means that the target brightness is 30 % of the maximum brightness of the raw pixel value read out from the sensor.
                // A value of 0.4 means 40 % and so forth.
                camera.Parameters[PLCamera.AutoTargetBrightness].SetValue(0.3);

                Console.WriteLine("Initial Gain = {0}", camera.Parameters[PLCamera.Gain].GetValue());
                // Set the gain ranges for luminance control.
                camera.Parameters[PLCamera.AutoGainLowerLimit].SetValue(camera.Parameters[PLCamera.Gain].GetMinimum());
                camera.Parameters[PLCamera.AutoGainUpperLimit].SetValue(camera.Parameters[PLCamera.Gain].GetMaximum());
            }
            camera.Parameters[PLCamera.GainAuto].SetValue(PLCamera.GainAuto.Once);

            // When the "once" mode of operation is selected,
            // the parameter values are automatically adjusted until the related image property
            // reaches the target value. After the automatic parameter value adjustment is complete, the auto
            // function will automatically be set to "off" and the new parameter value will be applied to the
            // subsequently grabbed images.
            int n = 0;

            while (camera.Parameters[PLCamera.GainAuto].GetValue() != PLCamera.GainAuto.Off)
            {
                IGrabResult result = camera.StreamGrabber.GrabOne(5000, TimeoutHandling.ThrowException);
                using (result)
                {
                    // Image grabbed successfully?
                    if (result.GrabSucceeded)
                    {
                        ImageWindow.DisplayImage(1, result);
                    }
                }
                n++;
                //For demonstration purposes only. Wait until the image is shown.
                System.Threading.Thread.Sleep(100);

                //Make sure the loop is exited.
                if (n > 100)
                {
                    throw new TimeoutException("The adjustment of auto gain did not finish.");
                }
            }

            Console.WriteLine("GainAuto went back to 'Off' after {0} frames", n);
            if (camera.GetSfncVersion() < sfnc2_0_0) // Handling for older cameras
            {
                Console.WriteLine("Final Gain = {0}", camera.Parameters[PLCamera.GainRaw].GetValue());
            }
            else // Handling for newer cameras (using SFNC 2.0, e.g. USB3 Vision cameras)
            {
                Console.WriteLine("Final Gain = {0}", camera.Parameters[PLCamera.Gain].GetValue());
            }
        }
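
A minimal, hypothetical caller for AutoGainOnce might look like the sketch below; it assumes the parameter-name fields used above (regionSelector, regionSelectorValue1/2, autoFunctionAOIROIUseBrightness, the regionSelector* offset/size names, and sfnc2_0_0) are initialized elsewhere, as in Basler's AutoFunctions sample:

        // Hypothetical driver, not part of the sample above.
        internal static void RunAutoGainOnceDemo()
        {
            using (Camera camera = new Camera()) // first camera device found
            {
                camera.Open();
                AutoGainOnce(camera);
                camera.Close();
            }
        }
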
コード例 #28
        public void AddOverlay()
        {
            if (!this.CanGuiDebug)
            {
                Console.WriteLine("Build and run as Release mode if you wanna show Gui!!");
                return;
            }

            var path  = this.GetDataFile("Lenna.bmp");
            var tests = new[]
            {
                new { Type = ImageTypes.HsiPixel, ExpectResult = true },
                new { Type = ImageTypes.LabPixel, ExpectResult = true },
                new { Type = ImageTypes.BgrPixel, ExpectResult = true },
                new { Type = ImageTypes.RgbPixel, ExpectResult = true },
                new { Type = ImageTypes.RgbAlphaPixel, ExpectResult = true },
                new { Type = ImageTypes.UInt8, ExpectResult = true },
                new { Type = ImageTypes.UInt16, ExpectResult = true },
                new { Type = ImageTypes.UInt32, ExpectResult = true },
                new { Type = ImageTypes.Int8, ExpectResult = true },
                new { Type = ImageTypes.Int16, ExpectResult = true },
                new { Type = ImageTypes.Int32, ExpectResult = true },
                new { Type = ImageTypes.Float, ExpectResult = true },
                new { Type = ImageTypes.Double, ExpectResult = true }
            };

            foreach (var test in tests)
            {
                try
                {
                    var rect  = new Rectangle(10, 10, 100, 100);
                    var array = Array2D.Array2DTest.CreateArray2DHelp(test.Type, path.FullName);
                    using (var window = new ImageWindow(array))
                    {
                        switch (test.Type)
                        {
                        case ImageTypes.UInt8:
                            window.AddOverlay(rect, (byte)0, test.Type.ToString());
                            break;

                        case ImageTypes.UInt16:
                            window.AddOverlay(rect, (ushort)0, test.Type.ToString());
                            break;

                        case ImageTypes.UInt32:
                            window.AddOverlay(rect, 0u, test.Type.ToString());
                            break;

                        case ImageTypes.Int8:
                            window.AddOverlay(rect, (sbyte)0, test.Type.ToString());
                            break;

                        case ImageTypes.Int16:
                            window.AddOverlay(rect, (short)0, test.Type.ToString());
                            break;

                        case ImageTypes.Int32:
                            window.AddOverlay(rect, 0, test.Type.ToString());
                            break;

                        case ImageTypes.Float:
                            window.AddOverlay(rect, 0f, test.Type.ToString());
                            break;

                        case ImageTypes.Double:
                            window.AddOverlay(rect, 0d, test.Type.ToString());
                            break;

                        case ImageTypes.RgbAlphaPixel:
                            window.AddOverlay(rect, new RgbAlphaPixel(127, 0, 0, 0), test.Type.ToString());
                            break;

                        case ImageTypes.RgbPixel:
                            window.AddOverlay(rect, new RgbPixel(0, 0, 0), test.Type.ToString());
                            break;

                        case ImageTypes.HsiPixel:
                            window.AddOverlay(rect, new HsiPixel(0, 0, 0), test.Type.ToString());
                            break;

                        case ImageTypes.LabPixel:
                            window.AddOverlay(rect, new LabPixel(0, 0, 0), test.Type.ToString());
                            break;
                        }

                        window.WaitUntilClosed();
                    }
                }
                catch (Exception e)
                {
                    Console.WriteLine(e.StackTrace);
                    Console.WriteLine($"Failed to create ImageWindow from Array2D Type: {test.Type}");
                    throw;
                }
            }
        }
コード例 #29
ファイル: Program.cs プロジェクト: zhuxb711/DlibDotNet
        private static void Main(string[] args)
        {
            if (args.Length != 1)
            {
                Console.WriteLine("You call this program like this: ");
                Console.WriteLine("./dnn_semantic_segmentation_train_ex /path/to/images");
                Console.WriteLine();
                Console.WriteLine("You will also need a trained 'semantic_segmentation_voc2012net.dnn' file.");
                Console.WriteLine("You can either train it yourself (see example program");
                Console.WriteLine("dnn_semantic_segmentation_train_ex), or download a");
                Console.WriteLine("copy from here: http://dlib.net/files/semantic_segmentation_voc2012net.dnn");
                return;
            }

            try
            {
                // Read the file containing the trained network from the working directory.
                using (var net = LossMulticlassLogPerPixel.Deserialize("semantic_segmentation_voc2012net.dnn"))
                {
                    // Show inference results in a window.
                    using (var win = new ImageWindow())
                    {
                        // Find supported image files.
                        var files = Directory.GetFiles(args[0])
                                    .Where(s => s.EndsWith(".jpeg") || s.EndsWith(".jpg") || s.EndsWith(".png")).ToArray();
                        Console.WriteLine($"Found {files.Length} images, processing...");
                        foreach (var file in files)
                        {
                            // Load the input image.
                            using (var inputImage = Dlib.LoadImageAsMatrix <RgbPixel>(file))
                            {
                                // Create predictions for each pixel. At this point, the type of each prediction
                                // is an index (a value between 0 and 20). Note that the net may return an image
                                // that is not exactly the same size as the input.
                                using (var output = net.Operator(inputImage))
                                    using (var temp = output.First())
                                    {
                                        // Crop the returned image to be exactly the same size as the input.
                                        var rect = Rectangle.CenteredRect((int)(temp.Columns / 2d), (int)(temp.Rows / 2d), (uint)inputImage.Columns, (uint)inputImage.Rows);
                                        using (var dims = new ChipDims((uint)inputImage.Rows, (uint)inputImage.Columns))
                                            using (var chipDetails = new ChipDetails(rect, dims))
                                                using (var indexLabelImage = Dlib.ExtractImageChip <ushort>(temp, chipDetails, InterpolationTypes.NearestNeighbor))
                                                {
                                                    // Convert the indexes to RGB values.
                                                    using (var rgbLabelImage = IndexLabelImageToRgbLabelImage(indexLabelImage))
                                                    {
                                                        // Show the input image on the left, and the predicted RGB labels on the right.
                                                        using (var joinedRow = Dlib.JoinRows(inputImage, rgbLabelImage))
                                                        {
                                                            win.SetImage(joinedRow);

                                                            // Find the most prominent class label from amongst the per-pixel predictions.
                                                            var classLabel = GetMostProminentNonBackgroundClassLabel(indexLabelImage);

                                                            Console.WriteLine($"{file} : {classLabel} - hit enter to process the next image");
                                                            Console.ReadKey();
                                                        }
                                                    }
                                                }
                                    }
                            }
                        }
                    }
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
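
Neither IndexLabelImageToRgbLabelImage nor GetMostProminentNonBackgroundClassLabel is shown in this excerpt. The counting idea behind the latter can be sketched as below, under the assumption that the per-pixel indexes have first been copied out of the Matrix<ushort> into a flat array, that index 0 denotes the background class (as in the VOC2012 setup), and that System.Linq and System.Collections.Generic are imported:

        // Hypothetical sketch: return the most frequent non-background label index.
        private static ushort MostProminentNonBackgroundIndex(ushort[] pixelLabels)
        {
            var counts = new Dictionary<ushort, int>();
            foreach (var label in pixelLabels)
            {
                if (label == 0)
                    continue; // skip background pixels

                counts.TryGetValue(label, out var count);
                counts[label] = count + 1;
            }

            // Fall back to background (0) if no foreground pixels were predicted.
            return counts.Count > 0
                ? counts.OrderByDescending(kv => kv.Value).First().Key
                : (ushort)0;
        }
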
コード例 #30
        internal static void Main()
        {
            // The exit code of the sample application.
            int exitCode = 0;

            try
            {
                // Create a camera object that selects the first camera device found.
                // More constructors are available for selecting a specific camera device.
                using (Camera camera = new Camera())
                {
                    // Print the model name of the camera.
                    Console.WriteLine("Using camera {0}.", camera.CameraInfo[CameraInfoKey.ModelName]);

                    // Set the acquisition mode to free running continuous acquisition when the camera is opened.
                    camera.CameraOpened += Configuration.AcquireContinuous;

                    // Open the connection to the camera device.
                    camera.Open();

                    // Set buffer factory before starting the stream grabber because allocation
                    // happens there.
                    MyBufferFactory myFactory = new MyBufferFactory();
                    camera.StreamGrabber.BufferFactory = myFactory;

                    // Start grabbing.
                    camera.StreamGrabber.Start();

                    // Grab a number of images.
                    for (int i = 0; i < 10; ++i)
                    {
                        // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
                        IGrabResult grabResult = camera.StreamGrabber.RetrieveResult(5000, TimeoutHandling.ThrowException);
                        using (grabResult)
                        {
                            // Image grabbed successfully?
                            if (grabResult.GrabSucceeded)
                            {
                                // Access the image data.
                                Console.WriteLine("SizeX: {0}", grabResult.Width);
                                Console.WriteLine("SizeY: {0}", grabResult.Height);

                                // Normally we would have a byte array in the pixel data.
                                // However we are using the buffer factory here which allocates
                                // ushort arrays.
                                ushort[] buffer = grabResult.PixelData as ushort[];
                                Console.WriteLine("First value of pixel data: {0}", buffer[0]);
                                Console.WriteLine("");

                                // Display the grabbed image.
                                ImageWindow.DisplayImage(0, grabResult);
                            }
                            else
                            {
                                Console.WriteLine("Error: {0} {1}", grabResult.ErrorCode, grabResult.ErrorDescription);
                            }
                        }
                    }

                    // Stop grabbing.
                    camera.StreamGrabber.Stop();

                    // Close the connection to the camera device.
                    camera.Close();
                }
            }
            catch (Exception e)
            {
                Console.Error.WriteLine("Exception: {0}", e.Message);
                exitCode = 1;
            }
            finally
            {
                // Comment the following two lines to disable waiting on exit.
                Console.Error.WriteLine("\nPress enter to exit.");
                Console.ReadLine();
            }

            Environment.Exit(exitCode);
        }
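
Because the buffer factory allocates ushort arrays, the grabbed pixel data can be consumed directly as managed memory. A small hypothetical helper that computes the mean intensity of a frame, purely to illustrate accessing PixelData:

        // Hypothetical helper, not part of the sample: average intensity of a frame
        // whose buffer was allocated as ushort[] by the buffer factory.
        private static double MeanIntensity(IGrabResult grabResult)
        {
            var buffer = grabResult.PixelData as ushort[];
            if (buffer == null || buffer.Length == 0)
                return 0.0;

            double sum = 0;
            foreach (var value in buffer)
                sum += value;

            return sum / buffer.Length;
        }
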
コード例 #31
ファイル: Program.cs プロジェクト: wyd1520/DlibDotNet
        private static void Main(string[] args)
        {
            if (args.Length == 0)
            {
                Console.WriteLine("Give some image files as arguments to this program.");
                Console.WriteLine("Call this program like this:");
                Console.WriteLine("./face_landmark_detection_ex shape_predictor_68_face_landmarks.dat faces/*.jpg");
                Console.WriteLine("You can get the shape_predictor_68_face_landmarks.dat file from:");
                Console.WriteLine("http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2");
                return;
            }

            using (var win = new ImageWindow())
                using (var winFaces = new ImageWindow())
                    using (var detector = FrontalFaceDetector.GetFrontalFaceDetector())
                        using (var sp = new ShapePredictor(args[0]))
                            foreach (var file in args.ToList().GetRange(1, args.Length - 1))
                            {
                                Console.WriteLine($"processing image {file}");

                                using (var img = Dlib.LoadImage <RgbPixel>(file))
                                {
                                    Dlib.PyramidUp(img);

                                    var dets = detector.Detect(img);
                                    Console.WriteLine($"Number of faces detected: {dets.Length}");

                                    var shapes = new List <FullObjectDetection>();
                                    foreach (var rect in dets)
                                    {
                                        var shape = sp.Detect(img, rect);
                                        Console.WriteLine($"number of parts: {shape.Parts}");
                                        if (shape.Parts > 2)
                                        {
                                            Console.WriteLine($"pixel position of first part:  {shape.GetPart(0)}");
                                            Console.WriteLine($"pixel position of second part: {shape.GetPart(1)}");
                                            shapes.Add(shape);
                                        }
                                    }

                                    win.ClearOverlay();
                                    win.SetImage(img);

                                    if (shapes.Any())
                                    {
                                        var lines = Dlib.RenderFaceDetections(shapes);
                                        win.AddOverlay(lines);

                                        foreach (var l in lines)
                                        {
                                            l.Dispose();
                                        }

                                        var chipLocations = Dlib.GetFaceChipDetails(shapes);
                                        using (var faceChips = Dlib.ExtractImageChips <RgbPixel>(img, chipLocations))
                                            using (var tileImage = Dlib.TileImages(faceChips))
                                                winFaces.SetImage(tileImage);

                                        foreach (var c in chipLocations)
                                        {
                                            c.Dispose();
                                        }
                                    }

                                    Console.WriteLine("hit enter to process next frame");
                                    Console.ReadKey();

                                    foreach (var s in shapes)
                                    {
                                        s.Dispose();
                                    }
                                    foreach (var r in dets)
                                    {
                                        r.Dispose();
                                    }
                                }
                            }
        }
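
For completeness, the per-landmark coordinates of a detection can be dumped with a small helper. This is only a hypothetical illustration built on the FullObjectDetection members (Parts, GetPart) already used above:

        // Hypothetical sketch: print every landmark point of one detection.
        private static void PrintLandmarks(FullObjectDetection shape)
        {
            for (uint i = 0; i < shape.Parts; ++i)
                Console.WriteLine($"part {i}: {shape.GetPart(i)}");
        }
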
コード例 #32
 void buttonClick(object sender, MouseButtonEventArgs e)
 {
     Button clicked = (Button)sender;
     ImageWindow imageWindow = new ImageWindow();
     imageWindow.Content = clicked.Content;
     imageWindow.Topmost = true;
     imageWindow.Show();
 }