Example #1
public void Add()
        {
            ObjectDetector objectDetector = new ObjectDetector();

            objectDetector.Add("temp", null, new Point(0, 0));
            objectDetector.Add("temp", null, new Point(1, 1));
        }
Example #2
    // Start is called before the first frame update
    void Start()
    {
        // Use the player detector to measure the player's distance to this object
        playerDetector = transform.Find("PlayerDetector").gameObject.GetComponent <ObjectDetector>();

        im = GameObject.Find("InventoryManager").GetComponent <InventoryManager>();
    }
Example #3
    private void Start()
    {
        leanTouch      = GameObject.FindGameObjectWithTag("LeanTouch").GetComponent <LeanTouch>();
        objectDetector = GameObject.FindGameObjectWithTag("PresentDetector").GetComponent <ObjectDetector>();
        lidDetector    = GameObject.FindGameObjectWithTag("LidDetector").GetComponent <LidDetector>();
        blockTouchAnim = blockTouch.GetComponent <Animator>();
        blockTouchImg  = blockTouch.GetComponent <Image>();
        closePresentButton.SetActive(false);
        lid.SetActive(false);

        blockTouchAnim.SetTrigger("appear");
        DisableTouch();


        for (int i = 0; i < presentsParent.childCount; i++)
        {
            var pres = presentsParent.GetChild(i);
            if (pres != null)
            {
                CrazyPresent crazy = pres.GetComponent <CrazyPresent>();
                if (crazy != null)
                {
                    crazyList.Add(crazy);
                    crazy.enabled = false;
                }
            }
        }

        countdownText     = countdownChild.GetComponent <TextMeshProUGUI>();
        countdownAnimator = countdown.GetComponent <Animator>();

        currentTime = countdownTime;

        fsm = StateMachine <SessionState> .Initialize(this, SessionState.GameStart);
    }
Example #4
 // Use this for initialization
 void Start()
 {
     anim      = GetComponentInChildren <Animator>();
     body      = GetComponentInChildren <Rigidbody2D>();
     coll      = GetComponentInChildren <Collider2D>();
     inventory = GetComponentInChildren <PlayerInventory>();
     detector  = GetComponentInChildren <ObjectDetector>();
 }
Example #5
 protected override void Start()
 {
     base.Start();
     _directionVariation  = Vector3.zero;
     _npcGeneralDirection = transform.GetChild(0).GetComponent <NpcGeneralDirections>();
     _npcGeneralAddForce  = transform.GetChild(1).GetComponent <NpcGeneralAddForces>();
     _detectorScript      = transform.parent.transform.GetChild(3).GetComponent <ObjectDetector>();
 }
Example #6
    // Start is called before the first frame update
    void Start()
    {
        gameManager = GameObject.Find("GameManager").GetComponent <GameManager>();

        ClickedLeafLitterBool = false;
        playerDetector        = transform.Find("PlayerDetector").gameObject.GetComponent <ObjectDetector>();
        acornText             = GameObject.Find("Acorn Text");
    }
Example #7
        public ObjectDetectionAppService(ObjectDetector detectorClient, ILogger logger)
        {
            _detectorClient = detectorClient;
            _detectorClient.Initialize();
            var account = CloudStorageAccount.Parse(ConfigurationManager.AppSettings["AzureWebJobsStorage"]);

            _cloudBlobClient = account.CreateCloudBlobClient();
        }
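For context, the blob client created above would typically be used along these lines; a minimal hedged sketch (the container and file names are illustrative, not part of the original service):

            // Hypothetical usage of the _cloudBlobClient built in the constructor
            // (container/blob names are illustrative):
            var container = _cloudBlobClient.GetContainerReference("frames");
            await container.CreateIfNotExistsAsync();                     // ensure the container exists
            var blob = container.GetBlockBlobReference("frame-0001.jpg");
            await blob.DownloadToFileAsync("frame-0001.jpg", System.IO.FileMode.Create);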
Example #8
        public ObjectDetectorTest()
        {
            var path = this.GetDataFile("face_detector.svm");

            this._Scanner        = new ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor>(6);
            this._ObjectDetector = new ObjectDetector <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> >(this._Scanner);
            this._ObjectDetector.Deserialize(path.FullName);
        }
Example #9
    /* ************************************************ */
    /* Main Functions */
    /* ************************************************ */
    void Start()
    {
        _parent         = transform.parent.gameObject;
        _enemyScript    = _parent.GetComponent <EnemyTest>();
        _detectorScript = _parent.transform.GetChild(0).GetComponent <ObjectDetector>();
        _audioScript    = _parent.transform.GetChild(2).GetComponent <AudioManager>();

        _myPrefab = GetAmmoType();
    }
Example #10
        /// <summary>
        /// Initializes a new instance of the <see cref="SimpleFaceDetector"/> class with the model file path that this detector uses.
        /// </summary>
        /// <param name="modelPath">The model file path that this detector uses.</param>
        /// <exception cref="FileNotFoundException">The model file is not found.</exception>
        public SimpleFaceDetector(string modelPath)
        {
            if (!File.Exists(modelPath))
            {
                throw new FileNotFoundException("The model file is not found.", modelPath);
            }

            this._Scanner        = new ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor>(6);
            this._ObjectDetector = new ObjectDetector <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> >(this._Scanner);
            this._ObjectDetector.Deserialize(modelPath);
        }
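A minimal sketch of exercising the deserialized detector, mirroring how the dlib training example later on this page runs its own detector (the image path and loader call are assumptions, not part of SimpleFaceDetector):

            // Hypothetical usage sketch (image path is illustrative):
            using (var img = Dlib.LoadImageAsMatrix<byte>("photo.jpg"))
            {
                var faces = this._ObjectDetector.Operator(img);
                foreach (var rect in faces)
                    Console.WriteLine($"face: {rect.Left},{rect.Top} {rect.Width}x{rect.Height}");
            }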
Example #11
    void Start()
    {
        this.arRaycastManager = GetComponent <ARRaycastManager>();

        this.objectDetector = goObjectDetector.GetComponent <TinyYolo3Detector>();
        this.objectDetector.Start();

        this.similarWordClient = goSimilarWordClient.GetComponent <SimilarWordClient>();
        this.similarWordClient.Start();

        CalculateShift();
    }
Example #12
    // Start is called before the first frame update
    void Start()
    {
        gameManager = GameObject.Find("GameManager").GetComponent <GameManager>();
        if (gameManager.isAcornerLevelSaved)
        {
            transform.position = gameManager.savedBranchPosition;
        }

        firstTreeHitEventDispatcher = GameObject.Find("AnalyticsManager").GetComponent <FirstTreeHitEventDispatcher>();

        playerDetector = transform.Find("PlayerDetector").gameObject.GetComponent <ObjectDetector>();
    }
Example #13
        public void FileChangedEventHandler(object sender, FileWatcherArgs args)
        {
            var resultDetectedFrame = ObjectDetector.DetectObjAndDrawCircle(MatFrame);

            var fileEvent = new Event()
            {
                Path     = args.FullPath,
                Activity = args.ChangeType,
                Time     = DateTime.Now
            };

            Events.Add(fileEvent);
        }
Example #15
        public void ObjectDetectorTest_DetectObjects_640x480()
        {
            ObjectDetector objectDetector = new ObjectDetector();

            string TestOutputDirectoryQualified = Directory.GetCurrentDirectory() + "\\" + TestOutputFilesPath;

            if (!Directory.Exists(TestOutputDirectoryQualified))
            {
                Directory.CreateDirectory(TestOutputDirectoryQualified);
            }

            var      filepaths      = Directory.EnumerateFiles(Directory.GetCurrentDirectory() + "\\" + DataFilesPath_640x480, "*.jpg", SearchOption.TopDirectoryOnly);
            DateTime dateTimeNow    = DateTime.Now;
            long     startTimeTicks = dateTimeNow.Ticks;

            DetectObjectsUsingImageCount(objectDetector, filepaths);
        }
Example #16
        public AITests()
        {
            _detector = new ObjectDetector(ConfigurationManager.AppSettings["ObjectDetectionTrainingKey"],
                                           ConfigurationManager.AppSettings["ObjectDetectionPredictionKey"],
                                           ConfigurationManager.AppSettings["ObjectDetectionProjectName"]);

            _targetDetector = new ObjectDetector(ConfigurationManager.AppSettings["TargetObjectDetectionTrainingKey"],
                                                 ConfigurationManager.AppSettings["TargetObjectDetectionPredictionKey"],
                                                 ConfigurationManager.AppSettings["TargetObjectDetectionProjectName"]);
            try
            {
                _detector.Initialize();
                _targetDetector.Initialize();
            }
            catch
            {
                // Initialization can fail without valid Custom Vision keys; swallow the error so the tests can still be constructed.
            }
            _customVisionManager         = new CustomVisionManager(_detector);
            _targetDetectorVisionManager = new CustomVisionManager(_targetDetector);
        }
Example #17
 public ImageRecognitionService(StatusService statusService, ObjectDetector objectDetector,
                                MetaDataService metadataService, AzureFaceService azureFace,
                                AccordFaceService accordFace, EmguFaceService emguService,
                                ThumbnailService thumbs, ConfigService configService,
                                ImageClassifier imageClassifier, ImageCache imageCache,
                                WorkService workService, ExifService exifService,
                                ImageProcessService imageProcessor)
 {
     _thumbService      = thumbs;
     _accordFaceService = accordFace;
     _azureFaceService  = azureFace;
     _statusService     = statusService;
     _objectDetector    = objectDetector;
     _metdataService    = metadataService;
     _emguFaceService   = emguService;
     _configService     = configService;
     _imageClassifier   = imageClassifier;
     _imageProcessor    = imageProcessor;
     _imageCache        = imageCache;
     _workService       = workService;
     _exifService       = exifService;
 }
Example #18
    void InitializeProperties()
    {
        playerController = GetComponent <PlayerController>();
        playerManager    = GetComponent <PlayerManager>();
        affectionTracker = GetComponent <AffectionTracker>();
        playerNumber     = playerManager.GetPlayerNumber();

        gameManager      = GameObject.Find("Game Manager").GetComponent <GameManager>();
        dynamicReference = gameManager.GetComponent <DynamicReference>();

        objectDetector = transform.Find("Character").Find("Object Detector").GetComponent <ObjectDetector>();

        AssignDialogWindow();

        cutsceneInProgress = false;
        waitingOnInput     = false;
        timeOfPause        = 0;
        pauseLength        = 0;
        selectionIndex     = 0;
        shiftDelay         = 0.25f;
        readyToContinue    = true;
        monitorScrollState = false;
        responseActions    = new List <CutsceneAction>();
    }
Example #19
 public ObjectRecognizer(string path)
 {
     objectDetector = new ObjectDetector(path);
 }
Example #20
 public ObjectDetectionController(ObjectDetector objectDetection)
 {
     this.objectDetector = objectDetection;
 }
Example #21
        private void DetectObjectsUsingImageCount(ObjectDetector objectDetector, IEnumerable <string> filepaths)
        {
            List <Rectangle> rectObjectList = new List <Rectangle>();
            List <string>    filepathsList  = new List <string>(filepaths);
            int count = filepathsList.Count;

            filepathsList.Sort();
            for (int i = 0; i < filepathsList.Count; ++i)
            {
                string filepath      = filepathsList[i];
                byte[] imagebitArray = GetImageByteArray(filepath);

                MemoryStream stream = new MemoryStream(imagebitArray);

                Bitmap image = (Bitmap)Image.FromStream(stream);

                // Check that the image is in the format we expect.
                if (image.PixelFormat != PixelFormat.Format24bppRgb)
                {
                    string message = String.Format("Image format is not correct. PixelFormat: {0}", image.PixelFormat);
                    throw new Exception(message);
                }

                // Lock the bitmap's bits.
                Rectangle  rect    = new Rectangle(0, 0, image.Width, image.Height);
                BitmapData bmpData = image.LockBits(rect, ImageLockMode.ReadOnly, image.PixelFormat);

                // Get the address of the first line.
                IntPtr ptr = bmpData.Scan0;

                unsafe
                {
                    if (!objectDetector.IsInitialized())
                    {
                        IntPtr ptrPath = IntPtr.Zero;
                        try
                        {
                            ptrPath = Marshal.StringToHGlobalAnsi(ObjectDetectConfigFilePath);

                            objectDetector.InitializeFromFrame((byte *)ptr, GetBitmapSizeInBytes(bmpData), bmpData.Width, bmpData.Height, (sbyte *)ptrPath);
                        }
                        finally
                        {
                            if (ptrPath != IntPtr.Zero)
                            {
                                Marshal.FreeHGlobal(ptrPath);
                            }
                        }
                    }
                    else
                    {
                        Rectangle rectObject       = objectDetector.GetObjectRect((byte *)ptr, GetBitmapSizeInBytes(bmpData));
                        byte[]    binaryImageArray = new byte[image.Width * image.Height];
                        unsafe
                        {
                            fixed(byte *binaryImage = binaryImageArray)
                            {
                                objectDetector.GetBinaryImage(binaryImage);
                            }
                        }
                        string     filenameAppend   = String.Format("{0}", i + 1);
                        TextWriter binaryFileWriter = new StreamWriter(TestOutputFilesPath + "\\" + "binaryimage" + filenameAppend + ".txt");
                        for (int j = 0; j < image.Height; ++j)
                        {
                            for (int k = 0; k < image.Width; ++k)
                            {
                                if (binaryImageArray[k + j * image.Width] == 255)
                                {
                                    binaryFileWriter.Write("1");
                                }
                                else if (binaryImageArray[k + j * image.Width] == 0)
                                {
                                    binaryFileWriter.Write("0");
                                }
                                else
                                {
                                    Debug.Assert(false, "Should never happen");
                                }
                            }
                            binaryFileWriter.WriteLine("");
                        }
                        binaryFileWriter.Flush();
                        binaryFileWriter.Close();
                        rectObjectList.Add(rectObject);
                    }
                }

                image.UnlockBits(bmpData);
            }

            ValidateObjectRectsList(rectObjectList);
        }
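The native-string handling above is the standard HGlobal allocate/use/free pattern; isolated for clarity it looks like this (the helper name is ours, not part of the test suite):

        // Standalone version of the allocate/use/free pattern used above
        // (helper name is illustrative):
        private static void WithAnsiString(string value, Action<IntPtr> use)
        {
            IntPtr ptr = IntPtr.Zero;
            try
            {
                ptr = Marshal.StringToHGlobalAnsi(value); // unmanaged ANSI copy of the string
                use(ptr);
            }
            finally
            {
                if (ptr != IntPtr.Zero)
                {
                    Marshal.FreeHGlobal(ptr);             // always release the native buffer
                }
            }
        }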
Example #22
 private void Awake()
 {
     objectDetector = GetComponent <ObjectDetector>();
 }
Example #23
        private static void Main(string[] args)
        {
            try
            {
                // In this example we are going to train a face detector based on the
                // small faces dataset in the examples/faces directory.  So the first
                // thing we do is load that dataset.  This means you need to supply the
                // path to this faces folder as a command line argument so we will know
                // where it is.
                if (args.Length != 1)
                {
                    Console.WriteLine("Give the path to the examples/faces directory as the argument to this");
                    Console.WriteLine("program.  For example, if you are in the examples folder then execute ");
                    Console.WriteLine("this program by running: ");
                    Console.WriteLine("   ./fhog_object_detector_ex faces");
                    Console.WriteLine();
                    return;
                }

                var facesDirectory = args[0];
                // The faces directory contains a training dataset and a separate
                // testing dataset.  The training data consists of 4 images, each
                // annotated with rectangles that bound each human face.  The idea is
                // to use this training data to learn to identify human faces in new
                // images.
                //
                // Once you have trained an object detector it is always important to
                // test it on data it wasn't trained on.  Therefore, we will also load
                // a separate testing set of 5 images.  Once we have a face detector
                // created from the training data we will see how well it works by
                // running it on the testing images.
                //
                // So here we create the variables that will hold our dataset.
                // images_train will hold the 4 training images and face_boxes_train
                // holds the locations of the faces in the training images.  So for
                // example, the image images_train[0] has the faces given by the
                // rectangles in face_boxes_train[0].
                IList <Matrix <byte> >     tmpImagesTrain;
                IList <Matrix <byte> >     tmpImagesTest;
                IList <IList <Rectangle> > tmpFaceBoxesTrain;
                IList <IList <Rectangle> > tmpFaceBoxesTest;

                // Now we load the data.  These XML files list the images in each
                // dataset and also contain the positions of the face boxes.  Obviously
                // you can use any kind of input format you like so long as you store
                // the data into images_train and face_boxes_train.  But for convenience
                // dlib comes with tools for creating and loading XML image dataset
                // files.  Here you see how to load the data.  To create the XML files
                // you can use the imglab tool which can be found in the tools/imglab
                // folder.  It is a simple graphical tool for labeling objects in images
                // with boxes.  To see how to use it read the tools/imglab/README.txt
                // file.
                Dlib.LoadImageDataset(Path.Combine(facesDirectory, "training.xml"), out tmpImagesTrain, out tmpFaceBoxesTrain);
                Dlib.LoadImageDataset(Path.Combine(facesDirectory, "testing.xml"), out tmpImagesTest, out tmpFaceBoxesTest);

                // Now we do a little bit of pre-processing.  This is optional but for
                // this training data it improves the results.  The first thing we do is
                // increase the size of the images by a factor of two.  We do this
                // because it will allow us to detect smaller faces than otherwise would
                // be practical (since the faces are all now twice as big).  Note that,
                // in addition to resizing the images, these functions also make the
                // appropriate adjustments to the face boxes so that they still fall on
                // top of the faces after the images are resized.
                var imageTrain     = new List <Matrix <byte> >(tmpImagesTrain);
                var faceBoxesTrain = new List <IList <Rectangle> >(tmpFaceBoxesTrain);
                Dlib.UpsampleImageDataset(2, imageTrain, faceBoxesTrain);
                var imageTest     = new List <Matrix <byte> >(tmpImagesTest);
                var faceBoxesTest = new List <IList <Rectangle> >(tmpFaceBoxesTest);
                Dlib.UpsampleImageDataset(2, imageTest, faceBoxesTest);

                // Since human faces are generally left-right symmetric we can increase
                // our training dataset by adding mirrored versions of each image back
                // into images_train.  So this next step doubles the size of our
                // training dataset.  Again, this is obviously optional but is useful in
                // many object detection tasks.
                Dlib.AddImageLeftRightFlips(imageTrain, faceBoxesTrain);
                Console.WriteLine($"num training images: {imageTrain.Count()}");
                Console.WriteLine($"num testing images:  {imageTest.Count()}");


                // Finally we get to the training code.  dlib contains a number of
                // object detectors.  This typedef tells it that you want to use the one
                // based on Felzenszwalb's version of the Histogram of Oriented
                // Gradients (commonly called HOG) detector.  The 6 means that you want
                // it to use an image pyramid that downsamples the image at a ratio of
                // 5/6.  Recall that HOG detectors work by creating an image pyramid and
                // then running the detector over each pyramid level in a sliding window
                // fashion.
                using (var scanner = new ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor>(6))
                {
                    // The sliding window detector will be 80 pixels wide and 80 pixels tall.
                    scanner.SetDetectionWindowSize(80, 80);

                    using (var trainer = new StructuralObjectDetectionTrainer <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> >(scanner))
                    {
                        // Set this to the number of processing cores on your machine.
                        trainer.SetNumThreads(4);
                        // The trainer is a kind of support vector machine and therefore has the usual SVM
                        // C parameter.  In general, a bigger C encourages it to fit the training data
                        // better but might lead to overfitting.  You must find the best C value
                        // empirically by checking how well the trained detector works on a test set of
                        // images you haven't trained on.  Don't just leave the value set at 1.  Try a few
                        // different C values and see what works best for your data.
                        trainer.SetC(1);
                        // We can tell the trainer to print its progress to the console if we want.
                        trainer.BeVerbose();
                        // The trainer will run until the "risk gap" is less than 0.01.  Smaller values
                        // make the trainer solve the SVM optimization problem more accurately but will
                        // take longer to train.  For most problems a value in the range of 0.1 to 0.01 is
                        // plenty accurate.  Also, when in verbose mode the risk gap is printed on each
                        // iteration so you can see how close it is to finishing the training.
                        trainer.SetEpsilon(0.01);


                        // Now we run the trainer.  For this example, it should take on the order of 10
                        // seconds to train.
                        var detector = trainer.Train(imageTrain, faceBoxesTrain);

                        // Now that we have a face detector we can test it.  The first statement tests it
                        // on the training data.  It will print the precision, recall, and then average precision.
                        using (var matrix = Dlib.TestObjectDetectionFunction(detector, imageTrain, faceBoxesTrain))
                            Console.WriteLine($"training results: {matrix}");
                        // However, to get an idea if it really worked without overfitting we need to run
                        // it on images it wasn't trained on.  The next line does this.  Happily, we see
                        // that the object detector works perfectly on the testing images.
                        using (var matrix = Dlib.TestObjectDetectionFunction(detector, imageTest, faceBoxesTest))
                            Console.WriteLine($"testing results: {matrix}");

                        // If you have read any papers that use HOG you have probably seen the nice looking
                        // "sticks" visualization of a learned HOG detector.  This next line creates a
                        // window with such a visualization of our detector.  It should look somewhat like
                        // a face.
                        using (var fhog = Dlib.DrawFHog(detector))
                            using (var hogwin = new ImageWindow(fhog, "Learned fHOG detector"))
                            {
                                // Now for the really fun part.  Let's display the testing images on the screen and
                                // show the output of the face detector overlaid on each image.  You will see that
                                // it finds all the faces without false alarming on any non-faces.
                                using (var win = new ImageWindow())
                                    for (var i = 0; i < imageTest.Count; ++i)
                                    {
                                        // Run the detector and get the face detections.
                                        var dets = detector.Operator(imageTest[i]);
                                        win.ClearOverlay();
                                        win.SetImage(imageTest[i]);
                                        win.AddOverlay(dets, new RgbPixel(255, 0, 0));
                                        Console.WriteLine("Hit enter to process the next image...");
                                        Console.ReadKey();
                                        Console.WriteLine("");
                                    }
                            }


                        // Like everything in dlib, you can save your detector to disk using the
                        // serialize() function.
                        detector.Serialize("face_detector.svm");

                        // Then you can recall it using the deserialize() function.
                        using (var tmp = new ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor>(6))
                            using (var detector2 = new ObjectDetector <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> >(tmp))
                                detector2.Deserialize("face_detector.svm");



                        // Now let's talk about some optional features of this training tool as well as some
                        // important points you should understand.
                        //
                        // The first thing that should be pointed out is that, since this is a sliding
                        // window classifier, it can't output an arbitrary rectangle as a detection.  In
                        // this example our sliding window is 80 by 80 pixels and is run over an image
                        // pyramid.  This means that it can only output detections that are at least 80 by
                        // 80 pixels in size (recall that this is why we upsampled the images after loading
                        // them).  It also means that the aspect ratio of the outputs is 1.  So if,
                        // for example, you had a box in your training data that was 200 pixels by 10
                        // pixels then it would simply be impossible for the detector to learn to detect
                        // it.  Similarly, if you had a really small box it would be unable to learn to
                        // detect it.
                        //
                        // So the training code performs an input validation check on the training data and
                        // will throw an exception if it detects any boxes that are impossible to detect
                        // given your setting of scanning window size and image pyramid resolution.  You
                        // can use a statement like:
                        //   remove_unobtainable_rectangles(trainer, images_train, face_boxes_train)
                        // to automatically discard these impossible boxes from your training dataset
                        // before running the trainer.  This will avoid getting the "impossible box"
                        // exception.  However, I would recommend you be careful that you are not throwing
                        // away truth boxes you really care about.  The remove_unobtainable_rectangles()
                        // will return the set of removed rectangles so you can visually inspect them and
                        // make sure you are OK that they are being removed.
                        //
                        // Next, note that any location in the images not marked with a truth box is
                        // implicitly treated as a negative example.  This means that when creating
                        // training data it is critical that you label all the objects you want to detect.
                        // So for example, if you are making a face detector then you must mark all the
                        // faces in each image.  However, sometimes there are objects in images you are
                        // unsure about or simply don't care if the detector identifies or not.  For these
                        // objects you can pass in a set of "ignore boxes" as a third argument to the
                        // trainer.train() function.  The trainer will simply disregard any detections that
                        // happen to hit these boxes.
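                        // A hedged illustration of that overload (the exact signature in this
                        // C# port is assumed to mirror dlib's trainer.train(images, boxes, ignore)):
                        //   var ignoreBoxes = new List <IList <Rectangle> >(/* one list per image */);
                        //   var detectorIgnoringBoxes = trainer.Train(imageTrain, faceBoxesTrain, ignoreBoxes);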
                        //
                        // Another useful thing you can do is evaluate multiple HOG detectors together. The
                        // benefit of this is increased testing speed since it avoids recomputing the HOG
                        // features for each run of the detector.  You do this by storing your detectors
                        // into a std::vector and then invoking evaluate_detectors() like so:
                        var myDetectors = new List <ObjectDetector <ScanFHogPyramid <PyramidDown, DefaultFHogFeatureExtractor> > >();
                        myDetectors.Add(detector);
                        var dect2 = Dlib.EvaluateDetectors(myDetectors, imageTrain[0]);
                        //
                        //
                        // Finally, you can add a nuclear norm regularizer to the SVM trainer.  Doing so has
                        // two benefits.  First, it can cause the learned HOG detector to be composed of
                        // separable filters and therefore makes it execute faster when detecting objects.
                        // It can also help with generalization since it tends to make the learned HOG
                        // filters smoother.  To enable this option you call the following function before
                        // you create the trainer object:
                        //    scanner.set_nuclear_norm_regularization_strength(1.0);
                        // The argument determines how important it is to have a small nuclear norm.  A
                        // bigger regularization strength means it is more important.  The smaller the
                        // nuclear norm the smoother and faster the learned HOG filters will be, but if the
                        // regularization strength value is too large then the SVM will not fit the data
                        // well.  This is analogous to giving a C value that is too small.
                        //
                        // You can see how many separable filters are inside your detector like so:
                        Console.WriteLine($"num filters: {Dlib.NumSeparableFilters(detector)}");
                        // You can also control how many filters there are by explicitly thresholding the
                        // singular values of the filters like this:
                        using (var newDetector = Dlib.ThresholdFilterSingularValues(detector, 0.1))
                        {
                        }
                        // That removes filter components with singular values less than 0.1.  The bigger
                        // this number the fewer separable filters you will have and the faster the
                        // detector will run.  However, a large enough threshold will hurt detection
                        // accuracy.
                    }
                }
            }
            catch (Exception e)
            {
                Console.WriteLine(e);
            }
        }
Example #24
        /// <summary>
        /// Called by the Blazor runtime - this is where we setup the HTTP request pipeline and
        /// initialise all the bits and pieces we need to run.
        /// </summary>
        /// <param name="app"></param>
        /// <param name="env"></param>
        public void Configure(IApplicationBuilder app, IWebHostEnvironment env,
                              DownloadService download, ThemeService themes, TaskService tasks,
                              ExifService exifService, ThumbnailService thumbService,
                              IndexingService indexService, ImageProcessService imageProcessing,
                              AzureFaceService azureFace, ImageRecognitionService aiService,
                              UserService userService, ConfigService configService, WorkService workService,
                              ImageCache imageCache, MetaDataService metaDataService, ObjectDetector objectDetector)
        {
            SyncfusionLicenseProvider.RegisterLicense("NTUxMzEwQDMxMzkyZTM0MmUzMGFRSFpzQUhjdUE2M2V4S1BmYSs5bk13dkpGbkhvam5Wb1VRbGVURkRsOHM9");

            var logLevel = configService.Get(ConfigSettings.LogLevel, Serilog.Events.LogEventLevel.Information);

            Logging.ChangeLogLevel(logLevel);

            if (env.IsDevelopment())
            {
                app.UseDeveloperExceptionPage();
            }
            else
            {
                app.UseExceptionHandler("/Error");
                // The default HSTS value is 30 days. You may want to change this for production scenarios, see https://aka.ms/aspnetcore-hsts.
                app.UseHsts();
            }

            if (Logging.Verbose)
            {
                app.UseSerilogRequestLogging();
            }

            app.UseResponseCompression();
            app.UseRouting();
            app.UseResponseCaching();

            // Disable this for now
            // app.UseHttpsRedirection();

            // TODO: Do we need this if we serve all the images via the controller?
            app.UseStaticFiles();
            app.UseStaticFiles(new StaticFileOptions
            {
                FileProvider = new PhysicalFileProvider(ThumbnailService.PicturesRoot),
                RequestPath  = ThumbnailService.RequestRoot
            });

            // Enable auth
            app.UseAuthentication();
            app.UseAuthorization();

            app.UseEndpoints(endpoints =>
            {
                //endpoints.MapControllerRoute(name: "default", pattern: "{controller}/{action}");
                endpoints.MapControllers();
                endpoints.MapBlazorHub();
                endpoints.MapFallbackToPage("/_Host");
            });

            // Prime the cache
            imageCache.WarmUp().Wait();

            // TODO: Save this in ConfigService
            string contentRootPath = Path.Combine(env.ContentRootPath, "wwwroot");

            // TODO: Fix this, or not if Skia doesn't need it
            imageProcessing.SetContentPath(contentRootPath);
            download.SetDownloadPath(contentRootPath);
            themes.SetContentPath(contentRootPath);

            // Start the work processing queue for AI, Thumbs, etc
            workService.StartService();

            // Start the face service before the thumbnail service
            azureFace.StartService().Wait();
            metaDataService.StartService();
            indexService.StartService();
            aiService.StartService();

            // ObjectDetector can throw a segmentation fault if the docker container is pinned
            // to a single CPU, so for now, to aid debugging, let's not even try and initialise
            // it if AI is disabled. See https://github.com/Webreaper/Damselfly/issues/334
            if (!configService.GetBool(ConfigSettings.DisableObjectDetector, false))
            {
                objectDetector.InitScorer();
            }

            // Validation check to ensure at least one user is an Admin
            userService.CheckAdminUser().Wait();

            StartTaskScheduler(tasks, download, thumbService, exifService);

            Logging.StartupCompleted();
            Logging.Log("Starting Damselfly webserver...");
        }
Example #25
 public void Constructor()
 {
     ObjectDetector objectDetector = new ObjectDetector();
 }
Example #27
        void UseTextRecognitionModel()
        {
            System.Diagnostics.Debug.WriteLine($"##### {currentTxtRecogScript}");

            CommonTextRecognizerOptions options = currentTxtRecogScript switch {
                TextRecognitionScript.Latin => new LatinTextRecognizerOptions(),
                TextRecognitionScript.Chinese => new ChineseTextRecognizerOptions(),
                TextRecognitionScript.Devanagari => new DevanagariTextRecognizerOptions(),
                TextRecognitionScript.Japanese => new JapaneseTextRecognizerOptions(),
                TextRecognitionScript.Korean => new KoreanTextRecognizerOptions(),
                _ => throw new NotImplementedException()
            };

            var textRecognizer = TextRecognizer.TextRecognizerWithOptions(options);
            var image          = new MLImage(ImgSample.Image);

            textRecognizer.ProcessImage(image, (text, err) => {
                TxtData.Text = err?.Description ?? text?.Text;
            });
        }

        void UseFaceDetectionModel()
        {
            var options = new FaceDetectorOptions();

            options.PerformanceMode    = FacePerformanceMode.Accurate;
            options.LandmarkMode       = FaceLandmarkMode.All;
            options.ClassificationMode = FaceClassificationMode.All;
            var faceDetector = FaceDetector.FaceDetectorWithOptions(options);

            var image = new MLImage(ImgSample.Image);

            faceDetector.ProcessImage(image, HandleFaceDetectorCallback);

            void HandleFaceDetectorCallback(Face [] faces, NSError error)
            {
                if (error != null)
                {
                    TxtData.Text = error.Description;
                    return;
                }

                if (faces == null || faces.Length == 0)
                {
                    TxtData.Text = "No faces were found.";
                    return;
                }

                var imageSize = ImgSample.Image.Size;

                UIGraphics.BeginImageContextWithOptions(imageSize, false, 0);
                var context = UIGraphics.GetCurrentContext();

                context.SetStrokeColor(UIColor.Red.CGColor);
                context.SetLineWidth(10);

                ImgSample.Image.Draw(CGPoint.Empty);

                foreach (var face in faces)
                {
                    context.AddRect(face.Frame);
                    context.DrawPath(CGPathDrawingMode.Stroke);
                }

                var newImage = UIGraphics.GetImageFromCurrentImageContext();

                UIGraphics.EndImageContext();

                ImgSample.Image = newImage;
            }
        }

        void UseBarcodeScanningModel()
        {
            var options        = new BarcodeScannerOptions(BarcodeFormat.All);
            var barcodeScanner = BarcodeScanner.BarcodeScannerWithOptions(options);

            var image = new MLImage(ImgSample.Image);

            barcodeScanner.ProcessImage(image, HandleBarcodeScannerCallback);

            void HandleBarcodeScannerCallback(Barcode [] barcodes, NSError error)
            {
                if (error != null)
                {
                    TxtData.Text = error.Description;
                    return;
                }

                if (barcodes == null || barcodes.Length == 0)
                {
                    TxtData.Text = "No barcodes were found.";
                    return;
                }

                var stringBuilder = new StringBuilder();

                foreach (var barcode in barcodes)
                {
                    stringBuilder.AppendLine($"Raw Value: {barcode.RawValue}");
                    stringBuilder.AppendLine($"Display Value: {barcode.DisplayValue}");
                    stringBuilder.AppendLine($"Format: {barcode.Format}");
                    stringBuilder.AppendLine($"Value Type: {barcode.ValueType}");
                    stringBuilder.AppendLine();
                }

                TxtData.Text = stringBuilder.ToString();
            }
        }

        void UseDigitalInkRecognitionModel()
        {
            strokes.Clear();

            if (inkRecognizer == null)
            {
                var lang         = "en-US";
                var identifier   = DigitalInkRecognitionModelIdentifier.ModelIdentifierForLanguageTag(lang);
                var model        = new DigitalInkRecognitionModel(identifier);
                var modelManager = ModelManager.DefaultInstance;
                var conditions   = new ModelDownloadConditions(true, true);
                // This works on device, but downloads seem to fail on simulators
                modelManager.DownloadModel(model, conditions);

                var options = new DigitalInkRecognizerOptions(model);
                inkRecognizer = DigitalInkRecognizer.DigitalInkRecognizerWithOptions(options);
            }
        }

        void UseImageLabeling()
        {
            var options = new ImageLabelerOptions();

            options.ConfidenceThreshold = 0.7;
            var labeler = ImageLabeler.ImageLabelerWithOptions(options);

            var image = new MLImage(ImgSample.Image);

            labeler.ProcessImage(image, (labels, error) => {
                if (error != null)
                {
                    TxtData.Text = error.Description;
                    return;
                }

                if (labels == null || labels.Length == 0)
                {
                    TxtData.Text = "No labels were found.";
                    return;
                }

                var stringBuilder = new StringBuilder();

                for (var i = 0; i < labels.Length; i++)
                {
                    stringBuilder.AppendLine($"Label: {i}");
                    stringBuilder.AppendLine($"Text: {labels [i].Text}");
                    stringBuilder.AppendLine($"Confidence: {labels [i].Confidence}");
                    stringBuilder.AppendLine($"Index: {labels [i].Index}");
                    stringBuilder.AppendLine();
                }

                TxtData.Text = stringBuilder.ToString();
            });
        }

        void UseObjectDetectionAndTracking()
        {
            var options = new ObjectDetectorOptions();

            options.Mode = DetectorMode.SingleImage;
            options.ShouldEnableClassification  = true;
            options.ShouldEnableMultipleObjects = true;
            var objectDetector = ObjectDetector.ObjectDetectorWithOptions(options);

            var image = new MLImage(ImgSample.Image);

            objectDetector.ProcessImage(image, (objects, error) => {
                if (error != null)
                {
                    TxtData.Text = error.Description;
                    return;
                }

                if (objects == null || objects.Length == 0)
                {
                    TxtData.Text = "No objects were found.";
                    return;
                }

                var imageSize = ImgSample.Image.Size;

                UIGraphics.BeginImageContextWithOptions(imageSize, false, 0);
                var context = UIGraphics.GetCurrentContext();
                context.SetStrokeColor(UIColor.Red.CGColor);
                context.SetLineWidth(10);

                ImgSample.Image.Draw(CGPoint.Empty);

                var stringBuilder = new StringBuilder();

                for (var i = 0; i < objects.Length; i++)
                {
                    stringBuilder.AppendLine($"Object: {i}");
                    stringBuilder.AppendLine($"Tracking ID: {objects [i].TrackingId?.Int32Value ?? 0}");

                    foreach (var lbl in objects[i].Labels)
                    {
                        stringBuilder.AppendLine($" - Text: {lbl.Text}");
                        stringBuilder.AppendLine($" - Confidence: {lbl.Confidence}");
                        stringBuilder.AppendLine($" - Index: {lbl.Index}");
                    }

                    stringBuilder.AppendLine();

                    context.AddRect(objects [i].Frame);
                    context.DrawPath(CGPathDrawingMode.Stroke);
                }

                var newImage = UIGraphics.GetImageFromCurrentImageContext();
                UIGraphics.EndImageContext();

                ImgSample.Image = newImage;

                TxtData.Text = stringBuilder.ToString();
            });
        }

Example #28
 public ObjectDetectionManagerController(TaggerService taggerService, ObjectDetector objectDetection)
 {
     this.taggerService  = taggerService;
     this.objectDetector = objectDetection;
 }
Example #29
 private void Start()
 {
     enemyDetetor = GetComponent <ObjectDetector>();
     InvokeRepeating(nameof(GetTarget), 0, targetUpdateTime);
 }
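Many of the Unity snippets above fetch an ObjectDetector component with GetComponent but never show the component itself. For orientation only, here is a minimal hypothetical version (a sketch; the real projects' implementations will differ):

 // Hypothetical ObjectDetector sketch, not taken from any of the projects above:
 // it tracks objects entering/leaving a trigger collider and exposes the nearest one.
 using System.Collections.Generic;
 using UnityEngine;

 [RequireComponent(typeof(Collider2D))]
 public class ObjectDetector : MonoBehaviour
 {
     readonly List<GameObject> detected = new List<GameObject>();

     void OnTriggerEnter2D(Collider2D other) => detected.Add(other.gameObject);

     void OnTriggerExit2D(Collider2D other) => detected.Remove(other.gameObject);

     // Returns the nearest detected object, or null when nothing is in range.
     public GameObject Nearest()
     {
         GameObject best = null;
         float bestSqr = float.PositiveInfinity;
         foreach (var go in detected)
         {
             if (go == null) continue; // skip destroyed objects
             float sqr = (go.transform.position - transform.position).sqrMagnitude;
             if (sqr < bestSqr) { bestSqr = sqr; best = go; }
         }
         return best;
     }
 }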