Example #1
        /// <summary>
        /// Invoked when the page is navigated to.
        /// Initializes app assets such as skills.
        /// </summary>
        /// <param name="e"></param>
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            // Disable buttons while we initialize
            await UpdateMediaSourceButtonsAsync(false);

            // Reset bitmap rendering component
            m_processedBitmapSource = new SoftwareBitmapSource();
            ProcessedPreview.Source = m_processedBitmapSource;
            m_bboxRenderer          = new BoundingBoxRenderer(OverlayCanvas);

            m_skillLock.Wait();
            try
            {
                NotifyUser("Initializing skill...");
                m_descriptor = new ObjectDetectorDescriptor();
                m_availableExecutionDevices = await m_descriptor.GetSupportedExecutionDevicesAsync();

                await InitializeObjectDetectorAsync();
                await UpdateSkillUIAsync();
            }
            finally
            {
                // Always release the semaphore, even if initialization throws
                m_skillLock.Release();
            }

            // Ready to begin, enable buttons
            NotifyUser("Skill initialized. Select a media source from the top to begin.");
            await UpdateMediaSourceButtonsAsync(true);
        }
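InitializeObjectDetectorAsync is referenced above but not shown. A minimal sketch of what such a helper might look like, built from the descriptor/skill/binding calls visible in the other examples; the m_skill and m_binding field names and the first-device preference are assumptions, not the sample's exact logic:

        // Hypothetical sketch of the initialization helper referenced above.
        // m_skill and m_binding are assumed fields; device choice is an assumption.
        private async Task InitializeObjectDetectorAsync()
        {
            if (m_availableExecutionDevices.Count > 0)
            {
                // Create the skill against the first enumerated execution device
                m_skill = await m_descriptor.CreateSkillAsync(m_availableExecutionDevices[0]) as ObjectDetectorSkill;
            }
            else
            {
                // No device enumerated: let the skill pick a default
                m_skill = await m_descriptor.CreateSkillAsync() as ObjectDetectorSkill;
            }

            // Create a binding object used to feed frames to the skill
            m_binding = await m_skill.CreateSkillBindingAsync() as ObjectDetectorBinding;
        }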
Example #2

        /// <summary>
        /// Called when the page is loaded.
        /// Initializes app assets such as skills.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private async void Page_Loaded(object sender, RoutedEventArgs e)
        {
            // Disable buttons while we initialize
            await UpdateMediaSourceButtonsAsync(false);

            // Initialize helper class used to render the skill results on screen
            m_renderer = new ObjectTrackRenderer(UIOverlayCanvas);

            m_lock.Wait();
            try
            {
                NotifyUser("Initializing skills...");
                m_detectorDescriptor       = new ObjectDetectorDescriptor();
                m_detectorExecutionDevices = await m_detectorDescriptor.GetSupportedExecutionDevicesAsync();
                await InitializeObjectDetectorAsync();

                m_trackerDescriptor       = new ObjectTrackerDescriptor();
                m_trackerExecutionDevices = await m_trackerDescriptor.GetSupportedExecutionDevicesAsync();
                await InitializeObjectTrackerAsync();

                await UpdateSkillUIAsync();
            }
            finally
            {
                // Always release the semaphore, even if initialization throws
                m_lock.Release();
            }

            // Ready to begin, enable buttons
            NotifyUser("Skills initialized. Select a media source from the top to begin.");
            await UpdateMediaSourceButtonsAsync(true);
        }
Example #3

        async void Setup()
        {
            ObjectDetectorDescriptor descriptor = new ObjectDetectorDescriptor();
            var availableExecutionDevices       = await descriptor.GetSupportedExecutionDevicesAsync();

            // If you don't specify an ISkillExecutionDevice, a default is selected automatically
            skill = await descriptor.CreateSkillAsync() as ObjectDetectorSkill;

            binding = await skill.CreateSkillBindingAsync() as ObjectDetectorBinding;
        }
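As the comment in Setup notes, omitting the ISkillExecutionDevice argument lets the skill pick a default device. A sketch of choosing one explicitly instead (requires System.Linq); the GPU-first preference and the SetupWithDeviceAsync name are assumptions, not part of the original sample:

        // Sketch: select an explicit execution device rather than the default.
        // Preferring GPU, then falling back to the first device, is an assumption.
        async Task SetupWithDeviceAsync()
        {
            var descriptor = new ObjectDetectorDescriptor();
            var devices    = await descriptor.GetSupportedExecutionDevicesAsync();

            // Prefer a GPU device if one was enumerated, otherwise take the first device
            var preferred = devices.FirstOrDefault(d => d.ExecutionDeviceKind == SkillExecutionDeviceKind.Gpu)
                            ?? devices.FirstOrDefault();

            skill = preferred != null
                ? await descriptor.CreateSkillAsync(preferred) as ObjectDetectorSkill
                : await descriptor.CreateSkillAsync() as ObjectDetectorSkill;

            binding = await skill.CreateSkillBindingAsync() as ObjectDetectorBinding;
        }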
Example #4

        private async void Page_Loaded(object sender, RoutedEventArgs e)
        {
            try
            {
                m_faceDetector = await FaceDetector.CreateAsync();

                MaskDetect = new Helpers.CustomVision.MaskDetection(new string[] { "mask", "no-mask" });
                // Load the ONNX model file packaged with the app
                var modelFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Assets/facemask.onnx"));

                await MaskDetect.Init(modelFile);
            }
            catch (Exception ex)
            {
                Debug.WriteLine($"error: {ex.Message}");
                MaskDetect = null;
            }
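            // Reset the per-camera "last saved" timestamps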
            for (int i = 0; i < 4; i++)
            {
                LastSaved[i] = DateTime.MinValue;
            }
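            // Populate the sound effect list once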
            if (Sounds.Count <= 0)
            {
                Sounds.Add("wengi.mp3");
                Sounds.Add("setan.wav");
                Sounds.Add("setan2.wav");
                Sounds.Add("zombie.wav");
                Sounds.Add("zombie2.wav");
                Sounds.Add("scream.mp3");
                Sounds.Add("monster.mp3");
            }

            // Reset bitmap rendering components, one per CCTV preview
            var previews = new[] { CCTV1, CCTV2, CCTV3, CCTV4 };
            for (int i = 0; i < previews.Length; i++)
            {
                m_processedBitmapSource[i] = new SoftwareBitmapSource();
                previews[i].Source = m_processedBitmapSource[i];
            }
            // Initialize helper class used to render the skill results on screen
            m_bboxRenderer[0] = new BoundingBoxRenderer(UIOverlayCanvas1);
            m_bboxRenderer[1] = new BoundingBoxRenderer(UIOverlayCanvas2);
            m_bboxRenderer[2] = new BoundingBoxRenderer(UIOverlayCanvas3);
            m_bboxRenderer[3] = new BoundingBoxRenderer(UIOverlayCanvas4);

            m_lock.Wait();
            try
            {
                NotifyUser("Initializing skill...");
                m_descriptor = new ObjectDetectorDescriptor();
                m_availableExecutionDevices = await m_descriptor.GetSupportedExecutionDevicesAsync();

                await InitializeObjectDetectorAsync();
                await UpdateSkillUIAsync();
            }
            finally
            {
                // Always release the semaphore, even if initialization throws
                m_lock.Release();
            }

            // Ready to begin, start the processing loop
            NotifyUser("Skill initialized. Select a media source from the top to begin.");
            Loop();
        }
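Loop() is referenced above but not shown. A hypothetical sketch of the per-frame work it might drive for a single camera, following the bind/evaluate pattern from Example #5 below; the ProcessFrameAsync name, the m_skill/m_binding fields, and the renderer's Render signature are assumptions:

        // Hypothetical per-frame worker that Loop() might call for each camera.
        // Assumes m_skill/m_binding fields and that the BoundingBoxRenderer helper
        // exposes a Render method taking the detection results (assumption).
        private async Task ProcessFrameAsync(VideoFrame frame, int cameraIndex)
        {
            // Bind the frame and run the detector against it
            await m_binding.SetInputImageAsync(frame);
            await m_skill.EvaluateAsync(m_binding);

            // Draw the detections on this camera's overlay canvas
            m_bboxRenderer[cameraIndex].Render(m_binding.DetectedObjects);
        }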
Example #5
        /// <summary>
        /// Entry point of the program
        /// </summary>
        /// <param name="args"></param>
        static void Main(string[] args)
        {
            Console.WriteLine("Object Detector .NetCore 3.0 Console App: Place something to detect in front of the camera");

            Task.Run(async () =>
            {
                var skillDescriptor = new ObjectDetectorDescriptor();
                var skill           = await skillDescriptor.CreateSkillAsync() as ObjectDetectorSkill;
                var skillDevice     = skill.Device;
                Console.WriteLine("Running Skill on : " + skillDevice.ExecutionDeviceKind.ToString() + ": " + skillDevice.Name);

                var binding = await skill.CreateSkillBindingAsync() as ObjectDetectorBinding;

                m_cameraHelper = await CameraHelper.CreateCameraHelperAsync(

                    // Register a failure callback
                    new CameraHelper.CameraHelperFailedHandler(message =>
                {
                    var failureException = new Exception(message);
                    Console.WriteLine(message);
                    Environment.Exit(failureException.HResult);
                }),

                    // Register the main loop callback to handle each frame as it arrives
                    new CameraHelper.NewFrameArrivedHandler(async (videoFrame) =>
                {
                    try
                    {
                        // Process one frame at a time; if busy, return right away
                        if (0 == Interlocked.Exchange(ref m_lock, 1))
                        {
                            m_evalPerfStopwatch.Restart();

                            // Update input image and run the skill against it
                            await binding.SetInputImageAsync(videoFrame);

                            var inputBindTime = (float)m_evalPerfStopwatch.ElapsedTicks / Stopwatch.Frequency * 1000f;
                            m_evalPerfStopwatch.Restart();

                            await skill.EvaluateAsync(binding);

                            var detectionRunTime = (float)m_evalPerfStopwatch.ElapsedTicks / Stopwatch.Frequency * 1000f;
                            m_evalPerfStopwatch.Stop();

                            // Display bind and eval time
                            string outText = $"bind: {inputBindTime.ToString("F2")}ms, eval: {detectionRunTime.ToString("F2")}ms | ";
                            if (binding.DetectedObjects == null)
                            {
                                // No objects detected in this frame
                                outText += "No object found";
                            }
                            else // Display the detected objects on the console
                            {
                                outText += $"Found {binding.DetectedObjects.Count} objects";
                                foreach (var result in binding.DetectedObjects)
                                {
                                    outText += $" {result.Kind},";
                                }
                            }

                            Console.Write("\r" + outText);

                            // Release the lock
                            Interlocked.Exchange(ref m_lock, 0);
                        }
                    }
                    catch (Exception e)
                    {
                        Console.WriteLine("Error:: " + e.Message.ToString() + e.TargetSite.ToString() + e.Source.ToString() + e.StackTrace.ToString());
                        Environment.Exit(e.HResult);
                    }
                }));
            }).Wait();

            Console.WriteLine("\nPress Any key to stop\n\n");

            var key = Console.ReadKey();

            Console.WriteLine("\n\n\nExiting...\n\n\n");

            m_cameraHelper.CleanupAsync().Wait();
        }
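The console output above only prints each result's Kind. Each ObjectDetectorResult also carries a bounding rectangle; a small sketch printing it, assuming a Rect property with normalized coordinates as in the official ObjectDetector sample (the PrintDetections name is hypothetical):

        // Sketch: print each detection's bounding box alongside its kind.
        // Assumes ObjectDetectorResult exposes a Rect property with coordinates
        // normalized to [0,1], as the official ObjectDetector sample treats it.
        static void PrintDetections(ObjectDetectorBinding binding)
        {
            if (binding.DetectedObjects == null) return;

            foreach (var result in binding.DetectedObjects)
            {
                Console.WriteLine(
                    $"{result.Kind}: x={result.Rect.X:F2} y={result.Rect.Y:F2} " +
                    $"w={result.Rect.Width:F2} h={result.Rect.Height:F2}");
            }
        }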