ShapeModel smodel; //shape model

    #endregion Fields

    #region Constructors

    public FaceTracker(string filepath)
    {
        points = new List<Point[]> ();

        string jsonText = null;
        using (System.IO.StreamReader reader = new System.IO.StreamReader(filepath)) {
            jsonText = reader.ReadToEnd ();
            reader.Close ();
        }

        //				TextAsset textAsset = Resources.Load (filename) as TextAsset;
        //				string jsonText = textAsset.text;

        IDictionary json = (IDictionary)Json.Deserialize (jsonText);

        IDictionary ft = (IDictionary)json ["ft object"];

        detector = new FaceDetector ();
        detector.read (ft ["detector"]);

        smodel = new ShapeModel ();
        smodel.read (ft ["smodel"]);

        pmodel = new PatchModels ();
        pmodel.read (ft ["pmodel"]);
    }
Example #2
		PatchModels pmodel;              //feature detectors
	
		public FaceTracker (string filepath)
		{
				points = new List<Point[]> ();

				string jsonText = null;


#if UNITY_WSA
                var data = File.ReadAllBytes(filepath);
                jsonText = Encoding.UTF8.GetString(data, 0, data.Length);
#else
                jsonText = File.ReadAllText(filepath);
#endif



//				TextAsset textAsset = Resources.Load (filename) as TextAsset;
//				string jsonText = textAsset.text;
		
				IDictionary json = (IDictionary)Json.Deserialize (jsonText);

				IDictionary ft = (IDictionary)json ["ft object"];

				detector = new FaceDetector ();
				detector.read (ft ["detector"]);

				smodel = new ShapeModel ();
				smodel.read (ft ["smodel"]);

				pmodel = new PatchModels ();
				pmodel.read (ft ["pmodel"]);
		}
        public void ControlLoad(object sender, EventArgs e)
        {
            var environmentService = new EnvironmentService();
            var haarEyeFile = new FileInfo(environmentService.GetAbsolutePathFromAssemblyRelative("haarcascades/haarcascade_eye.xml"));
            var haarFaceFile = new FileInfo(environmentService.GetAbsolutePathFromAssemblyRelative("haarcascades/haarcascade_frontalface_default.xml"));

            _faceDetector = new FaceDetector(haarFaceFile.FullName, haarEyeFile.FullName);

            _sunglassOverlay2 = new AccessoryOverlay(environmentService.GetAbsolutePathFromAssemblyRelative("Resources/Images/sunglasses2.png"));
            _hatOverlay1 = new AccessoryOverlay(environmentService.GetAbsolutePathFromAssemblyRelative("Resources/Images/partyhat.png"));

            _classiferParams = new ClassifierParameters();
            classifierConfigControl.ConfigChanged += classifierConfigControl_ConfigChanged;
        }
Example #4
        async Task OnProcessFrameAsync(SoftwareBitmap bitmap)
        {
            if (this.faceDetector == null)
            {
                this.faceDetector = await FaceDetector.CreateAsync();
            }
            var faces = await this.faceDetector.DetectFacesAsync(bitmap);

            this.cameraDisplay.ShowCamera(faces.Count > 0);

            if (faces.Count > 0)
            {
                foreach (var face in faces)
                {
                    this.cameraDisplay.HighlightFace(face.FaceBox);
                }
            }
        }
Example #5
        private static void FindManyFaces(FaceDetector faceDetector, string imagesFilePath, string outputDir, bool debug)
        {
            if (!Directory.Exists(outputDir))
            {
                Directory.CreateDirectory(outputDir);
            }

            var images = Directory.EnumerateFiles(imagesFilePath, "*.jpg");

            int cnt = 0;

            foreach (var image in images)
            {
                var outputFile = Path.Combine(outputDir, $"output_{cnt}.png");
                FindFaces(faceDetector, image, outputFile, debug);
                cnt++;
            }
        }
Example #6
        public int InitializeWithEyeList(BackgroundCanvas mainCanvas, string filename, List <Point> leftEyeList, List <Point> rightEyeList)
        {
            if (null == _detector)
            {
                _detector = new FaceDetector(
                    mainCanvas.OptionDialog.FaceDetectorDataPath,
                    true,
                    mainCanvas.OptionDialog.FaceDetectorThreshold);
            }
            // VIOLA: Removed face detection... since we have the eyes and are relying on them


            List <Rect> faceRects = new List <Rect>();

            for (int i = 0; i < leftEyeList.Count; ++i)
            {
                Rect rect = new Rect();


                Vector leye = new Vector(leftEyeList[i].X, leftEyeList[i].Y);
                Vector reye = new Vector(rightEyeList[i].X, rightEyeList[i].Y);

                Vector delta = reye - leye;

                double eyeWidth = delta.Length;

                Vector center = leye + 0.5 * delta;

                rect.X      = center.X - 1.5 * eyeWidth;
                rect.Y      = center.Y - 1.0 * eyeWidth;
                rect.Width  = 3.0 * eyeWidth;
                rect.Height = 3.0 * eyeWidth;

                faceRects.Add(rect);
            }

            _targetRect       = new Rect();
            _faceDisplayWidth = mainCanvas.OptionDialog.FaceDisplayWidth;
            _defaultDPI       = mainCanvas.OptionDialog.DefaultDPI;
            _leftEyeList      = leftEyeList;
            _rightEyeList     = rightEyeList;

            return(InitializeInternal(mainCanvas, filename, faceRects, mainCanvas.OptionDialog.BorderWidth, null));
        }
Example #7
        void AllocateObjects(ResourceSet resources)
        {
            _resources = resources;

            _faceDetector = new FaceDetector(_resources.blazeFace);
            _faceMesh     = new FaceLandmarkDetector(_resources.faceMesh);
            _irisMeshL    = new EyeLandmarkDetector(_resources.iris);
            _irisMeshR    = new EyeLandmarkDetector(_resources.iris);

            _cropMaterial = new Material(_resources.cropShader);

            _faceCrop  = new RenderTexture(192, 192, 0);
            _irisCropL = new RenderTexture(64, 64, 0);
            _irisCropR = new RenderTexture(64, 64, 0);

            _refineBuffer = new ComputeBuffer(FaceLandmarkDetector.VertexCount,
                                              sizeof(float) * 4);
            _eyeToFace = IndexTable.CreateEyeToFaceLandmarkBuffer();
        }
        public async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            IList <DetectedFace> detectedFaces = null;

            try
            {
                const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame previewFrame = new VideoFrame(faceDetectionPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        detectedFaces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        frameProcessingSemaphore.Release();
                        return;
                    }

                    var previewFrameSize = new Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                    var ignored          = this.Dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
                    {
                        this.SetupVisualization(previewFrameSize, detectedFaces);
                    });
                }
            }
            catch (Exception e)
            {
                // Face tracking failed
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
        /// <summary>
        /// Grabs a preview frame on a timer, runs face detection on it, and starts recording when a face is found.
        /// </summary>
        /// <param name="timer">Timer object invoking this call</param>
        private async void CurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (!semaphore.Wait(0))
            {
                return;
            }

            try
            {
                IList <DetectedFace>    faces            = null;
                const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Nv12;

                using (VideoFrame previewFrame = new VideoFrame(inputPixelFormat, 320, 240))
                {
                    await capture.GetPreviewFrameAsync(previewFrame);

                    //Run face detection
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + inputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }

                    //Start recording when a face is detected
                    if (faces.Count != 0)
                    {
                        Debug.WriteLine("Found Face");
                        await startRecoding();
                    }
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
            finally
            {
                semaphore.Release();
            }
        }
Example #10
        public static IEnumerable <FaceModel> FaceAnalysis(FaceMaskDetector faceMaskDetector, byte[] imageData, IEnumerable <FaceRectangle> faceRectangles, bool mask)
        {
            if (mask)
            {
                IEnumerable <Rectangle> rectangles = faceRectangles.Select(faceRectangle => new Rectangle(faceRectangle.Top, faceRectangle.Left, faceRectangle.Width, faceRectangle.Height));
                IEnumerable <Bitmap>    faces      = rectangles.Select(rectangle => FaceDetector.GetFaceImage(imageData, rectangle).ToBitmap());
                IEnumerable <bool>      isMasks    = faceMaskDetector.Detect(faces);

                return(faceRectangles.Zip(isMasks, (faceRectangle, isMask) => new FaceModel {
                    FaceRectangle = faceRectangle, FaceAttributes = new FaceAttributes {
                        IsMask = isMask
                    }
                }));
            }

            return(faceRectangles.Select(faceRectangle => new FaceModel {
                FaceRectangle = faceRectangle, FaceAttributes = new FaceAttributes()
            }));
        }
        /// <summary>
        /// Initializes a new MediaCapture instance and starts the Preview streaming to the CamPreview UI element.
        /// </summary>
        /// <returns>Async Task object returning true if initialization and streaming were successful and false if an exception occurred.</returns>
        private async Task <bool> StartWebcamStreamingAsync()
        {
            bool successful = false;

            this.faceDetector = await FaceDetector.CreateAsync();

            try
            {
                this.mediaCapture = new MediaCapture();

                // For this scenario, we only need Video (not microphone) so specify this in the initializer.
                // NOTE: the appxmanifest only declares "webcam" under capabilities and if this is changed to include
                // microphone (default constructor) you must add "microphone" to the manifest or initialization will fail.
                MediaCaptureInitializationSettings settings = new MediaCaptureInitializationSettings();
                settings.StreamingCaptureMode = StreamingCaptureMode.Video;
                await this.mediaCapture.InitializeAsync(settings);

                this.mediaCapture.CameraStreamStateChanged += this.MediaCapture_CameraStreamStateChanged;

                // Cache the media properties as we'll need them later.
                var deviceController = this.mediaCapture.VideoDeviceController;
                this.videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

                // Immediately start streaming to our CaptureElement UI.
                // NOTE: CaptureElement's Source must be set before streaming is started.
                this.CamPreview.Source = this.mediaCapture;
                await this.mediaCapture.StartPreviewAsync();

                successful = true;
            }
            catch (System.UnauthorizedAccessException)
            {
                // If the user has disabled their webcam this exception is thrown; provide a descriptive message to inform the user of this fact.
                this.rootPage.NotifyUser("Webcam is disabled or access to the webcam is disabled for this app.\nEnsure Privacy Settings allow webcam usage.", NotifyType.ErrorMessage);
            }
            catch (Exception ex)
            {
                this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
            }

            return(successful);
        }
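A minimal usage sketch (an assumption, not part of the sample above) of how this might be wired up: once StartWebcamStreamingAsync succeeds, a periodic ThreadPoolTimer can drive the ProcessCurrentVideoFrame handler shown in the next example at roughly 15 frames per second.

        // Hypothetical caller; the faceTracker and frameProcessingTimer fields are assumptions.
        private async Task StartFaceTrackingAsync()
        {
            this.faceTracker = await FaceTracker.CreateAsync();

            if (await this.StartWebcamStreamingAsync())
            {
                // A ~66 ms period gives approximately 15 invocations per second.
                TimeSpan timerInterval = TimeSpan.FromMilliseconds(66);
                this.frameProcessingTimer = Windows.System.Threading.ThreadPoolTimer.CreatePeriodicTimer(
                    new Windows.System.Threading.TimerElapsedHandler(this.ProcessCurrentVideoFrame),
                    timerInterval);
            }
        }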
        /// <summary>
        /// This method is invoked by a ThreadPoolTimer to execute the FaceTracker and Visualization logic at approximately 15 frames per second.
        /// </summary>
        /// <remarks>
        /// Keep in mind this method is called from a Timer and not synchronized with the camera stream. Also, the processing time of FaceTracker
        /// will vary depending on the size of each frame and the number of faces being tracked. That is, a large image with several tracked faces may
        /// take longer to process.
        /// </remarks>
        /// <param name="timer">Timer object invoking this call</param>
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try {
                IList <DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height)) {
                    await this.camera.MediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }

                    // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                    var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                    var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => {
                        this.SetupVisualization(previewFrameSize, faces);
                    });
                }
            }
            catch (Exception ex) {
                Debug.WriteLine(ex);
            }
            finally {
                frameProcessingSemaphore.Release();
            }
        }
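The non-blocking frameProcessingSemaphore.Wait(0) guard above assumes a field along these lines (the declaration is not shown in the sample, so treat it as an assumption):

        // Initial count of 1: only one frame is processed at a time. Wait(0) returns false
        // immediately while the previous frame is still in flight, and the Release() call
        // in the finally block frees the slot again.
        private readonly SemaphoreSlim frameProcessingSemaphore = new SemaphoreSlim(1);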
Example #13
        private void Initialize(object sender, PipelineRunEventArgs e)
        {
            string rootDirectory = AppDomain.CurrentDomain.BaseDirectory;

            faceModelParameters = new FaceModelParameters(rootDirectory, true, false, false);
            faceModelParameters.optimiseForVideo();

            faceDetector = new FaceDetector(faceModelParameters.GetHaarLocation(), faceModelParameters.GetMTCNNLocation());
            if (!faceDetector.IsMTCNNLoaded())
            {
                faceModelParameters.SetFaceDetector(false, true, false);
            }

            landmarkDetector = new CLNF(faceModelParameters);
            faceAnalyser     = new FaceAnalyser(rootDirectory, dynamic: true, output_width: 112, mask_aligned: true);
            gazeAnalyser     = new GazeAnalyser();

            landmarkDetector.Reset();
            faceAnalyser.Reset();
        }
Example #14
        protected ExtractorBase(FaceModelParameters faceModelParameters)
        {
            if (_initialized)
            {
                return;
            }

            ModelParams  = faceModelParameters;
            GazeAnalyzer = new GazeAnalyserManaged();

            var face_detector = new FaceDetector(ModelParams.GetHaarLocation(), ModelParams.GetMTCNNLocation());

            if (!face_detector.IsMTCNNLoaded()) // If MTCNN model not available, use HOG
            {
                ModelParams.SetFaceDetector(false, true, false);
            }

            FaceModel    = new CLNF(ModelParams);
            _initialized = true;
        }
Example #15
    public void StartDetection()
    {
        if (CoreXT.IsDevice)
        {
            if (_faceDetector == null)
            {
                // create detector with low accuracy and don't track faces
                _faceDetector = new FaceDetector(false, false);

                // shrink image to 12.5% first for faster detection
                _faceDetector.preprocessImageScale = 0.125f;
            }

            _isDetecting = true;
        }
        else
        {
            Debug.Log("Not on device.");
        }
    }
Example #16
    IEnumerator Start()
    {
        Input.backButtonLeavesApp = true;
        Screen.sleepTimeout       = SleepTimeout.NeverSleep;

        _detector       = GetComponent <FaceDetector>();
        _apiManager     = GetComponent <FaceApiManager>();
        _toMatHelperMgr = GetComponent <WebCamTextureToMatHelperManager>();
        _toMatHelper    = GetComponent <WebCamTextureToMatHelper>();

        //After the camera and related components finish initializing, get the image size
        _toMatHelper.Initialize();
        yield return(WaitInitialization());

        var imgSize = new Size(_toMatHelper.GetWidth(), _toMatHelper.GetHeight());

        _zeroMat = new ZeroMat(imgSize);

        _volume.profile.TryGetSettings(out _distortion);
    }
	void Start() {
		
		if (CoreXT.IsDevice)
			webCam = new WebCamTexture("Front Camera");
		else
			webCam = new WebCamTexture();
		
		RotatableGUIItem guiItem = ((RotatableGUIItem)(GameObject.Find ("CameraVideo").GetComponent("RotatableGUIItem")));
		guiItem.texture = webCam;
		guiItem.size = new Vector2(Screen.width / 8, Screen.height / 8);
		guiItem.relativePosition = new Vector2(-Screen.width / 16, Screen.height / 16 + 75);
		
		if (CoreXT.IsDevice) {
			
			faceDetector = new FaceDetector(false, false);
			faceDetector.IsMirrored = true;
		}

		StartDetect();
	}
Example #18
        public async Task <Rect?> ProcessCameraFrameAsync(SoftwareBitmap bitmap)
        {
            if (this.faceDetector == null)
            {
                this.faceDetector = await FaceDetector.CreateAsync();
            }
            var result = await this.faceDetector.DetectFacesAsync(bitmap);

            this.photoControl.Switch(result?.Count > 0);

            Rect?returnValue = null;

            if (result?.Count > 0)
            {
                returnValue = new Rect(
                    (double)result[0].FaceBox.X / bitmap.PixelWidth,
                    (double)result[0].FaceBox.Y / bitmap.PixelHeight,
                    (double)result[0].FaceBox.Width / bitmap.PixelWidth,
                    (double)result[0].FaceBox.Height / bitmap.PixelHeight);
            }
            return(returnValue);
        }
        private async void _timer_Tick(object sender, object e)
        {
            try
            {
                this.FaceCanvas.Children.Clear();
                IEnumerable <DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, 1280, 720))
                {
                    await this._mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                }
                if (faces != null)
                {
                    foreach (DetectedFace face in faces)
                    {
                        Face.Margin = new Thickness(face.FaceBox.X, face.FaceBox.Y, 0, 0);

                        //faceBorder.ShowFaceRectangle(0, 0, (uint)(face.FaceBox.Width), (uint)(face.FaceBox.Height ));
                        FaceText.Text = face.FaceBox.X.ToString() + face.FaceBox.Y.ToString();
                    }
                }


                PicBtn.Content = DateTime.Now.ToString();
                await Task.Delay(50);
            }
            catch (Exception)
            {
            }
        }
        /// <summary>
        /// Creates and initializes a FaceSentimentAnalyzerSkill instance
        /// </summary>
        /// <param name="descriptor"></param>
        /// <param name="device"></param>
        /// <returns></returns>
        internal static IAsyncOperation <FaceSentimentAnalyzerSkill> CreateAsync(
            ISkillDescriptor descriptor,
            ISkillExecutionDevice device)
        {
            return(AsyncInfo.Run(async(token) =>
            {
                // Create instance
                var skillInstance = new FaceSentimentAnalyzerSkill(descriptor, device);

                // Instantiate the FaceDetector
                skillInstance.m_faceDetector = await FaceDetector.CreateAsync();

                // Load WinML model
                var modelFile = await StorageFile.GetFileFromApplicationUriAsync(new Uri($"ms-appx:///Contoso.FaceSentimentAnalyzer/{FaceSentimentAnalyzerConst.WINML_MODEL_FILENAME}"));
                var winmlModel = LearningModel.LoadFromFilePath(modelFile.Path);

                // Create WinML session
                skillInstance.m_winmlSession = new LearningModelSession(winmlModel, GetWinMLDevice(device));

                return skillInstance;
            }));
        }
        public async Task <int> DetectFacesAsync(byte[] photoByteArray)
        {
            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(photoByteArray.ToRandomAccessMemory());

            BitmapTransform transform = new BitmapTransform();
            const float     sourceImageHeightLimit = 1280;

            if (decoder.PixelHeight > sourceImageHeightLimit)
            {
                float scalingFactor = (float)sourceImageHeightLimit / (float)decoder.PixelHeight;
                transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
            }

            SoftwareBitmap sourceBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Premultiplied, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

            SoftwareBitmap convertedBitmap = sourceBitmap;

            if (sourceBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
            {
                convertedBitmap = SoftwareBitmap.Convert(sourceBitmap, faceDetectionPixelFormat);
            }

            FaceDetector detector = await FaceDetector.CreateAsync();

            IList <DetectedFace> faces = null;

            faces = await detector.DetectFacesAsync(convertedBitmap);

            /* ICollection<System.Drawing.Rectangle> rectangles = new List<System.Drawing.Rectangle>();
             *
             * foreach(DetectedFace face in faces)
             *   rectangles.Add(new System.Drawing.Rectangle(Convert.ToInt32(face.FaceBox.X), Convert.ToInt32(face.FaceBox.Y), Convert.ToInt32(face.FaceBox.Width), Convert.ToInt32(face.FaceBox.Height)));
             */
            sourceBitmap.Dispose();
            convertedBitmap.Dispose();

            return(faces.Count);
        }
Example #22
        /// <summary>
        /// Initialize a photo - run faceDetection
        /// </summary>
        /// <param name="mainCanvas">Main canvas reference</param>
        /// <param name="filename">Full path name to image file</param>
        public int InitializeWithFaceDetection(BackgroundCanvas mainCanvas, string filename)
        {
            if (null == _detector)
            {
                _detector = new FaceDetector(
                    mainCanvas.OptionDialog.FaceDetectorDataPath,
                    true,
                    mainCanvas.OptionDialog.FaceDetectorThreshold);
            }
            _detector.SetTargetDimension(mainCanvas.OptionDialog.FaceDetectTargetWidth,
                                         mainCanvas.OptionDialog.FaceDetectTargetHeight);

            DetectionResult   detectionResult  = _detector.DetectObject(filename);
            List <ScoredRect> scoredResultList = detectionResult.GetMergedRectList(0.0F);

            if (scoredResultList.Count <= 0)
            {
                return(0);
            }

            List <Rect> faceRects = new List <Rect>();

            foreach (ScoredRect scoredRect in scoredResultList)
            {
                Rect rect = new Rect();

                rect.X      = scoredRect.X;
                rect.Y      = scoredRect.Y;
                rect.Width  = scoredRect.Width;
                rect.Height = scoredRect.Height;

                faceRects.Add(rect);
            }

            _targetRect       = new Rect();
            _faceDisplayWidth = mainCanvas.OptionDialog.FaceDisplayWidth;
            _defaultDPI       = mainCanvas.OptionDialog.DefaultDPI;
            return(InitializeInternal(mainCanvas, filename, faceRects, mainCanvas.OptionDialog.BorderWidth, null));
        }
    private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
    {
        if (m_mediaCapture == null)
        {
            return;
        }

        if (m_mediaCapture.CameraStreamState != CameraStreamState.Streaming)
        {
            return;
        }

        if (!m_faceProcessingSemaphore.Wait(0))
        {
            return;
        }

        IList <DetectedFace> faces = null;

        const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Nv12;

        using (VideoFrame previewFrame = new VideoFrame(inputPixelFormat, (int)m_videoProperties.Width, (int)m_videoProperties.Height))
        {
            await m_mediaCapture.GetPreviewFrameAsync(previewFrame);

            if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
            {
                faces = await m_faceTracker.ProcessNextFrameAsync(previewFrame);
            }
        }

        if (faces != null)
        {
            foreach (DetectedFace face in faces)
            {
                Debug.Log(string.Format("x={0}, y={1}, w={2}, h={3}", face.FaceBox.X, face.FaceBox.Y, face.FaceBox.Width, face.FaceBox.Height));
            }
        }

        m_faceProcessingSemaphore.Release();
    }
        private void StartFaceDetection(MediaCapture capture)
        {
            if (capture == null)
            {
                throw new ArgumentNullException();
            }

            if (_faceDetectionCancellationTokenSource != null)
            {
                return;
            }

            _faceDetectionCancellationTokenSource = new CancellationTokenSource();
            var token = _faceDetectionCancellationTokenSource.Token;

            _faceDetectionTask = Task.Factory.StartNew(async() =>
            {
                try
                {
                    FaceDetector detector = new FaceDetector();
                    detector.Initialize(new FaceDetectorInitializationData()
                    {
                        FaceData = Package.Current.InstalledLocation.Path + "\\" + FacePredictorFileName
                    });

                    while (!token.IsCancellationRequested)
                    {
                        await FaceDetectAsync(detector, capture, token);
                    }
                }
                catch (OperationCanceledException) { }
                catch (Exception e)
                {
                    Debug.WriteLine("Face detection failed.");
                    Debug.WriteLine(e.Message);
                }
            }, token);
        }
Example #25
        private async Task <string> DetectEmotionWithWinML()
        {
            var videoFrame = lastFrame;

            if (faceDetector == null)
            {
                faceDetector = await FaceDetector.CreateAsync();
            }

            var detectedFaces = await faceDetector.DetectFacesAsync(videoFrame.SoftwareBitmap);

            if (detectedFaces != null && detectedFaces.Any())
            {
                var face = detectedFaces.OrderByDescending(s => s.FaceBox.Height * s.FaceBox.Width).First();
                var randomAccessStream = new InMemoryRandomAccessStream();
                var decoder            = await BitmapDecoder.CreateAsync(randomAccessStream);

                var croppedImage = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, new BitmapTransform()
                {
                    Bounds = new BitmapBounds()
                    {
                        X = face.FaceBox.X, Y = face.FaceBox.Y, Width = face.FaceBox.Width, Height = face.FaceBox.Height
                    }
                }, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

                videoFrame = VideoFrame.CreateWithSoftwareBitmap(croppedImage);
            }

            var emotion = await model.EvaluateAsync(new CNTKGraphModelInput()
            {
                Input338 = videoFrame
            });

            var    index = emotion.Plus692_Output_0.IndexOf(emotion.Plus692_Output_0.Max());
            string label = labels[index];

            return(label);
        }
Example #26
        private async Task InitializeCamera()
        {
            _requestStopCancellationToken = new CancellationTokenSource();
            _captureElement = new CaptureElement();
            var videoCaptureDevices = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);

            var camera = videoCaptureDevices.FirstOrDefault();
            MediaCaptureInitializationSettings initialisationSettings = new MediaCaptureInitializationSettings()
            {
                StreamingCaptureMode = StreamingCaptureMode.Video,
                VideoDeviceId        = camera.Id
            };

            _mediaCapture = new MediaCapture();
            await _mediaCapture.InitializeAsync(initialisationSettings);

            _captureElement.Source = _mediaCapture;
            await _mediaCapture.StartPreviewAsync();

            var videoProperties = (_mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties);
            var videoSize       = new Rect(0, 0, videoProperties.Width, videoProperties.Height);
            var detector        = await FaceDetector.CreateAsync();

            var bitmap = FaceDetector.GetSupportedBitmapPixelFormats().First();

            try
            {
                await Task.Run(async() =>
                {
                    VideoFrame frame       = new VideoFrame(bitmap, (int)videoSize.Width, (int)videoSize.Height);
                    TimeSpan?lastFrameTime = null;
                    while (true)
                    {
                        if (!_requestStopCancellationToken.Token.IsCancellationRequested)
                        {
                            await _mediaCapture.GetPreviewFrameAsync(frame);

                            if ((!lastFrameTime.HasValue) || (lastFrameTime != frame.RelativeTime))
                            {
                                var detectedFaces = await detector.DetectFacesAsync(frame.SoftwareBitmap);
                                if (detectedFaces.Count == 1)
                                {
                                    var convertedRgba16Bitmap         = SoftwareBitmap.Convert(frame.SoftwareBitmap, BitmapPixelFormat.Rgba16);
                                    InMemoryRandomAccessStream stream = new InMemoryRandomAccessStream();
                                    BitmapEncoder encoder             = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                                    encoder.SetSoftwareBitmap(convertedRgba16Bitmap);
                                    await encoder.FlushAsync();

                                    var detectedPerson = await _faceService.DetectFace(stream.AsStream());

                                    if (detectedPerson != null && detectedPerson.PersonId.HasValue)
                                    {
                                        _userService.PersonId = detectedPerson.PersonId.Value;
                                        var user = await _userService.GetModelAsync();
                                        if (user == null)
                                        {
                                            user          = new UserProfileModel().RandomData(detectedPerson.Gender);
                                            user.PersonId = detectedPerson.PersonId.Value;
                                            user.FaceIds.Add(detectedPerson.FaceId.Value);
                                            user = await _userService.AddUserAsync(user);
                                        }
                                        await UserViewModel.SetValuesAsync(User, user);
                                    }
                                    else
                                    {
                                        // bug: when a person was not detected, the stream gets disposed
                                        //stream.Seek(0);
                                        stream  = new InMemoryRandomAccessStream();
                                        encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                                        encoder.SetSoftwareBitmap(convertedRgba16Bitmap);
                                        await encoder.FlushAsync();

                                        // TODO: ask new user for initial profile data
                                        var user      = new UserProfileModel().RandomData(detectedPerson.Gender);
                                        user.PersonId = await _faceService.CreatePersonAsync(user.FullName);
                                        var faceIds   = new List <Guid>();
                                        faceIds.Add(await _faceService.AddFaceAsync(user.PersonId, stream.AsStream()));
                                        user.FaceIds.AddRange(faceIds);
                                        user = await _userService.AddUserAsync(user);
                                        await UserViewModel.SetValuesAsync(User, user);
                                    }

                                    await Task.Delay(CHECK_INTERVAL * 1000, _requestStopCancellationToken.Token);
                                }
                            }
                            lastFrameTime = frame.RelativeTime;
                        }
                    }
                }, _requestStopCancellationToken.Token);
            }
            catch (Microsoft.ProjectOxford.Face.FaceAPIException fex)
            {
                Debug.WriteLine(fex.ErrorMessage);
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
            if (_requestStopCancellationToken.IsCancellationRequested)
            {
                await _mediaCapture.StopPreviewAsync();

                _captureElement.Source = null;
                _requestStopCancellationToken.Dispose();
            }
        }
Example #27
    void Start()
    {
        if (CoreXT.IsDevice) {
            // subscribes to events
            GUIXT.MediaPicked += OnMediaPicked;

            _faceDetector = new FaceDetector(true, false);
            _imageFilter = new ImageFilter();
        }
    }
Example #28
        private void ProcessIndividualImages(ImageReader reader)
        {
            // Make sure the GUI is setup appropriately
            SetupFeatureExtractionMode();

            // Indicate we will start running the thread
            thread_running = true;

            // Reload the face landmark detector if needed
            ReloadLandmarkDetector();

            if (!landmark_detector.isLoaded())
            {
                DetectorNotFoundWarning();
                EndMode();
                thread_running = false;
                return;
            }

            // Setup the parameters optimized for working on individual images rather than sequences
            face_model_params.optimiseForImages();

            // Setup the visualization
            Visualizer visualizer_of = new Visualizer(ShowTrackedVideo || RecordTracked, ShowAppearance, ShowAppearance, false);

            // Initialize the face detector if it has not been initialized yet
            if (face_detector == null)
            {
                face_detector = new FaceDetector(face_model_params.GetHaarLocation(), face_model_params.GetMTCNNLocation());
            }

            // Initialize the face analyser
            face_analyser = new FaceAnalyserManaged(AppDomain.CurrentDomain.BaseDirectory, false, image_output_size, MaskAligned);

            // Loading an image file
            var frame      = new RawImage(reader.GetNextImage());
            var gray_frame = new RawImage(reader.GetCurrentFrameGray());

            // For FPS tracking
            DateTime?startTime     = CurrentTime;
            var      lastFrameTime = CurrentTime;

            // This will be false when the image is not available
            while (reader.isOpened())
            {
                if (!thread_running)
                {
                    break;
                }

                // Setup recording
                RecorderOpenFaceParameters rec_params = new RecorderOpenFaceParameters(false, false,
                                                                                       Record2DLandmarks, Record3DLandmarks, RecordModelParameters, RecordPose, RecordAUs,
                                                                                       RecordGaze, RecordHOG, RecordTracked, RecordAligned, true,
                                                                                       reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), 0);

                RecorderOpenFace recorder = new RecorderOpenFace(reader.GetName(), rec_params, record_root);

                // Detect faces here and return bounding boxes
                List <Rect>  face_detections = new List <Rect>();
                List <float> confidences     = new List <float>();
                if (DetectorHOG)
                {
                    face_detector.DetectFacesHOG(face_detections, gray_frame, confidences);
                }
                else if (DetectorCNN)
                {
                    face_detector.DetectFacesMTCNN(face_detections, frame, confidences);
                }
                else if (DetectorHaar)
                {
                    face_detector.DetectFacesHaar(face_detections, gray_frame, confidences);
                }

                // For visualization
                double progress = reader.GetProgress();

                for (int i = 0; i < face_detections.Count; ++i)
                {
                    bool detection_succeeding = landmark_detector.DetectFaceLandmarksInImage(frame, face_detections[i], face_model_params, gray_frame);

                    var landmarks = landmark_detector.CalculateAllLandmarks();

                    // Predict action units
                    var au_preds = face_analyser.PredictStaticAUsAndComputeFeatures(frame, landmarks);

                    // Predict eye gaze
                    gaze_analyser.AddNextFrame(landmark_detector, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy());

                    // Only the final face will contain the details
                    VisualizeFeatures(frame, visualizer_of, landmarks, landmark_detector.GetVisibilities(), detection_succeeding, i == 0, true, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), progress);

                    // Record an observation
                    RecordObservation(recorder, visualizer_of.GetVisImage(), i, detection_succeeding, reader.GetFx(), reader.GetFy(), reader.GetCx(), reader.GetCy(), 0, 0);
                }

                frame      = new RawImage(reader.GetNextImage());
                gray_frame = new RawImage(reader.GetCurrentFrameGray());

                // Write out the tracked image
                if (RecordTracked)
                {
                    recorder.WriteObservationTracked();
                }

                // Do not carry state across images
                landmark_detector.Reset();
                face_analyser.Reset();
                recorder.Close();

                lastFrameTime = CurrentTime;
                processing_fps.AddFrame();

                // TODO how to report errors from the reader here? exceptions? logging? Problem for future versions?
            }

            EndMode();
        }
Example #29
        private async void OpenImg_Click(object sender, RoutedEventArgs e)
        {
            IList <DetectedFace> faces         = null;
            SoftwareBitmap       detectorInput = null;
            WriteableBitmap      displaySource = null;

            try
            {
                FileOpenPicker photoPicker = new FileOpenPicker();
                photoPicker.ViewMode = PickerViewMode.Thumbnail;
                photoPicker.SuggestedStartLocation = PickerLocationId.PicturesLibrary;
                photoPicker.FileTypeFilter.Add(".jpg");
                photoPicker.FileTypeFilter.Add(".jpeg");
                photoPicker.FileTypeFilter.Add(".png");
                photoPicker.FileTypeFilter.Add(".bmp");
                photoFile = await photoPicker.PickSingleFileAsync();

                if (photoFile == null)
                {
                    return;
                }

                using (IRandomAccessStream fileStream = await photoFile.OpenAsync(Windows.Storage.FileAccessMode.Read))
                {
                    BitmapImage bitmapImage = new BitmapImage();
                    bitmapImage.SetSource(fileStream);
                    sourceImg.Source = bitmapImage;

                    BitmapDecoder decoder = await BitmapDecoder.CreateAsync(fileStream);

                    BitmapTransform transform = this.ComputeScalingTransformForSourceImage(decoder);

                    using (SoftwareBitmap originalBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Ignore, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage))
                    {
                        // FaceDetector can work with Gray8-format bitmaps
                        const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Gray8;
                        if (FaceDetector.IsBitmapPixelFormatSupported(InputPixelFormat))
                        {
                            using (detectorInput = SoftwareBitmap.Convert(originalBitmap, InputPixelFormat))
                            {
                                // Create a WritableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                                displaySource = new WriteableBitmap(originalBitmap.PixelWidth, originalBitmap.PixelHeight);
                                originalBitmap.CopyToBuffer(displaySource.PixelBuffer);
                                FaceDetector detector = await FaceDetector.CreateAsync();  // ideally the detector instance should be created once and reused

                                faces = await detector.DetectFacesAsync(detectorInput);

                                // Create our display using the available image and face results.
                                this.SetupVisualization(displaySource, faces);
                            }
                        }
                        else
                        {
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                this.ClearVisualization();
            }
        }
        /// <summary>
        /// Responds when we navigate to this page.
        /// </summary>
        /// <param name="e">Event data</param>
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            this.rootPage = MainPage.Current;

            // The 'await' operation can only be used from within an async method but class constructors
            // cannot be labeled as async, and so we'll initialize FaceDetector here.
            if (this.faceDetector == null)
            {
                this.faceDetector = await FaceDetector.CreateAsync();
            }
        }
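An alternative sketch of the same idea (an assumption, not part of the sample above): since constructors cannot await, the creation can also be wrapped in a cached task so every caller shares one FaceDetector instance.

        // Hypothetical lazy-initialization helper; AsTask() converts the IAsyncOperation<FaceDetector>
        // returned by FaceDetector.CreateAsync() into an awaitable Task. Not thread-safe, which is
        // acceptable when it is only called from the UI thread.
        private Task<FaceDetector> faceDetectorTask;

        private Task<FaceDetector> GetFaceDetectorAsync()
        {
            return this.faceDetectorTask ?? (this.faceDetectorTask = FaceDetector.CreateAsync().AsTask());
        }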
Example #31
 public FaceDetectionBenchmark()
 {
     faceDetector = new FaceDetector();
 }
Example #32
 public LocalFaceDetector(FaceDetector faceDetector, IImagePersiter imagePersiter)
 {
     _faceDetector  = faceDetector;
     _imagePersiter = imagePersiter;
 }
Example #33
        public async Task <ImageAnalyzer> CaptureFrameAsync(CurrentFrameModel currentFrame)
        {
            try
            {
                using (Stream stream = currentFrame.DataCurrent.AsBuffer().AsStream())
                {
                    stream.Position = 0;
                    var decoder = await BitmapDecoder.CreateAsync(stream.AsRandomAccessStream());

                    var softwareBitmap = await decoder.GetSoftwareBitmapAsync();

                    var detector = await FaceDetector.CreateAsync();

                    using (SoftwareBitmap convertedBitmap = SoftwareBitmap.Convert(softwareBitmap, BitmapPixelFormat.Gray8))
                    {
                        faces = await detector.DetectFacesAsync(convertedBitmap, SearchArea);

                        convertedBitmap.Dispose();
                    }

                    this.NumFacesOnLastFrame = faces.Count();

                    var previewFrameSize = new Windows.Foundation.Size(softwareBitmap.PixelWidth, softwareBitmap.PixelHeight);
                    this.ShowFaceTrackingVisualization(previewFrameSize, faces);

                    softwareBitmap.Dispose();
                    stream.Dispose();
                }

                //If no face was detected, skip analysis
                if (this.NumFacesOnLastFrame == 0)
                {
                    faces = null;
                    CoreUtil.FreeMemory();
                    return(null);
                }

                //If more than half of the face boxes match the previous frame, skip further analysis
                if (this.AreFacesStill(this.detectedFacesFromPreviousFrame, faces))
                {
                    faces = null;
                    CoreUtil.FreeMemory();
                    return(null);
                }

                this.detectedFacesFromPreviousFrame = faces;

                imageWithFace = new ImageAnalyzer(currentFrame.DataCurrent);

                imageWithFace.CameraIPAdres        = CameraIPAdres;
                imageWithFace.imageWidth           = InfoSettingFix.FixImageWidth;
                imageWithFace.imageHeight          = InfoSettingFix.FixImageHeight;
                imageWithFace.CaptureTime          = currentFrame.CaptureTime;
                imageWithFace.ListDetectedFaceJson = JsonConvert.SerializeObject(faces.Select(r => r.FaceBox).ToList());

                faces = null;
                CoreUtil.FreeMemory();

                return(imageWithFace);
            }
            catch (Exception ex)
            {
                CoreUtil.FreeMemory();
                return(null);
            }
        }
Example #34
        /// <summary>
        /// detect faces on a picture and draw a square in each face
        /// </summary>
        private void detectFaces()
        {
            //first check if picture has been taken
            if (null != cameraBitmap)
            {
                //get width of a picture
                int width = cameraBitmap.Width;
                //get height of a picture
                int height = cameraBitmap.Height;
                //Initialize a face detector with the picture dimensions and the max number of faces to check
                FaceDetector detector = new FaceDetector(width, height, MainActivity.MAX_FACES);
                //Create an array of faces with the number of max faces to check
                Android.Media.FaceDetector.Face[] faces = new Android.Media.FaceDetector.Face[MainActivity.MAX_FACES];

                //create a main bitmap
                Bitmap bitmap565 = Bitmap.CreateBitmap(width, height, Bitmap.Config.Rgb565);
                //create a dither paint
                Paint ditherPaint = new Paint();
                //create a draw paint
                Paint drawPaint = new Paint();

                //set true dither to dither paint variable
                ditherPaint.Dither = true;
                //set color red for the square
                drawPaint.Color = Color.Red;
                //set the paint style to stroke
                drawPaint.SetStyle(Paint.Style.Stroke);
                //set stroke width
                drawPaint.StrokeWidth = 2;

                //Create a canvas
                Canvas canvas = new Canvas();
                //set bitmap to canvas
                canvas.SetBitmap(bitmap565);
                //draw bitmap to canvas
                canvas.DrawBitmap(cameraBitmap, 0, 0, ditherPaint);

                //get a number of faces detected
                int facesFound = detector.FindFaces(bitmap565, faces);
                //mid face point
                PointF midPoint = new PointF();
                //eye distance variable
                float eyeDistance = 0.0f;
                //confidence variable
                float confidence = 0.0f;
                //print the number of faces found
                System.Console.WriteLine("Number of faces found: " + facesFound);

                //check if found at least one face
                if (facesFound > 0)
                {
                    //for each face draw a red square
                    for (int index = 0; index < facesFound; ++index)
                    {
                        // get midpoint of a face
                        faces[index].GetMidPoint(midPoint);
                        //get eye distance
                        eyeDistance = faces[index].EyesDistance();
                        //get confidence
                        confidence = faces [index].Confidence();
                        //print all parameters
                        System.Console.WriteLine("Confidence: " + confidence +
                                                 ", Eye distance: " + eyeDistance +
                                                 ", Mid Point: (" + midPoint.X + ", " + midPoint.Y + ")");
                        //draw a square in the picture
                        canvas.DrawRect((int)midPoint.X - eyeDistance,
                                        (int)midPoint.Y - eyeDistance,
                                        (int)midPoint.X + eyeDistance,
                                        (int)midPoint.Y + eyeDistance, drawPaint);
                    }
                }

                //get imageview from layout
                ImageView imageView = (ImageView)FindViewById(Resource.Id.image_view);
                //set image with the red squares to imageview
                imageView.SetImageBitmap(bitmap565);
            }
        }
 public FaceDetectorController(FaceDetector faceDetector, FaceMaskDetector faceMaskDetector)
 {
     this.faceDetector     = faceDetector;
     this.faceMaskDetector = faceMaskDetector;
 }
Example #36
        //Start the process
        private async void button_Click(object sender, RoutedEventArgs e)
        {
            FolderPicker folderPicker = new FolderPicker();

            folderPicker.FileTypeFilter.Add(".jpg");
            folderPicker.FileTypeFilter.Add(".jpeg");
            folderPicker.FileTypeFilter.Add(".png");
            folderPicker.FileTypeFilter.Add(".bmp");
            folderPicker.ViewMode = PickerViewMode.Thumbnail;

            StorageFolder photoFolder = await folderPicker.PickSingleFolderAsync();

            if (photoFolder == null)
            {
                return;
            }

            var files = await photoFolder.GetFilesAsync();

            List <Scores> E = new List <Scores>();

            int[] num = new int[files.Count];

            for (int i = 0; i < files.Count; i++)
            {
                IRandomAccessStream fileStream = await files[i].OpenAsync(FileAccessMode.Read);
                BitmapDecoder       decoder    = await BitmapDecoder.CreateAsync(fileStream);

                BitmapTransform transform = new BitmapTransform();
                const float     sourceImageHeightLimit = 1280;

                if (decoder.PixelHeight > sourceImageHeightLimit)
                {
                    float scalingFactor = (float)sourceImageHeightLimit / (float)decoder.PixelHeight;
                    transform.ScaledWidth  = (uint)Math.Floor(decoder.PixelWidth * scalingFactor);
                    transform.ScaledHeight = (uint)Math.Floor(decoder.PixelHeight * scalingFactor);
                }

                SoftwareBitmap sourceBitmap = await decoder.GetSoftwareBitmapAsync(decoder.BitmapPixelFormat, BitmapAlphaMode.Premultiplied, transform, ExifOrientationMode.IgnoreExifOrientation, ColorManagementMode.DoNotColorManage);

                const BitmapPixelFormat faceDetectionPixelFormat = BitmapPixelFormat.Gray8;

                SoftwareBitmap convertedBitmap;

                if (sourceBitmap.BitmapPixelFormat != faceDetectionPixelFormat)
                {
                    convertedBitmap = SoftwareBitmap.Convert(sourceBitmap, faceDetectionPixelFormat);
                }
                else
                {
                    convertedBitmap = sourceBitmap;
                }

                if (faceDetector == null)
                {
                    faceDetector = await FaceDetector.CreateAsync();
                }

                detectedFaces = await faceDetector.DetectFacesAsync(convertedBitmap);

                Scores sc = null;

                if (detectedFaces.Count > 0)
                {
                    num[i] = detectedFaces.Count;
                    FaceRectangle rectID = new FaceRectangle();
                    rectID = await UploadAndDetectFaces(files[i].Path);

                    if (rectID != null)
                    {
                        sc = await EstimateEmotions(files[i].Path, rectID);
                    }
                }

                E.Add(sc);
                if (sc != null)
                {
                    Items.Add(new DataItem(i.ToString(), (int)(sc.Happiness * 100)));
                }

                sourceBitmap.Dispose();
                fileStream.Dispose();
                convertedBitmap.Dispose();
            }
        }