// Use this for initialization
void Start ()
{
    //initialize FaceTracker
    FaceTracker faceTracker = new FaceTracker (Utils.getFilePath ("tracker_model.json"));
    //initialize FaceTrackerParams
    FaceTrackerParams faceTrackerParams = new FaceTrackerParams ();

    gameObject.transform.localScale = new Vector3 (imgTexture.width, imgTexture.height, 1);
    Camera.main.orthographicSize = imgTexture.height / 2;

    Mat imgMat = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC4);

    Utils.texture2DToMat (imgTexture, imgMat);
    Debug.Log ("imgMat dst ToString " + imgMat.ToString ());

    CascadeClassifier cascade = new CascadeClassifier (Utils.getFilePath ("haarcascade_frontalface_alt.xml"));
    if (cascade.empty ()) {
        Debug.LogError ("cascade file is not loaded. Please copy from “FaceTrackerSample/StreamingAssets/” to “Assets/StreamingAssets/” folder.");
    }

    //convert image to grayscale
    Mat gray = new Mat ();
    Imgproc.cvtColor (imgMat, gray, Imgproc.COLOR_RGBA2GRAY);

    MatOfRect faces = new MatOfRect ();

    Imgproc.equalizeHist (gray, gray);

    cascade.detectMultiScale (gray, faces, 1.1f, 2, 0
//        | Objdetect.CASCADE_FIND_BIGGEST_OBJECT
        | Objdetect.CASCADE_SCALE_IMAGE, new OpenCVForUnity.Size (gray.cols () * 0.05, gray.cols () * 0.05), new Size ());

    Debug.Log ("faces " + faces.dump ());

    if (faces.rows () > 0) {
        //add initial face points from MatOfRect
        faceTracker.addPoints (faces);
    }

    //track face points. If no face points have been added, track () always returns false.
    if (faceTracker.track (imgMat, faceTrackerParams))
        faceTracker.draw (imgMat, new Scalar (255, 0, 0, 255), new Scalar (0, 255, 0, 255));

    Texture2D texture = new Texture2D (imgMat.cols (), imgMat.rows (), TextureFormat.RGBA32, false);

    Utils.matToTexture2D (imgMat, texture);

    gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
}
// Use this for initialization
void Start ()
{
    //initialize FaceTracker
    faceTracker = new FaceTracker (Utils.getFilePath ("tracker_model.json"));
    //initialize FaceTrackerParams
    faceTrackerParams = new FaceTrackerParams ();

    webCamTextureToMatHelper = gameObject.GetComponent<WebCamTextureToMatHelper> ();
    webCamTextureToMatHelper.Init (OnWebCamTextureToMatHelperInited, OnWebCamTextureToMatHelperDisposed);
}
Example #3
        private FaceMatrix(FaceTracker faceTracker, MediaCapture mediaCapture, int rowsCount, int columnsCount)
        {
            _faceTracker = faceTracker;
            _mediaCapture = mediaCapture;
            
            // get properties of the stream, we need them to get width/height for face detection
            var videoProperties = _mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            _previewFrame = new VideoFrame(BitmapPixelFormat.Nv12, (int)videoProperties.Width, (int)videoProperties.Height);

            _rowsCount = rowsCount;
            _columnsCount = columnsCount;
        }
Example #4
        public async Task Initialize(string cameraName = "LifeCam")
        {
            // select the camera
            var devices = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);

            var device   = devices.FirstOrDefault(d => d.Name.ToLowerInvariant().Contains(cameraName.ToLower())) ?? devices.FirstOrDefault();
            var settings = new MediaCaptureInitializationSettings()
            {
                VideoDeviceId = device.Id
            };

            // initialize the camera
            mediaCapture = new MediaCapture();
            await mediaCapture.InitializeAsync(settings);

            // select a lower framerate and resolution to reduce USB bandwidth
            var props = mediaCapture
                        .VideoDeviceController
                        .GetAvailableMediaStreamProperties(MediaStreamType.VideoPreview)
                        .Cast <VideoEncodingProperties>()
                        .First(p => p.FrameRate.Numerator == 10 && p.Height == 720);
            await mediaCapture.VideoDeviceController.SetMediaStreamPropertiesAsync(MediaStreamType.VideoPreview, props);

            // start the preview feed (a CaptureElement is required to sync the feed)
            captureElement = new CaptureElement()
            {
                Source = mediaCapture
            };
            await mediaCapture.StartPreviewAsync();

            // get the video properties
            var previewProperties = mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

            ImageHeight = (int)previewProperties.Height;
            ImageWidth  = (int)previewProperties.Width;

            // initialize face tracking
            faceTracker = await FaceTracker.CreateAsync();

            // Get the known persons
            var persons = await faceClient.GetPersonsAsync(personGroupId);

            personMap = persons.ToDictionary(p => p.PersonId, p => p.Name);
        }
Example #5
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        this.facePoints = frame.GetProjected3DShape();
                    }
                }
            }
Example #6
    public void Run()
    {
        //initialize FaceTracker
        faceTracker = new FaceTracker(tracker_model_json_filepath);
        //initialize FaceTrackerParams
        faceTrackerParams = new FaceTrackerParams();

        cascade = new CascadeClassifier();
        cascade.load(haarcascade_frontalface_alt_xml_filepath);
//            if (cascade.empty())
//            {
//                Debug.LogError("cascade file is not loaded.Please copy from “FaceTrackerExample/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
//            }

        #if UNITY_ANDROID && !UNITY_EDITOR
        // Avoids the front camera low light issue that occurs in only some Android devices (e.g. Google Pixel, Pixel2).
        webCamTextureToMatHelper.avoidAndroidFrontCameraLowLightIssue = true;
        #endif
        webCamTextureToMatHelper.Initialize();
        if (SpeechRecognizer.ExistsOnDevice())
        {
            resultText.text = "I am running run";
            SpeechRecognizerListener listener = GameObject.FindObjectOfType <SpeechRecognizerListener>();
            listener.onAuthorizationStatusFetched.AddListener(OnAuthorizationStatusFetched);
            listener.onAvailabilityChanged.AddListener(OnAvailabilityChange);
            listener.onErrorDuringRecording.AddListener(OnError);
            listener.onErrorOnStartRecording.AddListener(OnError);
            listener.onFinalResults.AddListener(OnFinalResult);
            listener.onPartialResults.AddListener(OnPartialResult);
            listener.onEndOfSpeech.AddListener(OnEndOfSpeech);
            //startRecordingButton.enabled = false;
            SpeechRecognizer.RequestAccess();
            SpeechRecognizer.StartRecording(true);
            resultText.text = "Say something :-)";
        }
        else
        {
            resultText.text = "Sorry, but this device doesn't support speech recognition";
            Debug.Log("Next Command is crossfade from run function");
            //GameObject.FindGameObjectWithTag("twohand)").GetComponent<Animator>().CrossFade("V", -1);
            //startRecordingButton.enabled = false;
        }
    }
Example #7
        private async Task StartPreviewAsync()
        {
            try
            {
                _mediaCapture = new MediaCapture();

                await _mediaCapture.InitializeAsync();

                _displayRequest.RequestActive();
                DisplayInformation.AutoRotationPreferences = DisplayOrientations.Landscape;
            }
            catch (UnauthorizedAccessException)
            {
                // This will be thrown if the user denied access to the camera in privacy settings
                //("The app was denied access to the camera");
                return;
            }

            try
            {
                PreviewControl.Source = _mediaCapture;
                await _mediaCapture.StartPreviewAsync();

                _isPreviewing = true;
            }
            catch (System.IO.FileLoadException)
            {
                _mediaCapture.CaptureDeviceExclusiveControlStatusChanged += _mediaCapture_CaptureDeviceExclusiveControlStatusChanged;
            }


            //initialize UI face tracking
            this.faceTracker = await FaceTracker.CreateAsync();

            TimeSpan UIUpdateTimer = TimeSpan.FromMilliseconds(66); // 15 fps

            this.frameProcessingTimer = Windows.System.Threading.ThreadPoolTimer.CreatePeriodicTimer(new Windows.System.Threading.TimerElapsedHandler(ProcessCurrentVideoFrame), UIUpdateTimer);

            //initialize remote emotion detection
            TimeSpan EmotionUpdateTimer = TimeSpan.FromMilliseconds(2000); // every 2 seconds

            this.EmotionProcessingTimer = Windows.System.Threading.ThreadPoolTimer.CreatePeriodicTimer(new Windows.System.Threading.TimerElapsedHandler(AnalyzeEmotion), EmotionUpdateTimer);
        }
Example #8
        private async Task InitFacialeRecon()
        {
            // Creates the Face tracker object
            this.faceTracker = await FaceTracker.CreateAsync();

            // Set the frame rate
            TimeSpan timerInterval = TimeSpan.FromMilliseconds(66); // 15 fps

            Debug.WriteLine("Face tracker initializating");
            this.frameProcessingTimer = Windows.System.Threading.ThreadPoolTimer.CreatePeriodicTimer(new Windows.System.Threading.TimerElapsedHandler(ProcessCurrentVideoFrame), timerInterval);
            Debug.WriteLine("Face tracker initializated !");

            // Gets the video properties
            var deviceController = this._mediaCapture.VideoDeviceController;

            this.videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

            // Process frames with the tracker
            ProcessCurrentVideoFrame(frameProcessingTimer);
        }
        private void Run()
        {
            //initialize FaceTracker
            faceTracker = new FaceTracker(tracker_model_json_filepath);
            //initialize FaceTrackerParams
            faceTrackerParams = new FaceTrackerParams();

            cascade = new CascadeClassifier();
            cascade.load(haarcascade_frontalface_alt_xml_filepath);
//            if (cascade.empty())
//            {
//                Debug.LogError("cascade file is not loaded.Please copy from “FaceTrackerExample/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
//            }

            #if UNITY_ANDROID && !UNITY_EDITOR
            // Avoids the front camera low light issue that occurs in only some Android devices (e.g. Google Pixel, Pixel2).
            webCamTextureToMatHelper.avoidAndroidFrontCameraLowLightIssue = true;
            #endif
            webCamTextureToMatHelper.Initialize();
        }
            public TrackerVisualizer(FaceTracker tracker)
            {
                this.tracker = tracker;

                Add(box = new Box {
                    Colour = Colour4.Red, Alpha = 0.5f
                });

                for (int i = 0; i < 73; i++)
                {
                    var circle = new Circle
                    {
                        Size   = new Vector2(2.5f),
                        Colour = Colour4.Blue,
                        Alpha  = 0.0f
                    };

                    circles.Add(circle);
                    Add(circle);
                }
            }
        public void StartKinectST()
        {
            foreach (double filterValue in gaussFilter)
            {
                gaussFactor += filterValue;
            }

            Kinect = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected); // Get first Kinect Sensor
            Kinect.SkeletonStream.Enable();                                                              // Enable skeletal tracking
            Kinect.ColorStream.Enable();
            Kinect.DepthStream.Enable();
            SkeletonData = new Skeleton[Kinect.SkeletonStream.FrameSkeletonArrayLength]; // Allocate ST data
            Kinect.Start();                                                              // Start Kinect sensor
            faceTracker            = new FaceTracker(Kinect);
            Kinect.AllFramesReady += this.OnAllFramesReady;

            //Set Near and Seated Mode
            Kinect.SkeletonStream.EnableTrackingInNearRange = true;
            Kinect.DepthStream.Range           = DepthRange.Near;
            Kinect.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
        }
Example #12
        public TestSceneFaceTracking()
        {
            Children = new Drawable[]
            {
                tracker = new FaceTracker(),
                camera  = new CameraSprite
                {
                    CameraID = 0,
                },
                faceLocationsContainer = new Container <TrackingBox>
                {
                    Name = @"face locations",
                    Size = new Vector2(640, 480),
                },
                new Container
                {
                    AutoSizeAxes = Axes.Both,
                    Children     = new Drawable[]
                    {
                        new Box
                        {
                            Colour           = Colour4.Black,
                            RelativeSizeAxes = Axes.Both,
                        },
                        status = new SpriteText
                        {
                            AlwaysPresent = true,
                            Margin        = new MarginPadding(5)
                        },
                    }
                }
            };

            tracker.StartTracking(camera);
            tracker.OnTrackerUpdate += _ =>
            {
                trackerDeltaTime = Time.Current - lastTrackingTime;
                lastTrackingTime = Time.Current;
            };
        }
Example #13
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest).Clone() as FaceTrackFrame;

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        /*if (faceTriangles == null)
                         * {
                         *  // only need to get this once.  It doesn't change.
                         *  faceTriangles = frame.GetTriangles();
                         * }
                         *
                         * this.facePoints = frame.GetProjected3DShape();*/
                    }
                }
            }
    private async void InitializeFacialRecognition()
    {
        if (m_faceTracker == null)
        {
            m_faceTracker = await FaceTracker.CreateAsync();
        }

        m_mediaCapture = new MediaCapture();
        MediaCaptureInitializationSettings settings = new MediaCaptureInitializationSettings();

        settings.StreamingCaptureMode = StreamingCaptureMode.Video;
        await m_mediaCapture.InitializeAsync(settings);

        VideoDeviceController deviceController = m_mediaCapture.VideoDeviceController;

        m_videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
        await m_mediaCapture.StartPreviewAsync();

        TimeSpan timerInterval = TimeSpan.FromMilliseconds(66);

        m_frameProcessingTimer = ThreadPoolTimer.CreatePeriodicTimer(new TimerElapsedHandler(ProcessCurrentVideoFrame), timerInterval);
    }
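
The UWP examples above repeatedly register ProcessCurrentVideoFrame with a periodic timer but never show its body. Below is a minimal sketch of what such a handler might look like, assuming the m_faceTracker, m_mediaCapture and m_videoProperties fields from the example above plus a hypothetical SemaphoreSlim named m_frameProcessingSemaphore; it follows the usual Windows.Media.FaceAnalysis pattern of copying the preview into an Nv12 VideoFrame and passing it to FaceTracker.ProcessNextFrameAsync. This is an illustration, not the original project's implementation.

        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            // Skip this tick if the previous frame is still being processed.
            if (!m_frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                // The UWP FaceTracker expects Nv12 frames sized to the preview stream.
                using (VideoFrame previewFrame = new VideoFrame(BitmapPixelFormat.Nv12,
                                                                (int)m_videoProperties.Width,
                                                                (int)m_videoProperties.Height))
                {
                    await m_mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // Detect faces in the current preview frame.
                    IList<DetectedFace> faces = await m_faceTracker.ProcessNextFrameAsync(previewFrame);

                    // Drawing of the face rectangles is omitted here.
                    Debug.WriteLine("Faces detected: " + faces.Count);
                }
            }
            catch (Exception ex)
            {
                Debug.WriteLine(ex.Message);
            }
            finally
            {
                m_frameProcessingSemaphore.Release();
            }
        }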
Example #15
        /// <summary>
        /// Face rotation display angle increment in degrees
        /// </summary>
        // const double FaceRotationIncrementInDegrees = 0.1;

        public MainWindow()
        {
            // Standard boilerplate that takes care of the component setup.
            InitializeComponent();

            try
            {
                // kinect is connected?
                if (KinectSensor.KinectSensors.Count == 0)
                {
                    // Error handling
                    MessageBox.Show("No KinectSensors detected");
                    Close();
                }
                // Get the KinectSensor instance of the first Kinect and store it in kinect.
                // With multiple sensors a foreach loop is convenient; in that case use this (?)
                kinect = KinectSensor.KinectSensors[0];

                // Enable the Color, Depth and Skeleton streams
                kinect.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
                kinect.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
                kinect.SkeletonStream.Enable();
                // kinect.ColorStream.Enable(ColorImageFormat.InfraredResolution640x480Fps30);

                // Event raised whenever all frames have been updated
                kinect.AllFramesReady += new EventHandler <AllFramesReadyEventArgs>(Kinect_AllFramesReady);

                // Start the sensor
                kinect.Start();

                // Create the face tracking instance
                faceTracker = new FaceTracker(kinect);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                Close();
            }
        }
Example #16
        private void Init()
        {
            try
            {
                string[] splitString = { ", " };
                gaussFactor = 0;
                gaussFilter.Clear();
                string[] filterValues = tbSmoothingFilter.Text.Split(splitString, StringSplitOptions.None);

                foreach (var filterValue in filterValues)
                {
                    int value = Int32.Parse(filterValue);
                    gaussFilter.Add(value);
                    gaussFactor += value;
                }
                Kinect = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected); // Get first Kinect Sensor
                Kinect.SkeletonStream.Enable();                                                              // Enable skeletal tracking
                Kinect.ColorStream.Enable();
                Kinect.DepthStream.Enable();
                SkeletonData = new Skeleton[Kinect.SkeletonStream.FrameSkeletonArrayLength]; // Allocate ST data
                Kinect.Start();                                                              // Start Kinect sensor
                faceTracker            = new FaceTracker(Kinect);
                Kinect.AllFramesReady += this.OnAllFramesReady;

                //Set Near and Seated Mode
                //Not available for Xbox Sensor!
                Kinect.SkeletonStream.EnableTrackingInNearRange = true;
                Kinect.DepthStream.Range           = DepthRange.Near;
                Kinect.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                File.AppendAllText("init.txt", DateTime.Now + " - Kinect sensor initialized successfully.\n");
                useSpeach = InitSpeechRecognition();
            }
            catch (Exception e)
            {
                System.Windows.MessageBox.Show("Error during Kinect Initialization. Ensure that Kinect sensor is connected correctly.\n\nError Message:\n" + e.ToString());
                File.AppendAllText("init.txt", DateTime.Now + " - Error during Kinect initialization.\n");
            }
        }
Example #17
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            faceTriangles = frame.GetTriangles();
                        }

                        this.facePoints = frame.GetProjected3DShape();
                    }
                }
            }
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            _state = StreamingState.Idle;
            if (_faceTracker == null)
            {
                _faceTracker = await FaceTracker.CreateAsync();
            }

            if (_faceApiHelper == null)
            {
                try
                {
                    _faceApiHelper = new FaceApiHelper();
                    await _faceApiHelper.CheckGroupExistAsync();
                }
                catch (Microsoft.ProjectOxford.Face.FaceAPIException faceEx)
                {
                    ShowAlertHelper.ShowDialog(faceEx.ErrorMessage, faceEx.ErrorCode);
                }
                catch (Exception ex)
                {
                    ShowAlertHelper.ShowDialog(ex.Message);
                }
            }

            if (_dataHelper == null)
            {
                try
                {
                    _dataHelper = new DataHelper();
                }
                catch (Exception ex)
                {
                    ShowAlertHelper.ShowDialog(ex.Message);
                }
            }
        }
        private async Task <bool> StartWebcamStreamingAsync()
        {
            _faceTracker = await FaceTracker.CreateAsync();

            try
            {
                _mediaCapture = new MediaCapture();
                await _mediaCapture.InitializeAsync(new MediaCaptureInitializationSettings
                {
                    StreamingCaptureMode = StreamingCaptureMode.Video
                });

                _mediaCapture.Failed += (s, a) => AbandonStreaming();

                var deviceController = _mediaCapture.VideoDeviceController;
                _videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

                CameraPreview.Source = _mediaCapture;
                await _mediaCapture.StartPreviewAsync();

                var timerInterval = TimeSpan.FromMilliseconds(66); // 66 ms, approx. 15 fps
                _frameProcessingTimer = ThreadPoolTimer.CreatePeriodicTimer(ProcessCurrentVideoFrame, timerInterval);

                return(true);
            }
            catch (UnauthorizedAccessException)
            {
                NavigateToPermissionsPage();
                return(false);
            }
            catch (Exception exception)
            {
                await DisplayMessage($"Error starting the camera stream: {exception.Message}");

                return(false);
            }
        }
Example #20
        public void init(Context context, FaceEnvironment environment)
        {
            this.context         = context;
            this.faceEnvironment = environment;

            if (mFaceTracker == null)
            {
                mFaceTracker = new FaceTracker(context);
                mFaceTracker.Set_isFineAlign(false);
                mFaceTracker.Set_isVerifyLive(false);
                mFaceTracker.Set_isCheckQuality(environment.isCheckQuality());
                mFaceTracker.Set_DetectMethodType(1);
                mFaceTracker.Set_isCheckQuality(environment.isCheckQuality());
                mFaceTracker.Set_notFace_thr(environment.getNotFaceThreshold());
                mFaceTracker.Set_min_face_size(environment.getMiniFaceSize());
                mFaceTracker.Set_cropFaceSize(FaceEnvironment.VALUE_CROP_FACE_SIZE);
                mFaceTracker.Set_illum_thr(environment.getIlluminationThreshold());
                mFaceTracker.Set_blur_thr(environment.getBlurrinessThreshold());
                mFaceTracker.Set_occlu_thr(environment.getOcclulationThreshold());
                mFaceTracker.Set_max_reg_img_num(FaceEnvironment.VALUE_MAX_CROP_IMAGE_NUM);
                mFaceTracker.Set_eulur_angle_thr(environment.getPitch(), environment.getYaw(), environment.getRoll());
                // mFaceTracker.set_track_by_detection_interval(50);
            }
        }
        private void Run()
        {
            //set 3d face object points.
            objectPoints = new MatOfPoint3f(new Point3(-31, 72, 86), //l eye
                                            new Point3(31, 72, 86),  //r eye
                                            new Point3(0, 40, 114),  //nose
                                            new Point3(-20, 15, 90), //l mouth corner
                                            new Point3(20, 15, 90)   //r mouth corner
//                                                                                                                                                            ,
//                new Point3 (-70, 60, -9),//l ear
//                new Point3 (70, 60, -9)//r ear
                                            );
            imagePoints = new MatOfPoint2f();
            rvec        = new Mat();
            tvec        = new Mat();
            rotM        = new Mat(3, 3, CvType.CV_64FC1);

            //initialize FaceTracker
            faceTracker = new FaceTracker(tracker_model_json_filepath);
            //initialize FaceTrackerParams
            faceTrackerParams = new FaceTrackerParams();

            cascade = new CascadeClassifier();
            cascade.load(haarcascade_frontalface_alt_xml_filepath);
//            if (cascade.empty())
//            {
//                Debug.LogError("cascade file is not loaded.Please copy from “FaceTrackerExample/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
//            }


            #if UNITY_ANDROID && !UNITY_EDITOR
            // Avoids the front camera low light issue that occurs in only some Android devices (e.g. Google Pixel, Pixel2).
            webCamTextureToMatHelper.avoidAndroidFrontCameraLowLightIssue = true;
            #endif
            webCamTextureToMatHelper.Initialize();
        }
Example #22
        /// <summary>
        /// Responds when we navigate to this page.
        /// </summary>
        /// <param name="e">Event data</param>
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            if (ApplicationData.Current.LocalSettings.Values.ContainsKey("ApiKey"))
            {
                ApiKey.Text = ApplicationData.Current.LocalSettings.Values["ApiKey"].ToString();
            }
            if (ApplicationData.Current.LocalSettings.Values.ContainsKey("ApiEndPoint"))
            {
                ApiEndPoint.Text = ApplicationData.Current.LocalSettings.Values["ApiEndPoint"].ToString();
            }

            if (this.faceTracker == null)
            {
                this.faceTracker = await FaceTracker.CreateAsync();
            }

            if (this.faceClient == null)
            {
                this.faceClient = new FaceClient(
                    new ApiKeyServiceClientCredentials(ApiKey.Text),
                    new System.Net.Http.DelegatingHandler[] { })
                {
                    Endpoint = ApiEndPoint.Text
                };
            }

            if (captureFolder == null)
            {
                var picturesLibrary = await StorageLibrary.GetLibraryAsync(KnownLibraryId.Pictures);

                // Fall back to the local app storage if the Pictures Library is not available
                captureFolder = picturesLibrary.SaveFolder ?? ApplicationData.Current.LocalFolder;
            }

            await this.StartWebcamStreaming();
        }
        /// <summary>
        /// Selects the FeaturePoints of all tracked skeletons from the source observable.
        /// </summary>
        /// <param name="observable">The source observable.</param>
        /// <param name="faceTracker">The FaceTracker that is used to track the faces.</param>
        /// <returns>A sequence of a collection of FeaturePoints and their identifiers in a tuple.</returns>
        public static IObservable <IEnumerable <Tuple <Int32, SkeletonTrackingState, JointCollection, EnumIndexableCollection <FeaturePoint, PointF> > > > SelectPersonPoints(this IObservable <Tuple <ColorImageFormat, byte[], DepthImageFormat, short[], Skeleton[]> > observable, FaceTracker faceTracker)
        {
            if (observable == null)
            {
                throw new ArgumentNullException("observable");
            }
            if (faceTracker == null)
            {
                throw new ArgumentNullException("faceTracker");
            }

            return(observable.Select(_ => _.Item5.ForEach <Skeleton, Tuple <Int32, SkeletonTrackingState, JointCollection, EnumIndexableCollection <FeaturePoint, PointF> > >(__ =>
            {
                if (__.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    return Tuple.Create <Int32, SkeletonTrackingState, JointCollection, EnumIndexableCollection <FeaturePoint, PointF> >(__.TrackingId, __.TrackingState, __.Joints, null);
                }

                var faceTrackFrame = faceTracker.Track(_.Item1, _.Item2, _.Item3, _.Item4, __);

                if (!faceTrackFrame.TrackSuccessful)
                {
                    return Tuple.Create <Int32, SkeletonTrackingState, JointCollection, EnumIndexableCollection <FeaturePoint, PointF> >(__.TrackingId, __.TrackingState, __.Joints, null);
                }

                return Tuple.Create(__.TrackingId, __.TrackingState, __.Joints, faceTrackFrame.GetProjected3DShape());
            })));
        }
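
A brief usage sketch for the SelectPersonPoints extension above. The source observable of Kinect frame tuples (here called allFramesObservable, produced by an assumed GetAllFramesObservable helper) and the Rx Subscribe(Action<T>) extension are assumptions made for illustration; only the extension method itself comes from the example.

        // Hypothetical source: frame tuples assembled elsewhere from the sensor's AllFramesReady event.
        IObservable<Tuple<ColorImageFormat, byte[], DepthImageFormat, short[], Skeleton[]>> allFramesObservable = GetAllFramesObservable();

        IDisposable subscription = allFramesObservable
            .SelectPersonPoints(faceTracker)
            .Subscribe(people =>
            {
                foreach (var person in people)
                {
                    // Item1 = tracking id, Item2 = tracking state, Item3 = joints,
                    // Item4 = projected face points (null when the face was not tracked).
                    if (person.Item4 != null)
                    {
                        Debug.WriteLine("Skeleton " + person.Item1 + " has projected face points.");
                    }
                }
            });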
 public CalibrationCompletedDataSender(IMessageSender sender, FaceTracker faceTracker)
 {
     faceTracker.CalibrationCompleted +=
         data => sender.SendCommand(MessageFactory.Instance.SetCalibrationFaceData(data));
 }
        public FaceTrackerProxy (Canvas canvas, MainPage page, CaptureElement capture, MediaCapture mediacapture ) {


            if (this.faceTracker == null)
            {
                this.faceTracker = FaceTracker.CreateAsync().AsTask().Result;
            }

            rootPage = page;
            VisualizationCanvas = canvas;

            this.VisualizationCanvas.Children.Clear();

            mediaCapture = mediacapture;

            var deviceController = mediaCapture.VideoDeviceController;
            this.videoProperties = deviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            currentState = ScenarioState.Streaming;

            // Ensure the Semaphore is in the signalled state.
            this.frameProcessingSemaphore.Release();

            // Use a 200 millisecond interval for our timer, i.e. 5 frames per second
            TimeSpan timerInterval = TimeSpan.FromMilliseconds(200);
            this.frameProcessingTimer = Windows.System.Threading.ThreadPoolTimer.CreatePeriodicTimer(new Windows.System.Threading.TimerElapsedHandler(ProcessCurrentVideoFrame), timerInterval);


        }
        // Use this for initialization
        void Start()
        {
            //initialize FaceTracker
            faceTracker = new FaceTracker (Utils.getFilePath ("tracker_model.json"));
            //initialize FaceTrackerParams
            faceTrackerParams = new FaceTrackerParams ();

            StartCoroutine (init ());
        }
Example #27
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                if (!timerStarted)
                {
                    timer.Start();
                    timerStarted = true;
                }
                //increment our frames
                numberOfFrames++;


                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        this.facePoints = frame.GetProjected3DShape();

                        /*if ()
                         * {
                         *  Debug.WriteLine("hit " + (frameIter * sampleRate) + " frames in " + (timer.Elapsed) + " seconds");
                         *  frameIter++;
                         * }*/

                        //Also grab our points
                        EnumIndexableCollection <FeaturePoint, Vector3DF> facePoints3D = frame.Get3DShape();
                        int index = 0;
                        if (numberOfFrames > frameIter * sampleRate && frameIter < 5) //only grab 4 samples over our given sample rate
                        {
                            //Create a new thread so we don't make the visual thread throw up all over the place
                            new Thread(() =>
                            {
                                Thread.CurrentThread.IsBackground = true;

                                List <Tuple <float, float, float> > myPoints = new List <Tuple <float, float, float> >();
                                foreach (Vector3DF vector in facePoints3D)
                                {
                                    //csv.Append(string.Format("( ({1}, {2}, {3}){4}",vector.X, vector.Y, vector.Z, Environment.NewLine));
                                    myPoints.Add(new Tuple <float, float, float>(vector.X, vector.Y, vector.Z));
                                    index++;
                                }
                                calculateDistances(myPoints);
                                frameIter++;
                            }).Start();
                            //once = true;
                        }

                        if (frameIter == 5)
                        {
                            SetStatusText("Generating histograms...");
                            Console.WriteLine("We are ready to sample");
                            foreach (float distance in sampleOneDistances)
                            {
                                int sampleOneIndex = (int)Math.Floor(64 * distance / sampleOneMaxDistance);
                                sampleOneHistogram[sampleOneIndex]++;
                            }
                            foreach (float distance in sampleTwoDistances)
                            {
                                sampleTwoHistogram[(int)Math.Floor(64 * distance / sampleTwoMaxDistance)]++;
                            }
                            foreach (float distance in sampleThreeDistances)
                            {
                                sampleThreeHistogram[(int)Math.Floor(64 * distance / sampleThreeMaxDistance)]++;
                            }
                            foreach (float distance in sampleFourDistances)
                            {
                                sampleFourHistogram[(int)Math.Floor(64 * distance / sampleFourMaxDistance)]++;
                            }

                            //Go through histogram and divide by distances



                            //Get
                            for (int i = 0; i < sampleOneHistogram.Length; i++)
                            {
                                sampleOneHistogram[i] = sampleOneHistogram[i] / sampleOneDistances.Count;
                            }

                            for (int i = 0; i < sampleTwoHistogram.Length; i++)
                            {
                                sampleTwoHistogram[i] = sampleTwoHistogram[i] / sampleTwoDistances.Count;
                            }

                            for (int i = 0; i < sampleThreeHistogram.Length; i++)
                            {
                                sampleThreeHistogram[i] = sampleThreeHistogram[i] / sampleThreeDistances.Count;
                            }

                            for (int i = 0; i < sampleFourHistogram.Length; i++)
                            {
                                sampleFourHistogram[i] = sampleFourHistogram[i] / sampleFourDistances.Count;
                            }

                            int iter = 0;

                            foreach (int count in sampleTwoHistogram)//can iterate through any histogram, they're all of size 65
                            {
                                Console.WriteLine("Count for hist1/2/3/4[" + iter + "] is " + count + "/" + sampleOneHistogram[iter] + "/" + sampleThreeHistogram[iter] + "/" + sampleFourHistogram[iter]);
                                iter++;
                            }

                            //Write our histograms to a csv file
                            String[] sampleOneHistString = Array.ConvertAll(sampleOneHistogram, x => x.ToString());


                            using (System.IO.StreamWriter file = new System.IO.StreamWriter(testFilePath))
                            {
                                file.Write(string.Join(",", Enumerable.Range(1, 65).ToArray()) + Environment.NewLine);
                                file.Write(string.Join(",", sampleOneHistString));
                                file.Write(Environment.NewLine);
                                file.Write(string.Join(",", Array.ConvertAll(sampleTwoHistogram, x => x.ToString())));
                                file.Write(Environment.NewLine);
                                file.Write(string.Join(",", Array.ConvertAll(sampleThreeHistogram, x => x.ToString())));
                                file.Write(Environment.NewLine);
                                file.Write(string.Join(",", Array.ConvertAll(sampleFourHistogram, x => x.ToString())));
                            }
                            //pass that data file to jar
                            String jarPath = "C:\\Users\\Datalab\\Documents\\GitHub\\WekaClassifier\\jar\\wekaClassifier.jar";
                            System.Diagnostics.Process clientProcess = new Process();
                            String jarargs = "C:\\Users\\Datalab\\Documents\\GitHub\\WekaClassifier\\data\\training_data.arff  C:\\Users\\Datalab\\Documents\\GitHub\\WekaClassifier\\data\\testFormat.dat";
                            clientProcess.StartInfo.FileName  = "java";
                            clientProcess.StartInfo.Arguments = "-jar " + jarPath + " " + jarargs;
                            clientProcess.StartInfo.RedirectStandardOutput = true;
                            clientProcess.StartInfo.UseShellExecute        = false;
                            clientProcess.Start();


                            String output = clientProcess.StandardOutput.ReadToEnd();
                            Console.WriteLine(output);
                            clientProcess.WaitForExit();
                            int code = clientProcess.ExitCode;

                            //write to dat file with 4 histograms averaged


                            frameIter++; //only do this once (it makes the conditional evaluate to false). Is this clean and clear? Not really. Do I care? Not particularly. At least it's documented.
                            ftNumPeople++;
                            SetPeopleText("People tracked : " + ftNumPeople);
                            SetStatusText("Status: waiting....");
                            SetPredictionText("Guess: " + output);
                        }
                    }
                }
            }
        public async Task StartStreamAsync(bool restarted = false, bool isForRealTimeProcessing = false)
        {
            try
            {
                if (captureManager == null || captureManager.CameraStreamState == CameraStreamState.Shutdown)
                {
                    captureManager = new MediaCapture();

                    MediaCaptureInitializationSettings settings = new MediaCaptureInitializationSettings();
                    var allCameras = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);

                    var selectedCamera = allCameras.FirstOrDefault(c => c.Name == SettingsHelper.Instance.CameraName);
                    if (selectedCamera != null)
                    {
                        settings.VideoDeviceId = selectedCamera.Id;
                    }

                    await captureManager.InitializeAsync(settings);
                    await SetVideoEncodingToHighestResolution(isForRealTimeProcessing);

                    this.webCamCaptureElement.Source = captureManager;
                }

                if (captureManager.CameraStreamState == CameraStreamState.NotStreaming)
                {
                    if (this.faceTracker == null)
                    {
                        this.faceTracker = await FaceTracker.CreateAsync();
                    }

                    this.videoProperties = this.captureManager.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

                    await captureManager.StartPreviewAsync();

                    if (this.frameProcessingTimer != null)
                    {
                        this.frameProcessingTimer.Stop();
                        frameProcessingSemaphore.Release();
                    }
                    TimeSpan timerInterval = TimeSpan.FromMilliseconds(66); //15fps
                                                                            // this.frameProcessingTimer = ThreadPoolTimer.CreatePeriodicTimer(new TimerElapsedHandler(ProcessCurrentVideoFrame), timerInterval);
                    DispatcherTimer tim = new DispatcherTimer {
                        Interval = timerInterval
                    };
                    tim.Tick += (a, b) => { ProcessCurrentVideoFrame(tim); };

                    this.frameProcessingTimer = tim; // new DispatcherTimer { Interval = timerInterval };
                                                     //  ThreadPoolTimer.CreatePeriodicTimer(new TimerElapsedHandler(ProcessCurrentVideoFrame), timerInterval);
                                                     //     this.cameraControlSymbol.Symbol = Symbol.Camera;
                    tim.Start();
                    this.webCamCaptureElement.Visibility = Visibility.Visible;
                }

                this.captureManager.Failed += CaptureManager_Failed;
                if (restarted)
                {
                    this.CameraRestarted.Invoke(this, EventArgs.Empty);
                }
            }
            catch (Exception ex)
            {
                await Util.GenericApiCallExceptionHandler(ex, "Error starting the camera.");
            }
        }
        /// <summary>
        /// Track a face and update the states.
        /// </summary>
        /// <param name="sensor">Instance of KinectSensor</param>
        /// <param name="colorImageFormat">Format of the colorImage array</param>
        /// <param name="colorImage">Input color image frame retrieved from Kinect sensor</param>
        /// <param name="depthImageFormat">Format of the depthImage array</param>
        /// <param name="depthImage">Input depth image frame retrieved from Kinect sensor</param>
        /// <param name="skeletonOfInterest">Input skeleton to track. Head and shoulder joints in the skeleton are used to calculate the head vector</param>
        /// <param name="computedBoundingBox">Whether compute the bounding box of the face mask</param>
        public void TrackFace(
            KinectSensor sensor,
            ColorImageFormat colorImageFormat,
            byte[] colorImage,
            DepthImageFormat depthImageFormat,
            short[] depthImage,
            Skeleton skeletonOfInterest,
            bool computedBoundingBox)
        {
            // Reset the valid flag
            this.faceInfo.TrackValid = false;

            if (null == this.faceTracker)
            {
                try
                {
                    this.faceTracker = new FaceTracker(sensor);
                }
                catch (InvalidOperationException)
                {
                    // Fail silently
                    this.faceTracker = null;

                    return;
                }
            }

            // Set the color image width
            Size colorImageSize = Helper.GetImageSize(colorImageFormat);

            this.colorWidth = colorImageSize.Width;

            // Track the face and update the states
            if (this.faceTracker != null && skeletonOfInterest != null && skeletonOfInterest.TrackingState == SkeletonTrackingState.Tracked)
            {
                FaceTrackFrame faceTrackFrame = this.faceTracker.Track(
                    colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                this.faceInfo.TrackValid = faceTrackFrame.TrackSuccessful;
                if (this.faceInfo.TrackValid)
                {
                    this.faceInfo.FaceRect    = faceTrackFrame.FaceRect;
                    this.faceInfo.Rotation    = faceTrackFrame.Rotation;
                    this.faceInfo.Translation = faceTrackFrame.Translation;

                    // Get the bounding box of face mask
                    if (computedBoundingBox)
                    {
                        var shapePoints = faceTrackFrame.Get3DShape();
                        this.ResetBoundingBox();  // Reset the minimum and maximum points of bounding box

                        foreach (var point in shapePoints)
                        {
                            if (point.X < this.minimumPoint.X)
                            {
                                this.minimumPoint.X = point.X;
                            }

                            if (point.X > this.maximumPoint.X)
                            {
                                this.maximumPoint.X = point.X;
                            }

                            if (point.Y < this.minimumPoint.Y)
                            {
                                this.minimumPoint.Y = point.Y;
                            }

                            if (point.Y > this.maximumPoint.Y)
                            {
                                this.maximumPoint.Y = point.Y;
                            }

                            if (point.Z < this.minimumPoint.Z)
                            {
                                this.minimumPoint.Z = point.Z;
                            }

                            if (point.Z > this.maximumPoint.Z)
                            {
                                this.maximumPoint.Z = point.Z;
                            }
                        }
                    }
                }
            }

            // To render the face rectangle
            Dispatcher.BeginInvoke((Action)(() => this.InvalidateVisual()));
        }
Example #30
        void KinectFaceNode_AllFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            if (face == null)
            {
                face = new FaceTracker(this.runtime.Runtime);
            }

            colorImageFrame = e.OpenColorImageFrame();
            depthImageFrame = e.OpenDepthImageFrame();
            skeletonFrame   = e.OpenSkeletonFrame();

            if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
            {
                return;
            }

            if (this.depthImage == null)
            {
                this.depthImage = new short[depthImageFrame.PixelDataLength];
            }

            if (this.colorImage == null)
            {
                this.colorImage = new byte[colorImageFrame.PixelDataLength];
            }

            if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
            {
                this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
            }

            colorImageFrame.CopyPixelDataTo(this.colorImage);
            depthImageFrame.CopyPixelDataTo(this.depthImage);
            skeletonFrame.CopySkeletonDataTo(this.skeletonData);

            foreach (Skeleton skeleton in this.skeletonData)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of any skeleton, tracked or untracked.
                    if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(this.runtime.Runtime, ColorImageFormat.RgbResolution640x480Fps30, colorImage, DepthImageFormat.Resolution320x240Fps30, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }
            }

            this.RemoveOldTrackers(skeletonFrame.FrameNumber);

            colorImageFrame.Dispose();
            depthImageFrame.Dispose();
            skeletonFrame.Dispose();

            this.FInvalidate = true;
        }
		/// <summary>
		/// Creates new face tracker
		/// Recycles inactive trackers
		/// </summary>
		/// <param name="trackingRegion">Region of image to track</param>
		/// <param name="xScale">X scale of downsampled image to original image</param>
		/// <param name="yScale">Y scale if downsampled image to original image</param>
		/// <param name="image">Downsampled image from which we trace given rectangle</param>
		private void CreateFaceTracker(Rectangle trackingRegion, double xScale,
									   double yScale, UnmanagedImage image) {
			// Find first not active tracker 
			// object or create new one if none found
			int faceTrackerIndex = -1;
			for (int index = 0; index < this.trackers.Count; index++) {
				if (!this.trackers.ElementAt(index).IsActive) {
					faceTrackerIndex = index;
					break;
				}
			}
			FaceTracker faceTracker;
			if (faceTrackerIndex < 0) {
				faceTracker = new FaceTracker();
				this.trackers.Add(faceTracker);
			}
			else faceTracker = this.trackers.ElementAt(faceTrackerIndex);

			// Reduce the face size to avoid tracking background
			var trackingArea = new Rectangle(
				(int)((trackingRegion.X + trackingRegion.Width / 2f) * xScale),
				(int)((trackingRegion.Y + trackingRegion.Height / 2f) * yScale),
				1, 1);
			trackingArea.Inflate(
				(int)(0.25f * trackingRegion.Width * xScale),
				(int)(0.40f * trackingRegion.Height * yScale));

			// Apply tracking area to tracker object
			faceTracker.IsActive = true;
			faceTracker.Tracker.Reset();
			faceTracker.Tracker.SearchWindow = trackingArea;
			faceTracker.Tracker.ProcessFrame(image);
		}
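
A minimal sketch of how CreateFaceTracker above might be driven from a detection pass. The detection helper (DetectFaces), the faceRegions array, and the original/downsampled size variables are hypothetical names used only for illustration; they are not part of the original project.

		// Hypothetical detection results computed on a downsampled copy of the current frame.
		Rectangle[] faceRegions = DetectFaces(downsampledImage); // assumed Haar/LBP-style detector helper
		double xScale = (double)originalWidth / downsampledImage.Width;
		double yScale = (double)originalHeight / downsampledImage.Height;

		// Hand every detected region to a new (or recycled) tracker.
		foreach (Rectangle region in faceRegions)
		{
			this.CreateFaceTracker(region, xScale, yScale, downsampledImage);
		}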
        public async Task StartStreamAsync(bool isForRealTimeProcessing = false, DeviceInformation desiredCamera = null)
        {
            try
            {
                if (captureManager == null ||
                    captureManager.CameraStreamState == CameraStreamState.Shutdown ||
                    captureManager.CameraStreamState == CameraStreamState.NotStreaming)
                {
                    loadingOverlay.Visibility = Visibility.Visible;

                    if (captureManager != null)
                    {
                        captureManager.Dispose();
                    }

                    captureManager = new MediaCapture();

                    MediaCaptureInitializationSettings settings = new MediaCaptureInitializationSettings();
                    var allCameras = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);

                    var selectedCamera = allCameras.FirstOrDefault(c => c.Name == SettingsHelper.Instance.CameraName);
                    if (desiredCamera != null)
                    {
                        selectedCamera = desiredCamera;
                    }
                    else if (lastUsedCamera != null)
                    {
                        selectedCamera = lastUsedCamera;
                    }

                    if (selectedCamera != null)
                    {
                        settings.VideoDeviceId = selectedCamera.Id;
                        lastUsedCamera         = selectedCamera;
                    }

                    cameraSwitchButton.Visibility = allCameras.Count > 1 ? Visibility.Visible : Visibility.Collapsed;

                    await captureManager.InitializeAsync(settings);

                    await SetVideoEncodingToHighestResolution(isForRealTimeProcessing);

                    isStreamingOnRealtimeResolution = isForRealTimeProcessing;

                    this.webCamCaptureElement.Source = captureManager;
                }

                if (captureManager.CameraStreamState == CameraStreamState.NotStreaming)
                {
                    if (PerformFaceTracking || CameraFrameProcessor != null)
                    {
                        if (this.faceTracker == null)
                        {
                            this.faceTracker = await FaceTracker.CreateAsync();
                        }

                        if (this.frameProcessingTimer != null)
                        {
                            this.frameProcessingTimer.Cancel();
                            frameProcessingSemaphore.Release();
                        }
                        TimeSpan timerInterval = TimeSpan.FromMilliseconds(66); //15fps
                        this.frameProcessingTimer = ThreadPoolTimer.CreatePeriodicTimer(new TimerElapsedHandler(ProcessCurrentVideoFrame), timerInterval);
                    }

                    this.videoProperties = this.captureManager.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
                    await captureManager.StartPreviewAsync();

                    this.webCamCaptureElement.Visibility = Visibility.Visible;

                    loadingOverlay.Visibility = Visibility.Collapsed;
                }
            }
            catch (Exception ex)
            {
                await Util.GenericApiCallExceptionHandler(ex, "Error starting the camera.");
            }
        }
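        // A plausible counterpart to StartStreamAsync (not part of the original excerpt): it
        // cancels the face-tracking timer before stopping the preview so that no frame-processing
        // callback runs against a capture manager that is being torn down.
        public async Task StopStreamAsync()
        {
            try
            {
                if (this.frameProcessingTimer != null)
                {
                    this.frameProcessingTimer.Cancel();
                    this.frameProcessingTimer = null;
                }

                if (captureManager != null &&
                    captureManager.CameraStreamState == CameraStreamState.Streaming)
                {
                    await captureManager.StopPreviewAsync();
                }

                this.webCamCaptureElement.Visibility = Visibility.Collapsed;
            }
            catch (Exception ex)
            {
                await Util.GenericApiCallExceptionHandler(ex, "Error stopping the camera.");
            }
        }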
		/// <summary>
		/// Processes given frame for given face tracker
		/// </summary>
		/// <param name="faceTracker">Face tracker to process</param>
		/// <param name="image">Frame to process for tracker</param>
		/// <returns>Returns asynchronous void Task</returns>
		private async Task TrackFaceAsync(FaceTracker faceTracker, UnmanagedImage image) {
			// Check if face tracker is active
			if (!faceTracker.IsActive) return;

			// Process given frame
			await Task.Run(() => faceTracker.Tracker.ProcessFrame(image));

			// Get object properties
			var trackingObject = faceTracker.Tracker.TrackingObject;

			// If the tracking object is empty or too small, deactivate
			// this tracker so it can be recycled by a new detection
			if (trackingObject.IsEmpty || trackingObject.Area < 5) {
				faceTracker.IsActive = false;
				this.log.Info("Face not in view, tracker set to inactive state");
				return;
			}
		}
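		// A per-frame driver sketch (hypothetical, not part of the original class): it runs
		// every tracker in the collection against the current downsampled frame; inactive
		// trackers return immediately inside TrackFaceAsync.
		private async Task TrackAllFacesAsync(UnmanagedImage image) {
			foreach (FaceTracker faceTracker in this.trackers) {
				await this.TrackFaceAsync(faceTracker, image);
			}
		}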
Example #34
        //private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        private async Task<int> ProcessCurrentVideoFrame()
        {
            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.

            /*if (!frameProcessingSemaphore.Wait(0))
             * {
             *  Debug.WriteLine("\t --> ProcessCurrentFrame call blocked !");
             *  return 0;
             * }*/
            Debug.WriteLine("\t --> ProcessCurrentFrame called properly!");
            // List of detected faces

            /* if (videoProcessor.GetLatestFrame() != null)
             * {*/
            Debug.WriteLine("\t --> calling videoProcessor.GetLatestFrame() !");
            frame = videoProcessor.GetLatestFrame();
            // frame = videoProcessor.latestFrame;

            /*  }
             * else
             * {
             *    //frameProcessingSemaphore.Release();
             *    return 0;
             * }*/
            Debug.WriteLine("\t --> videoProcessor.GetLatestFrame() called !");
            if (frame != null)
            {
                if (FaceTracker.IsBitmapPixelFormatSupported(frame.VideoMediaFrame.SoftwareBitmap.BitmapPixelFormat))
                {
                    Debug.WriteLine("\t --> Format: OK!");

                    // Await the asynchronous face-tracking call; calling GetResults() on a
                    // still-running IAsyncOperation would throw before the frame is processed.
                    this.faces = await this.faceTracker.ProcessNextFrameAsync(frame.VideoMediaFrame.GetVideoFrame());
                    Debug.WriteLine("\t --> Frame processed!");
                }
                else
                {
                    Debug.WriteLine("\t--> Format : NOT OK!");
                }
            }
            else
            {
                Debug.WriteLine("\t --> last frame was null !");
                return 0;
            }


            if (this.faces.Count == 0)
            {
                this.faces.Clear();
                Debug.WriteLine("No Face detected");
                //FaceCoord.x = "0";
                // FaceCoord.y = "0";
            }
            else
            {
                Debug.WriteLine("Face detected");
                latestfaces.Clear();

                foreach (DetectedFace face in faces)
                {
                    Debug.WriteLine("faces size: " + faces.Count.ToString());
                    latestfaces.Add(face.FaceBox);
                }
                foreach (BitmapBounds latestface in latestfaces)
                {
                    Debug.WriteLine("faces size: " + latestfaces.Count.ToString());
                    Debug.WriteLine("faceX" + latestface.X.ToString());
                    Debug.WriteLine("faceY" + latestface.Y.ToString());
                    //  FaceCoord.x = latestface.X.ToString();
                    //  FaceCoord.y = latestface.Y.ToString();
                }
            }
            // frameProcessingSemaphore.Release();
            return 0;
        }
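        // The commented-out guard inside ProcessCurrentVideoFrame hints at the intended
        // re-entrancy protection. A sketch of that pattern, assuming a hypothetical field
        // "frameProcessingSemaphore" of type SemaphoreSlim initialized with a count of 1:
        private async Task<int> ProcessCurrentVideoFrameGuarded()
        {
            // Skip this tick entirely if the previous frame is still being processed.
            if (!frameProcessingSemaphore.Wait(0))
            {
                return 0;
            }

            try
            {
                return await ProcessCurrentVideoFrame();
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }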
Example #35
        private void AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for changes in any of the data this function is receiving
                // and reset things appropriately.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.colorImage               = null;
                    this.colorImageFormat         = colorImageFrame.Format;
                    this.colorImageWritableBitmap = null;
                    this.ColorImage.Source        = null;
                    this.theMaterial.Brush        = null;
                }

                if (this.skeletonData != null && this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = null;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                if (this.colorImageWritableBitmap == null)
                {
                    this.colorImageWritableBitmap = new WriteableBitmap(
                        colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                    this.ColorImage.Source = this.colorImageWritableBitmap;
                    this.theMaterial.Brush = new ImageBrush(this.colorImageWritableBitmap)
                    {
                        ViewportUnits = BrushMappingMode.Absolute
                    };
                }

                if (this.skeletonData == null)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                // Copy data received in this event to our buffers.
                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                this.colorImageWritableBitmap.WritePixels(
                    new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                    this.colorImage,
                    colorImageFrame.Width * Bgr32BytesPerPixel,
                    0);

                // Find a skeleton to track.
                // First see if our old one is good.
                // When a skeleton is in PositionOnly tracking state, don't pick a new one
                // as it may become fully tracked again.
                Skeleton skeletonOfInterest =
                    this.skeletonData.FirstOrDefault(
                        skeleton =>
                        skeleton.TrackingId == this.trackingId &&
                        skeleton.TrackingState != SkeletonTrackingState.NotTracked);

                if (skeletonOfInterest == null)
                {
                    // Old one wasn't around.  Find any skeleton that is being tracked and use it.
                    skeletonOfInterest =
                        this.skeletonData.FirstOrDefault(
                            skeleton => skeleton.TrackingState == SkeletonTrackingState.Tracked);

                    if (skeletonOfInterest != null)
                    {
                        // This may be a different person so reset the tracker which
                        // could have tuned itself to the previous person.
                        if (this.faceTracker != null)
                        {
                            this.faceTracker.ResetTracking();
                        }

                        this.trackingId = skeletonOfInterest.TrackingId;
                    }
                }

                bool displayFaceMesh = false;

                if (skeletonOfInterest != null && skeletonOfInterest.TrackingState == SkeletonTrackingState.Tracked)
                {
                    if (this.faceTracker == null)
                    {
                        try
                        {
                            this.faceTracker = new FaceTracker(this.Kinect);
                        }
                        catch (InvalidOperationException)
                        {
                            // During some shutdown scenarios the FaceTracker
                            // is unable to be instantiated.  Catch that exception
                            // and don't track a face.
                            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                            this.faceTracker = null;
                        }
                    }

                    if (this.faceTracker != null)
                    {
                        FaceTrackFrame faceTrackFrame = this.faceTracker.Track(
                            this.colorImageFormat,
                            this.colorImage,
                            this.depthImageFormat,
                            this.depthImage,
                            skeletonOfInterest);

                        if (faceTrackFrame.TrackSuccessful)
                        {
                            this.UpdateMesh(faceTrackFrame);

                            // Only display the face mesh if there was a successful track.
                            displayFaceMesh = true;
                        }
                    }
                }
                else
                {
                    this.trackingId = -1;
                }

                this.viewport3d.Visibility = displayFaceMesh ? Visibility.Visible : Visibility.Hidden;
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
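        // DestroyFaceTracker is called above when the color or depth format changes but is not
        // shown in this excerpt. A sketch of its likely body, relying on the Kinect FaceTracker's
        // IDisposable implementation:
        private void DestroyFaceTracker()
        {
            if (this.faceTracker != null)
            {
                this.faceTracker.Dispose();
                this.faceTracker = null;
            }
        }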
            private bool CheckFace(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return false;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        //getting the Animation Unit Coefficients
                        this.AUs = frame.GetAnimationUnitCoefficients();
                        var jawLowerer   = AUs[AnimationUnit.JawLower];
                        var browLower    = AUs[AnimationUnit.BrowLower];
                        var browRaiser   = AUs[AnimationUnit.BrowRaiser];
                        var lipDepressor = AUs[AnimationUnit.LipCornerDepressor];
                        var lipRaiser    = AUs[AnimationUnit.LipRaiser];
                        var lipStretcher = AUs[AnimationUnit.LipStretcher];
                        // set up the output file (note: this re-creates the file, overwriting it, on every successful track)
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                                                 (@"C:\Users\Public\data.txt"))
                        {
                            file.WriteLine("FaceTrack Data, started recording at " + DateTime.Now.ToString("HH:mm:ss tt"));
                        }

                        //here is the algorithm to test different facial features

                        // BrowLower is unreliable if the subject wears glasses; it works without them

                        string state = "";

                        //surprised
                        if ((jawLowerer < 0.25 || jawLowerer > 0.25) && browLower < 0)
                        {
                            state = "surprised";
                        }
                        //smiling
                        if (lipStretcher > 0.4 || lipDepressor < 0)
                        {
                            state = "smiling";
                        }
                        //sad
                        if (browRaiser < 0 && lipDepressor > 0)
                        {
                            state = "sad";
                        }
                        //angry
                        if ((browLower > 0 && (jawLowerer > 0.25 || jawLowerer < -0.25)) ||
                            (browLower > 0 && lipDepressor > 0))
                        {
                            state = "angry";
                        }
                        //System.Diagnostics.Debug.WriteLine(browLower);

                        this.facePoints = frame.GetProjected3DShape();

                        if (states[currentState] == state)
                        {
                            Trace.WriteLine("Yo!");
                            return true;
                        }
                    }
                }

                return false;
            }
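            // CheckFace compares the detected expression against states[currentState], neither of
            // which is defined in this excerpt. A sketch of one plausible setup in which the user
            // must act out a fixed sequence of expressions (all names here are assumptions):
            private readonly string[] states = { "surprised", "smiling", "sad", "angry" };
            private int currentState = 0;

            private void AdvanceState(bool faceMatched)
            {
                // Move on to the next expected expression once the current one is matched.
                if (faceMatched && this.currentState < this.states.Length - 1)
                {
                    this.currentState++;
                }
            }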
Example #37
        private void Run()
        {
            gameObject.transform.localScale = new Vector3(imgTexture.width, imgTexture.height, 1);
            Debug.Log("Screen.width " + Screen.width + " Screen.height " + Screen.height + " Screen.orientation " + Screen.orientation);

            float width  = 0;
            float height = 0;

            width  = gameObject.transform.localScale.x;
            height = gameObject.transform.localScale.y;

            float widthScale  = (float)Screen.width / width;
            float heightScale = (float)Screen.height / height;

            if (widthScale < heightScale)
            {
                Camera.main.orthographicSize = (width * (float)Screen.height / (float)Screen.width) / 2;
            }
            else
            {
                Camera.main.orthographicSize = height / 2;
            }


            //initialize FaceTracker
            FaceTracker faceTracker = new FaceTracker(tracker_model_json_filepath);
            //initialize FaceTrackerParams
            FaceTrackerParams faceTrackerParams = new FaceTrackerParams();


            Mat imgMat = new Mat(imgTexture.height, imgTexture.width, CvType.CV_8UC4);

            Utils.texture2DToMat(imgTexture, imgMat);
            Debug.Log("imgMat dst ToString " + imgMat.ToString());


            CascadeClassifier cascade = new CascadeClassifier();

            cascade.load(haarcascade_frontalface_alt_xml_filepath);
            if (cascade.empty())
            {
                Debug.LogError("cascade file is not loaded. Please copy from “FaceTrackerExample/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
            }

            //convert image to greyscale
            Mat gray = new Mat();

            Imgproc.cvtColor(imgMat, gray, Imgproc.COLOR_RGBA2GRAY);


            MatOfRect faces = new MatOfRect();

            Imgproc.equalizeHist(gray, gray);

            cascade.detectMultiScale(gray, faces, 1.1f, 2, 0
                                     // | Objdetect.CASCADE_FIND_BIGGEST_OBJECT
                                     | Objdetect.CASCADE_SCALE_IMAGE, new Size(gray.cols() * 0.05, gray.cols() * 0.05), new Size());

            Debug.Log("faces " + faces.dump());

            if (faces.rows() > 0)
            {
                //add initial face points from MatOfRect
                faceTracker.addPoints(faces);
            }


            //track face points.if face points <= 0, always return false.
            if (faceTracker.track(imgMat, faceTrackerParams))
            {
                faceTracker.draw(imgMat, new Scalar(255, 0, 0, 255), new Scalar(0, 255, 0, 255));
            }



            Texture2D texture = new Texture2D(imgMat.cols(), imgMat.rows(), TextureFormat.RGBA32, false);

            Utils.matToTexture2D(imgMat, texture);

            gameObject.GetComponent<Renderer>().material.mainTexture = texture;

            cascade.Dispose();
        }
        /// <summary>
        /// Responds when we navigate to this page.
        /// </summary>
        /// <param name="e">Event data</param>
        protected override async void OnNavigatedTo(NavigationEventArgs e)
        {
            this.rootPage = MainPage.Current;

            // The 'await' operation can only be used from within an async method but class constructors
            // cannot be labeled as async, and so we'll initialize FaceTracker here.
            if (this.faceTracker == null)
            {
                this.faceTracker = await FaceTracker.CreateAsync();
            }
        }
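        // A plausible counterpart when leaving the page (not part of this excerpt): cancel a
        // hypothetical frame-processing timer and drop the tracker so the page can be revisited
        // cleanly. The timer field name is an assumption.
        protected override void OnNavigatedFrom(NavigationEventArgs e)
        {
            if (this.frameProcessingTimer != null)
            {
                this.frameProcessingTimer.Cancel();
                this.frameProcessingTimer = null;
            }

            this.faceTracker = null;
        }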