/// <summary>
/// Updates the face tracking information for this skeleton from the latest
/// color/depth frame pair. Does nothing unless the skeleton is fully tracked.
/// </summary>
internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
{
    this.skeletonTrackingState = skeletonOfInterest.TrackingState;

    // Face tracking is only meaningful for a fully tracked skeleton.
    if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
    {
        return;
    }

    // Lazily construct the tracker on first use.
    if (this.faceTracker == null)
    {
        try
        {
            this.faceTracker = new FaceTracker(kinectSensor);
        }
        catch (InvalidOperationException)
        {
            // During some shutdown scenarios the FaceTracker cannot be
            // instantiated; swallow the exception and skip face tracking.
            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
            this.faceTracker = null;
        }
    }

    if (this.faceTracker == null)
    {
        return;
    }

    FaceTrackFrame frame = this.faceTracker.Track(
        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
    if (this.lastFaceTrackSucceeded)
    {
        this.faceRect = frame.FaceRect;
    }
}
/// <summary>
/// Starts face tracking from Kinect input data. Track() detects a face
/// based on the passed parameters, then identifies characteristic
/// points and begins tracking. The first call to this API is more
/// expensive, but if the tracking succeeds then subsequent calls use
/// the tracking information generated from first call and is faster,
/// until a tracking failure happens.
/// </summary>
/// <param name="colorImageFormat">format of the colorImage array</param>
/// <param name="colorImage">Input color image frame retrieved from Kinect sensor</param>
/// <param name="depthImageFormat">format of the depthImage array</param>
/// <param name="depthImage">Input depth image frame retrieved from Kinect sensor</param>
/// <param name="skeletonOfInterest">Input skeleton to track. Head &amp; shoulder joints in the skeleton are used to calculate the head vector</param>
/// <param name="regionOfInterest">Region of interest in the passed video frame where the face tracker should search for a face to initiate tracking.
/// Passing Rectangle.Empty (default) causes the entire frame to be searched.</param>
/// <returns>Returns computed face tracking results for this image frame</returns>
/// <exception cref="InvalidOperationException">Thrown when the tracker was initialized for video/image input, or a frame format differs from initialization.</exception>
/// <exception cref="ArgumentNullException">Thrown when colorImage or depthImage is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">Thrown when an image buffer length does not match the initialization configuration.</exception>
private FaceTrackFrame Track(
    ColorImageFormat colorImageFormat,
    byte[] colorImage,
    DepthImageFormat depthImageFormat,
    short[] depthImage,
    Skeleton skeletonOfInterest,
    Rect regionOfInterest)
{
    // Validate all inputs BEFORE touching the counters and stopwatches.
    // (Previously totalTracks was incremented and trackStopwatch started
    // first, so every rejected call inflated the track count and left the
    // stopwatch running, corrupting all subsequent timing statistics.)
    if (this.operationMode != OperationMode.Kinect)
    {
        throw new InvalidOperationException(
            "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
    }

    if (colorImage == null)
    {
        throw new ArgumentNullException("colorImage");
    }

    if (depthImage == null)
    {
        throw new ArgumentNullException("depthImage");
    }

    if (colorImageFormat != this.initializationColorImageFormat)
    {
        throw new InvalidOperationException("Color image frame format different from initialization");
    }

    if (depthImageFormat != this.initializationDepthImageFormat)
    {
        throw new InvalidOperationException("Depth image frame format different from initialization");
    }

    if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
    {
        throw new ArgumentOutOfRangeException(
            "colorImage", "Color image data size needs to match initialization configuration.");
    }

    if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
    {
        throw new ArgumentOutOfRangeException(
            "depthImage", "Depth image data size needs to match initialization configuration.");
    }

    this.totalTracks++;
    this.trackStopwatch.Start();

    // Head and shoulder positions (when available) seed the native tracker
    // with an approximate head location and orientation.
    HeadPoints headPointsObj = null;
    Vector3DF[] headPoints = GetHeadPointsFromSkeleton(skeletonOfInterest);
    if (headPoints != null && headPoints.Length == 2)
    {
        headPointsObj = new HeadPoints { Points = headPoints };
    }

    // Copy the managed frame buffers into the native-compatible images.
    this.copyStopwatch.Start();
    this.colorFaceTrackingImage.CopyFrom(colorImage);
    this.depthFaceTrackingImage.CopyFrom(depthImage);
    this.copyStopwatch.Stop();

    var sensorData = new SensorData(
        this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
    FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

    // ContinueTracking is cheaper than StartTracking; use it while the
    // previous frame tracked successfully, otherwise re-detect from scratch.
    int hr;
    this.startOrContinueTrackingStopwatch.Start();
    if (this.trackSucceeded)
    {
        hr = this.faceTrackerInteropPtr.ContinueTracking(ref faceTrackSensorData, headPointsObj, this.frame.ResultPtr);
    }
    else
    {
        hr = this.faceTrackerInteropPtr.StartTracking(
            ref faceTrackSensorData, ref regionOfInterest, headPointsObj, this.frame.ResultPtr);
    }

    this.startOrContinueTrackingStopwatch.Stop();

    // Both the call and the per-frame result status must report success.
    this.trackSucceeded = hr == (int)ErrorCode.Success && this.frame.Status == ErrorCode.Success;
    this.trackStopwatch.Stop();

    if (this.trackSucceeded)
    {
        ++this.totalSuccessTracks;
        this.totalSuccessTrackMs += this.trackStopwatch.ElapsedMilliseconds - this.lastSuccessTrackElapsedMs;
        this.lastSuccessTrackElapsedMs = this.trackStopwatch.ElapsedMilliseconds;
    }

    return this.frame;
}
/// <summary>
/// Starts face tracking from Kinect input data without a skeleton hint.
/// Track() detects a face based on the passed parameters, then identifies
/// characteristic points and begins tracking. The first call is more
/// expensive; once tracking succeeds, subsequent calls reuse the tracking
/// state and are faster until a tracking failure occurs.
/// </summary>
/// <param name="colorImageFormat">format of the colorImage array</param>
/// <param name="colorImage">Input color image frame retrieved from Kinect sensor</param>
/// <param name="depthImageFormat">format of the depthImage array</param>
/// <param name="depthImage">Input depth image frame retrieved from Kinect sensor</param>
/// <param name="regionOfInterest">Region of interest in the passed video frame where the face tracker should search for a face to initiate tracking.
/// Passing Rectangle.Empty (default) causes the entire frame to be searched.</param>
/// <returns>Returns computed face tracking results for this image frame</returns>
public FaceTrackFrame Track(
    ColorImageFormat colorImageFormat,
    byte[] colorImage,
    DepthImageFormat depthImageFormat,
    short[] depthImage,
    Rect regionOfInterest)
{
    // Delegate to the full overload with no skeleton of interest.
    return this.Track(
        colorImageFormat,
        colorImage,
        depthImageFormat,
        depthImage,
        null,
        regionOfInterest);
}
/// <summary>
/// Value equality: two rectangles are equal exactly when all four edge
/// coordinates (Left, Top, Right, Bottom) match.
/// </summary>
/// <param name="other">The rectangle to compare against.</param>
/// <returns>true when every edge coordinate is equal; otherwise false.</returns>
public bool Equals(Rect other)
{
    return Left == other.Left
        && Top == other.Top
        && Right == other.Right
        && Bottom == other.Bottom;
}
/// <summary>
/// Runs face detection (not tracking) on a single Kinect color/depth frame
/// pair and returns the candidate face rectangles found in the given region
/// of interest, each weighted by detection confidence.
/// </summary>
/// <param name="colorImageFormat">format of the colorImage array</param>
/// <param name="colorImage">Input color image frame retrieved from Kinect sensor</param>
/// <param name="depthImageFormat">format of the depthImage array</param>
/// <param name="depthImage">Input depth image frame retrieved from Kinect sensor</param>
/// <param name="roi">Region of interest in the frame to search for faces.</param>
/// <returns>Up to MaxDetectedFaces weighted face rectangles reported by the native detector.</returns>
/// <exception cref="InvalidOperationException">Thrown when the tracker was initialized for video/image input, or a frame format differs from initialization.</exception>
/// <exception cref="ArgumentNullException">Thrown when colorImage or depthImage is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">Thrown when an image buffer length does not match the initialization configuration.</exception>
public IEnumerable<WeightedRect> DetectFaces(
    ColorImageFormat colorImageFormat,
    byte[] colorImage,
    DepthImageFormat depthImageFormat,
    short[] depthImage,
    Rect roi)
{
    if (this.operationMode != OperationMode.Kinect)
    {
        throw new InvalidOperationException(
            "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
    }

    if (colorImage == null)
    {
        throw new ArgumentNullException("colorImage");
    }

    if (depthImage == null)
    {
        throw new ArgumentNullException("depthImage");
    }

    if (colorImageFormat != this.initializationColorImageFormat)
    {
        throw new InvalidOperationException("Color image frame format different from initialization");
    }

    if (depthImageFormat != this.initializationDepthImageFormat)
    {
        throw new InvalidOperationException("Depth image frame format different from initialization");
    }

    if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
    {
        // Fixed message grammar ("is needs to" -> "needs to").
        throw new ArgumentOutOfRangeException(
            "colorImage", "Color image data size needs to match initialization configuration.");
    }

    if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
    {
        throw new ArgumentOutOfRangeException(
            "depthImage", "Depth image data size needs to match initialization configuration.");
    }

    // Copy the managed frame buffers into the native-compatible images.
    this.copyStopwatch.Start();
    this.colorFaceTrackingImage.CopyFrom(colorImage);
    this.depthFaceTrackingImage.CopyFrom(depthImage);
    this.copyStopwatch.Stop();

    var sensorData = new SensorData(
        this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
    FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

    // Capacity passed to the native detector; on return, count holds the
    // number of entries the detector actually filled in.
    const uint MaxDetectedFaces = 4;
    uint count = MaxDetectedFaces;
    WeightedRect[] rects = new WeightedRect[count];

    // Pin the managed array so the native detector can write into it, and
    // guarantee the pin is released even if the interop call throws.
    int hr;
    GCHandle handle = GCHandle.Alloc(rects, GCHandleType.Pinned);
    try
    {
        IntPtr rectsPtr = handle.AddrOfPinnedObject();
        hr = this.faceTrackerInteropPtr.DetectFaces(ref faceTrackSensorData, ref roi, rectsPtr, ref count);
    }
    finally
    {
        if (handle.IsAllocated)
        {
            handle.Free();
        }
    }

    this.trackSucceeded = hr == (int)ErrorCode.Success;

    // NOTE(review): count is assumed valid even when hr reports failure —
    // confirm the native API zeroes it on error.
    return rects.Take((int)count);
}
/// <summary>
/// Updates the face tracking information for this skeleton, and on the
/// first successful face track runs face recognition to tag the skeleton
/// with the recognized person's name in Global.trackedPeople.
/// </summary>
internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
{
    this.skeletonTrackingState = skeletonOfInterest.TrackingState;

    if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
    {
        // Deliberately NOT returning here: face tracking is still attempted
        // for non-tracked skeletons (see the hack note below).
        // if the current skeleton is not tracked, track it now
        //kinectSensor.SkeletonStream.ChooseSkeletons(skeletonOfInterest.TrackingId);
    }

    if (this.faceTracker == null)
    {
        try
        {
            this.faceTracker = new FaceTracker(kinectSensor);
        }
        catch (InvalidOperationException)
        {
            // During some shutdown scenarios the FaceTracker
            // is unable to be instantiated. Catch that exception
            // and don't track a face.
            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
            this.faceTracker = null;
        }
    }

    if (this.faceTracker != null)
    {
        // hack to make this face tracking detect the face even when it is not actually tracked
        // <!>need to confirm if it works
        //skeletonOfInterest.TrackingState = SkeletonTrackingState.Tracked;

        FaceTrackFrame frame = this.faceTracker.Track(
            colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

        this.lastFaceTrackSucceeded = frame.TrackSuccessful;
        if (this.lastFaceTrackSucceeded)
        {
            if (faceTriangles == null)
            {
                // only need to get this once. It doesn't change.
                faceTriangles = frame.GetTriangles();
            }

            if (faceTag == null)
            {
                // Run face recognition once per skeleton; cache the result.
                faceTag = new FaceRecognizer().getFaceTag(this.colorImageBmp);
                if (faceTag != null)
                {
                    Global.StatusBarText.Text = "Found " + faceTag + "!";

                    // The indexer adds the key when absent and overwrites when
                    // present, so the previous ContainsKey/Add branching was a
                    // redundant double lookup.
                    Global.trackedPeople[skeletonOfInterest] = faceTag;
                }
            }

            this.facePoints = frame.GetProjected3DShape();
            this.faceRect = frame.FaceRect;
        }
    }
}
/// <summary>
/// Computes the intersection of this rectangle with another. When the
/// rectangles do not overlap, the result is a zero-area rectangle anchored
/// at the max of the two top-left corners.
/// </summary>
/// <param name="other">The rectangle to intersect with.</param>
/// <returns>The overlapping region, or a degenerate (zero width/height) rectangle when disjoint.</returns>
public Rect Intersection(Rect other)
{
    int left = Math.Max(this.Left, other.Left);
    int top = Math.Max(this.Top, other.Top);

    // Clamp so the right/bottom edges never precede the left/top edges;
    // equivalent to adding a non-negative width/height to the anchor.
    int right = Math.Max(left, Math.Min(this.Right, other.Right));
    int bottom = Math.Max(top, Math.Min(this.Bottom, other.Bottom));

    return new Rect(left, top, right, bottom);
}
/// <summary>
/// Updates the face tracking information for this skeleton, and on the
/// first successful face track runs face recognition to tag the skeleton
/// with the recognized person's name in Global.trackedPeople.
/// </summary>
internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
{
    this.skeletonTrackingState = skeletonOfInterest.TrackingState;

    if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
    {
        // Deliberately NOT returning here: face tracking is still attempted
        // for non-tracked skeletons (see the hack note below).
        // if the current skeleton is not tracked, track it now
        //kinectSensor.SkeletonStream.ChooseSkeletons(skeletonOfInterest.TrackingId);
    }

    if (this.faceTracker == null)
    {
        try
        {
            this.faceTracker = new FaceTracker(kinectSensor);
        }
        catch (InvalidOperationException)
        {
            // During some shutdown scenarios the FaceTracker
            // is unable to be instantiated. Catch that exception
            // and don't track a face.
            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
            this.faceTracker = null;
        }
    }

    if (this.faceTracker != null)
    {
        // hack to make this face tracking detect the face even when it is not actually tracked
        // <!>need to confirm if it works
        //skeletonOfInterest.TrackingState = SkeletonTrackingState.Tracked;

        FaceTrackFrame frame = this.faceTracker.Track(
            colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

        this.lastFaceTrackSucceeded = frame.TrackSuccessful;
        if (this.lastFaceTrackSucceeded)
        {
            if (faceTriangles == null)
            {
                // only need to get this once. It doesn't change.
                faceTriangles = frame.GetTriangles();
            }

            if (faceTag == null)
            {
                // Run face recognition once per skeleton; cache the result.
                faceTag = new FaceRecognizer().getFaceTag(this.colorImageBmp);
                if (faceTag != null)
                {
                    Global.StatusBarText.Text = "Found " + faceTag + "!";

                    // The indexer adds the key when absent and overwrites when
                    // present, so the previous ContainsKey/Add branching was a
                    // redundant double lookup.
                    Global.trackedPeople[skeletonOfInterest] = faceTag;
                }
            }

            this.facePoints = frame.GetProjected3DShape();
            this.faceRect = frame.FaceRect;
        }
    }
}
/// <summary>
/// Creates a tracker seeded with the video-frame region in which the face
/// tracker should search for a face.
/// </summary>
/// <param name="regionOfInterest">Region of interest within the frame; stored in FaceRect.</param>
public RegionFaceTracker(Microsoft.Kinect.Toolkit.FaceTracking.Rect regionOfInterest)
{
    FaceRect = regionOfInterest;
}