Example #1
        /// <summary>
        /// Starts face tracking from Kinect input data. Track() detects a face
        /// based on the passed parameters, then identifies characteristic
        /// points and begins tracking. The first call to this API is more
        /// expensive, but if tracking succeeds, subsequent calls reuse the
        /// tracking information generated by the first call and are faster,
        /// until a tracking failure happens.
        /// </summary>
        /// <param name="colorImageFormat">format of the colorImage array</param>
        /// <param name="colorImage">Input color image frame retrieved from Kinect sensor</param>
        /// <param name="depthImageFormat">format of the depthImage array</param>
        /// <param name="depthImage">Input depth image frame retrieved from Kinect sensor</param>
        /// <param name="skeletonOfInterest">Input skeleton to track. Head & shoulder joints in the skeleton are used to calculate the head vector</param>
        /// <param name="regionOfInterest">Region of interest in the passed video frame where the face tracker should search for a face to initiate tracking.
        /// Passing Rectangle.Empty (default) causes the entire frame to be searched.</param>
        /// <returns>Returns computed face tracking results for this image frame</returns>
        private FaceTrackFrame Track(
            ColorImageFormat colorImageFormat,
            byte[] colorImage,
            DepthImageFormat depthImageFormat,
            short[] depthImage,
            Skeleton skeletonOfInterest,
            Rect regionOfInterest)
        {
            this.totalTracks++;
            this.trackStopwatch.Start();

            if (this.operationMode != OperationMode.Kinect)
            {
                throw new InvalidOperationException(
                          "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
            }

            if (colorImage == null)
            {
                throw new ArgumentNullException("colorImage");
            }

            if (depthImage == null)
            {
                throw new ArgumentNullException("depthImage");
            }

            if (colorImageFormat != this.initializationColorImageFormat)
            {
                throw new InvalidOperationException("Color image frame format different from initialization");
            }

            if (depthImageFormat != this.initializationDepthImageFormat)
            {
                throw new InvalidOperationException("Depth image frame format different from initialization");
            }

            if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("colorImage", "Color image data size is needs to match initialization configuration.");
            }

            if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("depthImage", "Depth image data size is needs to match initialization configuration.");
            }

            int hr;
            HeadPoints headPointsObj = null;

            Vector3DF[] headPoints = GetHeadPointsFromSkeleton(skeletonOfInterest);

            if (headPoints != null && headPoints.Length == 2)
            {
                headPointsObj = new HeadPoints { Points = headPoints };
            }

            this.copyStopwatch.Start();
            this.colorFaceTrackingImage.CopyFrom(colorImage);
            this.depthFaceTrackingImage.CopyFrom(depthImage);
            this.copyStopwatch.Stop();

            var sensorData = new SensorData(this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
            FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

            this.startOrContinueTrackingStopwatch.Start();
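            // When the previous frame tracked successfully, continue from the
            // cached tracker state; otherwise run the more expensive detection
            // pass. This variant passes null in place of the head-points hint
            // and the region of interest (see the inline /*...*/ comments), so
            // the native tracker searches unaided.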
            if (this.trackSucceeded)
            {
                hr = this.faceTrackerInteropPtr.ContinueTracking(ref faceTrackSensorData, null /*headPointsObj*/, this.frame.ResultPtr);
            }
            else
            {
                hr = this.faceTrackerInteropPtr.StartTracking(
                    ref faceTrackSensorData, null /*ref regionOfInterest*/, null /*headPointsObj*/, this.frame.ResultPtr);
            }

            this.startOrContinueTrackingStopwatch.Stop();

            this.trackSucceeded = hr == (int)ErrorCode.Success && this.frame.Status == ErrorCode.Success;
            this.trackStopwatch.Stop();

            if (this.trackSucceeded)
            {
                ++this.totalSuccessTracks;
                this.totalSuccessTrackMs += this.trackStopwatch.ElapsedMilliseconds - this.lastSuccessTrackElapsedMs;
                this.lastSuccessTrackElapsedMs = this.trackStopwatch.ElapsedMilliseconds;
            }

            return this.frame;
        }
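
A minimal sketch of how this private Track overload might be driven from a Kinect v1 AllFramesReady handler in the same class. The handler name and the colorPixels/depthPixels/skeletons buffer fields are assumptions (they would be allocated from the sensor's stream settings at startup, e.g. via PixelDataLength and SkeletonArrayLength), and new Rect() stands in for the empty region of interest described in the XML docs; this is not part of the original example.

        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                // Frames can arrive out of sync; skip incomplete sets.
                if (colorFrame == null || depthFrame == null || skeletonFrame == null)
                {
                    return;
                }

                colorFrame.CopyPixelDataTo(this.colorPixels);
                depthFrame.CopyPixelDataTo(this.depthPixels);
                skeletonFrame.CopySkeletonDataTo(this.skeletons);

                foreach (Skeleton skeleton in this.skeletons)
                {
                    if (skeleton.TrackingState != SkeletonTrackingState.Tracked)
                    {
                        continue;
                    }

                    // A default Rect plays the role of the "empty" region of
                    // interest from the docs: search the entire frame.
                    FaceTrackFrame faceFrame = this.Track(
                        colorFrame.Format,
                        this.colorPixels,
                        depthFrame.Format,
                        this.depthPixels,
                        skeleton,
                        new Rect());

                    if (faceFrame.TrackSuccessful)
                    {
                        // Consume faceFrame.FaceRect, faceFrame.GetProjected3DShape(), etc.
                    }

                    break; // track at most one skeleton per frame
                }
            }
        }
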
Example #2
        /// <summary>
        /// Starts face tracking from Kinect input data. Track() detects a face
        /// based on the passed parameters, then identifies characteristic
        /// points and begins tracking. The first call to this API is more
        /// expensive, but if tracking succeeds, subsequent calls reuse the
        /// tracking information generated by the first call and are faster,
        /// until a tracking failure happens.
        /// </summary>
        /// <param name="colorImageFormat">format of the colorImage array</param>
        /// <param name="colorImage">Input color image frame retrieved from Kinect sensor</param>
        /// <param name="depthImageFormat">format of the depthImage array</param>
        /// <param name="depthImage">Input depth image frame retrieved from Kinect sensor</param>
        /// <param name="skeletonOfInterest">Input skeleton to track. Head & shoulder joints in the skeleton are used to calculate the head vector</param>
        /// <param name="regionOfInterest">Region of interest in the passed video frame where the face tracker should search for a face to initiate tracking. 
        /// Passing Rectangle.Empty (default) causes the entire frame to be searched.</param>
        /// <returns>Returns computed face tracking results for this image frame</returns>
        private FaceTrackFrame Track(
            ColorImageFormat colorImageFormat,
            byte[] colorImage,
            DepthImageFormat depthImageFormat,
            short[] depthImage,
            Skeleton skeletonOfInterest,
            Rect regionOfInterest)
        {
            this.totalTracks++;
            this.trackStopwatch.Start();

            if (this.operationMode != OperationMode.Kinect)
            {
                throw new InvalidOperationException(
                    "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
            }

            if (colorImage == null)
            {
                throw new ArgumentNullException("colorImage");
            }

            if (depthImage == null)
            {
                throw new ArgumentNullException("depthImage");
            }

            if (colorImageFormat != this.initializationColorImageFormat)
            {
                throw new InvalidOperationException("Color image frame format different from initialization");
            }

            if (depthImageFormat != this.initializationDepthImageFormat)
            {
                throw new InvalidOperationException("Depth image frame format different from initialization");
            }

            if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("colorImage", "Color image data size is needs to match initialization configuration.");
            }

            if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("depthImage", "Depth image data size is needs to match initialization configuration.");
            }

            int hr;
            HeadPoints headPointsObj = null;
            Vector3DF[] headPoints = GetHeadPointsFromSkeleton(skeletonOfInterest);

            if (headPoints != null && headPoints.Length == 2)
            {
                headPointsObj = new HeadPoints { Points = headPoints };
            }

            this.copyStopwatch.Start();
            this.colorFaceTrackingImage.CopyFrom(colorImage);
            this.depthFaceTrackingImage.CopyFrom(depthImage);
            this.copyStopwatch.Stop();

            var sensorData = new SensorData(this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
            FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

            this.startOrContinueTrackingStopwatch.Start();
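            // When the previous frame tracked successfully, continue from the
            // cached tracker state; otherwise run the more expensive detection
            // pass, seeded with the head-points hint and the caller-supplied
            // region of interest.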
            if (this.trackSucceeded)
            {
                hr = this.faceTrackerInteropPtr.ContinueTracking(ref faceTrackSensorData, headPointsObj, this.frame.ResultPtr);
            }
            else
            {
                hr = this.faceTrackerInteropPtr.StartTracking(
                    ref faceTrackSensorData, ref regionOfInterest, headPointsObj, this.frame.ResultPtr);
            }

            this.startOrContinueTrackingStopwatch.Stop();

            this.trackSucceeded = hr == (int)ErrorCode.Success && this.frame.Status == ErrorCode.Success;
            this.trackStopwatch.Stop();

            if (this.trackSucceeded)
            {
                ++this.totalSuccessTracks;
                this.totalSuccessTrackMs += this.trackStopwatch.ElapsedMilliseconds - this.lastSuccessTrackElapsedMs;
                this.lastSuccessTrackElapsedMs = this.trackStopwatch.ElapsedMilliseconds;
            }

            return this.frame;
        }
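
Unlike Example #1, this variant actually forwards headPointsObj and the caller's regionOfInterest to the native tracker, so the head-pose hint computed from the skeleton matters here. Both listings call a GetHeadPointsFromSkeleton helper that neither includes; the following is a plausible reconstruction, assuming the standard Kinect v1 Skeleton/JointType API and the toolkit's Vector3DF(x, y, z) constructor, not the examples' actual implementation.

        private static Vector3DF[] GetHeadPointsFromSkeleton(Skeleton skeleton)
        {
            if (skeleton == null)
            {
                return null;
            }

            // The head vector runs from the shoulder center ("neck") to the head.
            Joint neck = skeleton.Joints[JointType.ShoulderCenter];
            Joint head = skeleton.Joints[JointType.Head];

            // Returning null makes the callers above skip the HeadPoints hint
            // rather than feed the tracker unreliable joint positions.
            if (neck.TrackingState != JointTrackingState.Tracked ||
                head.TrackingState != JointTrackingState.Tracked)
            {
                return null;
            }

            return new[]
            {
                new Vector3DF(neck.Position.X, neck.Position.Y, neck.Position.Z),
                new Vector3DF(head.Position.X, head.Position.Y, head.Position.Z)
            };
        }

Returning exactly two points satisfies the headPoints.Length == 2 check that both examples perform before building the HeadPoints hint.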