/// <summary>
/// Initializes a new instance of the TrackingResults class from a set of Kinect face points
/// </summary>
//public TrackingResults(EnumIndexableCollection<FeaturePoint, PointF> facePoints)
public TrackingResults(FaceTrackFrame faceTrackFrame)
{
    this.FacePoints = this.FaceBoundaryPoints(faceTrackFrame.GetProjected3DShape());
    this.Face3DPoints = faceTrackFrame.Get3DShape();

    // Calculate the face rectangle manually from the face points
    var rectX = this.FacePoints.Min(x => x.X);
    var rectWidth = this.FacePoints.Max(x => x.X) - rectX;
    var rectY = this.FacePoints.Min(x => x.Y);
    var rectHeight = this.FacePoints.Max(x => x.Y) - rectY;

    this.FaceRect = new System.Drawing.Rectangle(rectX, rectY, rectWidth, rectHeight);
}
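The constructor above calls a FaceBoundaryPoints helper that is not included in this snippet. A minimal sketch of what such a helper might look like, assuming it simply converts each projected feature point to an integer System.Drawing.Point so the Min/Max calls above yield an integer rectangle:

// Hypothetical sketch only - the original FaceBoundaryPoints implementation is not shown here.
// Converts every projected feature point to an integer point; a real implementation might
// restrict this to a subset of face-outline feature points.
private List<System.Drawing.Point> FaceBoundaryPoints(EnumIndexableCollection<FeaturePoint, PointF> projectedShape)
{
    var points = new List<System.Drawing.Point>();
    foreach (FeaturePoint featurePoint in Enum.GetValues(typeof(FeaturePoint)))
    {
        PointF projected = projectedShape[featurePoint];
        points.Add(new System.Drawing.Point((int)projected.X, (int)projected.Y));
    }

    return points;
}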
// create face with frame and ID
public Face(FaceTrackFrame f, int i)
{
    this.Frame = f;
    this.Vector = getVector(f);
    this.LastVectorTime = DateTime.Now;
    this.Id = i; // TODO make this unique!
    this.TimeCreated = DateTime.Now;
    this.ProcessingBetaface = false;
    this.TrackingBody = true;
    this.BetafaceReturned = false;

    foreach (Guid ad in DatabaseManager.getAllAds())
    {
        this.SecondsFacing.Add(ad, Double.NaN);
    }
}
void EnqueueFaceTracking(KinectSensor sensor, int user, Skeleton s)
{
    Dictionary<int, FaceTracker> faceTrackers = this.faceTrackers[sensor];

    if (!faceTrackers.ContainsKey(user))
    {
        if (faceTrackers.Count > 10)
        {
            faceTrackers.Clear();
        }

        try
        {
            faceTrackers.Add(user, new Microsoft.Kinect.Toolkit.FaceTracking.FaceTracker(sensor));
        }
        catch (InvalidOperationException)
        {
            // During some shutdown scenarios the FaceTracker
            // is unable to be instantiated. Catch that exception
            // and don't track a face.
            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
            return;
        }
    }

    Microsoft.Kinect.Toolkit.FaceTracking.FaceTrackFrame faceFrame = faceTrackers[user].Track(
        sensor.ColorStream.Format, colorPixelData[sensor],
        sensor.DepthStream.Format, depthPixelData[sensor],
        s);

    if (faceFrame.TrackSuccessful)
    {
        if (faceTracking2DMesh)
        {
            // TODO
            // faceFrame.Get3DShape[FeaturePoint.]
        }

        if (faceTrackingHeadPose)
        {
            EnqueueFacePoseMessage(sensorId, user,
                s.Joints[JointType.Head].Position.X, s.Joints[JointType.Head].Position.Y, s.Joints[JointType.Head].Position.Z,
                faceFrame.Rotation.X, faceFrame.Rotation.Y, faceFrame.Rotation.Z,
                useUnixEpochTime ? getUnixEpochTime() : stopwatch.ElapsedMilliseconds);
        }

        if (faceTrackingAnimationUnits)
        {
            EnqueueFaceAnimationMessage(sensorId, user, faceFrame.GetAnimationUnitCoefficients(),
                useUnixEpochTime ? getUnixEpochTime() : stopwatch.ElapsedMilliseconds);
        }
    }
}
/// <summary>
/// Gets the 3D shape associated with the result
/// </summary>
/// <param name="faceTrackFrame">
/// result object
/// </param>
/// <returns>
/// the 3d shape
/// </returns>
public Vector3DF[] Get3DShape(FaceTrackFrame faceTrackFrame)
{
    IntPtr shapeUnitCoeffPtr;
    uint shapeUnitCount = 0;
    IntPtr animUnitCoeffPtr;
    uint animUnitPointsCount;
    bool hasSuConverged;
    float scale;

    faceTrackFrame.ResultPtr.GetAUCoefficients(out animUnitCoeffPtr, out animUnitPointsCount);
    this.faceTracker.FaceTrackerPtr.GetShapeUnits(out scale, out shapeUnitCoeffPtr, ref shapeUnitCount, out hasSuConverged);

    return this.Get3DShape(
        shapeUnitCoeffPtr,
        shapeUnitCount,
        animUnitCoeffPtr,
        animUnitPointsCount,
        faceTrackFrame.Scale,
        faceTrackFrame.Rotation,
        faceTrackFrame.Translation);
}
/// <summary>
/// Helper method that does the core instantiation and initialization of the face tracking engine
/// </summary>
/// <param name="newColorCameraConfig">Color camera configuration</param>
/// <param name="newDepthCameraConfig">Depth camera configuration</param>
/// <param name="colorImagePtr">Allows the face tracking engine to read the color image from a native memory pointer.
/// If set to IntPtr.Zero, image data needs to be provided to this instance for tracking.</param>
/// <param name="depthImagePtr">Allows the face tracking engine to read the depth image from a native memory pointer.
/// If set to IntPtr.Zero, image data needs to be provided to this instance for tracking.</param>
/// <param name="newRegisterDepthToColorDelegate">Callback that maps depth pixels to color pixels</param>
private void Initialize(
    CameraConfig newColorCameraConfig,
    CameraConfig newDepthCameraConfig,
    IntPtr colorImagePtr,
    IntPtr depthImagePtr,
    FaceTrackingRegisterDepthToColor newRegisterDepthToColorDelegate)
{
    if (newColorCameraConfig == null)
    {
        throw new ArgumentNullException("newColorCameraConfig");
    }

    if (newDepthCameraConfig == null)
    {
        throw new ArgumentNullException("newDepthCameraConfig");
    }

    if (newRegisterDepthToColorDelegate == null)
    {
        throw new ArgumentNullException("newRegisterDepthToColorDelegate");
    }

    // initialize perf counters
    this.totalTracks = 0;
    this.trackStopwatch.Reset();

    // get configuration & trace settings
    this.traceLevel = new TraceSwitch(FaceTrackTraceSwitchName, FaceTrackTraceSwitchName).Level;

    this.videoCameraConfig = newColorCameraConfig;
    this.depthCameraConfig = newDepthCameraConfig;
    this.registerDepthToColorDelegate = newRegisterDepthToColorDelegate;

    this.faceTrackerInteropPtr = NativeMethods.FTCreateFaceTracker(IntPtr.Zero);
    if (this.faceTrackerInteropPtr == null)
    {
        throw new InsufficientMemoryException("Cannot create face tracker.");
    }

    IntPtr funcPtr = Marshal.GetFunctionPointerForDelegate(this.registerDepthToColorDelegate);
    if (funcPtr == IntPtr.Zero)
    {
        throw new InsufficientMemoryException("Cannot setup callback for retrieving color to depth pixel mapping");
    }

    int hr = this.faceTrackerInteropPtr.Initialize(this.videoCameraConfig, this.depthCameraConfig, funcPtr, null);
    if (hr != 0)
    {
        throw new InvalidOperationException(
            string.Format(CultureInfo.CurrentCulture, "Failed to initialize face tracker - Error code from native=0x{0:X}", hr));
    }

    this.frame = this.CreateResult(out hr);
    if (this.frame == null || hr != 0)
    {
        throw new InvalidOperationException(
            string.Format(CultureInfo.CurrentCulture, "Failed to create face tracking result. Error code from native=0x{0:X}", hr));
    }

    this.colorFaceTrackingImage = new Image();
    if (colorImagePtr == IntPtr.Zero)
    {
        this.colorFaceTrackingImage.Allocate(
            this.videoCameraConfig.Width, this.videoCameraConfig.Height, this.videoCameraConfig.ImageFormat);
    }
    else
    {
        this.colorFaceTrackingImage.Attach(
            this.videoCameraConfig.Width, this.videoCameraConfig.Height, colorImagePtr, this.videoCameraConfig.ImageFormat, this.videoCameraConfig.Stride);
    }

    this.depthFaceTrackingImage = new Image();
    if (depthImagePtr == IntPtr.Zero)
    {
        this.depthFaceTrackingImage.Allocate(
            this.depthCameraConfig.Width, this.depthCameraConfig.Height, this.depthCameraConfig.ImageFormat);
    }
    else
    {
        this.depthFaceTrackingImage.Attach(
            this.depthCameraConfig.Width, this.depthCameraConfig.Height, depthImagePtr, this.depthCameraConfig.ImageFormat, this.depthCameraConfig.Stride);
    }
}
private void UpdateFace(FaceTrackFrame face)
{
    if (!face.TrackSuccessful)
        return;

    var faceShape = face.GetProjected3DShape();

    foreach (var featureName in Enum.GetNames(typeof(FeaturePoint)))
    {
        var featurePoint = (FeaturePoint)Enum.Parse(typeof(FeaturePoint), featureName);
        if (faceShape[featurePoint] == PointF.Empty)
            continue;

        if (!_faceEllipses.ContainsKey(featurePoint))
        {
            _faceEllipses[featurePoint] = new Ellipse { Width = 5, Height = 5, Fill = Brushes.DarkBlue };
            FaceCanvas.Children.Add(_faceEllipses[featurePoint]);
        }

        Canvas.SetLeft(_faceEllipses[featurePoint], faceShape[featurePoint].X - _faceEllipses[featurePoint].Width / 2);
        Canvas.SetTop(_faceEllipses[featurePoint], faceShape[featurePoint].Y - _faceEllipses[featurePoint].Height / 2);
    }
}
/// <summary>
/// Creates a frame object instance. Can be used for caching of the face tracking
/// frame. FaceTrackFrame should be disposed after use.
/// </summary>
/// <returns>
/// newly created frame object
/// </returns>
internal FaceTrackFrame CreateResult(out int hr)
{
    IFTResult faceTrackResultPtr;
    FaceTrackFrame faceTrackFrame = null;

    this.CheckPtrAndThrow();
    hr = this.faceTrackerInteropPtr.CreateFTResult(out faceTrackResultPtr);
    if (faceTrackResultPtr != null)
    {
        faceTrackFrame = new FaceTrackFrame(faceTrackResultPtr, this);
    }

    return faceTrackFrame;
}
/// <summary>
/// Allows calling dispose explicitly or from the finalizer
/// </summary>
/// <param name="disposing">true to release both managed and unmanaged resources; false to release only unmanaged resources</param>
protected virtual void Dispose(bool disposing)
{
    if (!this.disposed)
    {
        string traceStr = string.Format(
            CultureInfo.InvariantCulture,
            "FaceTracker::Dispose() - TotalTracks={0}, TotalSuccessTracks={1}, TimePerTrack={2:F3}ms, TimePerSuccessTrack={3:F3}ms, TimePerDataCopy={4:F3}ms, TimePerStartOrContinueTracking={5:F3}ms",
            this.totalTracks,
            this.totalSuccessTracks,
            this.totalTracks > 0 ? (double)this.trackStopwatch.ElapsedMilliseconds / this.totalTracks : 0,
            this.totalSuccessTracks > 0 ? (double)this.totalSuccessTrackMs / this.totalSuccessTracks : 0,
            this.totalTracks > 0 ? (double)this.copyStopwatch.ElapsedMilliseconds / this.totalTracks : 0,
            this.totalTracks > 0 ? (double)this.startOrContinueTrackingStopwatch.ElapsedMilliseconds / this.totalTracks : 0);
#if DEBUG
        Debug.WriteLine(traceStr);
#else
        Trace.WriteLineIf(traceLevel >= TraceLevel.Info, traceStr);
#endif
        if (this.faceModel != null)
        {
            this.faceModel.Dispose();
            this.faceModel = null;
        }

        if (this.frame != null)
        {
            this.frame.Dispose();
            this.frame = null;
        }

        if (this.colorFaceTrackingImage != null)
        {
            this.colorFaceTrackingImage.Dispose();
            this.colorFaceTrackingImage = null;
        }

        if (this.depthFaceTrackingImage != null)
        {
            this.depthFaceTrackingImage.Dispose();
            this.depthFaceTrackingImage = null;
        }

        if (this.faceTrackerInteropPtr != null)
        {
            Marshal.FinalReleaseComObject(this.faceTrackerInteropPtr);
            this.faceTrackerInteropPtr = null;
        }

        this.disposed = true;
    }
}
private void UpdateMesh(FaceTrackFrame faceTrackingFrame)
{
    EnumIndexableCollection<FeaturePoint, Vector3DF> shapePoints = faceTrackingFrame.Get3DShape();
    EnumIndexableCollection<FeaturePoint, PointF> projectedShapePoints = faceTrackingFrame.GetProjected3DShape();

    if (this.triangleIndices == null)
    {
        // Update stuff that doesn't change from frame to frame
        this.triangleIndices = faceTrackingFrame.GetTriangles();
        var indices = new Int32Collection(this.triangleIndices.Length * 3);
        foreach (FaceTriangle triangle in this.triangleIndices)
        {
            indices.Add(triangle.Third);
            indices.Add(triangle.Second);
            indices.Add(triangle.First);
        }

        this.theGeometry.TriangleIndices = indices;
        this.theGeometry.Normals = null; // Let WPF3D calculate these.
        this.theGeometry.Positions = new Point3DCollection(shapePoints.Count);
        this.theGeometry.TextureCoordinates = new PointCollection(projectedShapePoints.Count);
        for (int pointIndex = 0; pointIndex < shapePoints.Count; pointIndex++)
        {
            this.theGeometry.Positions.Add(new Point3D());
            this.theGeometry.TextureCoordinates.Add(new Point());
        }
    }

    // Update the 3D model's vertices and texture coordinates
    for (int pointIndex = 0; pointIndex < shapePoints.Count; pointIndex++)
    {
        Vector3DF point = shapePoints[pointIndex];
        this.theGeometry.Positions[pointIndex] = new Point3D(point.X, point.Y, -point.Z);

        PointF projected = projectedShapePoints[pointIndex];
        this.theGeometry.TextureCoordinates[pointIndex] = new Point(
            projected.X / (double)this.colorImageWritableBitmap.PixelWidth,
            projected.Y / (double)this.colorImageWritableBitmap.PixelHeight);
    }
}
/// <summary>
/// Updates the face tracking information for this skeleton
/// </summary>
internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
{
    this.skeletonTrackingState = skeletonOfInterest.TrackingState;

    if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
    {
        // nothing to do with an untracked skeleton.
        return;
    }

    if (this.faceTracker == null)
    {
        try
        {
            this.faceTracker = new FaceTracker(kinectSensor);
        }
        catch (InvalidOperationException)
        {
            this.faceTracker = null;
        }
    }

    if (this.faceTracker != null)
    {
        frame = this.faceTracker.Track(
            colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest).Clone() as FaceTrackFrame;
    }
}
private void AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
    {
        if (colorImageFrame != null)
        {
            if (ColorPixels == null)
                ColorPixels = new byte[colorImageFrame.PixelDataLength];

            colorImageFrame.CopyPixelDataTo(ColorPixels);
            ColorImageFrame = colorImageFrame;
        }
    }

    using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
    {
        if (depthImageFrame != null)
        {
            if (DepthImagePixels == null)
                DepthImagePixels = new DepthImagePixel[depthImageFrame.PixelDataLength];

            depthImageFrame.CopyDepthImagePixelDataTo(DepthImagePixels);

            if (DepthPixels == null)
                DepthPixels = new short[depthImageFrame.PixelDataLength];

            depthImageFrame.CopyPixelDataTo(DepthPixels);
            DepthImageFrame = depthImageFrame;

            // Invalidate the cached face frame so it is recomputed for the new data (see GetFaceTrackFrame)
            _faceFrame = null;
        }
    }

    using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
    {
        if (skeletonFrame != null)
        {
            if (Skeletons == null)
                Skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];

            skeletonFrame.CopySkeletonDataTo(Skeletons);
            //CorrectRoomCoords();
        }
    }

    FireAllFramesDispatched();
}
public PointF[] GetProjected3DShape(float zoomFactor, Point viewOffset, FaceTrackFrame faceTrackFrame)
{
    this.CheckPtrAndThrow();

    IntPtr shapeUnitCoeffPtr;
    uint shapeUnitCount = 0;
    IntPtr animUnitCoeffPtr;
    uint animUnitPointsCount;
    bool hasSuConverged;
    float scale;

    faceTrackFrame.ResultPtr.GetAUCoefficients(out animUnitCoeffPtr, out animUnitPointsCount);
    this.faceTracker.FaceTrackerPtr.GetShapeUnits(out scale, out shapeUnitCoeffPtr, ref shapeUnitCount, out hasSuConverged);

    return this.GetProjected3DShape(
        this.faceTracker.ColorCameraConfig,
        zoomFactor,
        viewOffset,
        shapeUnitCoeffPtr,
        shapeUnitCount,
        animUnitCoeffPtr,
        animUnitPointsCount,
        faceTrackFrame.Scale,
        faceTrackFrame.Rotation,
        faceTrackFrame.Translation);
}
/// <summary>
/// Updates the face tracking information for this skeleton
/// </summary>
internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
{
    this.skeletonTrackingState = skeletonOfInterest.TrackingState;

    if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
    {
        // nothing to do with an untracked skeleton.
        return;
    }

    if (this.faceTracker == null)
    {
        try
        {
            this.faceTracker = new FaceTracker(kinectSensor);
        }
        catch (InvalidOperationException)
        {
            // During some shutdown scenarios the FaceTracker
            // is unable to be instantiated. Catch that exception
            // and don't track a face.
            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
            this.faceTracker = null;
        }
    }

    if (this.faceTracker != null)
    {
        frame = this.faceTracker.Track(
            colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

        this.lastFaceTrackSucceeded = frame.TrackSuccessful;
        if (this.lastFaceTrackSucceeded)
        {
            if (faceTriangles == null)
            {
                // only need to get this once. It doesn't change.
                faceTriangles = frame.GetTriangles();
            }

            this.facePoints = frame.GetProjected3DShape();
        }
    }
}
/// <summary>
/// Handler for the AllFramesReady event
/// </summary>
/// <param name="sender"></param>
/// <param name="allFramesReadyEventArgs"></param>
private void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
{
    using (ColorImageFrame colorFrame = allFramesReadyEventArgs.OpenColorImageFrame())
    using (DepthImageFrame depthFrame = allFramesReadyEventArgs.OpenDepthImageFrame())
    using (SkeletonFrame skeletonFrame = allFramesReadyEventArgs.OpenSkeletonFrame())
    {
        if (colorFrame == null || depthFrame == null || skeletonFrame == null)
        {
            Message.Warning("Not All Frames Present in Unified Event Handler. Skipping this data frame set...");
            return;
        }

        //Thread skeletonCopyThread = new Thread(() => skeletonFrame.CopySkeletonDataTo(this.skeletonData));
        //Thread colorCopyThread = new Thread(() => colorFrame.CopyPixelDataTo(this.colorPixels));
        //Thread depthCopyThread = new Thread(() => depthFrame.CopyDepthImagePixelDataTo(this.depthPixels));

        // SP
        // Lock this section so nothing happens between/during the three copy operations.
        // Each copy could be run on its own thread, but there is no significant performance gain.
        lock (dataCopyLock)
        {
            //// SKELETON ////
            // Copy the skeleton data from the frame to a storage array
            //skeletonCopyThread.Start();
            skeletonFrame.CopySkeletonDataTo(this.skeletonData);

            //// COLOR IMAGE ////
            // Copy the pixel data from the frame to a storage array
            //colorCopyThread.Start();
            colorFrame.CopyPixelDataTo(this.colorPixels);

            //// DEPTH IMAGE ////
            // Copy the pixel data from the frame to a storage array
            //depthCopyThread.Start();
            depthFrame.CopyPixelDataTo(this.depthPixels);

            //skeletonCopyThread.Join();
            //colorCopyThread.Join();
            //depthCopyThread.Join();
        }

        // Reset counter variables
        this.trackedSkeletons = 0;

        foreach (Skeleton skel in this.skeletonData)
        {
            if (skel.TrackingState == SkeletonTrackingState.Tracked)
            {
                skeletonOfInterest = skel;
                this.trackedSkeletons++;

                theta = Calculation.radians2Degrees(Calculation.findUserTheta(
                    skel.Joints[JointType.ShoulderCenter].Position.X,
                    skel.Joints[JointType.ShoulderCenter].Position.Z,
                    skel.Joints[JointType.ShoulderRight].Position.X,
                    skel.Joints[JointType.ShoulderRight].Position.Z));
                distance = Calculation.findDistance(
                    skel.Joints[JointType.ShoulderCenter].Position.X,
                    skel.Joints[JointType.ShoulderCenter].Position.Z);
                // Message.Info("Theta: " + theta);
                //TestStable();
            }
        }

        //// Face Detection - AS ////
        if (faceTracker != null)
        {
            faceTrackFrame = faceTracker.Track(
                this.kinectSensor.ColorStream.Format, this.colorPixels,
                this.kinectSensor.DepthStream.Format, this.depthPixels,
                skeletonOfInterest);
            faceDetected = faceTrackFrame.TrackSuccessful;
            Message.Info("FaceDetected: " + this.FaceDetected());
        }
        else
        {
            try
            {
                faceTracker = new FaceTracker(this.kinectSensor);
                Message.Info("Frame processed");
            }
            catch (InvalidOperationException)
            {
                Message.Info("Frame not processed");
            }
        }
        //// Face Detection - AS ////
    }
}
private void UpdateMesh(FaceTrackFrame faceTrackingFrame)
{
    //Console.Out.WriteLine(" ###################### In UpdateMesh ############################# ");
    bool faceInCentre = true;

    EnumIndexableCollection<FeaturePoint, Vector3DF> shapePoints = faceTrackingFrame.Get3DShape();
    EnumIndexableCollection<FeaturePoint, PointF> projectedShapePoints = faceTrackingFrame.GetProjected3DShape();

    if (this.triangleIndices == null)
    {
        // Update stuff that doesn't change from frame to frame
        this.triangleIndices = faceTrackingFrame.GetTriangles();
        var indices = new Int32Collection(this.triangleIndices.Length * 3);
        foreach (FaceTriangle triangle in this.triangleIndices)
        {
            indices.Add(triangle.Third);
            indices.Add(triangle.Second);
            indices.Add(triangle.First);
        }

        this.theGeometry.TriangleIndices = indices;
        this.theGeometry.Normals = null; // Let WPF3D calculate these.
        this.theGeometry.Positions = new Point3DCollection(shapePoints.Count);
        this.theGeometry.TextureCoordinates = new PointCollection(projectedShapePoints.Count);
        for (int pointIndex = 0; pointIndex < shapePoints.Count; pointIndex++)
        {
            this.theGeometry.Positions.Add(new Point3D());
            this.theGeometry.TextureCoordinates.Add(new Point());
        }
    }

    // Update the 3D model's vertices and texture coordinates
    for (int pointIndex = 0; pointIndex < shapePoints.Count; pointIndex++)
    {
        Vector3DF point = shapePoints[pointIndex];
        this.theGeometry.Positions[pointIndex] = new Point3D(point.X, point.Y, -point.Z);

        PointF projected = projectedShapePoints[pointIndex];
        this.theGeometry.TextureCoordinates[pointIndex] = new Point(
            projected.X / (double)this.colorImageWritableBitmap.PixelWidth,
            projected.Y / (double)this.colorImageWritableBitmap.PixelHeight);

        // Console.Out.WriteLine("X = " + projected.X / (double)this.colorImageWritableBitmap.PixelWidth + " Y = " + projected.Y / (double)this.colorImageWritableBitmap.PixelHeight);

        if (projected.X / (double)this.colorImageWritableBitmap.PixelWidth > .6 ||
            projected.Y / (double)this.colorImageWritableBitmap.PixelHeight > .75)
        {
            faceInCentre = false;
        }
    }

    if (faceInCentre)
    {
        // copyFaceImage();
        FaceMesh tempMeshData = new FaceMesh();
        tempMeshData.FaceViewport = viewport3d;
        FaceMeshData = tempMeshData;
    }
}
/// <summary>
/// Creates a frame object instance. Can be used for caching of the face tracking
/// frame. FaceTrackFrame should be disposed after use.
/// </summary>
/// <returns>
/// newly created frame object
/// </returns>
internal FaceTrackFrame CreateResult(out int hr)
{
    // Console.WriteLine("HELLO WORLD 3");
    IFTResult faceTrackResultPtr;
    FaceTrackFrame faceTrackFrame = null;

    this.CheckPtrAndThrow();
    hr = this.faceTrackerInteropPtr.CreateFTResult(out faceTrackResultPtr);
    if (faceTrackResultPtr != null)
    {
        faceTrackFrame = new FaceTrackFrame(faceTrackResultPtr, this);
    }

    Console.WriteLine(faceTrackResultPtr); // MIKE LOOK HERE
    return faceTrackFrame;
}
void UpdateMesh(FaceTrackFrame faceTrackingFrame)
{
    EnumIndexableCollection<FeaturePoint, Vector3DF> shapePoints = faceTrackingFrame.Get3DShape();
    EnumIndexableCollection<FeaturePoint, PointF> projectedShapePoints = faceTrackingFrame.GetProjected3DShape();

    if (this.triangleIndices == null)
    {
        // Update stuff that doesn't change from frame to frame
        this.triangleIndices = faceTrackingFrame.GetTriangles();
        var indices = new Int32Collection(this.triangleIndices.Length * 3);
        foreach (FaceTriangle triangle in this.triangleIndices)
        {
            indices.Add(triangle.Third);
            indices.Add(triangle.Second);
            indices.Add(triangle.First);
        }

        this.theGeometry.TriangleIndices = indices;
        this.theGeometry.Normals = null; // Let WPF3D calculate these.
        this.theGeometry.Positions = new Point3DCollection(shapePoints.Count);
        this.theGeometry.TextureCoordinates = new PointCollection(projectedShapePoints.Count);
        for (int pointIndex = 0; pointIndex < shapePoints.Count; pointIndex++)
        {
            this.theGeometry.Positions.Add(new Point3D());
            this.theGeometry.TextureCoordinates.Add(new Point());
        }
    }

    //Vector3DF point = shapePoints[3];

    // Update the 3D model's vertices and texture coordinates
    for (int pointIndex = 0; pointIndex < shapePoints.Count; pointIndex++)
    {
        Vector3DF point = shapePoints[pointIndex];
        this.theGeometry.Positions[pointIndex] = new Point3D(point.X, point.Y, -point.Z);

        PointF projected = projectedShapePoints[pointIndex];

        data[dataindex, pointIndex, 0] = point.X;
        data[dataindex, pointIndex, 1] = point.Y;
        data[dataindex, pointIndex, 2] = point.Z;

        this.theGeometry.TextureCoordinates[pointIndex] = new Point(
            projected.X / (double)this.colorImageWritableBitmap.PixelWidth,
            projected.Y / (double)this.colorImageWritableBitmap.PixelHeight);
    }

    textBlock1.Text = data[dataindex, 4, 2].ToString();

    if (data[dataindex, 4, 2] > 1.2 && data[dataindex, 4, 2] < 1.4)
    {
        dataindex++;
        //StreamWriter fw = File.AppendText("e:\\newnewstart4.txt");
        //fw.Write("z=" + );
        //fw.Close();
    }
}
protected void UpdateFrame(FaceTrackFrame frame)
{
    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
    if (this.lastFaceTrackSucceeded)
    {
        if (faceTriangles == null)
        {
            // only need to get this once. It doesn't change.
            faceTriangles = frame.GetTriangles();
        }

        this.facePoints = frame.Get3DShape();
        this.FaceRect = frame.FaceRect;
        this.FaceTranslation = frame.Translation;
        this.FaceRotation = frame.Rotation;
    }
}
public FaceTrackFrame GetFaceTrackFrame(Skeleton skeleton)
{
    // For efficiency, a new face frame is not created on every access,
    // but only once per frame. See OnAllFramesReady below.
    if (_faceFrame == null)
    {
        _faceFrame = _faceTracker.Track(
            _kinectSensor.ColorStream.Format, ColorPixels,
            _kinectSensor.DepthStream.Format, DepthPixels,
            skeleton);
    }

    return _faceFrame;
}
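A possible call site for this cached accessor, sketched under the assumption that the class above is available as sensorData and that skeleton is a tracked Skeleton from the current frame (both names are placeholders, not part of the original code):

// Hypothetical usage sketch - 'sensorData' and 'skeleton' are assumed to exist in the caller.
FaceTrackFrame faceFrame = sensorData.GetFaceTrackFrame(skeleton);
if (faceFrame != null && faceFrame.TrackSuccessful)
{
    var faceRect = faceFrame.FaceRect;
    Debug.WriteLine("Face rectangle: {0},{1} - {2},{3}", faceRect.Left, faceRect.Top, faceRect.Right, faceRect.Bottom);
}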
private void AnalyzeFace(FaceTrackFrame frame)
{
    // Move the thought bubble next to the face
    System.Windows.Point facePoint = new System.Windows.Point();
    facePoint.X = frame.FaceRect.Right + _thoughtBubbleOffset;
    facePoint.Y = frame.FaceRect.Top - (_thoughtBubble.ActualHeight / 2);
    MoveToCameraPosition(_thoughtBubble, facePoint);

    var animationUnits = frame.GetAnimationUnitCoefficients();
    if (animationUnits[AnimationUnit.JawLower] > .7)
    {
        _thoughtBubble.SetThoughtBubble("openmouth.png");
    }

    if (animationUnits[AnimationUnit.BrowRaiser] > .5)
    {
        _thoughtBubble.SetThoughtBubble("eyebrow.png");
    }

    // Check for yes
    if (frame.Rotation.X > 10 && yesStarted == false)
    {
        yesStarted = true;
        _yesNo.Reset();
        _yesNo.Start(); // start YES timer
        return;
    }

    // Check for no
    if (frame.Rotation.Y < -10 && noStarted == false)
    {
        noStarted = true;
        _yesNo.Reset();
        _yesNo.Start();
        return;
    }

    if (_yesNo.Elapsed.TotalSeconds > 3)
    {
        _yesNo.Stop();
        yesStarted = false;
        noStarted = false;
    }
    else
    {
        if (frame.Rotation.X < -5 && yesStarted)
        {
            // YES!!
            _thoughtBubble.SetThoughtBubble("yes.png");
        }

        if (frame.Rotation.Y > 10 && noStarted)
        {
            // NO!!!
            _thoughtBubble.SetThoughtBubble("no.png");
        }
    }

    UpdateTextReading(frame, animationUnits[AnimationUnit.BrowRaiser], animationUnits[AnimationUnit.JawLower]);
}
private void UpdateTextReading(FaceTrackFrame frame, float outerBrowRaiser = 0, float openMouth = 0)
{
    sb.Clear();
    foreach (var item in _faceReadings)
    {
        switch (item.ReadingType)
        {
            case FaceEmotion.OpenMouth:
                SetReading(item, openMouth);
                break;
            case FaceEmotion.EyeBrow:
                SetReading(item, outerBrowRaiser);
                break;
            case FaceEmotion.UpDownValue:
                SetReading(item, frame.Rotation.X);
                break;
            case FaceEmotion.LeftRight:
                SetReading(item, frame.Rotation.Y);
                break;
            default:
                break;
        }

        sb.AppendLine(item.ToString());
    }

    _CurrentReading.Text = sb.ToString();
}
private Vector3DF getVector(FaceTrackFrame f)
{
    return new Vector3DF(f.Translation.X, f.Translation.Y, f.Translation.Z);
}
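One way this translation vector can be used is to decide whether a new frame still belongs to the same Face instance. This is a sketch only, assuming the Vector member stored by the Face constructor shown earlier; DistanceSquaredTo is a hypothetical helper, not part of the original class:

// Hypothetical helper: squared distance between the stored face position (this.Vector)
// and the head position reported by a new frame. A caller could compare this against a
// threshold to match incoming frames to already-known faces.
private double DistanceSquaredTo(FaceTrackFrame f)
{
    Vector3DF current = getVector(f);
    double dx = current.X - this.Vector.X;
    double dy = current.Y - this.Vector.Y;
    double dz = current.Z - this.Vector.Z;
    return dx * dx + dy * dy + dz * dz;
}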