/// <summary>
/// Updates the on-screen instructions field with the text that matches the
/// given calibration step, in the currently configured language.
/// Steps without dedicated instruction text leave the field untouched.
/// </summary>
/// <param name="step">Calibration step whose instructions should be shown.</param>
public virtual void SetCalibrationInstructions(CalibrationStep step)
{
    // Only English and Spanish variants exist; anything non-English falls back to Spanish.
    bool english = SettingsBase.GetLanguage() == SettingsBase.Languages.English;

    switch (step)
    {
        case CalibrationStep.HandTrackingMode:
            SetTextInstructions(english ? INSTRUCTIONS_HAND_TRACKING_MODE_EN : INSTRUCTIONS_HAND_TRACKING_MODE_ES);
            break;

        case CalibrationStep.HMDAdjustment:
            SetTextInstructions(english ? INSTRUCTIONS_HMD_ADJUSTMENT_EN : INSTRUCTIONS_HMD_ADJUSTMENT_ES);
            break;

        case CalibrationStep.ForwardDirection:
            SetTextInstructions(english ? INSTRUCTIONS_LOOK_FORWARD_EN : INSTRUCTIONS_LOOK_FORWARD_ES);
            break;

        case CalibrationStep.TimeExpired:
            SetTextInstructions(english ? INSTRUCTIONS_TIME_EXPIRED_EN : INSTRUCTIONS_TIME_EXPIRED_ES);
            break;

        case CalibrationStep.InternetConnectionRequired:
            SetTextInstructions(english ? INSTRUCTIONS_INTERNET_CONNECTION_REQUIRED_EN : INSTRUCTIONS_INTERNET_CONNECTION_REQUIRED_ES);
            break;
    }
}
/// <summary>
/// Computes the mapping from Kinect skeleton space to 2D projection space
/// once all four calibration points have been collected. The pipeline is:
/// skeleton 3D points -> depth-camera 3D points -> points on a common 2D
/// plane -> perspective transform onto the target screen quad.
/// Sets m_groundPlaneTransform, m_transform and marks the calibration as done.
/// No-op until exactly four matching point pairs have been recorded.
/// </summary>
private void Calibrate()
{
    if (m_skeletonCalibPoints.Count == m_calibPoints.Count
        // We need at least four points to map a rectangular region.
        && m_skeletonCalibPoints.Count == 4)
    {
        // Skeleton 3D positions --> 3D positions in the depth camera.
        Point3D p0 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[0]);
        Point3D p1 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[1]);
        Point3D p2 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[2]);
        Point3D p3 = ConvertSkeletonPointToDepthPoint(m_kinectSensor, m_skeletonCalibPoints[3]);

        // 3D positions in the depth camera --> positions on a 2D plane.
        // Build the plane's normal from two edge vectors anchored at p0.
        // NOTE(review): assumes p0, p1, p2 are not collinear, otherwise the
        // cross product is degenerate — presumably guaranteed by how the four
        // calibration points are captured; confirm against the capture flow.
        Vector3D v1 = p1 - p0;
        v1.Normalize();

        Vector3D v2 = p2 - p0;
        v2.Normalize();

        Vector3D planeNormalVec = Vector3D.CrossProduct(v1, v2);
        planeNormalVec.Normalize();

        // Rotate the calibration plane so its normal becomes the Z axis,
        // flattening the four points into the XY plane.
        Vector3D resultingPlaneNormal = new Vector3D(0, 0, 1);
        m_groundPlaneTransform = Util.Make_align_axis_matrix(resultingPlaneNormal, planeNormalVec);

        Point3D p0OnPlane = m_groundPlaneTransform.Transform(p0);
        Point3D p1OnPlane = m_groundPlaneTransform.Transform(p1);
        Point3D p2OnPlane = m_groundPlaneTransform.Transform(p2);
        Point3D p3OnPlane = m_groundPlaneTransform.Transform(p3);

        // 2D plane positions --> exact 2D square on screen (using perspective transform).
        System.Drawing.PointF[] src = new System.Drawing.PointF[4];
        src[0] = new System.Drawing.PointF((float)p0OnPlane.X, (float)p0OnPlane.Y);
        src[1] = new System.Drawing.PointF((float)p1OnPlane.X, (float)p1OnPlane.Y);
        src[2] = new System.Drawing.PointF((float)p2OnPlane.X, (float)p2OnPlane.Y);
        src[3] = new System.Drawing.PointF((float)p3OnPlane.X, (float)p3OnPlane.Y);

        System.Drawing.PointF[] dest = new System.Drawing.PointF[4];
        dest[0] = new System.Drawing.PointF((float)m_calibPoints[0].X, (float)m_calibPoints[0].Y);
        dest[1] = new System.Drawing.PointF((float)m_calibPoints[1].X, (float)m_calibPoints[1].Y);
        dest[2] = new System.Drawing.PointF((float)m_calibPoints[2].X, (float)m_calibPoints[2].Y);
        dest[3] = new System.Drawing.PointF((float)m_calibPoints[3].X, (float)m_calibPoints[3].Y);

        Emgu.CV.Mat transform = Emgu.CV.CvInvoke.GetPerspectiveTransform(src, dest);

        // Copy the Mat into a typed Matrix<double> so it can be applied later.
        m_transform = new Emgu.CV.Matrix<double>(transform.Rows, transform.Cols, transform.NumberOfChannels);
        transform.CopyTo(m_transform);

        m_calibrationStatus = CalibrationStep.Calibrated;

        // Sanity check kept for debugging: round-tripping the skeleton
        // calibration points through KinectToProjectionPoint should reproduce
        // m_calibPoints exactly.
        //Point tResult0 = KinectToProjectionPoint(m_skeletonCalibPoints[0]);
        //Point tResult1 = KinectToProjectionPoint(m_skeletonCalibPoints[1]);
        //Point tResult2 = KinectToProjectionPoint(m_skeletonCalibPoints[2]);
        //Point tResult3 = KinectToProjectionPoint(m_skeletonCalibPoints[3]);

        //Debug.Assert(tResult0.Equals(m_calibPoints[0]));
        //Debug.Assert(tResult1.Equals(m_calibPoints[1]));
        //Debug.Assert(tResult2.Equals(m_calibPoints[2]));
        //Debug.Assert(tResult3.Equals(m_calibPoints[3]));
    }
}
/// <summary>
/// Keyboard handler that advances the calibration procedure by one step each
/// time the space bar is pressed: grabs the next skeleton frame, requires
/// exactly one tracked skeleton, and records its position as the current
/// calibration point. After the fourth point, calibration is complete and the
/// live skeleton renderer is attached.
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">key event arguments; only Key.Space is handled</param>
private void Window_Calibrate(object sender, KeyEventArgs e)
{
    if (e.Key == Key.Space)
    {
        Debug.WriteLine("space pressed");

        // Pause the secondary window's frame subscription while we consume a frame.
        if (win2 != null)
        {
            win2.Unsubscribe();
        }

        Skeleton[] skeletons = null;
        using (SkeletonFrame skeletonFrame = this.sensor.SkeletonStream.OpenNextFrame(200))
        {
            if (skeletonFrame != null)
            {
                skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
                skeletonFrame.CopySkeletonDataTo(skeletons);
            }
        }

        // BUG FIX: 'skeletons' is null when no frame arrived within the 200 ms
        // timeout; the original dereferenced it unconditionally (NullReferenceException).
        // BUG FIX: SingleOrDefault throws InvalidOperationException when MORE than
        // one skeleton is tracked; count the tracked skeletons instead so that
        // "more than one" falls through to the same error message as "none".
        Skeleton trackedSkeleton = null;
        if (skeletons != null)
        {
            var tracked = skeletons.Where(s => s.TrackingState == SkeletonTrackingState.Tracked).ToList();
            if (tracked.Count == 1)
            {
                trackedSkeleton = tracked[0];
            }
        }

        if (trackedSkeleton != null)
        {
            Debug.WriteLine("1 tracked found");
            SkeletonPoint point3D = trackedSkeleton.Position;
            Debug.WriteLine(calibrationStep.ToString());

            switch (calibrationStep)
            {
                case CalibrationStep.NotCalibrated:
                    // Start calibration
                    CalibrationStep = CalibrationStep.PointOne;
                    break;

                case CalibrationStep.PointOne:
                    // Lock in point one
                    calibrationClass.AddCalibrationPoint(Point2DStepOne, point3D);
                    CalibrationStep = CalibrationStep.PointTwo;
                    break;

                case CalibrationStep.PointTwo:
                    // Lock in point two
                    calibrationClass.AddCalibrationPoint(Point2DStepTwo, point3D);
                    CalibrationStep = CalibrationStep.PointThree;
                    break;

                case CalibrationStep.PointThree:
                    // Lock in point three
                    calibrationClass.AddCalibrationPoint(Point2DStepThree, point3D);
                    CalibrationStep = CalibrationStep.PointFour;
                    break;

                case CalibrationStep.PointFour:
                    // Lock in point four.
                    // This automatically forces calibration.
                    calibrationClass.AddCalibrationPoint(Point2DStepFour, point3D);
                    CalibrationStep = CalibrationStep.Calibrated;

                    // Now draw the position on the UI.
                    // TODO add separate subscribe/unsubscribe via listener.
                    this.sensor.SkeletonFrameReady += this.SensorSkeletonFrameReady;
                    break;

                default:
                    // BUG FIX: "{}" is not a valid composite-format placeholder and
                    // made String.Format throw a FormatException; use "{0}".
                    string message = String.Format("Unexpected calibration step: {0}", calibrationStep.ToString());
                    Debug.WriteLine(message);
                    break;
            }
        }
        else
        {
            Debug.WriteLine("EXACTLY ONE person must be tracked to calibrate!");
        }

        if (win2 != null)
        {
            win2.Subscribe();
        }
    }
}
/// <summary>
/// Execute startup tasks: build the drawing surface for the skeleton image,
/// locate the first connected Kinect sensor, start its skeleton stream, and
/// wire up the keyboard handlers that drive calibration.
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void WindowLoaded(object sender, RoutedEventArgs e)
{
    // Drawing surface backing the image control that shows skeleton output.
    this.drawingGroup = new DrawingGroup();
    this.imageSource = new DrawingImage(this.drawingGroup);
    Image.Source = this.imageSource;

    KeyUp += Window_OpenDebug;
    SensorSkeletonFrameReady(null, null);

    // Pick the first Kinect that is already connected at startup. For
    // plug/unplug robustness, KinectSensorChooser from
    // Microsoft.Kinect.Toolkit would be the recommended replacement.
    foreach (var candidate in KinectSensor.KinectSensors)
    {
        if (candidate.Status == KinectStatus.Connected)
        {
            this.sensor = candidate;
            break;
        }
    }

    if (this.sensor != null)
    {
        // Turn on the skeleton stream so skeleton frames are produced.
        this.sensor.SkeletonStream.Enable();

        try
        {
            this.sensor.Start();
        }
        catch (IOException)
        {
            // Sensor could not be started (e.g. in use elsewhere); treat it
            // as unavailable from here on.
            this.sensor = null;
        }

        PropertyChanged += CalibrationChanged;
        CalibrationStep = CalibrationStep.NotCalibrated;
    }

    // Re-checked separately: Start() above may have nulled the sensor.
    if (this.sensor != null)
    {
        // Setup calibrator to receive mappings between reality and screen image.
        this.calibrationClass = new CalibrationClass(this.sensor);

        // Keys that advance, auto-run, or reset the calibration process.
        KeyUp += Window_Calibrate;
        KeyUp += Window_AutoCalibrate;
        KeyUp += Window_Reset;
    }

    if (this.sensor == null)
    {
        this.statusBarText.Text = Properties.Resources.NoKinectReady;
    }
}