// We don't actually have to "update" any sense of state. It's just easier to conform to this
// usage in the event that we need to change the implementation to be more complex. It also makes
// maintaining observers easier, as I'm not familiar with C#'s peculiarities regarding static
// classes. And, really, the code itself is already dubious.
public void Update(Person person)
{
    if (person != null && person.skeletonData != null)
    {
        if (person.skeletonData.getTrackingState() == SkeletonTrackingState.Tracked)
        {
            TrackPress(person, true);
            TrackPress(person, false);
        }
    }
}
private ScissorGestureTracker getTrackerForPerson(Person p)
{
    ScissorGestureTracker output;

    // Lazily create a tracker the first time we see this person.
    if (!scissorGestureTrackers.TryGetValue(p, out output))
    {
        output = new ScissorGestureTracker();
        scissorGestureTrackers.Add(p, output);
    }

    return output;
}
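// Note: keying the dictionary on Person means lookups go through Person's Equals/GetHashCode.
// Unless Person overrides them, that is reference equality, so the same on-screen user must be
// represented by the same Person instance across frames for their tracker to persist.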
public void update(Person subject)
{
    if (subject.skeletonData != null)
    {
        ScissorGestureTracker gestureTracker = this.getTrackerForPerson(subject);

        if (subject.skeletonData.getTrackingState() == SkeletonTrackingState.Tracked)
        {
            // Check the state of the user's left and right hands to determine the relation of
            // each to a scissor gesture.
            TrackGesture(subject, ref gestureTracker);
        }
        // If we can't track the user's motions we'll have to reset our data on them;
        // they may have left and will be replaced by another user.
        else
        {
            gestureTracker.reset();
        } // end if-else
    }
}
private void TrackPress(Person person, bool isLeft)
{
    /* We define a press gesture as being made up of two properties:
     * 1) whether the user's hand is below their shoulder when they make the gesture and
     * 2) whether the user stretches their arm out to approximately a third of their body height
     *    when making the gesture.
     * We require that the arm be outstretched by only a third because the average adult arm is
     * approximately half of that adult's height (measuring from the tips of the fingers). This
     * spares the user from having to fully outstretch their arm, and also takes into account
     * that we are unable to track their fingers. Having tested this on an honest-to-God child,
     * it /also/ inadvertently helps to accommodate the fact that children have comically
     * disproportionate arms. */
    SkeletonWrapper skeleton = person.skeletonData;
    Joint hand, shoulder, head, foot;

    if (isLeft)
    {
        hand = skeleton.getLeftHandJoint();
        shoulder = skeleton.getLeftShoulderJoint();
        foot = skeleton.getLeftFootJoint();
    }
    else
    {
        hand = skeleton.getRightHandJoint();
        shoulder = skeleton.getRightShoulderJoint();
        foot = skeleton.getRightFootJoint();
    }
    head = skeleton.getHeadJoint();

    PressPosition pressPosition = PressPosition.None;

    // TODO: Don't spawn if hands are overlapping (deleting vs spawning mode)
    float metricHeight, metricArmExtension;

    // If anybody turns sideways, I swear to God, I'll kill them.
    if (hand.TrackingState == JointTrackingState.Tracked &&
        head.TrackingState == JointTrackingState.Tracked &&
        shoulder.TrackingState == JointTrackingState.Tracked &&
        foot.TrackingState == JointTrackingState.Tracked)
    {
        // Height of the user's torso (in meters) - this is inaccurate as hell, typically off
        // by about two feet.
        metricHeight = head.Position.Y - foot.Position.Y;

        // Length of the user's arm extension (in meters) - this is also inaccurate as hell,
        // but it is /proportionally so/, so we can still use the relationship between the two
        // to identify a press.
        metricArmExtension = -(hand.Position.Z - shoulder.Position.Z);

        // If the user's hand is below the shoulder...
        if (hand.Position.Y < (shoulder.Position.Y + HAND_HEIGHT_THRESHOLD))
        {
            // If the user has extended their arm to the length of a third of their body,
            // allowing for the grace threshold...
            if ((metricHeight / 2.78) < (metricArmExtension + ARM_EXTENSION_THRESHOLD))
            {
                // Then they have extended their arm into a press.
                pressPosition = PressPosition.Extended;
            }
            else
            {
                pressPosition = PressPosition.Neutral;
            } // end if-else

            // If the user has extended their arm...
            if (pressPosition == PressPosition.Extended)
            {
                // Then notify the observers that we have a press. There's going to be a party.
                // There is going to be cake.
                if (GestureDetected != null)
                {
                    GestureDetected(this, new PressGestureEventArgs(isLeft, person));
                }
            } // end if
        }
    }
}
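/* A minimal usage sketch (not part of the original source): assuming the detector exposes the
 * GestureDetected event raised above as something like
 *     public event EventHandler<PressGestureEventArgs> GestureDetected;
 * a consumer would subscribe roughly like this. The detector instance name and the
 * leftHandPressing accessor are assumptions based on the constructor below. */
pressGestureDetector.GestureDetected += (sender, args) =>
{
    // React to the press; which hand pressed is carried in the event args.
    Console.WriteLine("Press detected ({0} hand)", args.leftHandPressing ? "left" : "right");
};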
public PressGestureEventArgs(bool leftHandPressing, Person user)
{
    this.leftHandPressing = leftHandPressing;
    this.rightHandPressing = !leftHandPressing;
    this.user = user;
}
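/* The surrounding class isn't shown in this section; a minimal sketch consistent with the
 * constructor above (member names inferred from the assignments, everything else an assumption): */
public class PressGestureEventArgs : EventArgs
{
    public bool leftHandPressing;  // True when the left hand made the press.
    public bool rightHandPressing; // Always the complement of leftHandPressing.
    public Person user;            // The user who performed the press.

    // ... constructor as above ...
}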
/* Computes a unique color for a user based on the average color of their torso.
 * Takes a Person whose torso data defines the dimensions of the user's torso, a width that
 * defines the width of the Kinect's ColorImageFrame, a colorMap that holds our color data,
 * and the KinectSensor used to map skeletal positions into the color image.
 *
 * Returns a color that defines a user based on their torso color. */
public static Color ComputeUserColor(Person user, int width, Color[] colorMap, KinectSensor kSensor)
{
    /* ComputeUserColor computes a Color for a user based on the color of their torso. This
     * color is computed by taking the metric length of their torso and taking a vertical slice
     * from which an average is computed from a selection of color samples. These samples are
     * taken every millimeter from the base of the user's torso to the top of it.
     * For those of us who are not drug users, dealers, or Europhiles, this equates to between
     * 300 and 400 color samples for an average adult and approximately 200 samples for a child. */

    // Retrieve the skeletal data from the user.
    SkeletonPoint torsoTop = user.torsoTop;
    SkeletonPoint torsoBottom = user.torsoBottom;

    // Compute the metric length of the user's torso.
    double metricTorsoLength = torsoTop.Y - torsoBottom.Y;

    // The average color computed from our user sampling. Defaults to Hot Pink but will be
    // overridden, rather than modified, in the event that we successfully compute a user color.
    Color userColorAverage = Color.HotPink;

    // We initialize the RGB values as integers, rather than bytes, as each of their values
    // prior to being averaged will almost certainly be many times larger than a byte.
    int r = 0, g = 0, b = 0;

    /* The skeletalSamplePosition defines the location of the skeletal position for which we
     * desire a color sample from the RGB colorMap. As we are taking a vertical color sample we
     * initialize the skeletalSamplePosition as being equivalent to the base of the user's torso
     * and merely modify the Y value for each new color sample. */
    SkeletonPoint skeletalSamplePosition = new SkeletonPoint(); // New, as opposed to pointing at torsoBottom directly, to avoid reference issues.
    skeletalSamplePosition.X = torsoBottom.X;
    skeletalSamplePosition.Y = torsoBottom.Y;
    skeletalSamplePosition.Z = torsoBottom.Z;

    // A torso at the exact origin means we were handed no valid skeletal data to sample from.
    if (skeletalSamplePosition.X == 0 && skeletalSamplePosition.Y == 0 && skeletalSamplePosition.Z == 0)
    {
        return new Color();
    }

    // Defines the location of the color sample within the RGB space.
    ColorImagePoint colorLocation;

    // The number of color samples taken from the user. Used to average our RGB values into an
    // average color.
    int numColorSamples = 0;

    // i is the distance of the current color sample from the bottom of the user's torso,
    // advanced one millimeter at a time.
    for (float i = 0; i < metricTorsoLength; i += MILLIMETER)
    {
        // Shift the source of the sample upwards by the value of i.
        skeletalSamplePosition.Y = torsoBottom.Y + i;

        // Convert our skeletal position to color coordinates.
        colorLocation = kSensor.CoordinateMapper.MapSkeletonPointToColorPoint(
            skeletalSamplePosition, ColorImageFormat.RgbResolution640x480Fps30);

        // Ensure that the location of our color sample is a valid index. If the person moves
        // offscreen while we're taking the sample then the colorLocation will contain invalid
        // negative values, and we skip the sample rather than averaging in black.
        if (colorLocation.X > 0 && colorLocation.Y > 0)
        {
            // Extract the color sample from the RGB image.
            Color userSampleColor = colorMap[colorLocation.X + colorLocation.Y * width];

            // Sum the color sample data for later averaging.
            r += userSampleColor.R;
            g += userSampleColor.G;
            b += userSampleColor.B;

            // We have now taken a color sample. Let's maintain our invariant.
            numColorSamples++;
        }
    } // end for

    /* Average the running sum of RGB values taken from our color sampling.
     * If we were passed invalid torso data then we will not have taken any color samples,
     * so we need to account for division by zero. Unless you want to be a dick, then by all
     * means, delete this if-condition. */
    if (numColorSamples != 0)
    {
        userColorAverage.R = (byte)(r / numColorSamples);
        userColorAverage.G = (byte)(g / numColorSamples);
        userColorAverage.B = (byte)(b / numColorSamples);
    } // end if

    // Determine the color of the area surrounding the user.

    // Retrieve the position of the user's head, as we can use its Y coordinate to define the
    // upper edge of the person.
    Joint personTopJoint = user.skeletonData.getHeadJoint();
    SkeletonPoint personTopPos = personTopJoint.Position;

    // Retrieve the position of the user's left shoulder, as we can use its X coordinate to
    // define the outer edge of the person.
    Joint leftShoulderJoint = user.skeletonData.getLeftShoulderJoint();
    SkeletonPoint leftShoulderPos = leftShoulderJoint.Position;

    // We define the top bound of the person to be excluded as being the height of the person
    // shifted over to their far left-hand side, so we create a faux SkeletonPoint that
    // amalgamates the head and the shoulder.
    SkeletonPoint personTopBoundPos = new SkeletonPoint();
    personTopBoundPos.X = leftShoulderPos.X;
    personTopBoundPos.Y = personTopPos.Y;
    personTopBoundPos.Z = personTopPos.Z;

    // We define the bottom bound of the person to be excluded as being their right foot, as
    // this is catty-corner to our upper bound, implying a rectangle that surrounds the person.
    Joint personBottomBoundJoint = user.skeletonData.getRightFootJoint();
    SkeletonPoint personBottomBoundPos = personBottomBoundJoint.Position;

    // Convert the skeletal location data to color location data so that our locations
    // correspond to the colorMap that we are working with.
    ColorImagePoint topExclusionBound = kSensor.CoordinateMapper.MapSkeletonPointToColorPoint(
        personTopBoundPos, ColorImageFormat.RgbResolution640x480Fps30);
    ColorImagePoint bottomExclusionBound = kSensor.CoordinateMapper.MapSkeletonPointToColorPoint(
        personBottomBoundPos, ColorImageFormat.RgbResolution640x480Fps30);

    int numOuterColorSamples = 0;
    int currY;
    int currX;
    bool isOuterColor = true;
    r = 0;
    g = 0;
    b = 0;

    for (int j = 0; j < colorMap.Length; j++)
    {
        // The colorMap is row-major, so the row is j / width and the column is j % width.
        currY = j / width;

        // If j currently identifies a point within the exclusion bounds on the Y-axis...
        if (currY >= topExclusionBound.Y && currY <= bottomExclusionBound.Y)
        {
            // Then it is possible that the color data corresponding to j is within our
            // exclusion zone. We'll determine j's X coordinate and compare it to the
            // exclusion bounds.
            currX = j % width;

            if (currX >= topExclusionBound.X && currX <= bottomExclusionBound.X)
            {
                isOuterColor = false;
            }
            else
            {
                isOuterColor = true;
            }
        }
        else
        {
            isOuterColor = true;
        }

        if (isOuterColor)
        {
            Color outerSampleColor = colorMap[j];

            // Sum the color sample data for later averaging.
            r += outerSampleColor.R;
            g += outerSampleColor.G;
            b += outerSampleColor.B;

            // We have now taken a color sample. Let's maintain our invariant.
            numOuterColorSamples++;
        }
    } // end for-loop

    Color outerColorAverage = new Color();

    /* Average the running sum of RGB values taken from our color sampling.
     * If we were passed invalid torso data then we will not have taken any color samples,
     * so we need to account for division by zero. Unless you want to be a dick, then by all
     * means, delete this if-condition. */
    if (numOuterColorSamples != 0)
    {
        outerColorAverage.R = (byte)(r / numOuterColorSamples);
        outerColorAverage.G = (byte)(g / numOuterColorSamples);
        outerColorAverage.B = (byte)(b / numOuterColorSamples);
    } // end if

    int colorContrast = computerContrast(outerColorAverage, userColorAverage);
    colorycontrasty = colorContrast;

    // If the contrast between the user and the surrounding area is high...
    if (colorContrast > 58)
    {
        // Then we only need to brighten the resulting color by a small amount.
        userColorAverage = brightenColor(userColorAverage, SLIGHTLY_BRIGHTER);
    }
    // If the contrast between the user and the surrounding area is low...
    else
    {
        // Then we need to brighten the resulting color by a non-trivial amount.
        userColorAverage = brightenColor(userColorAverage, SIGNIFICANTLY_BRIGHTER);
    }

    //userColorAverage = saturateColor(userColorAverage);
    // HSLColor hslColor = new HSLColor(colorAverage.R, colorAverage.G, colorAverage.B);
    // hslColor.Luminosity *= 0.1;
    // hslColor.Saturation *= 0.8;

    return userColorAverage;
}
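/* Neither computerContrast nor brightenColor appears in this section. The sketches below are
 * assumptions, not the original implementations: computerContrast is assumed to return the
 * difference in perceived brightness between two colors (which would make the > 58 threshold
 * above a brightness cutoff), and brightenColor is assumed to blend a color toward white by a
 * factor such as SLIGHTLY_BRIGHTER or SIGNIFICANTLY_BRIGHTER in [0, 1]. */
private static int computerContrast(Color a, Color b)
{
    // Perceived brightness per the common luma weighting, yielding a value in 0-255.
    int brightnessA = (299 * a.R + 587 * a.G + 114 * a.B) / 1000;
    int brightnessB = (299 * b.R + 587 * b.G + 114 * b.B) / 1000;
    return Math.Abs(brightnessA - brightnessB);
}

private static Color brightenColor(Color color, float amount)
{
    // Linearly interpolate each channel toward white; amount = 0 leaves the color
    // unchanged, amount = 1 yields pure white.
    return new Color(
        (int)(color.R + (255 - color.R) * amount),
        (int)(color.G + (255 - color.G) * amount),
        (int)(color.B + (255 - color.B) * amount));
}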
private void updateUserColor(Person person)
{
    if (mostRecentFrame != null && mostRecentColorMap.Length != 0)
    {
        TorsoData curr = new TorsoData(person.torsoTop, person.torsoBottom);

        // A torso at the exact origin means we have no valid skeletal data to sample from.
        if (!(curr.torsoTop.X == 0 && curr.torsoTop.Y == 0 && curr.torsoTop.Z == 0))
        {
            person.color = ColorUtility.ComputeUserColor(person, mostRecentFrame.Width,
                mostRecentColorMap, kinectSensor);
        }
    }
}
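/* TorsoData isn't defined in this section; a minimal sketch consistent with how it is
 * constructed above (purely an assumption about its shape): */
public struct TorsoData
{
    public SkeletonPoint torsoTop;    // Skeletal position of the top of the torso.
    public SkeletonPoint torsoBottom; // Skeletal position of the base of the torso.

    public TorsoData(SkeletonPoint torsoTop, SkeletonPoint torsoBottom)
    {
        this.torsoTop = torsoTop;
        this.torsoBottom = torsoBottom;
    }
}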
private Person updatePerson(SkeletonWrapper skeletonWrap, Person person, bool isGhost)
{
    // Retrieve hand data.
    Joint rightHand = skeletonWrap.getRightHandJoint();
    Joint leftHand = skeletonWrap.getLeftHandJoint();

    // Store retrieved data.
    if (person == null)
    {
        person = new Person(skeletonWrap, isGhost);
    }
    else
    {
        person.updateSkeletonData(skeletonWrap);
    }

    // Map the skeletal hand positions into depth-space coordinates.
    person.rightHandLocation = this.kinectSensor.CoordinateMapper.MapSkeletonPointToDepthPoint(
        person.rightHandPosition, this.kinectSensor.DepthStream.Format);
    person.leftHandLocation = this.kinectSensor.CoordinateMapper.MapSkeletonPointToDepthPoint(
        person.leftHandPosition, this.kinectSensor.DepthStream.Format);

    //fileWriter.writeLine(file, timestamp, x, y, z)

    // Derive each hand's radius from its depth (millimeters).
    person.setRightHandRadius(person.rightHandLocation.Depth / 60);
    person.setLeftHandRadius(person.leftHandLocation.Depth / 60);

    // Let every button know where the users' hands are now.
    Button b;
    for (int x = 0; x < buttonsCollection.Count; x++)
    {
        b = (Button)buttonsCollection[x];
        if (b != null)
        {
            b.UpdateHands(users);
        }
    }

    //if (tutorialButton != null)
    //{
    //    tutorialButton.UpdateHands(users);
    //}

    // Update user color.
    if (isGhost == false)
    {
        updateUserColor(person);
    }

    // Update the gesture detectors of the user's physical state.
    //waveGestureDetector.Update(playerSkeleton, i);
    scissorGestureDetector.update(person);
    pressGestureDetector.Update(person);

    return person;
}
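/* A sketch of how updatePerson might be driven once per frame (an assumption; only updatePerson,
 * SkeletonWrapper, and the users array come from this source, while the frame plumbing is
 * standard Kinect v1 SDK usage): */
private void SensorSkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    using (SkeletonFrame frame = e.OpenSkeletonFrame())
    {
        if (frame == null)
        {
            return;
        }

        Skeleton[] skeletons = new Skeleton[frame.SkeletonArrayLength];
        frame.CopySkeletonDataTo(skeletons);

        int slot = 0;
        foreach (Skeleton skeleton in skeletons)
        {
            if (skeleton.TrackingState == SkeletonTrackingState.Tracked)
            {
                // Wrap the raw skeleton and refresh (or create) the Person in this slot.
                users[slot] = updatePerson(new SkeletonWrapper(skeleton), users[slot], false);
                slot++;
            }
        }
    }
}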
public void updateGhost(SkeletonWrapper skel)
{
    if (skel == null)
    {
        ghostUser = null;
        return;
    }

    if (ghostUser == null)
    {
        ghostUser = new Person(skel, GHOST_USER_COLOR, true);
    }

    updatePerson(skel, ghostUser, true);
}
public Person[] getUsers()
{
    if (users != null)
    {
        // Count the leading non-null entries without running off the end of the array.
        int i = 0;
        while (i < users.Length && users[i] != null)
        {
            i++;
        }

        int activeGhost = 0;
        if (ghostUser != null)
        {
            activeGhost++;
        }

        Person[] activeUsers = new Person[i + activeGhost];
        for (int j = 0; j < i; j++)
        {
            activeUsers[j] = users[j];
        }

        // The ghost, if present, always occupies the final slot.
        if (activeGhost > 0)
        {
            activeUsers[activeUsers.Length - 1] = ghostUser;
        }

        return activeUsers;
    }

    return new Person[0];
}
public void UpdateHands(Person[] newHands)
{
    if (newHands != null)
    {
        this.people = newHands;
    }
}
private void TrackGesture(Person person, ref ScissorGestureTracker gestureTracker)
{
    if (person.leftHand == null || person.rightHand == null)
    {
        gestureTracker.reset();
        return;
    }

    Hand leftHand = person.leftHand;
    Hand rightHand = person.rightHand;

    // The relative heading describes how the left hand is moving with respect to the right.
    Vector2 leftHandVelocity = leftHand.getHeading();
    Vector2 rightHandVelocity = rightHand.getHeading();
    Vector2 relativeHeading = Vector2.Subtract(leftHandVelocity, rightHandVelocity);
    relativeHeading = Vector2.Normalize(relativeHeading);

    DotNET.Point leftHandPosition = leftHand.getLocation();
    DotNET.Point rightHandPosition = rightHand.getLocation();
    DotNET.Vector tempVec = DotNET.Point.Subtract(rightHandPosition, leftHandPosition);
    Vector2 leftToRight = new Vector2((float)tempVec.X, (float)tempVec.Y);

    // A positive projection of the relative heading onto the left-to-right axis means the
    // hands are closing on each other; otherwise they are separating.
    double dotProduct = Vector2.Dot(relativeHeading, leftToRight);

    if (dotProduct > 0)
    {
        gestureTracker.currState = ScissorGestureState.Converging;
    }
    else
    {
        gestureTracker.currState = ScissorGestureState.Diverging;
    }

    // If the user's hands are converging...
    if (gestureTracker.currState == ScissorGestureState.Converging)
    {
        double leftHandRadius = leftHand.getAgentRadius();
        double rightHandRadius = rightHand.getAgentRadius();
        double minContactRadius = (leftHandRadius + rightHandRadius) * CONTACT_SCALE;

        // Check if they're proximal, i.e. within the scaled contact radius of one another.
        if (leftToRight.Length() <= minContactRadius)
        {
            gestureTracker.currState = ScissorGestureState.Proximal;
        }
    }

    if (gestureTracker.currState == ScissorGestureState.Proximal)
    {
        // Send the closing-scissor message.
        if (GestureDetected != null)
        {
            GestureDetected(this, new ScissorGestureEventArgs(person, false, CONTACT_SCALE));
        }
    }
    else if (gestureTracker.currState == ScissorGestureState.Diverging)
    {
        if (GestureDetected != null)
        {
            GestureDetected(this, new ScissorGestureEventArgs(person, true, CONTACT_SCALE));
        }
    }
}
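/* Worked example of the convergence test above (illustrative numbers only):
 * left hand moving right at 1 px/frame, right hand moving left at 1 px/frame, with the
 * right hand 100 px to the right of the left hand:
 *     leftHandVelocity  = ( 1, 0)
 *     rightHandVelocity = (-1, 0)
 *     relativeHeading   = leftHandVelocity - rightHandVelocity = (2, 0) -> normalized (1, 0)
 *     leftToRight       = (100, 0)
 *     dot((1, 0), (100, 0)) = 100 > 0  => Converging
 * Reversing both velocities gives relativeHeading (-1, 0) and a dot product of -100, so the
 * hands are Diverging. If converging hands close to within
 * (leftHandRadius + rightHandRadius) * CONTACT_SCALE of each other, the state is promoted to
 * Proximal and the scissor "cut" event fires. */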
public ScissorGestureEventArgs(Person user, bool diverging, double radiusScale)
{
    this.user = user;
    this.diverging = diverging;
    this.radiusScale = radiusScale;
}