/// <summary>
/// Cleanup handler invoked when the main window closes: stops the Kinect
/// sensor (if one was acquired), shuts down the serial link, and then
/// terminates the process.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event data (unused).</param>
private void WindowClosed(object sender, EventArgs e)
{
    // Stop streaming from the Kinect sensor, if we ever obtained one.
    KinectSensor activeSensor = this.sensor;
    if (activeSensor != null)
    {
        activeSensor.Stop();
    }

    // Close the SerialSender communication channel.
    SerialSender.SerialSenderShutdown();

    // Hard-exit so no lingering background threads keep the process alive.
    Environment.Exit(0);
}
//////////////////////////////////
////// Main Window Elements //////
//////////////////////////////////

/// <summary>
/// Called when each depth frame is ready.
/// Does the necessary processing to get our finger points and predict gestures.
/// Most of the interesting stuff happens in here:
///  1. converts the frame to an EmguCV image and extracts finger/palm positions,
///  2. renders the depth frame as a grayscale BGR32 bitmap,
///  3. when enough frames are buffered, classifies the gesture via k-nearest
///     neighbours and (optionally) transmits it over the serial link,
///  4. trims the frame buffer so it never grows past <c>BufferSize</c>.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Frame-ready event data; the depth frame is opened from it.</param>
private void GestureDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
    // Calculating and storing finger and palm positions.
    Image<Gray, Byte> emguImg = convertToEmgu();
    CalculateAndStorePos(emguImg);

    // Show the processed EmguCV image in the UI.
    this.emguImage.Source = BitmapSourceConvert.ToBitmapSource(emguImg);

    // "using" ensures the frame is disposed even if processing throws.
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        // The frame can be null if we arrive late and it was already recycled.
        if (depthFrame != null)
        {
            // Copy the pixel data from the image to a temporary array.
            depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);

            // Get the min and max reliable depth for the current frame.
            // NOTE(review): these are currently unused — the range check below
            // was replaced by a hard-coded lower bound of 180 (see the
            // commented-out condition in the loop).
            int minDepth = depthFrame.MinDepth;
            int maxDepth = depthFrame.MaxDepth;

            // Convert the depth to RGB (grayscale written into a BGR32 buffer).
            int colorPixelIndex = 0;
            for (int i = 0; i < this.depthPixels.Length; ++i)
            {
                // NOTE(review): x and y are computed but never used — candidates
                // for removal.
                int x = i % this.sensor.DepthStream.FrameWidth;
                int y = (int)(i / this.sensor.DepthStream.FrameWidth);

                // Get the depth for this pixel.
                short depth = depthPixels[i].Depth;

                // To convert to a byte, we're discarding the most-significant
                // rather than least-significant bits.
                // We're preserving detail, although the intensity will "wrap."
                // Values outside the reliable depth range are mapped to 0 (black).
                // Note: Using conditionals in this loop could degrade performance.
                // Consider using a lookup table instead when writing production code.
                // See the KinectDepthViewer class used by the KinectExplorer sample
                // for a lookup table example.
                byte intensity = (byte)(0);
                //if (depth >= minDepth && depth <= threshDepth)
                // 180 appears to be a hand-tuned near-plane cutoff (presumably
                // millimeters, per the Kinect depth stream) — TODO confirm.
                if (depth >= 180 && depth <= threshDepth)
                {
                    intensity = (byte)(depth);
                }

                // Write out blue byte.
                this.depthcolorPixels[colorPixelIndex++] = intensity;
                // Write out green byte.
                this.depthcolorPixels[colorPixelIndex++] = intensity;
                // Write out red byte.
                this.depthcolorPixels[colorPixelIndex++] = intensity;
                // We're outputting BGR, the last byte in the 32 bits is unused
                // so skip it. If we were outputting BGRA, we would write alpha here.
                ++colorPixelIndex;
            }

            // If read is enabled: enough frames buffered and capture is on,
            // run gesture classification on the extracted feature vector.
            if (_video.Count > MinimumFrames && _capturing == true)
            {
                sample = new Matrix<float>(1, _dimension);

                // Features come back as a space-separated string of integers;
                // parse each one into the sample row (unparseable entries leave
                // the corresponding cell at its default value).
                string[] features = _dtw.ExtractFeatures().Split(' ');
                for (int i = 0; i < features.Length; i++)
                {
                    int featureIndex;
                    if (Int32.TryParse(features[i], out featureIndex))
                    {
                        sample[0, i] = (float)featureIndex;
                    }
                }

                // Classify the sample with k-nearest neighbours.
                Gestures recordedGesture = EmguCVKNearestNeighbors.Predict(sample);
                results.Text = "Recognised as: " + recordedGesture.ToString();

                // Armed + a real gesture (not the ready signal): transmit it,
                // disarm, and clear the visual "armed" border.
                if (SerialSender.GetSendState() && recordedGesture != Gestures.ReadySignal)
                {
                    SerialSender.SendGesture(recordedGesture, pulse);
                    SerialSender.SetSendState(false);
                    // Force the buffer reset below after a successful send.
                    recordedGesture = Gestures.None;
                    imageBorder.BorderThickness = new Thickness(0);
                }

                // Ready signal arms the sender and shows a thick border as
                // visual feedback.
                if (recordedGesture == Gestures.ReadySignal)
                {
                    imageBorder.BorderThickness = new Thickness(10);
                    SerialSender.SetSendState(true);
                }

                if (recordedGesture == Gestures.None)
                {
                    // There was no match so reset the buffer.
                    _video = new ArrayList();
                }
            }

            // Ensures that we remember only the last x frames.
            if (_video.Count > BufferSize)
            {
                // If we are currently capturing and we reach the maximum
                // buffer size then automatically store.
                if (_capturing)
                {
                    DtwStoreClick(null, null);
                }
                else
                {
                    // Remove the first 3 frames in the buffer (sliding window).
                    for (int i = 0; i < 3; ++i)
                    {
                        _video.RemoveAt(0);
                    }
                }
            }

            // Write the pixel data into our bitmap.
            // Stride is width * 4 bytes because the buffer is BGR32.
            this.depthBitmap.WritePixels(
                new Int32Rect(0, 0, this.depthBitmap.PixelWidth, this.depthBitmap.PixelHeight),
                this.depthcolorPixels,
                this.depthBitmap.PixelWidth * sizeof(int),
                0);
        }
    }
}