Example #1
        private void AllFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            //ColorImageFrame frame = e.OpenColorImageFrame();
            DepthImageFrame df = e.OpenDepthImageFrame();



            if (df != null)
            {
                this.FInvalidate = true;
                //this.frameindex = frame.FrameNumber;

                lock (m_colorlock)
                {
                    df.CopyPixelDataTo(this.depthimage);
                    //frame.CopyPixelDataTo(this.colorimage);
                    //Marshal.Copy(frame..Image.Bits, 0, this.colorimage, 640 * 480 * 4);
                }

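                // Map each 320x240 depth pixel to its corresponding 640x480 color-image coordinate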
                this.runtime.Runtime.MapDepthFrameToColorFrame(DepthImageFormat.Resolution320x240Fps30, this.depthimage, ColorImageFormat.RgbResolution640x480Fps30, this.cp);

                lock (m_colorlock)
                {
                    for (int i = 0; i < this.cp.Length; i++)
                    {
                        this.colorimage[i * 2]     = (float)VMath.Map(cp[i].X, 0, 640, 0, 1, TMapMode.Clamp);
                        this.colorimage[i * 2 + 1] = (float)VMath.Map(cp[i].Y, 0, 480, 0, 1, TMapMode.Clamp);
                    }
                }


                this.FInvalidate = true;
                this.frameindex  = df.FrameNumber;

                df.Dispose();
            }
        }
        private void PollDepthImageStream()
        {
            if (this._Kinect == null)
            {
                // no kinect
            }

            else
            {
                try
                {
                    using (DepthImageFrame frame = this._Kinect.DepthStream.OpenNextFrame(100))
                    {
                        if (frame != null)
                        {
                            frame.CopyPixelDataTo(this._DepthImagePixelData);

                            if (!lowResource)
                            {
                                this.DepthImageModified.Dispatcher.BeginInvoke(new Action(() =>
                                {
                                    this._DepthImageBitmap.WritePixels(this._DepthImageBitmapRect, this._DepthImagePixelData, this._DepthImageStride, 0);
                                    ModifyDepthImage(frame, _DepthImagePixelData);
                                }));
                            }
                            else
                            {
                                ModifyDepthImage(frame, _DepthImagePixelData);
                            }
                        }
                    }
                }
                catch (Exception ex)
                {
                    // Swallow polling errors so a single bad frame does not stop the polling loop
                }
            }
        }
Example #3
        public void Record(DepthImageFrame frame)
        {
            // Header

            writer.Write((int)KinectRecordOptions.Depth);

            // Data

            TimeSpan timeSpan = DateTime.Now.Subtract(referenceTime);

            referenceTime = DateTime.Now;

            writer.Write((long)timeSpan.TotalMilliseconds);

            writer.Write(frame.BytesPerPixel);

            writer.Write((int)frame.Format);

            writer.Write(frame.Width);

            writer.Write(frame.Height);

            writer.Write(frame.FrameNumber);

            // Bytes

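            // CopyPixelDataTo needs a short buffer sized to the frame's PixelDataLength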
            short[] shorts = new short[frame.PixelDataLength];

            frame.CopyPixelDataTo(shorts);

            writer.Write(shorts.Length);

            foreach (short s in shorts)
            {
                writer.Write(s);
            }
        }
Example #4
        public void Record(DepthImageFrame frame)
        {
            writer.Write((int)FrameType.Depth);

            var timeSpan = DateTime.Now.Subtract(referenceTime);

            referenceTime = DateTime.Now;
            writer.Write((long)timeSpan.TotalMilliseconds);
            writer.Write(frame.BytesPerPixel);
            writer.Write((int)frame.Format);
            writer.Write(frame.Width);
            writer.Write(frame.Height);

            writer.Write(frame.FrameNumber);

            var shorts = new short[frame.PixelDataLength];

            frame.CopyPixelDataTo(shorts);
            writer.Write(shorts.Length);
            foreach (var s in shorts)
            {
                writer.Write(s);
            }
        }
Example #5
        void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame imageFrame = e.OpenDepthImageFrame())
            {
                if (imageFrame != null)
                {
                    depthMap = new Texture2D(Game.GraphicsDevice, imageFrame.Width, imageFrame.Height, false, SurfaceFormat.Color);

                    short[] data      = new short[imageFrame.PixelDataLength];
                    Color[] depthData = new Color[imageFrame.Width * imageFrame.Height];

                    imageFrame.CopyPixelDataTo(data);

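                    // Convert the raw depth samples into displayable Color values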
                    ConvertDepthFrame(data, Sensor.DepthStream, ref depthData);


                    depthMap.SetData <Color>(depthData);
                }
                else
                {
                    // imageFrame is null because the request did not arrive in time
                }
            }
        }
        void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            // Display the infrared image
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame()) {
                if (colorFrame != null)
                {
                    // Get the infrared image data
                    byte[] color = new byte[colorFrame.PixelDataLength];
                    colorFrame.CopyPixelDataTo(color);

                    // Display the infrared image (16-bit grayscale)
                    imageInfrared.Source = BitmapSource.Create(colorFrame.Width, colorFrame.Height,
                                                               96, 96, PixelFormats.Gray16, null, color,
                                                               colorFrame.Width * colorFrame.BytesPerPixel);
                }
            }

            // Display the depth data
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) {
                if (depthFrame != null)
                {
                    // Convert to a viewable image
                    short[] depth = new short[depthFrame.PixelDataLength];
                    depthFrame.CopyPixelDataTo(depth);

                    for (int i = 0; i < depth.Length; i++)
                    {
                        depth[i] = (short)~depth[i];
                    }

                    imageDepth.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height,
                                                            96, 96, PixelFormats.Gray16, null, depth,
                                                            depthFrame.Width * depthFrame.BytesPerPixel);
                }
            }
        }
        public void DisplayColorImageAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                Console.WriteLine("here");
                if (colorFrame == null)
                {
                    return;
                }

                byte[] pixels = new byte[colorFrame.PixelDataLength];
                colorFrame.CopyPixelDataTo(pixels);
                int stride = colorFrame.Width * 4;
                debugImage.Source = BitmapSource.Create(colorFrame.Width, colorFrame.Height, 96, 96, PixelFormats.Bgr32, null, pixels, stride);

                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {
                        short[] rawDepthData = new short[depthFrame.PixelDataLength];
                        depthFrame.CopyPixelDataTo(rawDepthData);

                        // Shift out the player-index bits to get the distance (in mm) at row 200, column 300
                        int depth = rawDepthData[200 * depthFrame.Width + 300] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                        threshold = depth;
                    }
                }
            }
        }
Example #8
        void KinectDevice_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (depthFrame != null)
                {
                    //Using standard SDK
                    this.depthPixelData = new short[depthFrame.PixelDataLength];
                    this.depthFrame32   = new byte[depthFrame.Width * depthFrame.Height * 4];
                    depthFrame.CopyPixelDataTo(this.depthPixelData);
                    byte[] convertedDepthBits = this.ConvertDepthFrame(this.depthPixelData, ((KinectSensor)sender).DepthStream);

                    this.outputBitmap = new WriteableBitmap(depthFrame.Width, depthFrame.Height, 96, 96, PixelFormats.Bgr32, null);

                    this.outputBitmap.WritePixels(
                        new Int32Rect(0, 0, depthFrame.Width, depthFrame.Height),
                        convertedDepthBits,
                        depthFrame.Width * 4,
                        0);

                    this.kinectDepthImage.Source = this.outputBitmap;
                }
            }
        }
Example #9
        public static BitmapSource SliceDepthImage(this DepthImageFrame image, int min = 20, int max = 1000)
        {
            int width  = image.Width;
            int height = image.Height;

            //var depthFrame = image.Image.Bits;
            short[] rawDepthData = new short[image.PixelDataLength];
            image.CopyPixelDataTo(rawDepthData);

            var pixels = new byte[height * width * 4];

            const int BlueIndex  = 0;
            const int GreenIndex = 1;
            const int RedIndex   = 2;

            for (int depthIndex = 0, colorIndex = 0;
                 depthIndex < rawDepthData.Length && colorIndex < pixels.Length;
                 depthIndex++, colorIndex += 4)
            {
                // Calculate the distance represented by the two depth bytes
                int depth = rawDepthData[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                // Map the distance to an intensity that can be represented in RGB
                var intensity = CalculateIntensityFromDistance(depth);

                if (depth > min && depth < max)
                {
                    // Apply the intensity to the color channels
                    pixels[colorIndex + BlueIndex]  = intensity; //blue
                    pixels[colorIndex + GreenIndex] = intensity; //green
                    pixels[colorIndex + RedIndex]   = intensity; //red
                }
            }

            return(BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixels, width * 4));
        }
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            blobCount = 0;

            var bCw = new BackgroundWorker();

            var colorRange         = (int)sliderColorRange.Value;
            var sliderMaxValue     = (int)sliderMax.Value;
            var sliderMinSizeValue = sliderMinSize.Value;
            var sliderMaxSizeValue = sliderMaxSize.Value;


            bCw.DoWork += (s, a) =>
            {
                using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
                    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                    {
                        if (colorFrame != null && depthFrame != null)
                        {
                            short[] depthPixels;
                            byte[]  colorPixels;
                            byte[]  thesPixels;

                            colorPixels = new byte[this.sensor.ColorStream.FramePixelDataLength];
                            thesPixels  = new byte[this.sensor.ColorStream.FramePixelDataLength];
                            depthPixels = new short[this.sensor.DepthStream.FramePixelDataLength];

                            BitmapSource colorBmp = null;
                            BitmapSource thesBmp  = null;
                            // BitmapSource depthBmp = null;
                            depthFrame.CopyPixelDataTo(depthPixels);
                            //var greyPixels = new byte[depthFrame.Height * depthFrame.Width * 4];
                            colorFrame.CopyPixelDataTo(colorPixels);
                            colorFrame.CopyPixelDataTo(thesPixels);
                            //depthBmp = BitmapSource.Create(depthFrame.Width, depthFrame.Height, 96, 96, PixelFormats.Bgr32, null, greyPixels, depthFrame.Width * 4);
                            colorBmp = BitmapSource.Create(colorFrame.Width, colorFrame.Height, 96, 96, PixelFormats.Bgr32, null, colorPixels, colorFrame.Width * 4);
                            Image <Bgr, Byte> returnImage = new Image <Bgr, byte>(colorBmp.ToBitmap());

                            const int BlueIndex  = 0;
                            const int GreenIndex = 1;
                            const int RedIndex   = 2;

                            double minDepth = int.MaxValue;
                            System.Drawing.Point minPoint = new System.Drawing.Point();

                            for (int depthIndex = 0, colorIndex = 0;
                                 depthIndex < depthPixels.Length && colorIndex < colorPixels.Length;
                                 depthIndex++, colorIndex += 4)
                            {
                                // Calculate the distance represented by the two depth bytes
                                int depth = depthPixels[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                                int y     = colorIndex / (colorFrame.Width * 4);
                                int x     = (colorIndex - (y * colorFrame.Width * 4)) / 4;
                                if (minDepth > depth && depth > 0)
                                {
                                    minDepth = depth;

                                    minPoint = new System.Drawing.Point(x, y);
                                }

                                // Apply the intensity to the color channels
                                if (depth > sliderMaxValue)
                                {
                                    thesPixels[colorIndex + BlueIndex]  = 0; //blue
                                    thesPixels[colorIndex + GreenIndex] = 0; //green
                                    thesPixels[colorIndex + RedIndex]   = 0; //red
                                }
                            }

                            thesBmp = BitmapSource.Create(colorFrame.Width, colorFrame.Height, 96, 96, PixelFormats.Bgr32, null, thesPixels, colorFrame.Width * 4);

                            Image <Hsv, Byte> openCVImg = new Image <Hsv, byte>(thesBmp.ToBitmap());
                            var smoothed = openCVImg.SmoothMedian(7);
                            //Increase saturation
                            smoothed[1] += 30;

                            //Get 10x10 pixel color sample from the nearest point
                            Image <Gray, Byte> averageMask = new Image <Gray, byte>(colorFrame.Width, colorFrame.Height, new Gray(0));
                            averageMask.Draw(new System.Drawing.Rectangle(minPoint.X - 5, minPoint.Y - 5, 10, 10), new Gray(255), -1);


                            //Make an HSV threshold mask
                            Image <Gray, Byte> theshold;

                            // 2. Obtain the 3 channels (hue, saturation and value) that compose the HSV image
                            Image <Gray, byte>[] channels = smoothed.Split();

                            try
                            {
                                var avgColor = channels[0].GetAverage(averageMask);
                                // 3. Remove all pixels from the hue channel that are outside avgColor ± colorRange
                                CvInvoke.cvInRangeS(channels[0], new Gray(avgColor.Intensity - colorRange).MCvScalar, new Gray(avgColor.Intensity + colorRange).MCvScalar, channels[0]);

                                // 4. Display the result
                                theshold = channels[0];
                            }
                            finally
                            {
                                channels[1].Dispose();
                                channels[2].Dispose();
                            }


                            ////Find blob
                            //using (MemStorage stor = new MemStorage())
                            //{
                            //    //Find contours with no holes try CV_RETR_EXTERNAL to find holes
                            //    Contour<System.Drawing.Point> contours = theshold.FindContours(
                            //     Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                            //     Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL ,
                            //     stor);

                            //    for (int i = 0; contours != null; contours = contours.HNext)
                            //    {
                            //        i++;

                            //        if ((contours.Area > Math.Pow(sliderMinSizeValue, 2)) && (contours.Area < Math.Pow(sliderMaxSizeValue, 2)))
                            //        {
                            //            MCvBox2D box = contours.GetMinAreaRect();
                            //            returnImage.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                            //            blobCount++;
                            //        }
                            //    }
                            //}

                            //get depthpoint data (point cloud)

                            //work out if cuboid by tracking point clouds

                            //draw yellow cross on nearest point
                            returnImage = theshold.Convert <Bgr, byte>();
                            returnImage.Draw(new Cross2DF(minPoint, 50, 50), new Bgr(System.Drawing.Color.Yellow), 4);

                            outImg.Dispatcher.BeginInvoke(new Action(() =>
                            {
                                this.outImg.Source = ImageHelpers.ToBitmapSource(returnImage);
                                txtBlobCount.Text  = string.Format("x:{0}, y:{1}", minPoint.X, minPoint.Y);
                            }));
                        }
                    }
            };

            bCw.RunWorkerAsync();
        }
Example #11
        //Gets depth info from the Kinect and converts it to a bitmap
        private void Ksensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            DepthImageFrame depthFrame = e.OpenDepthImageFrame();   //Open the current depth frame

            //Checks if there is a depthFrame
            if (depthFrame != null)
            {
                // Copy the pixel data from the image to a temporary array
                depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);

                // Get the min and max reliable depth for the current frame
                int minDepth = depthFrame.MinDepth;
                int maxDepth = depthFrame.MaxDepth;

                //Convert depth data to bitmapsource
                short[] pixelData = new short[depthFrame.PixelDataLength];
                depthFrame.CopyPixelDataTo(pixelData);

                BitmapSource bmap = BitmapSource.Create(
                    depthFrame.Width,
                    depthFrame.Height,
                    2, 2,
                    PixelFormats.Gray16, null,
                    pixelData,
                    depthFrame.Width * depthFrame.BytesPerPixel);

                double vertF = 571.401, horzF = 557.274; //Focal lengths

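                //Map every depth pixel to its color-space coordinate so the color-tracked points (XR/YR, XB/YB) can be located in depth space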
                ColorImagePoint[] color = new ColorImagePoint[depthFrame.PixelDataLength];
                ksensor.CoordinateMapper.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, this.depthPixels, ColorImageFormat.RgbResolution640x480Fps30, color);

                //Searches mapped RED coordinates
                for (k = 0; k < 640; ++k)
                {
                    if (color[k].X == XR)
                    {
                        break;
                    }
                }
                for (int h = k; h < depthFrame.PixelDataLength; h += 640)
                {
                    if (color[h].Y == YR)
                    {
                        if (h % 640 != 0)
                        {
                            XRMapped = h % 640;
                        }

                        YRMapped = (h - XR) / 640;

                        //Red coordinates
                        ZR = this.depthPixels[(640 - XRMapped) + (YRMapped * 640)].Depth;

                        Rxcoord = (ZR * (320 - XRMapped)) / horzF;
                        Rycoord = (ZR * (240 - YRMapped)) / vertF;

                        RCoordX.Content = Math.Round(Rxcoord);
                        RCoordY.Content = Math.Round(Rycoord);
                        RCoordZ.Content = ZR;
                        break;
                    }
                }

                //Searches mapped Blue coordinates
                for (j = 0; j < 640; ++j)
                {
                    if (color[j].X == XB)
                    {
                        break;
                    }
                }
                for (int h = j; h < depthFrame.PixelDataLength; h += 640)
                {
                    if (color[h].Y == YB)
                    {
                        if (h % 640 != 0)
                        {
                            XBMapped = h % 640;
                        }
                        YBMapped = (h - XB) / 640;

                        //Blue coordinates
                        ZB = this.depthPixels[(640 - XBMapped) + (YBMapped * 640)].Depth;

                        Bxcoord = (ZB * (320 - XBMapped)) / horzF;
                        Bycoord = (ZB * (240 - YBMapped)) / vertF;

                        BCoordX.Content = Math.Round(Bxcoord);
                        BCoordY.Content = Math.Round(Bycoord);
                        BCoordZ.Content = ZB;
                        break;
                    }
                }

                //Set stream to image
                Depthstream.Source = bmap;

                //Add points to imageviews for debugging
                Canvas1.Children.Clear();
                Canvas2.Children.Clear();

                System.Windows.Shapes.Ellipse DepthPointRed  = CreateEllipse.CircleRed();
                System.Windows.Shapes.Ellipse DepthPointBlue = CreateEllipse.CircleBlue();
                System.Windows.Shapes.Ellipse ColorPointRed  = CreateEllipse.CircleRed();
                System.Windows.Shapes.Ellipse ColorPointBlue = CreateEllipse.CircleBlue();

                Canvas2.Children.Add(ColorPointRed);
                Canvas2.Children.Add(ColorPointBlue);

                Canvas1.Children.Add(DepthPointRed);
                Canvas1.Children.Add(DepthPointBlue);

                DepthPointRed.SetValue(Canvas.LeftProperty, (depthFrame.Width - XRMapped - 3) * .6);
                DepthPointRed.SetValue(Canvas.TopProperty, (YRMapped - 3) * .6);

                DepthPointBlue.SetValue(Canvas.LeftProperty, (depthFrame.Width - XBMapped - 3) * .6);
                DepthPointBlue.SetValue(Canvas.TopProperty, (YBMapped - 3) * .6);

                ColorPointRed.SetValue(Canvas.LeftProperty, (depthFrame.Width - XR - 3) * .6);
                ColorPointRed.SetValue(Canvas.TopProperty, (YR - 3) * .6);

                ColorPointBlue.SetValue(Canvas.LeftProperty, (depthFrame.Width - XB - 3) * .6);
                ColorPointBlue.SetValue(Canvas.TopProperty, (YB - 3) * .6);

                //Cleanup
                depthFrame.Dispose();
                CoordinateFrameCalc();
            }
        }
Example #12
        } //end of CompositionTarget_Rendering()

        private WriteableBitmap PollDepth()
        {
            Bitmap bitmapDepth;

            if (this.Kinect != null)
            {
                DepthImageStream DepthStream = this.Kinect.DepthStream;
                this.DepthImagenBitmap  = new WriteableBitmap(DepthStream.FrameWidth, DepthStream.FrameHeight, 96, 96, PixelFormats.Bgr32, null);
                this.DepthImagenRect    = new Int32Rect(0, 0, DepthStream.FrameWidth, DepthStream.FrameHeight);
                this.DepthImagenStride  = DepthStream.FrameWidth * 4;
                this.DepthValores       = new short[DepthStream.FramePixelDataLength];
                this.DepthImagenPixeles = new byte[DepthStream.FramePixelDataLength * 4];

                try
                {
                    using (DepthImageFrame frame = this.Kinect.DepthStream.OpenNextFrame(100))
                    {
                        if (frame != null)
                        {
                            frame.CopyPixelDataTo(this.DepthValores);

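                            // Build a grayscale Bgr32 image; the distance sits in the upper 13 bits of each sample (hence the >> 3)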
                            int index = 0;
                            for (int i = 0; i < frame.PixelDataLength; i++)
                            {
                                int valorDistancia = DepthValores[i] >> 3;

                                if (valorDistancia == this.Kinect.DepthStream.UnknownDepth)
                                {
                                    DepthImagenPixeles[index]     = 0;
                                    DepthImagenPixeles[index + 1] = 0;
                                    DepthImagenPixeles[index + 2] = 0;
                                }
                                else if (valorDistancia == this.Kinect.DepthStream.TooFarDepth)
                                {
                                    DepthImagenPixeles[index]     = 0;
                                    DepthImagenPixeles[index + 1] = 0;
                                    DepthImagenPixeles[index + 2] = 0;
                                }
                                else
                                {
                                    byte byteDistancia = (byte)(255 - (valorDistancia >> 5));
                                    DepthImagenPixeles[index]     = byteDistancia;
                                    DepthImagenPixeles[index + 1] = byteDistancia;
                                    DepthImagenPixeles[index + 2] = byteDistancia;
                                }
                                index = index + 4;
                            }

                            this.DepthImagenBitmap.WritePixels(this.DepthImagenRect, this.DepthImagenPixeles, this.DepthImagenStride, 0);
                        }
                    }
                }
                catch
                {
                    MessageBox.Show("No se pueden leer los datos del sensor", "Error");
                }
            }

            bitmapDepth = convertWriteablebitmap(DepthImagenBitmap);
            Detection(bitmapDepth);

            return(DepthImagenBitmap);
        }//end of PollDepth()
Example #13
        void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame != null)
                {
                    if (pixelData == null)
                    {
                        pixelData = new byte[colorFrame.PixelDataLength];
                    }
                    colorFrame.CopyPixelDataTo(pixelData);

                    this.colorImageBitmap.WritePixels(this.colorImageBitmapRect, pixelData, this.colorImageStride, 0);
                }
            }


            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (depthFrame == null)
                {
                    return;
                }
                if (depthData == null)
                {
                    depthData = new short[depthFrame.PixelDataLength];
                }
                if (depthColorImage == null)
                {
                    depthColorImage = new byte[depthFrame.PixelDataLength * 4];
                }

                depthFrame.CopyPixelDataTo(depthData);
                int depthColorImagePos = 0;
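                // Convert each depth sample to an inverted grayscale value, expanded to 4 bytes per pixel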
                for (int depthPos = 0; depthPos < depthFrame.PixelDataLength; depthPos++)
                {
                    int depthVal = depthData[depthPos] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                    byte depthByte = (byte)(255 - (depthVal >> 4)); // invert so that nearer pixels appear brighter
                    depthColorImage[depthColorImagePos++] = depthByte;
                    depthColorImage[depthColorImagePos++] = depthByte;
                    depthColorImage[depthColorImagePos++] = depthByte;

                    //transparency
                    depthColorImagePos++;
                }



                this.depthImageBitmap.WritePixels(new Int32Rect(0, 0, depthFrame.Width, depthFrame.Height),
                                                  depthData, depthFrame.Width * 2, 0);
            }

            //handle skeleton data

            if (seatedModeEnabled)
            {
                kinect.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
            }
            else
            {
                kinect.SkeletonStream.TrackingMode = SkeletonTrackingMode.Default;
            }
            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame()) {
                if (skeletonFrame != null && this.skeletonData != null)
                {
                    skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                }



                //draw the output
                using (DrawingContext drawingContext = this.drawingGroupColor.Open()) {
                    //color stream output
                    drawingContext.DrawImage(this.colorImageBitmap, new Rect(0, 0, kinect.ColorStream.FrameWidth, kinect.ColorStream.FrameHeight));
                    //draw skeleton stream data
                    if (skeletonRenderEnabled == true)
                    {
                        DrawSkeletons(drawingContext);
                    }
                    //define the limited (clipped) area
                    this.drawingGroupColor.ClipGeometry = new RectangleGeometry(new Rect(0, 0, kinect.ColorStream.FrameWidth, kinect.ColorStream.FrameHeight));
                }
                using (DrawingContext drawingContext = this.drawingGroupDepth.Open()) {
                    //color stream output
                    drawingContext.DrawImage(this.depthImageBitmap, new Rect(0, 0, kinect.DepthStream.FrameWidth, kinect.DepthStream.FrameHeight));
                    //draw skeleton stream data
                    if (skeletonRenderEnabled == true)
                    {
                        DrawSkeletons(drawingContext);
                    }
                    //define the limited (clipped) area
                    this.drawingGroupDepth.ClipGeometry = new RectangleGeometry(new Rect(0, 0, kinect.DepthStream.FrameWidth, kinect.DepthStream.FrameHeight));
                }
            }
        }
Example #14
        void newsensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (colorframe = e.OpenColorImageFrame())
            {
                if (colorframe == null)
                {
                    return;
                }


                pixeldata = new byte[colorframe.PixelDataLength];
                colorframe.CopyPixelDataTo(pixeldata);
                source = BitmapSource.Create(colorframe.Width, colorframe.Height, 96.0, 96.0, PixelFormats.Bgr32, null, pixeldata, colorframe.Width * 4);

                image1.Source = source;
            }

            using (depthframe = e.OpenDepthImageFrame())
            {
                if (depthframe == null)
                {
                    return;
                }

                int    temp = 0, x = 0, y = 0;
                double angleX = 0, angleY = 0;
                string start;

                count       = new int[4000 * sizeof(double)];
                depthpixels = new short[depthframe.PixelDataLength];
                depthframe.CopyPixelDataTo(depthpixels);

                using (StreamReader reader = new StreamReader("kinectstart.txt"))
                {
                    start = reader.ReadLine();
                }
                if (start == "1")
                {
                    using (StreamWriter writer = new StreamWriter("image3Dkinect.txt"))
                    {
                        for (x = 0; x < depthframe.Width; x++)
                        {
                            for (y = 0; y < depthframe.Height; y++)
                            {
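                                // Project the pixel through the Kinect's field of view (the 57° and 43° factors) to estimate world X/Y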
                                temp   = GetPixelDepth(x, y, depthpixels, depthframe.Width);
                                angleX = (x - depthframe.Width / 2);
                                angleX = (angleX / depthframe.Width) * 57;
                                angleY = (y - depthframe.Height / 2);
                                angleY = (angleY / depthframe.Height) * 43;

                                writer.WriteLine((int)(temp * Math.Tan(angleX * Math.PI / 180)) + " " + (int)(temp * Math.Tan(angleY * Math.PI / 180)) + " " + temp.ToString());
                            }
                        }
                    }
                    using (StreamWriter writer = new StreamWriter("kinectstart.txt"))
                    {
                        writer.Write(0);
                    }
                }
                if (mode == "Continuos" || mode == "LimitsAuto")
                {
                    DrawObstacle(source, colorframe); //Draw closest obstacle in continuous frames
                }

                else
                {
                    float distance = 5000;
                    int   player = 0, playerdist = 0;
                    for (int i = 0; i < depthpixels.Length; i++)
                    {
                        player = depthpixels[i] & DepthImageFrame.PlayerIndexBitmask;
                        if (player > 0)
                        {
                            playerdist = depthpixels[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                        }
                        //gets the depth value
                        int depth = depthpixels[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                        if (depth > 0 && depth < distance)
                        {
                            distance = depth;
                        }
                        temp             = ((ushort)depthpixels[i]) >> 3;
                        distancepixel[i] = temp;
                        count[temp]++; //increments the counter corresponding to the distance of the current pixel
                    }


                    //float mindist = depthpixels.Min(element => Math.Abs(element));
                    //textBox1.Text = Convert.ToString(GetPixelDepth(depthframe.Width/2, depthframe.Height/2, depthpixels, depthframe.Width));
                    textBox2.Text = Convert.ToString(playerdist);
                }
            }
            using (SkeletonFrame SFrame = e.OpenSkeletonFrame())
            {
                /* if (SFrame == null)
                 * {
                 *   return;
                 * }
                 * Skeleton[] Skeletons = new Skeleton[SFrame.SkeletonArrayLength];
                 * SFrame.CopySkeletonDataTo(Skeletons);
                 * foreach (Skeleton S in Skeletons)
                 * {
                 *   if (S.TrackingState == SkeletonTrackingState.Tracked)
                 *   {
                 *       SkeletonPoint joint = S.Joints[JointType.HandLeft].Position;
                 *
                 *       ColorImagePoint Cloc = newsensor.MapSkeletonPointToColor(joint, ColorImageFormat.RgbResolution640x480Fps30);
                 *
                 *       textBox9.Text = Cloc.X.ToString();
                 *
                 *
                 *       if (Cloc.X > 150 && Cloc.X < 350)
                 *       {
                 *          // System.Windows.MessageBox.Show("Fired");
                 *
                 *
                 *           try
                 *           {
                 *               SlideShowWindow window = app.ActiveWindow.Presentation.SlideShowWindow;
                 *                if (Cloc.X > 200)
                 *                    window.View.Next();
                 *                else if(Cloc.X < 200)
                 *                    window.View.Previous();
                 *
                 *
                 *
                 *           }
                 *           catch (System.Runtime.InteropServices.COMException)
                 *           {
                 *
                 *
                 *           }
                 *       }
                 *
                 *       //temp = Cloc.X;
                 *
                 *   }
                 * }
                 * /*try
                 * {
                 *   SlideShowWindow window = app.ActiveWindow.Presentation.SlideShowWindow;
                 *
                 * }
                 * catch (System.Runtime.InteropServices.COMException)
                 * {
                 *
                 *
                 * }
                 *
                 */
                //Slides slides = app.ActiveWindow.Presentation.Slides;
                //System.Windows.MessageBox.Show(slides.Count.ToString());
            }
        }
Example #15
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

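                // Copy this frame's color, depth, and skeleton data into the reusable buffers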
                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                var i = 0;
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                        skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        // We want to keep a record of any skeleton, tracked or untracked.
                        if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                        {
                            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                        }

                        var position = skeleton.Position;
                        if (position.X != 0 && position.Y != 0 && position.Z != 0)
                        {
                            Console.WriteLine($"Face {i}: X {position.X}, Y {position.Y}, Z {position.Z}");
                            string text = position.X + "@x " + position.Y + "@y " + position.Z + "@z";
                            System.IO.File.WriteAllText(@"C:\Users\Corbin Pixels\Desktop\kinect\FaceTrackingBasics-WPF\coords.txt", text);
                        }
                        // Give each tracker the updated frame.
                        SkeletonFaceTracker skeletonFaceTracker;
                        if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                        {
                            skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                            skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        }
                    }
                    i += 1;
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
Example #16
        //***** rotateMotorThread() *****//
        // This is where the motor thread starts (we are doing this because we want to pass parameters to the thread)
        //static Thread rotateMotorThread(int angleRange, int sleepingDuration, int firstKinect)
        //{
        //    //Console.WriteLine("entered thread");
        //    var t = new Thread(() => rotateMotor(angleRange, sleepingDuration, firstKinect));
        //    t.Start();
        //    return t;
        //}


        //***** sensor_AllFramesReady() *****//
        // Called when depth and color frames are synchronized
        // This is where the data gets written
        static void _sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            if (!abortMotorThread)
            {
                DateTime startWritingDepth = DateTime.Now;

                //-------------------COLOR---------------------------------------//
                using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
                {
                    if (colorFrame == null)
                    {
                        return;
                    }
                    colorFrame.CopyPixelDataTo(colorPixels);

                    // ----------------------------------------> WRITE FRAME
                    colorWriter.Write(colorPixels);
                    colorWriter.Flush();
                    // ----------------------------------------> WRITE INFO FILE
                    colorInfoWriter.Write(frameOrderingNo);
                    colorInfoWriter.Write(" ");
                    colorInfoWriter.Write(colorFrame.Timestamp);
                    colorInfoWriter.Write(" ");
                    colorInfoWriter.Write(colorFrame.FrameNumber);
                    colorInfoWriter.Write(" ");
                    colorInfoWriter.Write(_sensor.ElevationAngle);
                    colorInfoWriter.Write(Environment.NewLine);
                    colorInfoWriter.Flush();
                }
                //----------------------------------------------------------------//

                //-------------------DEPTH---------------------------------------//
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame == null)
                    {
                        return;
                    }

                    depthFrame.CopyPixelDataTo(depthPixels);

                    _sensor.MapDepthFrameToColorFrame(depthFrame.Format, depthPixels, _sensor.ColorStream.Format, colorCoordinates); //mapdepthtocolor

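                    // Re-encode the 16-bit depth samples as chars so the whole frame can be written in one call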
                    depthChar = Array.ConvertAll(depthPixels, new Converter <short, char>(shortToChar));

                    // ----------------------------------------> WRITE FRAME
                    depthWriter.Write(depthChar);
                    depthWriter.Flush();
                    // ----------------------------------------> WRITE INFO FILE
                    depthInfoWriter.Write(frameOrderingNo);
                    depthInfoWriter.Write(" ");
                    depthInfoWriter.Write(depthFrame.Timestamp);
                    depthInfoWriter.Write(" ");
                    depthInfoWriter.Write(depthFrame.FrameNumber);
                    depthInfoWriter.Write(" ");
                    depthInfoWriter.Write(_sensor.ElevationAngle);
                    depthInfoWriter.Write(Environment.NewLine);
                    depthInfoWriter.Flush();
                }
                //----------------------------------------------------------------//
                frameOrderingNo++;
                calculateFps();

                //// Calculate time to write frame
                //DateTime stopWritingDepth = DateTime.Now;
                //TimeSpan diffDepth = stopWritingDepth.Subtract(startWritingDepth);
                //Console.WriteLine("Time to write one frame: " + diffDepth.TotalSeconds);
            }
        }
Example #17
        void KinectFaceNode_AllFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            if (face == null)
            {
                face = new FaceTracker(this.runtime.Runtime);
            }

            colorImageFrame = e.OpenColorImageFrame();
            depthImageFrame = e.OpenDepthImageFrame();
            skeletonFrame   = e.OpenSkeletonFrame();

            if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
            {
                return;
            }

            if (this.depthImage == null)
            {
                this.depthImage = new short[depthImageFrame.PixelDataLength];
            }

            if (this.colorImage == null)
            {
                this.colorImage = new byte[colorImageFrame.PixelDataLength];
            }

            if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
            {
                this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
            }

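            // Copy the color, depth, and skeleton data into the buffers allocated above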
            colorImageFrame.CopyPixelDataTo(this.colorImage);
            depthImageFrame.CopyPixelDataTo(this.depthImage);
            skeletonFrame.CopySkeletonDataTo(this.skeletonData);

            foreach (Skeleton skeleton in this.skeletonData)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of any skeleton, tracked or untracked.
                    if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(this.runtime.Runtime, ColorImageFormat.RgbResolution640x480Fps30, colorImage, DepthImageFormat.Resolution320x240Fps30, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }
            }

            this.RemoveOldTrackers(skeletonFrame.FrameNumber);

            colorImageFrame.Dispose();
            depthImageFrame.Dispose();
            skeletonFrame.Dispose();

            this.FInvalidate = true;
        }
Example #18
        private void DepthImageReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            int  imageWidth    = 0;
            int  imageHeight   = 0;
            bool haveNewFormat = false;

            using (DepthImageFrame imageFrame = e.OpenDepthImageFrame())
            {
                if (imageFrame != null)
                {
                    imageWidth  = imageFrame.Width;
                    imageHeight = imageFrame.Height;

                    // We need to detect if the format has changed.
                    haveNewFormat = this.lastImageFormat != imageFrame.Format;

                    if (haveNewFormat)
                    {
                        this.pixelData       = new short[imageFrame.PixelDataLength];
                        this.depthFrame32    = new byte[imageFrame.Width * imageFrame.Height * Bgr32BytesPerPixel];
                        this.lastImageFormat = imageFrame.Format;

                        // We also need to reallocate the outputBitmap, but WriteableBitmap has
                        // thread affinity based on the allocating thread.  Since we want this to
                        // be displayed in the UI, we need to do this allocation on the UI thread (below).
                    }

                    imageFrame.CopyPixelDataTo(this.pixelData);
                }
            }

            // Did we get a depth frame?
            if (imageWidth != 0)
            {
                this.ConvertDepthFrame(this.pixelData, ((KinectSensor)sender).DepthStream);

                // The images are converted, update the UI on the UI thread.
                // We use Invoke here instead of BeginInvoke so that the processing frame is blocked from overwriting
                // this.pixelData and this.depthFrame32.
                this.Dispatcher.Invoke((Action)(() =>
                {
                    if (haveNewFormat)
                    {
                        // A WriteableBitmap is a WPF construct that enables resetting the Bits of the image.
                        // This is more efficient than creating a new Bitmap every frame.
                        this.outputBitmap = new WriteableBitmap(
                            imageWidth,
                            imageHeight,
                            96,     // DpiX
                            96,     // DpiY
                            PixelFormats.Bgr32,
                            null);

                        this.kinectDepthImage.Source = this.outputBitmap;
                    }

                    this.outputBitmap.WritePixels(
                        new Int32Rect(0, 0, imageWidth, imageHeight),
                        this.depthFrame32,
                        imageWidth * Bgr32BytesPerPixel,
                        0);

                    UpdateFrameRate();
                }));
            }
        }
Example #19
        /// <summary>
        /// Gets the color pixel data with distance.
        /// </summary>
        /// <param name="depthFrame">The depth frame.</param>

        /*private void GetColorPixelDataWithDistance(short[] depthFrame)
         * {
         *  for (int depthIndex = 0, colorIndex = 0; depthIndex < depthFrame.Length && colorIndex < this.depth32.Length; depthIndex++, colorIndex += 4)
         *  {
         *      // Calculate the depth distance
         *      int distance = depthFrame[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
         *      // Colorize pixels for a range of distance
         *      if (distance <= 1000)
         *      {
         *          depth32[colorIndex + 2] = 115; // red
         *          depth32[colorIndex + 1] = 169;  // green
         *          depth32[colorIndex + 0] = 9; // blue
         *
         *      }
         *      else if (distance > 1000 && distance <= 2500)
         *      {
         *          depth32[colorIndex + 2] = 255;
         *          depth32[colorIndex + 1] = 61;
         *          depth32[colorIndex + 0] = 0;
         *      }
         *      else if (distance > 2500)
         *      {
         *          depth32[colorIndex + 2] = 169;
         *          depth32[colorIndex + 1] = 9;
         *          depth32[colorIndex + 0] = 115;
         *      }
         *  }
         * }*/
        /// <summary>
        /// Tracks the player.
        /// </summary>
        /// <param name="depthFrame">The depth frame.</param>

        /* private void TrackPlayer(short[] depthFrame)
         * {
         *   for (int depthIndex = 0, colorIndex = 0; depthIndex < depthFrame.Length && colorIndex < this.depth32.Length; depthIndex++, colorIndex += 4)
         *   {
         *       // Get the player
         *       int player = depthFrame[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
         *       // Color the all pixels associated with a player
         *       if (player > 0 && pixelData!=null)
         *       {
         *           depth32[colorIndex + 2] = pixelData[colorIndex*4 +2];
         *               //169;
         *           depth32[colorIndex + 1] = pixelData[colorIndex*4 +1];
         *           //62;
         *           depth32[colorIndex + 0] = pixelData[colorIndex*4];
         *           //9;
         *       }
         *
         *   }
         * }*/

        /// <summary>
        /// Reverses the bit values based on distance.
        /// </summary>
        /// <param name="depthImageFrame">The depth image frame.</param>
        /// <param name="pixelData">The pixel data.</param>
        /// <returns></returns>

        /* private short[] ReversingBitValueWithDistance(DepthImageFrame depthImageFrame, short[] pixelData32)
         * {
         *   short[] reverseBitPixelData = new short[depthImageFrame.PixelDataLength];
         *   int depth;
         *   for (int index = 0; index < pixelData32.Length; index++)
         *   {
         *       // Caculate the distance
         *       depth = pixelData32[index] >> DepthImageFrame.PlayerIndexBitmaskWidth;
         *
         *
         *       int player = pixelData32[index] & DepthImageFrame.PlayerIndexBitmask;
         *
         *       // Change the pixel value
         *       if (depth < 1500 || depth > 3500)
         *       {
         *           reverseBitPixelData[index] = (short)~pixelData32[index]; ;
         *       }
         *       else
         *       {
         *           reverseBitPixelData[index] = pixelData32[index];
         *       }
         *   }
         *
         *   return reverseBitPixelData;
         * }*/

        /// <summary>
        /// Handles the DepthFrameReady event of the sensor control.
        /// </summary>
        /// <param name="sender">The source of the event.</param>
        /// <param name="e">The <see cref="Microsoft.Kinect.DepthImageFrameReadyEventArgs"/> instance containing the event data.</param>
        void sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame depthimageFrame = e.OpenDepthImageFrame())
            {
                if (depthimageFrame == null)
                {
                    return;
                }
                frame       = depthimageFrame;
                pixelData32 = new short[depthimageFrame.PixelDataLength];



                depthimageFrame.CopyPixelDataTo(pixelData32);


                //short[] reversePixelData = new short[depthimageFrame.PixelDataLength];
                //reversePixelData = this.ReversingBitValueWithDistance(depthimageFrame, pixelData);



                //Modified mapping

                // Copy the extended pixel data from the frame we already opened;
                // re-opening the depth frame here is unnecessary.
                depthimageFrame.CopyDepthImagePixelDataTo(this.depthPixels);

                this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
                    DepthImageFormat.Resolution640x480Fps30,
                    this.depthPixels,
                    ColorImageFormat.RgbResolution1280x960Fps12,
                    this.colorCoordinates);

                Array.Clear(this.playerPixelData, 0, this.playerPixelData.Length);

                // loop over each row and column of the depth
                for (int y = 0; y < this.depthHeight; ++y)
                {
                    for (int x = 0; x < this.depthWidth; ++x)
                    {
                        // calculate index into depth array
                        int depthIndex = x + (y * this.depthWidth);

                        DepthImagePixel depthPixel = this.depthPixels[depthIndex];

                        int player = depthPixel.PlayerIndex;

                        // if we're tracking a player for the current pixel, set its opacity to full
                        if (player > 0)
                        {
                            // retrieve the depth to color mapping for the current depth pixel
                            ColorImagePoint colorImagePoint = this.colorCoordinates[depthIndex];

                            // scale color coordinates to depth resolution
                            int colorInDepthX = colorImagePoint.X / this.colorToDepthDivisor;
                            int colorInDepthY = colorImagePoint.Y / this.colorToDepthDivisor;

                            // make sure the depth pixel maps to a valid point in color space
                            // check y > 0 and y < depthHeight to make sure we don't write outside of the array
                            // check x > 0 instead of >= 0 since to fill gaps we set opaque current pixel plus the one to the left
                            // because of how the sensor works it is more correct to do it this way than to set to the right
                            if (colorInDepthX > 0 && colorInDepthX < this.depthWidth && colorInDepthY >= 0 && colorInDepthY < this.depthHeight)
                            {
                                // calculate index into the player mask pixel array
                                int playerPixelIndex = colorInDepthX + (colorInDepthY * this.depthWidth);

                                // set opaque
                                this.playerPixelData[playerPixelIndex] = opaquePixelValue;

                                // compensate for depth/color not corresponding exactly by setting the pixel
                                // to the left to opaque as well
                                this.playerPixelData[playerPixelIndex - 1] = opaquePixelValue;
                            }
                        }
                    }
                }


                // Note: in this variant the processing below still runs inside the using block,
                // so the depth frame is only returned to the runtime when the handler finishes.

                // Write the pixel data into our bitmap
                this.colorBitmap.WritePixels(
                    new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                    this.pixelData,
                    this.colorBitmap.PixelWidth * sizeof(int),
                    0);

                this.caller.colorimageControl2.Source = colorBitmap;
                if (this.playerOpacityMaskImage == null)
                {
                    this.playerOpacityMaskImage = new WriteableBitmap(
                        this.depthWidth,
                        this.depthHeight,
                        96,
                        96,
                        PixelFormats.Bgra32,
                        null);

                    this.caller.colorimageControl2.OpacityMask = new ImageBrush {
                        ImageSource = this.playerOpacityMaskImage
                    };
                }

                this.playerOpacityMaskImage.WritePixels(
                    new Int32Rect(0, 0, this.depthWidth, this.depthHeight),
                    this.playerPixelData,
                    this.depthWidth * ((this.playerOpacityMaskImage.Format.BitsPerPixel + 7) / 8),
                    0);

                // End of mapping modification

                /*
                 * if (trackPlayer == true)
                 * {
                 *  depth32 = new byte[depthimageFrame.PixelDataLength * 4];
                 *  this.TrackPlayer(pixelData32);
                 *  this.caller.colorimageControl.Source = BitmapSource.Create(
                 *             depthimageFrame.Width,
                 *             depthimageFrame.Height,
                 *             96,
                 *             96,
                 *             PixelFormats.Bgr32,
                 *             null,
                 *             depth32,
                 *             depthimageFrame.Width * 4
                 * );
                 * }
                 * else if (colorized == true)
                 * {
                 *  depth32 = new byte[depthimageFrame.PixelDataLength * 4];
                 *  this.GetColorPixelDataWithDistance(pixelData32);
                 *  this.caller.colorimageControl.Source = BitmapSource.Create(
                 * depthimageFrame.Width, depthimageFrame.Height, 96, 96, PixelFormats.Bgr32, null, depth32, depthimageFrame.Width
                 * );
                 * }
                 * else
                 * {
                 *  this.caller.colorimageControl.Source = BitmapSource.Create(
                 *     depthimageFrame.Width, depthimageFrame.Height, 96, 96, PixelFormats.Gray16, null, pixelData32, depthimageFrame.Width * depthimageFrame.BytesPerPixel
                 *     );
                 * }*/
            }
        }
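The handler above depends on several fields declared elsewhere (opaquePixelValue, colorToDepthDivisor, playerPixelData). A minimal, self-contained sketch of the same player-mask step, assuming 640x480 depth, 1280x960 color (hence a divisor of 2), and a Bgra32 mask where -1 sets every byte to 255 (fully opaque):

using System;
using Microsoft.Kinect;

static class PlayerMaskSketch
{
    // Assumed values: -1 turns all four Bgra32 bytes to 255 (opaque),
    // and 1280 / 640 = 2 scales color coordinates back to depth resolution.
    private const int OpaquePixelValue = -1;
    private const int ColorToDepthDivisor = 2;

    // Builds a depthWidth x depthHeight Bgra32 mask that is opaque wherever a player was detected.
    public static int[] BuildPlayerMask(DepthImagePixel[] depthPixels, ColorImagePoint[] colorCoordinates,
                                        int depthWidth, int depthHeight)
    {
        int[] mask = new int[depthWidth * depthHeight];

        for (int depthIndex = 0; depthIndex < depthPixels.Length; depthIndex++)
        {
            if (depthPixels[depthIndex].PlayerIndex <= 0)
            {
                continue;
            }

            // Map the depth pixel into color space, then scale back to depth resolution.
            ColorImagePoint p = colorCoordinates[depthIndex];
            int x = p.X / ColorToDepthDivisor;
            int y = p.Y / ColorToDepthDivisor;

            // x > 0 (not >= 0) so that the gap-filling write to x - 1 stays inside the array.
            if (x > 0 && x < depthWidth && y >= 0 && y < depthHeight)
            {
                int maskIndex = x + (y * depthWidth);
                mask[maskIndex] = OpaquePixelValue;
                mask[maskIndex - 1] = OpaquePixelValue;   // fill the small gap to the left
            }
        }

        return mask;
    }
}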
Beispiel #20
0
 public static short[] ToPixelData(this DepthImageFrame depthFrame)
 {
     short[] depth = new short[depthFrame.PixelDataLength];
     depthFrame.CopyPixelDataTo(depth);
     return(depth);
 }
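 // A possible call site for the extension method above (a sketch, not part of the original
 // snippet): copy the raw depth once per frame, then let the using block dispose the frame.
 void sensor_DepthFrameReady_Sketch(object sender, DepthImageFrameReadyEventArgs e)
 {
     using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
     {
         if (depthFrame == null)
         {
             return;
         }

         short[] rawDepth = depthFrame.ToPixelData();
         int depthAtOrigin = rawDepth[0] >> DepthImageFrame.PlayerIndexBitmaskWidth;
         System.Diagnostics.Debug.WriteLine("Depth at (0,0): " + depthAtOrigin + " mm");
     }
 }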
        void KinectFaceNode_AllFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            colorImageFrame = e.OpenColorImageFrame();
            depthImageFrame = e.OpenDepthImageFrame();
            skeletonFrame   = e.OpenSkeletonFrame();

            if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }
                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }
                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
                return;
            }

            if (first)
            {
                first         = false;
                this.olddepth = depthImageFrame.Format;
            }
            else
            {
                if (this.olddepth != depthImageFrame.Format)
                {
                    //Need a reset
                    if (this.depthImage != null)
                    {
                        this.depthImage = null;
                    }

                    foreach (SkeletonFaceTracker sft in this.trackedSkeletons.Values)
                    {
                        sft.Dispose();
                    }

                    this.trackedSkeletons.Clear();
                    this.olddepth = depthImageFrame.Format;
                }
            }

            if (this.depthImage == null)
            {
                this.depthImage = new short[depthImageFrame.PixelDataLength];
            }

            if (this.colorImage == null)
            {
                this.colorImage = new byte[colorImageFrame.PixelDataLength];
            }

            if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
            {
                this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
            }

            colorImageFrame.CopyPixelDataTo(this.colorImage);
            depthImageFrame.CopyPixelDataTo(this.depthImage);
            skeletonFrame.CopySkeletonDataTo(this.skeletonData);

            foreach (Skeleton skeleton in this.skeletonData)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of any skeleton, tracked or untracked.
                    if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(this.runtime.Runtime, colorImageFrame.Format, colorImage, depthImageFrame.Format, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }
            }

            this.RemoveOldTrackers(skeletonFrame.FrameNumber);

            colorImageFrame.Dispose();
            depthImageFrame.Dispose();
            skeletonFrame.Dispose();

            this.FInvalidate = true;
        }
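The format-change handling above (drop the old buffer, dispose and clear the trackers) recurs in several of these handlers. A small sketch of the buffer part factored into a helper; the class and field names are assumptions, not taken from the snippet:

using Microsoft.Kinect;

class DepthBufferCache
{
    private DepthImageFormat cachedFormat = DepthImageFormat.Undefined;
    private short[] buffer;

    // Returns a buffer sized for the current frame, reallocating only on the first call
    // or when the depth format has changed; the caller copies the pixel data into it.
    public short[] GetBuffer(DepthImageFrame frame)
    {
        if (buffer == null || cachedFormat != frame.Format)
        {
            cachedFormat = frame.Format;
            buffer = new short[frame.PixelDataLength];
        }

        return buffer;
    }
}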
Beispiel #22
0
        private void faceOperations(AllFramesReadyEventArgs e)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;

            try{
                colorImageFrame = e.OpenColorImageFrame();
                depthImageFrame = e.OpenDepthImageFrame();

                if (colorImageFrame == null || depthImageFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                colorImageFrame.CopyPixelDataTo(colorImage);
                depthImageFrame.CopyPixelDataTo(depthImage);

                if (!this.trackedSkeletons.ContainsKey(this.trackingID))
                {
                    this.trackedSkeletons.Add(this.trackingID, new SkeletonFaceTracker());
                }

                if (this.trackedSkeletons.TryGetValue(this.trackingID, out skeletonFaceTracker))
                {
                    skeletonFaceTracker.OnFrameReady(sensor, colorImageFormat, colorImage, depthImageFormat, depthImage);
                    skeletonFaceTracker.LastTrackedFrame = this.frameNumber;
                }

                this.RemoveOldTrackers(this.frameNumber);

                this.InvalidateVisual();
            } finally {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }
            }
        }
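RemoveOldTrackers is called here and in the other face-tracking handlers but never shown. A hypothetical reconstruction of such a pruning step, assuming each SkeletonFaceTracker exposes the LastTrackedFrame value assigned above and that a tracker idle for too many frames should be disposed:

        // Assumed threshold: how many frames a tracker may miss before it is considered stale.
        private const int MaxMissedFrames = 100;

        private void RemoveOldTrackersSketch(int currentFrameNumber)
        {
            var stale = new List<int>();

            foreach (KeyValuePair<int, SkeletonFaceTracker> entry in this.trackedSkeletons)
            {
                if (currentFrameNumber - entry.Value.LastTrackedFrame > MaxMissedFrames)
                {
                    entry.Value.Dispose();   // release the underlying face-tracking resources
                    stale.Add(entry.Key);    // defer removal so the dictionary is not mutated while enumerating
                }
            }

            foreach (int trackingId in stale)
            {
                this.trackedSkeletons.Remove(trackingId);
            }
        }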
Beispiel #23
0
        void GetROI(Skeleton user, DepthImageFrame depthFrame, ColorImageFrame color_frame = null)
        {
            // Map skeleton to Depth
            DepthImagePoint rightHandPoint =
                _sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.HandRight].Position, DepthImageFormat.Resolution640x480Fps30);

            DepthImagePoint rightWristPoint =
                _sensor.CoordinateMapper.MapSkeletonPointToDepthPoint(user.Joints[JointType.WristRight].Position, DepthImageFormat.Resolution640x480Fps30);

            int hand_depth = (rightHandPoint.Depth > rightWristPoint.Depth)?rightHandPoint.Depth:rightWristPoint.Depth + 10; // hand depth used for segmenting out the hand


            //*********************************** Map The depth Image to color Image to align the color image************************************************************************

            DepthImagePixel[] depthImagePixels = new DepthImagePixel[depthFrame.PixelDataLength];
            depthFrame.CopyDepthImagePixelDataTo(depthImagePixels);

            short[] rawDepthData = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo(rawDepthData);

            ColorImagePoint[] mapped_depth_locations = new ColorImagePoint[depthFrame.PixelDataLength];

            _sensor.CoordinateMapper.MapDepthFrameToColorFrame(DepthImageFormat.Resolution640x480Fps30, depthImagePixels, ColorImageFormat.RgbResolution640x480Fps30, mapped_depth_locations);
            byte[] aligned_colorPixels = new byte[color_frame.PixelDataLength];  // creating a byte array for storing the aligned pixel values

            byte[] original_colorPixels = new byte[color_frame.PixelDataLength];
            color_frame.CopyPixelDataTo(original_colorPixels);
            int aligned_image_index = 0;

            //int hand_baseindex = rightHandPoint.Y*640 + rightHandPoint.X;
            for (int i = 0; i < mapped_depth_locations.Length; i++)
            {
                int depth = rawDepthData[i] >> DepthImageFrame.PlayerIndexBitmaskWidth;
                //Console.WriteLine(depth);
                ColorImagePoint point = mapped_depth_locations[i];

                if ((point.X >= 0 && point.X < 640) && (point.Y >= 0 && point.Y < 480))
                {
                    int baseIndex = (point.Y * 640 + point.X) * 4;
                    if (depth < hand_depth && depth != -1)
                    {
                        aligned_colorPixels[aligned_image_index]     = original_colorPixels[baseIndex];
                        aligned_colorPixels[aligned_image_index + 1] = original_colorPixels[baseIndex + 1];
                        aligned_colorPixels[aligned_image_index + 2] = original_colorPixels[baseIndex + 2];
                        aligned_colorPixels[aligned_image_index + 3] = 0;
                    }
                    else
                    {
                        aligned_colorPixels[aligned_image_index]     = 0;
                        aligned_colorPixels[aligned_image_index + 1] = 0;
                        aligned_colorPixels[aligned_image_index + 2] = 0;
                        aligned_colorPixels[aligned_image_index + 3] = 0;
                    }
                }
                aligned_image_index = aligned_image_index + 4;



                // *************************** Now modify the contents of this aligned_colorBitmap using the depth information ***************************************************
            }


            //***********************************************************************************************************************************************************************



            int threshold = 20;

            int hand_length = 3 * Math.Max(Math.Abs(rightHandPoint.X - rightWristPoint.X), Math.Abs(rightHandPoint.Y - rightWristPoint.Y));

            //  int hand_length = (int)Math.Sqrt((rightHandPoint.X - rightWristPoint.X) ^ 2 + (rightHandPoint.Y - rightWristPoint.Y) ^ 2);

            int hand_length_old = hand_length;

            //****************************Low pass filter for hand_length*********************************
            // Note: hand_length_old is taken from the current frame, so the difference below is always 0
            // and the filter never triggers; see the sketch after this method for a version that keeps
            // the previous value across frames.
            if (Math.Abs(hand_length - hand_length_old) > threshold)
            {
                hand_length = hand_length_old;
            }

            //************************************************************************************************

            // Console.WriteLine(hand_length);
            int top_left_X_depth = rightHandPoint.X - hand_length;
            int top_left_Y_depth = rightHandPoint.Y - hand_length;
            int top_left_Z_depth = rightHandPoint.Depth;


            top_left_X_depth = (top_left_X_depth < 0)? 0 : top_left_X_depth;
            top_left_Y_depth = (top_left_Y_depth < 0)? 0 : top_left_Y_depth;

            DepthImagePoint top_left = new DepthImagePoint();

            top_left.X     = top_left_X_depth;
            top_left.Y     = top_left_Y_depth;
            top_left.Depth = rightHandPoint.Depth;

            int bottom_right_X_depth = rightHandPoint.X + hand_length;
            int bottom_right_Y_depth = rightHandPoint.Y + hand_length;
            int bottom_right_Z_depth = rightHandPoint.Depth;

            bottom_right_X_depth = (bottom_right_X_depth > 640)? 600 : bottom_right_X_depth;
            bottom_right_Y_depth = (bottom_right_Y_depth > 480)? 400 : bottom_right_Y_depth;

            DepthImagePoint bottom_right = new DepthImagePoint();

            bottom_right.X     = bottom_right_X_depth;
            bottom_right.Y     = bottom_right_Y_depth;
            bottom_right.Depth = bottom_right_Z_depth;

            Canvas.SetLeft(right_hand_pointer, top_left.X - right_hand_pointer.Width / 2);
            Canvas.SetTop(right_hand_pointer, top_left.Y - right_hand_pointer.Height / 2);



            Canvas.SetLeft(left_hand_pointer, bottom_right.X - left_hand_pointer.Width / 2);
            Canvas.SetTop(left_hand_pointer, bottom_right.Y - left_hand_pointer.Height / 2);

            border_rect.Width  = 2 * hand_length;
            border_rect.Height = 2 * hand_length;

            Canvas.SetLeft(border_rect, top_left.X);
            Canvas.SetTop(border_rect, top_left.Y);


            aligned_colorPixelsToBitmap(aligned_colorPixels, color_frame, (int)top_left.X, (int)top_left.Y, (int)border_rect.Width, (int)border_rect.Height);
        }
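Because hand_length_old is assigned from the current hand_length a few lines earlier, the jump filter in GetROI can never reject a measurement. A sketch of a version that keeps the previous value in a field across frames (the field and method names are assumptions):

        // Persisted between frames so a sudden jump in the measured hand size can be rejected.
        private int previousHandLength = -1;

        private int FilterHandLength(int measuredHandLength, int threshold)
        {
            if (previousHandLength < 0)
            {
                previousHandLength = measuredHandLength;   // first frame: accept the measurement
            }
            else if (Math.Abs(measuredHandLength - previousHandLength) > threshold)
            {
                return previousHandLength;                 // reject the jump, keep the previous size
            }

            previousHandLength = measuredHandLength;
            return measuredHandLength;
        }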
Beispiel #24
0
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                int num = 0; Skeleton sk1 = null, sk2 = null;
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked)
                    {
                        if (num == 0)
                        {
                            sk1 = skeleton;
                        }
                        else
                        {
                            sk2 = skeleton;
                        }
                        num++;
                    }
                }

                if (sk1 != null)
                {
                    if (sk2 == null)
                    {
                        if (!this.trackedSkeletons.ContainsKey(sk1.TrackingId))
                        {
                            this.trackedSkeletons.Add(sk1.TrackingId, new SkeletonFaceTracker());
                            Gesture.images.Add(sk1.TrackingId, 1);
                        }
                    }
                    else
                    {
                        if (!this.trackedSkeletons.ContainsKey(sk1.TrackingId))
                        {
                            if (!this.trackedSkeletons.ContainsKey(sk2.TrackingId)) // neither skeleton is tracked yet
                            {
                                this.trackedSkeletons.Add(sk1.TrackingId, new SkeletonFaceTracker());
                                this.trackedSkeletons.Add(sk2.TrackingId, new SkeletonFaceTracker());
                                Gesture.images.Add(sk1.TrackingId, 1);
                                Gesture.images.Add(sk2.TrackingId, 2);
                            }
                            else // sk1 is not tracked yet, sk2 already is
                            {
                                this.trackedSkeletons.Add(sk1.TrackingId, new SkeletonFaceTracker());
                                if (Gesture.images[sk2.TrackingId] == 1)
                                {
                                    Gesture.images.Add(sk1.TrackingId, 2);
                                }
                                else
                                {
                                    Gesture.images.Add(sk1.TrackingId, 1);
                                }
                            }
                        }
                        else
                        {
                            if (!this.trackedSkeletons.ContainsKey(sk2.TrackingId)) // sk1 already tracked, sk2 not yet
                            {
                                this.trackedSkeletons.Add(sk2.TrackingId, new SkeletonFaceTracker());
                                if (Gesture.images[sk1.TrackingId] == 1)
                                {
                                    Gesture.images.Add(sk2.TrackingId, 2);
                                }
                                else
                                {
                                    Gesture.images.Add(sk2.TrackingId, 1);
                                }
                            }
                        }
                    }
                }

                SkeletonFaceTracker skeletonFaceTracker;
                if (sk1 != null)
                {
                    if (this.trackedSkeletons.TryGetValue(sk1.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, sk1);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }
                if (sk2 != null)
                {
                    if (this.trackedSkeletons.TryGetValue(sk2.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, sk2);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
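The nested conditionals above assign each newly seen TrackingId the image slot (1 or 2) that the other tracked skeleton does not already occupy. A condensed sketch of the same assignment, assuming Gesture.images is a Dictionary<int, int> keyed by TrackingId; calling it first for sk1 and then for sk2 reproduces the behaviour above:

        private void EnsureTrackedSketch(Skeleton skeleton, Skeleton other)
        {
            if (skeleton == null || this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
            {
                return;
            }

            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());

            // Take slot 2 only if the other skeleton already holds slot 1.
            int slot = 1;
            if (other != null && Gesture.images.ContainsKey(other.TrackingId) && Gesture.images[other.TrackingId] == 1)
            {
                slot = 2;
            }

            Gesture.images.Add(skeleton.TrackingId, slot);
        }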
Beispiel #25
0
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                        skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        // We want to keep a record of any skeleton, tracked or untracked.
                        if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                        {
                            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                        }

                        // Give each tracker the updated frame.
                        SkeletonFaceTracker skeletonFaceTracker;
                        if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                        {
                            skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                            skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        }
                    }
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
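SkeletonFaceTracker.OnFrameReady is not shown in any of these snippets. A sketch of the call it most likely wraps, mirroring the FaceTracker.Track invocation that appears in a later example on this page; the field is an assumption:

        private FaceTracker faceTracker;   // assumed field inside a SkeletonFaceTracker-like class

        internal void OnFrameReadySketch(KinectSensor sensor, ColorImageFormat colorFormat, byte[] colorImage,
                                         DepthImageFormat depthFormat, short[] depthImage, Skeleton skeleton)
        {
            if (skeleton.TrackingState != SkeletonTrackingState.Tracked)
            {
                return;   // face tracking needs a fully tracked skeleton
            }

            if (this.faceTracker == null)
            {
                this.faceTracker = new FaceTracker(sensor);
            }

            FaceTrackFrame frame = this.faceTracker.Track(colorFormat, colorImage, depthFormat, depthImage, skeleton);

            if (frame.TrackSuccessful)
            {
                // The tracked face data (face rectangle, tracked points) can be read from frame here.
            }
        }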
Beispiel #26
0
        void _sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            //throw new NotImplementedException();
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame == null)
                {
                    return;
                }

                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) //new method that uses color camera and depth camera
                {
                    if (depthFrame == null)
                    {
                        return;
                    }

                    //                    byte[] pixels = GenerateColoredBytes(depthFrame);
                    //                    int stride = depthFrame.Width * 4;


                    short[] rawDepthData = new short[depthFrame.PixelDataLength];
                    depthFrame.CopyPixelDataTo(rawDepthData);

                    Byte[] pixels = new byte[depthFrame.Height * depthFrame.Width * 4];
                    colorFrame.CopyPixelDataTo(pixels);

                    const int BlueIndex  = 0;
                    const int GreenIndex = 1;
                    const int RedIndex   = 2;
                    const int alpha      = 3;


                    byte[] frame       = new byte[pixels.Length];
                    bool   playerFound = false;
                    bool   playerDone  = false;
                    // copy the color pixels wherever a player is detected; everything else stays transparent
                    for (int depthIndex = 0, idx = 0;
                         depthIndex < rawDepthData.Length && idx < pixels.Length;
                         depthIndex++, idx += 4)
                    {
                        int player = rawDepthData[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
                        int depth  = rawDepthData[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                        if (player > 0)
                        {
                            playerFound             = true;
                            frame[idx + BlueIndex]  = pixels[idx + BlueIndex];  //blue pixel
                            frame[idx + GreenIndex] = pixels[idx + GreenIndex]; //green pixel
                            frame[idx + RedIndex]   = pixels[idx + RedIndex];   //red pixel
                            frame[idx + alpha]      = 255;

                            if (playerFound && !playerDone)
                            {
                                ellipse1.Height = depth / 30;
                                ellipse1.Width  = depth / 30;
                                playerDone      = true;
                            }
                        }
                        else
                        {
                            frame[idx + BlueIndex]  = 0; //blue pixel
                            frame[idx + GreenIndex] = 0; //green pixel
                            frame[idx + RedIndex]   = 0; //red pixel
                            frame[idx + alpha]      = 0;
                        }
                    }


                    int stride = colorFrame.Width * 4;

                    image1.Source = BitmapSource.Create(colorFrame.Width, colorFrame.Height,
                                                        96, 96, PixelFormats.Bgra32, null, frame, stride);
                }
            }

/*
 *          using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) //new method that uses color camera and depth camera
 *          {
 *              if (depthFrame == null)
 *              {
 *                  return;
 *              }
 *
 *              byte[] pixels = GenerateColoredBytes(depthFrame);
 *              int stride = depthFrame.Width * 4;
 *
 *              image2.Source = BitmapSource.Create(depthFrame.Width, depthFrame.Height,
 *                  96, 96, PixelFormats.Bgra32, null, pixels, stride);
 *
 *
 *          }
 */
        }
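The masking and shifting above unpacks the 16-bit packed depth format: with player tracking enabled, the low 3 bits carry the player index and the remaining high bits carry the distance in millimetres. A tiny self-contained example of the same arithmetic that needs no sensor (the constants mirror DepthImageFrame.PlayerIndexBitmask and PlayerIndexBitmaskWidth):

using System;

class PackedDepthDemo
{
    const int PlayerIndexBitmask = 0x0007;      // low 3 bits: player index (0 = no player)
    const int PlayerIndexBitmaskWidth = 3;      // shift by this to reach the depth value

    static void Main()
    {
        short raw = (short)((1250 << PlayerIndexBitmaskWidth) | 2);   // 1250 mm, player 2

        int player = raw & PlayerIndexBitmask;
        int depth  = raw >> PlayerIndexBitmaskWidth;

        Console.WriteLine("player = " + player + ", depth = " + depth + " mm");   // player = 2, depth = 1250 mm
    }
}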
Beispiel #27
0
        private byte[] GenerateColoredBytes(DepthImageFrame depthFrame)
        {
            short[] rawDepthDate = new short[depthFrame.PixelDataLength];
            depthFrame.CopyPixelDataTo(rawDepthDate);

            // use depthFrame to create the image to display on-screen
            // one output color value is built for every depth pixel:
            // Height * Width * 4 bytes (B, G, R, A)
            Byte[] pixels = new byte[depthFrame.Height * depthFrame.Width * 4];

            // Bgr32  = B, G, R, unused
            // Bgra32 = B, G, R, A; with Bgra32 the alpha byte must be set explicitly,
            // because .NET defaults it to 0, which is fully transparent

            // data locations
            const int BlueIndex  = 0;
            const int GreenIndex = 1;
            const int RedIndex   = 2;

            for (int depthIndex = 0, colorIndex = 0; depthIndex < rawDepthDate.Length && colorIndex < pixels.Length; ++depthIndex, colorIndex += 4)
            {
                // Formula for player
                int player = rawDepthDate[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
                // Formula for depth
                int depth = rawDepthDate[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;

                // .9m
                if (depth <= 900)
                {
                    pixels[colorIndex + BlueIndex]  = 255;
                    pixels[colorIndex + GreenIndex] = 0;
                    pixels[colorIndex + RedIndex]   = 0;
                }
                // .9 - 2.0
                else if (depth > 900 && depth < 2000)
                {
                    pixels[colorIndex + BlueIndex]  = 0;
                    pixels[colorIndex + GreenIndex] = 255;
                    pixels[colorIndex + RedIndex]   = 0;
                }
                else if (depth > 2000)
                {
                    pixels[colorIndex + BlueIndex]  = 0;
                    pixels[colorIndex + GreenIndex] = 0;
                    pixels[colorIndex + RedIndex]   = 255;
                }

                // grayscale intensity from depth (note: this overwrites the range colors above for every pixel)
                byte intensity = CalculateIntensityFromDepth(depth);
                pixels[colorIndex + BlueIndex]  = intensity;
                pixels[colorIndex + GreenIndex] = intensity;
                pixels[colorIndex + RedIndex]   = intensity;

                // Color all players gold
                if (player > 0) // There is a player
                {
                    pixels[colorIndex + BlueIndex]  = Colors.Gold.B;
                    pixels[colorIndex + GreenIndex] = Colors.Gold.G;
                    pixels[colorIndex + RedIndex]   = Colors.Gold.R;
                }
            }

            return(pixels);
        }
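CalculateIntensityFromDepth is called above but not included in the snippet. A plausible implementation that maps the sensor's usable range linearly onto a 0-255 grayscale value, brightest for the nearest objects; the range constants are assumptions:

        // Assumed usable range of the Kinect v1 depth sensor, in millimetres.
        const int MinDepthDistance = 850;
        const int MaxDepthDistance = 4095;
        const int MaxDepthDistanceOffset = MaxDepthDistance - MinDepthDistance;

        public static byte CalculateIntensityFromDepth(int distance)
        {
            // Clamp to the usable range, then invert so nearer objects come out brighter.
            int clamped = Math.Max(MinDepthDistance, Math.Min(distance, MaxDepthDistance));
            return (byte)(255 - (255 * (clamped - MinDepthDistance) / MaxDepthDistanceOffset));
        }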
Beispiel #28
0
        /// <summary>
        /// Frame-update event for the RGB camera and the depth camera
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void kinect_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            try {
                using (ColorImageFrame colorFrame = e.OpenColorImageFrame()) {
                    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) {
                        if (depthFrame != null && colorFrame != null && kinect.IsRunning)
                        {
                            if (depthPixel == null)
                            {
                                depthPixel           = new short[depthFrame.PixelDataLength];
                                colorImagePixelPoint = new ColorImagePoint[depthFrame.PixelDataLength];
                            }

                            // Only render once every 3 frames
                            if (depthFrame.FrameNumber % 3 != 0)
                            {
                                return;
                            }

                            depthFrame.CopyPixelDataTo(depthPixel);

                            // Convert the depth-data coordinates into RGB-image coordinates
                            kinect.MapDepthFrameToColorFrame(kinect.DepthStream.Format, depthPixel,
                                                             kinect.ColorStream.Format, colorImagePixelPoint);

                            // Draw the camera image
                            byte[] colorPixel = new byte[colorFrame.PixelDataLength];
                            colorFrame.CopyPixelDataTo(colorPixel);

                            // Re-align the RGB image to the depth-image positions
                            colorPixel = CoordinateColorImage(colorImagePixelPoint, colorPixel);

                            CameraImage.Source = BitmapSource.Create(colorFrame.Width, colorFrame.Height, 96, 96,
                                                                     PixelFormats.Bgr32, null, colorPixel,
                                                                     colorFrame.Width * colorFrame.BytesPerPixel);
                        }
                    }
                }
            }
            catch (Exception ex) {
                MessageBox.Show(ex.Message);
            }

            // モードに応じた処理
            switch (currentMode)
            {
            case SelectMode.SELECTING:
                // While a region is being selected, keep its drawing updated
                UpdateRectPosition();

                break;

            case SelectMode.SELECTED:
                // Check whether a point inside the region is being touched
                Point point = CheckThePointTouchingTheRegion();
                UpdateTouchingPointEllipse(point);
                UpdatePaintCanvas(point);

                break;
            }
        }
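CoordinateColorImage is called above but not shown. A sketch of what such a correction might look like, following the same depth-to-color alignment idea as the GetROI example earlier on this page: for every depth pixel, copy the color pixel that MapDepthFrameToColorFrame mapped it to (640x480 with 4 bytes per pixel is assumed for both streams):

        private byte[] CoordinateColorImageSketch(ColorImagePoint[] mappedPoints, byte[] colorPixel)
        {
            const int width = 640, height = 480, bytesPerPixel = 4;
            byte[] aligned = new byte[colorPixel.Length];

            for (int depthIndex = 0; depthIndex < mappedPoints.Length; depthIndex++)
            {
                ColorImagePoint point = mappedPoints[depthIndex];

                // Skip depth pixels that map outside the color image.
                if (point.X < 0 || point.X >= width || point.Y < 0 || point.Y >= height)
                {
                    continue;
                }

                int sourceIndex = (point.Y * width + point.X) * bytesPerPixel;
                int targetIndex = depthIndex * bytesPerPixel;

                aligned[targetIndex]     = colorPixel[sourceIndex];       // B
                aligned[targetIndex + 1] = colorPixel[sourceIndex + 1];   // G
                aligned[targetIndex + 2] = colorPixel[sourceIndex + 2];   // R
                aligned[targetIndex + 3] = colorPixel[sourceIndex + 3];   // unused byte in Bgr32
            }

            return aligned;
        }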
Beispiel #29
0
        private void AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for changes in any of the data this function is receiving
                // and reset things appropriately.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.colorImage               = null;
                    this.colorImageFormat         = colorImageFrame.Format;
                    this.colorImageWritableBitmap = null;
                    this.ColorImage.Source        = null;
                    this.theMaterial.Brush        = null;
                }

                if (this.skeletonData != null && this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = null;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                if (this.colorImageWritableBitmap == null)
                {
                    this.colorImageWritableBitmap = new WriteableBitmap(
                        colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                    this.ColorImage.Source = this.colorImageWritableBitmap;
                    this.theMaterial.Brush = new ImageBrush(this.colorImageWritableBitmap)
                    {
                        ViewportUnits = BrushMappingMode.Absolute
                    };
                }

                if (this.skeletonData == null)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                // Copy data received in this event to our buffers.
                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                this.colorImageWritableBitmap.WritePixels(
                    new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                    this.colorImage,
                    colorImageFrame.Width * Bgr32BytesPerPixel,
                    0);

                // Find a skeleton to track.
                // First see if our old one is good.
                // When a skeleton is in PositionOnly tracking state, don't pick a new one
                // as it may become fully tracked again.
                Skeleton skeletonOfInterest =
                    this.skeletonData.FirstOrDefault(
                        skeleton =>
                        skeleton.TrackingId == this.trackingId &&
                        skeleton.TrackingState != SkeletonTrackingState.NotTracked);

                if (skeletonOfInterest == null)
                {
                    // Old one wasn't around.  Find any skeleton that is being tracked and use it.
                    skeletonOfInterest =
                        this.skeletonData.FirstOrDefault(
                            skeleton => skeleton.TrackingState == SkeletonTrackingState.Tracked);

                    if (skeletonOfInterest != null)
                    {
                        // This may be a different person so reset the tracker which
                        // could have tuned itself to the previous person.
                        if (this.faceTracker != null)
                        {
                            this.faceTracker.ResetTracking();
                        }

                        this.trackingId = skeletonOfInterest.TrackingId;
                    }
                }

                bool displayFaceMesh = false;

                if (skeletonOfInterest != null && skeletonOfInterest.TrackingState == SkeletonTrackingState.Tracked)
                {
                    if (this.faceTracker == null)
                    {
                        try
                        {
                            this.faceTracker = new FaceTracker(this.Kinect);
                        }
                        catch (InvalidOperationException)
                        {
                            // During some shutdown scenarios the FaceTracker
                            // is unable to be instantiated.  Catch that exception
                            // and don't track a face.
                            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                            this.faceTracker = null;
                        }
                    }

                    if (this.faceTracker != null)
                    {
                        FaceTrackFrame faceTrackFrame = this.faceTracker.Track(
                            this.colorImageFormat,
                            this.colorImage,
                            this.depthImageFormat,
                            this.depthImage,
                            skeletonOfInterest);

                        if (faceTrackFrame.TrackSuccessful)
                        {
                            this.UpdateMesh(faceTrackFrame);

                            // Only display the face mesh if there was a successful track.
                            displayFaceMesh = true;
                        }
                    }
                }
                else
                {
                    this.trackingId = -1;
                }

                this.viewport3d.Visibility = displayFaceMesh ? Visibility.Visible : Visibility.Hidden;
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
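The try/catch around the FaceTracker constructor above guards against a shutdown race. The same guard, factored into a small helper, might look like this; the helper name is not from the sample:

        // Returns null instead of throwing when the sensor is shutting down, mirroring the
        // InvalidOperationException handling in the handler above (System.Diagnostics assumed).
        private static FaceTracker TryCreateFaceTracker(KinectSensor sensor)
        {
            try
            {
                return new FaceTracker(sensor);
            }
            catch (InvalidOperationException)
            {
                Debug.WriteLine("TryCreateFaceTracker - FaceTracker could not be instantiated");
                return null;
            }
        }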
Beispiel #30
0
        // boolean meaning T or F value
        //
        //MAYBE to be played with
        void _sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            Vector coolh = new Vector();

            // coolh will hold the left-hand position mapped into color-image coordinates


            short[] DataDepth;
            int     HandDepth;

            byte[] RbgVideo;


            using (SkeletonFrame skeletonFrameData = e.OpenSkeletonFrame())
            // this using block reads the skeleton frame and picks the first tracked skeleton
            {
                if (skeletonFrameData == null)
                {
                    return;
                }
                skeletonFrameData.CopySkeletonDataTo(allSkeletons);

                Skeleton first = (from s in allSkeletons
                                  where s.TrackingState == SkeletonTrackingState.Tracked
                                  select s).FirstOrDefault();

                if (first == null)
                {
                    return;
                }

                // Joint positions come back in skeleton space; MapSkeletonPointToColor converts
                // them into pixel coordinates in the 640x480 color stream.
                var pct = __kinect.MapSkeletonPointToColor(
                    first.Joints[JointType.HandLeft].Position,
                    ColorImageFormat.RgbResolution640x480Fps30);

                coolh.X = pct.X;
                coolh.Y = pct.Y;
            }

            // Next, read the depth frame and take the depth at the hand position so we only work with the hand, not the whole image

            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (depthFrame == null)
                {
                    return;
                }

                DataDepth = new short [depthFrame.PixelDataLength];
                depthFrame.CopyPixelDataTo(DataDepth);
            }
            // the block above copied the raw data of one depth frame

            HandDepth = DataDepth[(int)coolh.Y * 640 + (int)coolh.X] >> DepthImageFrame.PlayerIndexBitmaskWidth;


            // the (int) casts below convert the double vector components into array indices on the spot


            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())

            {
                if (colorFrame == null)
                {
                    return;
                }

                RbgVideo = new byte[colorFrame.PixelDataLength];
                colorFrame.CopyPixelDataTo(RbgVideo);
            }

            int RedArea    = 0;
            int YellowArea = 0;

            // counters for how many pixels of each target color are found


            for (int i = -50; i < 51; i++)
            {
                if (coolh.Y + i < 0 || coolh.Y + i >= 480)
                {
                    continue;
                }

                // row offset of the current scan line in the 640x480 depth array
                int offset = ((int)(coolh.Y + i) * 640);

                for (int j = -50; j < 51; j++)
                {
                    if (coolh.X + j < 0 || coolh.X + j >= 640)
                    {
                        continue;
                    }
                    // Bgr32 byte order within a pixel: blue, green, red, (unused) alpha
                    const int bluein  = 0;
                    const int greenin = 1;
                    const int redin   = 2;

                    // redin, greenin and bluein are byte offsets within one Bgr32 pixel
                    if (AE(DataDepth[offset + (int)coolh.X + j] >> DepthImageFrame.PlayerIndexBitmaskWidth, HandDepth, 50))
                    {
                        int coloroffset = (offset + (int)coolh.X + j) * 4;

                        // *4 because every pixel occupies 4 bytes: blue, green, red and alpha

                        if (AE(RbgVideo[coloroffset + redin], 255, 20) &&
                            AE(RbgVideo[coloroffset + greenin], 0, 20) &&
                            AE(RbgVideo[coloroffset + bluein], 0, 20))

                        {
                            RedArea++;
                        }

                        // repeat the check above for each color of interest; this one detects red
                    }
                }
            }
        }
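The AE helper used throughout the color check above is not part of the snippet. Judging from its call sites (value, target, tolerance), it is an approximate-equality test; a one-method sketch of that assumption:

        // Approximately equal: true when value lies within tolerance of target
        // (an assumed reconstruction of the AE helper called above).
        private static bool AE(int value, int target, int tolerance)
        {
            return Math.Abs(value - target) <= tolerance;
        }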