示例#1
0
        /// <summary>
        /// Captures a width-by-height depth matrix of pixels whose depth lies
        /// within 200 units above minZ, filling small vertical gaps (under
        /// 10 rows in the same column) with the last accepted depth value.
        /// </summary>
        /// <param name="minZ">Baseline depth; pixels deeper than minZ + 200 are skipped.</param>
        /// <returns>2-D depth matrix indexed [column, row].</returns>
        private static short[,] Capture(short minZ)
        {
            int   prevCol   = 0;
            int   prevRow   = 0;
            short prevDepth = 0;

            int width  = myKin.ColorStream.FrameWidth;
            int height = myKin.ColorStream.FrameHeight;

            short[,] result = new short[width, height];

            for (int col = 0; col < width; col++)
            {
                for (int row = 0; row < height; row++)
                {
                    DepthImagePixel px = DepthMappedToColor[col, row];
                    if (!px.IsKnownDepth || (px.Depth - minZ) >= 200)
                    {
                        continue;
                    }

                    // Bridge a short vertical gap in the same column with the
                    // previously accepted depth.
                    if (prevCol == col && row - prevRow < 10)
                    {
                        for (int gap = prevRow + 1; gap < row; gap++)
                        {
                            result[col, gap] = prevDepth;
                        }
                    }

                    result[col, row] = px.Depth;
                    prevCol   = col;
                    prevRow   = row;
                    prevDepth = px.Depth;
                }
            }

            return result;
        }
示例#2
0
        /// <summary>
        /// Snapshots a sensor depth frame: copies its raw pixels, extracts the
        /// 16-bit depth component of each, and mirrors the frame's metadata.
        /// </summary>
        /// <param name="sensorFrame">Source frame to copy from.</param>
        public TDepthFrame(DepthImageFrame sensorFrame)
        {
            //TODO This can be done better
            int pixelCount = sensorFrame.PixelDataLength;

            var rawPixels = new DepthImagePixel[pixelCount];
            sensorFrame.CopyDepthImagePixelDataTo(rawPixels);

            // Keep only the Depth component of each pixel.
            var depths = new short[pixelCount];
            for (int idx = 0; idx < pixelCount; idx++)
            {
                depths[idx] = rawPixels[idx].Depth;
            }
            DepthData = depths;

            PixelDataLength = pixelCount;
            BytesPerPixel   = sensorFrame.BytesPerPixel;
            FrameNumber     = sensorFrame.FrameNumber;
            Width           = sensorFrame.Width;
            Height          = sensorFrame.Height;
            Timestamp       = sensorFrame.Timestamp;
            MinDepth        = sensorFrame.MinDepth;
            MaxDepth        = sensorFrame.MaxDepth;
        }
示例#3
0
        /// <summary>
        /// Converts the frame's distance data into a DepthImagePixel array.
        /// </summary>
        /// <param name="depthFrame">Frame to read pixel data from.</param>
        /// <returns>Newly allocated array holding the frame's pixel data.</returns>
        public static DepthImagePixel[] ToDepthImagePixel(this DepthImageFrame depthFrame)
        {
            var buffer = new DepthImagePixel[depthFrame.PixelDataLength];
            depthFrame.CopyDepthImagePixelDataTo(buffer);
            return buffer;
        }
        /// <summary>
        /// Updates the depth display: shows the center-point distance as text
        /// and renders the whole frame as a Gray16 image.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        void kinect_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame frame = e.OpenDepthImageFrame())
            {
                if (frame == null)
                {
                    return;
                }

                // Reading DepthImagePixel yields the 16-bit distance value plus
                // the player index for each pixel.
                var depth = new DepthImagePixel[frame.PixelDataLength];
                frame.CopyDepthImagePixelDataTo(depth);

                // Show the distance at the center point of the frame.
                int center = (frame.Height / 2) * frame.Width + (frame.Width / 2);
                textDepth.Text = string.Format("{0}mm", depth[center].Depth);

                // Convert to a displayable image (14 bits of depth scaled to 16 m).
                // NOTE(review): original comment wondered whether the practical
                // limit is near OpenNI's ~10 m — unverified.
                var pixel = new short[frame.PixelDataLength];
                for (int i = 0; i < depth.Length; i++)
                {
                    pixel[i] = (short)~(depth[i].Depth * 0xFFFF / 0x3FFF);
                }

                imageDepth.Source = BitmapSource.Create(frame.Width, frame.Height, 96, 96,
                                                        PixelFormats.Gray16, null, pixel, frame.Width * frame.BytesPerPixel);
            }
        }
示例#5
0
        /// <summary>
        /// Depth-frame handler: copies depth pixels into a byte buffer, hands it
        /// to processDepthFrame, regenerates the depth texture, and raises
        /// OnDepthFrame.
        /// </summary>
        private void nui_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame dif = e.OpenDepthImageFrame())
            {//DepthImageFrame dif = e.OpenDepthImageFrame();
                if (dif != null)
                {
                    DepthImagePixel[] depthPixels = new DepthImagePixel[this.sensor.DepthStream.FramePixelDataLength];
                    //byte[] pre = new byte[dif.Width * dif.Height * 4];
                    // NOTE(review): 'pre' is allocated with 4 bytes per pixel, but the
                    // loop below writes only 1 byte per pixel (at index i), leaving the
                    // remaining 3/4 of the buffer zeroed. Compare with the BGRX fill
                    // pattern used elsewhere in this file — confirm processDepthFrame
                    // really expects this layout before changing it.
                    byte[]  pre      = new byte[this.sensor.DepthStream.FramePixelDataLength * sizeof(int)];
                    short[] preshort = new short[dif.PixelDataLength];
                    int     minDepth = dif.MinDepth;
                    int     maxDepth = dif.MaxDepth;
                    //dif.CopyPixelDataTo(preshort);
                    dif.CopyDepthImagePixelDataTo(depthPixels);

                    for (int i = 0; i < depthPixels.Length; ++i)
                    {
                        short depth = depthPixels[i].Depth;
                        // NOTE(review): casting the depth (which may exceed 255) to a
                        // byte keeps only the low 8 bits, so the intensity wraps.
                        pre[i] = (byte)(depth >= minDepth && depth <= maxDepth ? depth : 0);
                    }

                    processDepthFrame(pre);
                    depthFrameTex = generateDepthTex();

                    // Notify subscribers that a new depth frame was processed.
                    if (OnDepthFrame != null)
                    {
                        OnDepthFrame();
                    }

                    fps.PushFrame();
                    Ready = true;
                }
            }
        }
示例#6
0
        /// <summary>
        /// Renders a depth frame as a 32bpp grayscale Bitmap: each pixel's
        /// intensity is the low byte of its depth, black when outside the
        /// frame's reliable depth range.
        /// </summary>
        /// <param name="frame">Depth frame to render.</param>
        /// <returns>A new 32bpp Bitmap of the frame's dimensions.</returns>
        private Bitmap CreateColorBitmapFromDepth(DepthImageFrame frame)
        {
            DepthImagePixel[] depthPixels = new DepthImagePixel[kSensor.DepthStream.FramePixelDataLength];
            byte[]            colorPixels = new byte[kSensor.DepthStream.FramePixelDataLength * sizeof(int)];

            frame.CopyDepthImagePixelDataTo(depthPixels);

            // Get min and max reliable depth for the current frame.
            int minDepth = frame.MinDepth;
            int maxDepth = frame.MaxDepth;

            // Convert depth to BGRX gray.
            int colorPixelIndex = 0;

            for (int i = 0; i < depthPixels.Length; ++i)
            {
                short depth = depthPixels[i].Depth;

                // To convert to a byte, we're discarding the most-significant
                // rather than least-significant bits.
                // We're preserving detail, although the intensity will "wrap."
                // Values outside the reliable depth range are mapped to 0 (black).

                // Note: Using conditionals in this loop could degrade performance.
                // Consider using a lookup table instead when writing production code.
                // See the KinectDepthViewer class used by the KinectExplorer sample
                // for a lookup table example.
                byte intensity = (byte)(depth >= minDepth && depth <= maxDepth ? depth : 0);

                colorPixels[colorPixelIndex++] = intensity; // blue
                colorPixels[colorPixelIndex++] = intensity; // green
                colorPixels[colorPixelIndex++] = intensity; // red

                // We're outputting BGR; the last byte in the 32 bits is unused so skip it.
                // If we were outputting BGRA, we would write alpha here.
                ++colorPixelIndex;
            }

            // FIX: removed the unused 'stride' local. It was computed from
            // frame.BytesPerPixel (the 16-bit depth stride) and never used; the
            // bitmap below is 32bpp and LockBits supplies its own stride.
            var bmpFrame = new Bitmap(frame.Width, frame.Height, System.Drawing.Imaging.PixelFormat.Format32bppRgb);
            var bmpData  = bmpFrame.LockBits(new Rectangle(0, 0, frame.Width, frame.Height), System.Drawing.Imaging.ImageLockMode.WriteOnly, bmpFrame.PixelFormat);

            System.Runtime.InteropServices.Marshal.Copy(colorPixels, 0, bmpData.Scan0, colorPixels.Length);

            bmpFrame.UnlockBits(bmpData);

            return(bmpFrame);
        }
示例#7
0
        /// <summary>
        /// Extracts the 16-bit Depth component of every pixel into a short array.
        /// </summary>
        /// <param name="depthImage">Source pixel array.</param>
        /// <returns>Array of the same length containing only depth values.</returns>
        public static unsafe short[] ConvertDepthImagePixelToShort(DepthImagePixel[] depthImage)
        {
            int count = depthImage.Length;
            short[] result = new short[count];

            // Pin both arrays once and copy with indexed pointer access.
            fixed (short* dst = result)
            fixed (DepthImagePixel* src = depthImage)
            {
                for (int i = 0; i < count; i++)
                {
                    dst[i] = src[i].Depth;
                }
            }

            return result;
        }
示例#8
0
        /// <summary>
        /// Depth-frame handler: marks every pixel that belongs to a tracked
        /// player green and shows the result as the canvas background.
        /// </summary>
        void sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (var frame = e.OpenDepthImageFrame())
            {
                if (frame != null)
                {
                    //create buffer for depth data
                    var depthImagePixels = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];
                    //copy data to buffer
                    frame.CopyDepthImagePixelDataTo(depthImagePixels);

                    // create buffer for color pixels data (4 bytes per depth pixel)
                    var colorPixels = new byte[4 * sensor.DepthStream.FramePixelDataLength];

                    //for every color pixel in buffer (stepping one 4-byte pixel at a time)
                    for (int i = 0; i < colorPixels.Length; i += 4)
                    {
                        //check if current pixels belongs to player
                        if (depthImagePixels[i / 4].PlayerIndex != 0)
                        {
                            //set color of that pixel to green (offset +1 is the green byte)
                            colorPixels[i + 1] = 255;
                        }
                    }
                    //display color pixels on screen
                    // NOTE(review): bitmap is created 640x400 while the buffer above is
                    // sized for the full stream frame — looks like a typo for 640x480;
                    // confirm against the configured depth format before relying on it.
                    imageCanvas.Background = new ImageBrush(colorPixels.ToBitmapSource(640, 400));
                }
            }
        }
        /// <summary>
        /// Grays out every color pixel whose mapped depth point is closer than
        /// the given maximum distance, using the brightest BGR channel as the
        /// gray level. Disposes the frame when done.
        /// </summary>
        /// <param name="quadro">Depth frame (disposed by this method).</param>
        /// <param name="bytesImagem">BGRA color buffer modified in place.</param>
        /// <param name="distanciaMaxima">Depth cutoff for the effect.</param>
        private void ReconhecerDistancia(DepthImageFrame quadro, byte[] bytesImagem, int distanciaMaxima)
        {
            if (quadro == null || bytesImagem == null)
            {
                return;
            }

            using (quadro)
            {
                var depthPixels = new DepthImagePixel[quadro.PixelDataLength];
                quadro.CopyDepthImagePixelDataTo(depthPixels);

                // Map every color pixel to its corresponding depth point.
                var mappedPoints = new DepthImagePoint[640 * 480];
                kinect.CoordinateMapper.MapColorFrameToDepthFrame(kinect.ColorStream.Format, kinect.DepthStream.Format, depthPixels, mappedPoints);

                for (int i = 0; i < mappedPoints.Length; i++)
                {
                    var ponto = mappedPoints[i];
                    if (ponto.Depth < distanciaMaxima && KinectSensor.IsKnownPoint(ponto))
                    {
                        int baseIndex = i * 4;

                        // Gray out the pixel with the brightest of its channels.
                        byte brightest = Math.Max(bytesImagem[baseIndex], Math.Max(bytesImagem[baseIndex + 1], bytesImagem[baseIndex + 2]));

                        bytesImagem[baseIndex]     = brightest;
                        bytesImagem[baseIndex + 1] = brightest;
                        bytesImagem[baseIndex + 2] = brightest;
                    }
                }
            }
        }
        /// <summary>
        /// Grays out every color pixel whose depth (at the same index) is closer
        /// than the given maximum distance, using the brightest BGR channel as
        /// the gray level. Disposes the frame when done.
        /// </summary>
        /// <param name="quadro">Depth frame (disposed by this method).</param>
        /// <param name="bytesImagem">BGRA color buffer modified in place.</param>
        /// <param name="distanciaMaxima">Depth cutoff for the effect.</param>
        private void ReconhecerDistancia(DepthImageFrame quadro, byte[] bytesImagem, int distanciaMaxima)
        {
            if (quadro == null || bytesImagem == null)
            {
                return;
            }

            using (quadro)
            {
                var depthPixels = new DepthImagePixel[quadro.PixelDataLength];
                quadro.CopyDepthImagePixelDataTo(depthPixels);

                for (int i = 0; i < depthPixels.Length; i++)
                {
                    if (depthPixels[i].Depth >= distanciaMaxima)
                    {
                        continue;
                    }

                    int baseIndex = i * 4;

                    // Gray out the pixel with the brightest of its channels.
                    byte brightest = Math.Max(bytesImagem[baseIndex], Math.Max(bytesImagem[baseIndex + 1], bytesImagem[baseIndex + 2]));

                    bytesImagem[baseIndex]     = brightest;
                    bytesImagem[baseIndex + 1] = brightest;
                    bytesImagem[baseIndex + 2] = brightest;
                }
            }
        }
示例#11
0
        /// <summary>
        /// Allocates a FusionWorkItem whose pixel buffer is sized for the
        /// given depth format.
        /// </summary>
        /// <param name="depthFormat">Depth format determining the buffer size.</param>
        public static FusionWorkItem Create(DepthImageFormat depthFormat)
        {
            var dimensions = FormatHelper.GetDepthSize(depthFormat);
            int pixelCount = (int)(dimensions.Width * dimensions.Height);

            return new FusionWorkItem(new DepthImagePixel[pixelCount], depthFormat);
        }
示例#12
0
        /// <summary>
        /// Depth-frame handler: updates the focus-point readout and, when no
        /// worker is running, launches a thread to process the frame's pixels.
        /// </summary>
        private void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            // FIX: wrap the frame in 'using' so it is disposed even when an
            // exception escapes; the original only called Dispose() on the
            // success path.
            using (DepthImageFrame frame = e.OpenDepthImageFrame())
            {
                if (frame == null)
                {
                    //If frame is null do nothing...

                    //Modify Text
                    lbl_status.Text = "DATA : Not Received";
                    return;
                }

                CurrentFrame++;

                DepthImagePixel[] pixelArray = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];

                //Copy depth data to Array
                frame.CopyDepthImagePixelDataTo(pixelArray);

                if (isSet == false && focusPoint.X != -1)
                {
                    // First valid frame after a focus point was picked: latch its depth.
                    focusPoint.Depth  = pixelArray[pointToIndex(frame.Width, focusPoint)].Depth;
                    lbl_Point.Text    = "(" + focusPoint.X + "," + focusPoint.Y + ")";
                    lbl_Distance.Text = (float)focusPoint.Depth / 1000 + " m";

                    isSet    = true;
                    focusSet = true;
                }
                else if (focusSet == true)
                {
                    lbl_Distance.Text = (float)focusPoint.Depth / 1000 + " m";
                }

                //Dont collide with graphics driver
                if (isWorking == false)
                {
                    isWorking = true;

                    // FIX: capture the frame dimensions in locals BEFORE starting
                    // the thread. The original lambda read frame.Width/Height on
                    // the worker thread, potentially after the frame was disposed.
                    var frameWidth  = frame.Width;
                    var frameHeight = frame.Height;

                    t = new Thread(() =>
                    {
                        depthSet(ref pixelArray, frameWidth, frameHeight);
                    });

                    t.Start();

                    //Modify Text
                    lbl_status.Text = "Depth Average : " + average(ref pixelArray);
                }
            }
        }
示例#13
0
 /// <summary>
 /// Gets a snapshot copy of the depth data.
 /// </summary>
 /// <returns>The array of depth data</returns>
 public DepthImagePixel[] GetDepthData()
 {
     // Copy under the mutex so callers never observe a buffer that is
     // being rewritten concurrently.
     lock (_depthDataMutex)
     {
         var snapshot = new DepthImagePixel[_depthData.Length];
         _depthData.CopyTo(snapshot, 0);
         return snapshot;
     }
 }
 /// <summary>
 /// Maps the depth pixel at the given index to a skeleton-space point.
 /// Returns null when the index is negative or the pixel's depth is unknown.
 /// </summary>
 private static SkeletonPoint? FindDepth(KinectSensor sensor, DepthImagePixel[] depthPixels, Point point, int idx)
 {
     if (idx < 0 || !depthPixels[idx].IsKnownDepth)
     {
         return null;
     }

     var depthPoint = new DepthImagePoint
     {
         X     = (int)point.X,
         Y     = (int)point.Y,
         Depth = depthPixels[idx].Depth
     };
     return sensor.CoordinateMapper.MapDepthPointToSkeletonPoint(DepthImageFormat.Resolution640x480Fps30, depthPoint);
 }
        /// <summary>
        /// Builds an opaque/transparent "green screen" mask of depth pixels
        /// closer than _depthThreshold (mapped into color space) and copies the
        /// resulting Bgra32 image into the back buffer.
        /// </summary>
        private void doPlainImageProcessing()
        {
            // Translate our most recent color coordinates - Done before the bg worker as
            // we cant acess the sensor inside another thread

            // Clear the green screen
            Array.Clear(_greenScreenPixelData, 0, _greenScreenPixelData.Length);

            // We have to iterate the whole depth image
            for (int y = 0; y < _depthStreamFrameHeight; ++y)
            {
                for (int x = 0; x < _depthStreamFrameWidth; ++x)
                {
                    // calculate index into depth array
                    int depthIndex = x + (y * _sensorRef.DepthStream.FrameWidth);

                    DepthImagePixel depthPixel = _depthPixels[depthIndex];

                    // retrieve the depth to color mapping for the current depth pixel
                    ColorImagePoint colorImagePoint = _colorCoordinates[depthIndex];

                    // scale color coordinates to depth resolution
                    int colorInDepthX = colorImagePoint.X;
                    int colorInDepthY = colorImagePoint.Y;

                    // make sure the depth pixel maps to a valid point in color space:
                    // y is checked with >= 0 and < height so we never index outside the array;
                    // x is checked with > 0 (not >= 0) because the gap-filling below also
                    // writes the pixel to the LEFT of the current one — because of how the
                    // sensor works it is more correct to do it this way than to set to the right
                    if (colorInDepthX > 0 && colorInDepthX < _depthStreamFrameWidth && colorInDepthY >= 0 && colorInDepthY < _depthStreamFrameHeight)
                    {
                        // calculate index into the green screen pixel array
                        int greenScreenIndex = colorInDepthX + (colorInDepthY * _depthStreamFrameWidth);

                        // OK emgu needs a black and white only image.
                        // Keep only pixels closer than the threshold; 0 means unknown depth.
                        if (depthPixel.Depth < _depthThreshold && depthPixel.Depth != 0)
                        {
                            // set opaque
                            _greenScreenPixelData[greenScreenIndex] = opaquePixelValue;

                            // compensate for depth/color not corresponding exactly by setting the pixel
                            // to the left to opaque as well
                            _greenScreenPixelData[greenScreenIndex - 1] = opaquePixelValue;
                        }
                    }
                }
            }

            // Render the mask and blit it into the pre-allocated back buffer.
            BitmapSource finalRef = BitmapSource.Create(_copyArea.Width, _copyArea.Height, 96, 96, PixelFormats.Bgra32, null, _greenScreenPixelData, _gsStride);

            finalRef.Freeze();
            finalRef.CopyPixels(_copyArea, _pBackBuffer, _gsBufferSize, _gsStride);
        }
示例#16
0
        /// <summary>
        /// Copies the frame's depth pixels and dispatches them (with the
        /// frame's timestamp and number) to DepthFrameCallback on a
        /// thread-pool thread. Disposes the frame.
        /// </summary>
        /// <param name="depthFrame">Frame to process; may be null.</param>
        private void handleDepthImageFrame(DepthImageFrame depthFrame)
        {
            using (depthFrame)
            {
                if (depthFrame != null)
                {
                    DepthImagePixel[] depthPixels = new DepthImagePixel[depthFrame.PixelDataLength];
                    depthFrame.CopyDepthImagePixelDataTo(depthPixels);

                    // FIX: read Timestamp/FrameNumber here, not inside the lambda.
                    // The original lambda accessed 'depthFrame' on the worker
                    // thread, potentially after the using block had disposed it.
                    var timestamp   = depthFrame.Timestamp;
                    var frameNumber = depthFrame.FrameNumber;

                    ThreadPool.QueueUserWorkItem(new WaitCallback(o => DepthFrameCallback(timestamp, frameNumber, depthPixels)));
                }
            }
        }
        /// <summary>
        /// Copies depth pixels from a ready frame and forwards them to the adapter.
        /// Does nothing when no frame is available.
        /// </summary>
        public void SensorDepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            var buffer = new DepthImagePixel[this.adapter.sensor.DepthStream.FramePixelDataLength];

            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (depthFrame == null)
                {
                    return;
                }

                depthFrame.CopyDepthImagePixelDataTo(buffer);
                this.adapter.OnDepthFrameAvailable(buffer);
            }
        }
示例#18
0
        /// <summary>
        /// Reshapes a flat, row-major pixel buffer into a 2-D array indexed
        /// [x, y] with dimensions ImageWidth by ImageHeight.
        /// </summary>
        /// <param name="rawData">Row-major source pixels.</param>
        /// <returns>2-D pixel array.</returns>
        private DepthImagePixel[,] CreateDepthArray(DepthImagePixel[] rawData)
        {
            var grid = new DepthImagePixel[ImageWidth, ImageHeight];

            for (int x = 0; x < ImageWidth; x++)
            {
                for (int y = 0; y < ImageHeight; y++)
                {
                    grid[x, y] = rawData[y * ImageWidth + x];
                }
            }

            return grid;
        }
示例#19
0
 /// <summary>
 /// Grabs the next depth frame (1 ms wait) and maps its pixels into
 /// skeleton-space points, storing the result in DepthData.
 /// Does nothing when no frame is available.
 /// </summary>
 private void getDepthFrameInfo()
 {
     using (DepthImageFrame dFrame = kinect.DepthStream.OpenNextFrame(1))
     {
         if (dFrame == null)
         {
             return;
         }

         var mapper   = new CoordinateMapper(kinect);
         var rawDepth = new DepthImagePixel[dFrame.PixelDataLength];
         dFrame.CopyDepthImagePixelDataTo(rawDepth);

         DepthData = new SkeletonPoint[dFrame.PixelDataLength];
         mapper.MapDepthFrameToSkeletonFrame(DepthImageFormat.Resolution640x480Fps30, rawDepth, DepthData);
     }
 }
示例#20
0
        /// <summary>
        /// Handles a Kinect sensor swap: disables the streams of the old sensor
        /// and enables depth + color (near mode when supported) on the new one,
        /// reallocating the pixel buffers to match.
        /// </summary>
        private void SensorChooserOnKinectChanged(object sender, KinectChangedEventArgs args)
        {
            // NOTE(review): 'error' is set on every failure path but never read
            // afterwards — either surface it (UI/log/return) or remove it.
            bool error = false;

            if (args.OldSensor != null)
            {
                try
                {
                    // Restore defaults before releasing the old sensor.
                    args.OldSensor.DepthStream.Range = DepthRange.Default;
                    args.OldSensor.SkeletonStream.EnableTrackingInNearRange = false;
                    args.OldSensor.DepthStream.Disable();
                    args.OldSensor.ColorStream.Disable();
                }
                catch (InvalidOperationException) { error = true; }
            }

            if (args.NewSensor != null)
            {
                CurrentKinectSensor = args.NewSensor;
                try
                {
                    args.NewSensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
                    args.NewSensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
                    //args.NewSensor.SkeletonStream.Enable();

                    // Size the buffers for the formats enabled above.
                    depthPixels  = new DepthImagePixel[CurrentKinectSensor.DepthStream.FramePixelDataLength];
                    _colorPixels = new byte[CurrentKinectSensor.ColorStream.FramePixelDataLength];
                    try
                    {
                        //args.NewSensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                        args.NewSensor.DepthStream.Range = DepthRange.Near;
                        args.NewSensor.SkeletonStream.EnableTrackingInNearRange = true;
                    }
                    catch (InvalidOperationException)
                    {
                        // Switch back to normal mode if Kinect does not support near mode
                        args.NewSensor.DepthStream.Range = DepthRange.Default;
                        args.NewSensor.SkeletonStream.EnableTrackingInNearRange = false;
                    }
                }
                catch (InvalidOperationException)
                {
                    error = true;
                }
            }
            else
            {
                error = true;
            }
        }
示例#21
0
        /// <summary>
        /// Calculate distribution of depth over an axis and pick the most common one to
        /// reduce the chance of error
        /// </summary>
        /// <param name="data">Source depth pixels.</param>
        /// <param name="start">Index of the first pixel in the window.</param>
        /// <param name="length">Number of pixels in the window.</param>
        /// <returns>The most frequent known depth value, or 0 when no pixel in
        /// the window has a known depth.</returns>
        public static double GetMostCommonDepthImagePixel(DepthImagePixel[] data, int start, int length)
        {
            if (start + length > data.Length)
                throw new InvalidOperationException();

            // FIX: the original copied known pixels into a fixed-size array and
            // left default structs (Depth == 0) in the skipped slots; those
            // zeros took part in the vote and could win it, defeating the
            // IsKnownDepth filter. Vote over the known depths only.
            var knownDepths = data.Skip(start)
                                  .Take(length)
                                  .Where(p => p.IsKnownDepth)
                                  .Select(p => p.Depth)
                                  .ToArray();

            // No known depth in the window: return 0, matching the value the
            // original produced when every slot stayed at its default.
            if (knownDepths.Length == 0)
                return 0.0;

            return (double)knownDepths.GroupBy(d => d)
                                      .OrderByDescending(g => g.Count())
                                      .First().Key;
        }
        /// <summary>
        /// Renders a depth frame as a rainbow-colored Bitmap (one palette entry
        /// per depth step within the reliable range) and records the per-pixel
        /// distances into the 'distance' field. Pixels with unknown depth are
        /// left at the Bitmap default.
        /// </summary>
        /// <param name="frame">Depth frame to render.</param>
        /// <returns>A new Bitmap of the frame's dimensions.</returns>
        private Bitmap CreateBitmapFromSensor3(DepthImageFrame frame)
        {
            int minDepth   = frame.MinDepth;
            int maxDepth   = frame.MaxDepth;
            int colorCount = maxDepth - minDepth + 10;

            // Precompute one rainbow color per depth step.
            var palette = new Color[colorCount];
            for (int c = 0; c < colorCount; c++)
            {
                palette[c] = MapRainbowColor(c, 0, colorCount);
            }

            int width  = frame.Width;
            int height = frame.Height;

            var pixels = new DepthImagePixel[frame.PixelDataLength];
            frame.CopyDepthImagePixelDataTo(pixels);

            var distances = new int[height][];
            var bmp = new Bitmap(frame.Width, frame.Height);

            for (int y = 0; y < height; y++)
            {
                int rowStart = y * width;
                distances[y] = new int[width];

                for (int x = 0; x < width; x++)
                {
                    int index = rowStart + x;
                    if (!pixels[index].IsKnownDepth)
                    {
                        continue;
                    }

                    int depth = pixels[index].Depth;
                    distances[y][x] = depth;

                    // Clamp to the reliable range before indexing the palette.
                    int clamped = depth > maxDepth ? maxDepth
                                : depth < minDepth ? minDepth
                                : depth;
                    bmp.SetPixel(x, y, palette[clamped - minDepth]);
                }
            }

            distance = distances;
            return bmp;
        }
示例#23
0
        /// <summary>
        /// Buffers the latest color, depth, and skeleton data from an
        /// AllFramesReady event, then fires FireAllFramesDispatched.
        /// Buffers are allocated lazily on first use.
        /// </summary>
        private void AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
            {
                if (colorImageFrame != null)
                {
                    if (ColorPixels == null)
                    {
                        ColorPixels = new byte[colorImageFrame.PixelDataLength];
                    }
                    colorImageFrame.CopyPixelDataTo(ColorPixels);
                    // NOTE(review): the frame stored here is disposed as soon as
                    // this using block exits; later calls on it will fail.
                    // Confirm consumers only read the copied ColorPixels buffer.
                    ColorImageFrame = colorImageFrame;
                }
            }

            using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
            {
                if (depthImageFrame != null)
                {
                    if (DepthImagePixels == null)
                    {
                        DepthImagePixels = new DepthImagePixel[depthImageFrame.PixelDataLength];
                    }
                    depthImageFrame.CopyDepthImagePixelDataTo(DepthImagePixels);
                    if (DepthPixels == null)
                    {
                        DepthPixels = new short[depthImageFrame.PixelDataLength];
                    }
                    depthImageFrame.CopyPixelDataTo(DepthPixels);
                    // NOTE(review): same concern as above — this frame is disposed
                    // when the using block ends.
                    DepthImageFrame = depthImageFrame;
                    // Invalidate any cached face-tracking frame for this capture.
                    _faceFrame      = null;
                }
            }

            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                if (skeletonFrame != null)
                {
                    if (Skeletons == null)
                    {
                        Skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
                    }
                    skeletonFrame.CopySkeletonDataTo(Skeletons);
                    //CorrectRoomCoords();
                }
            }

            FireAllFramesDispatched();
        }
示例#24
0
 /// <summary>
 /// Reinterprets a raw byte buffer as an array of DepthImagePixel structs.
 /// </summary>
 /// <param name="data">Source buffer holding packed DepthImagePixel data.</param>
 /// <param name="length">Number of pixels to extract.</param>
 /// <returns>A new array of <paramref name="length"/> pixels.</returns>
 private static DepthImagePixel[] ByteToDepthImagePixel(byte[] data, int length)
 {
     DepthImagePixel[] output = new DepthImagePixel[length];
     unsafe
     {
         // FIX: guard against reading past the end of 'data'. The original
         // pointer loop silently read out-of-bounds memory whenever the
         // buffer was smaller than length * sizeof(DepthImagePixel).
         if ((long)length * sizeof(DepthImagePixel) > data.Length)
             throw new ArgumentException("Buffer is too small for the requested pixel count.", "data");

         fixed (byte* p = data)
         {
             DepthImagePixel* pixels = (DepthImagePixel*)p;
             for (int i = 0; i < length; i++)
                 output[i] = pixels[i];
         }
     }
     return output;
 }
 /// <summary>
 /// Returns the smallest depth of at least 800 among pixels whose
 /// PlayerIndex equals playerIndex2, starting from a 4000 ceiling
 /// (returned unchanged when no such pixel is found).
 /// </summary>
 int closerDepthValueHead2(DepthImagePixel[] depthPixelData)
 {
     int closest = 4000;
     foreach (DepthImagePixel px in depthPixelData)
     {
         if (px.PlayerIndex != playerIndex2)
         {
             continue;
         }
         if (px.Depth >= 800 && px.Depth < closest)
         {
             closest = px.Depth;
         }
     }
     return closest;
 }
示例#26
0
        /// <summary>
        /// Depth-frame handler: scans every player pixel, tracking the leftmost
        /// (fingerTipLeft), rightmost (fingerTipRight), and topmost (hair)
        /// points in skeleton space, then updates heightFingerTip with the
        /// left-right distance in centimeters.
        /// </summary>
        private void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            try
            {
                DepthImagePixel[] depthImagePixel;
                using (var depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame == null)
                    {
                        return;
                    }
                    depthImagePixel = depthFrame.GetRawPixelData();
                }

                // Reset the extrema so any player pixel beats them.
                fingerTipLeft.X  = int.MaxValue;
                fingerTipRight.X = int.MinValue;
                hair.Y           = int.MaxValue;
                for (int i = 0; i < depthImagePixel.Length; i++)
                {
                    DepthImagePixel pixel = depthImagePixel[i];
                    // Only consider pixels with known depth belonging to a player.
                    if (pixel.IsKnownDepth && pixel.PlayerIndex > 0)
                    {
                        // Convert the flat index back to frame coordinates.
                        var point = new DepthImagePoint()
                        {
                            X     = i % sensor.DepthStream.FrameWidth,
                            Y     = i / sensor.DepthStream.FrameWidth,
                            Depth = pixel.Depth,
                        };

                        if (point.X < fingerTipLeft.X)
                        {
                            fingerTipLeft = mapper.MapDepthPointToSkeletonPoint(sensor.DepthStream.Format, point);
                        }
                        if (point.X > fingerTipRight.X)
                        {
                            fingerTipRight = mapper.MapDepthPointToSkeletonPoint(sensor.DepthStream.Format, point);
                        }
                        if (point.Y < hair.Y)
                        {
                            hair = mapper.MapDepthPointToSkeletonPoint(sensor.DepthStream.Format, point);
                        }
                    }
                }

                heightFingerTip = Distance(fingerTipLeft, fingerTipRight) * 100;
            }
            // NOTE(review): empty catch swallows every exception, including
            // programming errors — at minimum log, or narrow the exception type.
            catch { }
        }
        /// <summary>
        /// Allocates all per-frame buffers for the given Kinect format (depth
        /// pixels, 4-byte-per-pixel color buffer, skeletons, and color-to-depth
        /// mapping points) and wraps them in a KinectFrameWorkItem.
        /// </summary>
        /// <param name="format">Formats and skeleton count to allocate for.</param>
        public static KinectFrameWorkItem Create(KinectFormat format)
        {
            var depthDim = FormatHelper.GetDepthSize(format.DepthImageFormat);
            var colorDim = FormatHelper.GetColorSize(format.ColorImageFormat);

            int depthPixelCount = (int)(depthDim.Width * depthDim.Height);
            int colorPixelCount = (int)(colorDim.Width * colorDim.Height);

            return new KinectFrameWorkItem(
                format,
                new DepthImagePixel[depthPixelCount],
                new byte[colorPixelCount * 4],           // 4 bytes per color pixel
                new Skeleton[format.NumSkeletons],
                new DepthImagePoint[colorPixelCount]);
        }
        // Process the frame using DepthImagePixel.
        void kinect_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                // Skip when no frame is available or the previous one is still
                // being processed.
                if (depthFrame == null || processingFrame)
                {
                    return;
                }

                var depthPixels = new DepthImagePixel[depthFrame.PixelDataLength];
                depthFrame.CopyDepthImagePixelDataTo(depthPixels);

                // Hand the copied pixels to the dispatcher at background priority.
                // processingFrame gates re-entry (presumably reset once
                // ProcessDepthData runs — confirm).
                Dispatcher.BeginInvoke(
                    DispatcherPriority.Background,
                    (Action <DepthImagePixel[]>)(d => ProcessDepthData(d)),
                    depthPixels);

                processingFrame = true;
            }
        }
示例#29
0
        /// <summary>
        /// Scans the upper half of the color-mapped depth grid and returns the
        /// smallest known depth value; <see cref="short.MaxValue"/> when none is known.
        /// </summary>
        private static short FindMinDepth()
        {
            short closest = short.MaxValue;

            for (int x = 0; x < myKin.ColorStream.FrameWidth; x++)
            {
                // Only the top half of the image is considered.
                for (int y = 0; y < myKin.ColorStream.FrameHeight / 2; y++)
                {
                    DepthImagePixel pixel = DepthMappedToColor[x, y];
                    if (pixel.IsKnownDepth && pixel.Depth < closest)
                    {
                        closest = pixel.Depth;
                    }
                }
            }

            return closest;
        }
示例#30
0
        // Currently unused.
        /// <summary>
        /// Renders a depth frame as a grayscale 32bpp bitmap: in-range depths keep
        /// their low byte as intensity, out-of-range depths become black.
        /// </summary>
        private Bitmap DepthImageFrameToBitmap(DepthImageFrame depthFrame)
        {
            var depthPixels = new DepthImagePixel[depthFrame.PixelDataLength];
            var colorPixels = new byte[depthFrame.PixelDataLength * 4];
            depthFrame.CopyDepthImagePixelDataTo(depthPixels);

            // Reliable depth window for this frame.
            int minDepth = depthFrame.MinDepth;
            int maxDepth = depthFrame.MaxDepth;

            for (int i = 0; i < depthPixels.Length; ++i)
            {
                short depth = depthPixels[i].Depth;

                // Truncating to a byte discards the most-significant bits, so the
                // intensity "wraps" but fine detail is preserved. Values outside the
                // reliable range map to 0 (black). A lookup table would be faster in
                // production code — see the KinectExplorer KinectDepthViewer sample.
                byte intensity = (byte)(depth >= minDepth && depth <= maxDepth ? depth : 0);

                // BGR layout; the fourth byte is unused in Format32bppRgb and stays 0.
                int baseIndex = i * 4;
                colorPixels[baseIndex] = intensity;     // blue
                colorPixels[baseIndex + 1] = intensity; // green
                colorPixels[baseIndex + 2] = intensity; // red
            }

            return ArrayToBitmap(colorPixels, depthFrame.Width, depthFrame.Height, PixelFormat.Format32bppRgb);
        }
示例#31
0
 /// <summary>
 /// Attaches to the first connected Kinect sensor, enables its 640x480@30fps
 /// depth stream and starts capturing. Does nothing when no sensor is connected.
 /// </summary>
 public Kinect()
 {
     // Pick the first sensor that reports itself as connected.
     foreach (var candidate in KinectSensor.KinectSensors)
     {
         if (candidate.Status != KinectStatus.Connected)
         {
             continue;
         }
         _kinectSensor = candidate;
         break;
     }

     if (_kinectSensor == null)
     {
         return;
     }

     _kinectSensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
     Depth = new DepthImagePixel[_kinectSensor.DepthStream.FramePixelDataLength];
     _kinectSensor.DepthFrameReady += OnDepthImageReady;
     _kinectSensor.Start();
 }
示例#32
0
        /// <summary>
        /// Copies each depth frame into a short[] buffer and rebuilds the point
        /// cloud from a random sample of it.
        /// </summary>
        private void _sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            using (var frame = e.OpenDepthImageFrame())
            {
                if (frame == null)
                {
                    return;
                }

                var pixels = new DepthImagePixel[frame.PixelDataLength];
                frame.CopyDepthImagePixelDataTo(pixels);

                var buff = new short[pixels.Length];
                for (int i = 0; i < pixels.Length; i++)
                {
                    buff[i] = pixels[i].Depth;
                }

                //_depthMapPainter.UpdateWith(buff);
                // "% 1" currently keeps every frame; raise the divisor to throttle.
                if (frame.FrameNumber % 1 == 0)
                {
                    Points = new PointCloundConverter(new RandomSampler(2000)).Convert(buff, frame.Width, frame.Height, frame.Width, 525f).Points;
                }
            }
        }
示例#33
0
        /// <summary>
        /// Converts a depth frame's raw 16-bit samples into a 16bpp (RGB555) bitmap
        /// by copying the depth shorts directly into the bitmap's pixel memory.
        /// </summary>
        public static Bitmap GetDepthFrame(DepthImageFrame frame)
        {
            var depthData = new DepthImagePixel[frame.PixelDataLength];
            frame.CopyDepthImagePixelDataTo(depthData);

            // Flatten to raw 16-bit values so they can be blitted as-is.
            var pixelData = new short[frame.PixelDataLength];
            for (int i = 0; i < pixelData.Length; i++)
            {
                pixelData[i] = depthData[i].Depth;
            }

            var bmap = new Bitmap(frame.Width, frame.Height, PixelFormat.Format16bppRgb555);//Format32bppRgb Format16bppRgb555
            var region = new System.Drawing.Rectangle(0, 0, frame.Width, frame.Height);
            BitmapData bmapdata = bmap.LockBits(region, ImageLockMode.WriteOnly, bmap.PixelFormat);

            Marshal.Copy(pixelData, 0, bmapdata.Scan0, frame.Width * frame.Height);
            bmap.UnlockBits(bmapdata);
            return bmap;
        }
示例#34
0
 /// <summary>
 /// NOT a flood fill, standin for testing. Collects every depth pixel in the
 /// 400–1000 mm band regardless of the seed coordinates.
 /// </summary>
 /// <param name="depthData">Depth pixels for the whole frame.</param>
 /// <param name="x">Seed column — unused by this standin.</param>
 /// <param name="y">Seed row — unused by this standin.</param>
 /// <param name="width">Frame width, used to turn linear indices into (x, y).</param>
 /// <param name="height">Frame height — unused by this standin.</param>
 /// <param name="mmCutoff">Depth cutoff — unused by this standin.</param>
 /// <returns>The collected points as a pruned <see cref="PointCluster"/>.</returns>
 public static PointCluster FloodFill(DepthImagePixel[] depthData, int x, int y, int width, int height, int mmCutoff)
 {
     var collected = new HashSet<DepthPoint>();

     // Stop scanning once the cluster reaches the size cap.
     for (int index = 0; index < depthData.Length && collected.Count <= MAX_PIXELS; index++)
     {
         short depth = depthData[index].Depth;

         // Accept depths strictly between 400 and 1000 mm (the original also
         // tested < 4000, which the < 1000 bound already implies).
         if (depth > 400 && depth < 1000)
         {
             collected.Add(new DepthPoint(index % width, index / width, depth));
         }
     }

     PointCluster cluster = new PointCluster(collected);
     //Console.WriteLine("Before pruning: " + cluster.points.Count);
     cluster.Prune();
     return cluster;
 }
示例#35
0
        /// <summary>
        /// Wires the given sensor up for 320x240@30fps depth tracking and starts it.
        /// On an I/O failure the sensor reference is cleared instead of throwing.
        /// </summary>
        /// <param name="mySensor">Sensor to take ownership of.</param>
        public HandTracker(KinectSensor mySensor)
        {
            kinectSensor = mySensor;
            kinectSensor.DepthStream.Enable(DepthImageFormat.Resolution320x240Fps30);
            kinectSensor.DepthFrameReady += sensorDepthFrameReady;

            hand = new Point3d();
            imagePixelList = new List<DepthImagePixel>();
            closestPoint = new DepthImagePixel();

            // Start the sensor!
            try
            {
                kinectSensor.Start();
            }
            catch (IOException)
            {
                // Sensor unusable (e.g. already claimed elsewhere) — disable ourselves.
                kinectSensor = null;
            }
        }
示例#36
0
        /// <summary>
        /// Calculate distribution of depth over an axis and pick the most common one to
        /// reduce the chance of error.
        /// </summary>
        /// <param name="data">Depth pixels to sample from.</param>
        /// <param name="start">Index of the first pixel in the window.</param>
        /// <param name="length">Number of pixels in the window.</param>
        /// <returns>
        /// The modal known depth in the window, or 0 when the window contains no
        /// pixel with a known depth.
        /// </returns>
        /// <exception cref="InvalidOperationException">
        /// The window extends past the end of <paramref name="data"/>.
        /// </exception>
        public static double GetMostCommonDepthImagePixel(DepthImagePixel[] data, int start, int length)
        {
            if (start + length > data.Length)
            {
                throw new InvalidOperationException();
            }

            // Bug fix: the original copied known pixels into a fresh array but left
            // unknown slots default-initialized (Depth == 0), so those zeros took
            // part in the vote and could win the mode. Count only known depths.
            var mode = data.Skip(start)
                           .Take(length)
                           .Where(p => p.IsKnownDepth)
                           .GroupBy(p => p.Depth)
                           .OrderByDescending(g => g.Count())
                           .Select(g => (double)g.Key)
                           .FirstOrDefault();

            return mode;
        }
示例#37
0
        // Depth-frame handler: converts every sixth depth frame to a Bgr32 image
        // and shows it in the depthStream control.
        void Sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
        {
            // Throttle to one frame in six. NOTE(review): counter is not advanced
            // in this method — presumably another handler increments it; confirm,
            // otherwise this gate either always or never passes.
            if (counter % 6 != 0)
            {
                return;
            }

            WriteableBitmap outputBitmap;

            byte[]            depthFrame32;
            DepthImagePixel[] depthPixelData;

            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                // OpenDepthImageFrame() returns null when no frame is available.
                if (depthFrame != null && sensorKinect != null)
                {
                    //Using standard SDK
                    depthPixelData = new DepthImagePixel[sensorKinect.DepthStream.FramePixelDataLength];
                    depthFrame.CopyDepthImagePixelDataTo(depthPixelData);

                    depthFrame32 = new byte[depthFrame.Width * depthFrame.Height * 4]; //To form an RGB image
                    byte[] convertedDepthBits = ConvertDepthFrame(depthPixelData, depthFrame32);

                    // NOTE(review): a new WriteableBitmap is allocated per frame;
                    // reusing one instance would reduce GC pressure.
                    outputBitmap = new WriteableBitmap
                                   (
                        depthFrame.Width,
                        depthFrame.Height,
                        96,  //DpiX
                        96,  // DpiY
                        PixelFormats.Bgr32,
                        null
                                   );

                    // Stride is width * 4 bytes for Bgr32.
                    outputBitmap.WritePixels(
                        new Int32Rect(0, 0, depthFrame.Width, depthFrame.Height),
                        convertedDepthBits,
                        depthFrame.Width * 4,
                        0);

                    this.depthStream.Source = outputBitmap;
                }
            }
        }
示例#38
0
        /// <summary>
        /// Paints a 640x480 overlay where every depth pixel belonging to a tracked
        /// player gets its second byte set to 255, and uses it as the canvas background.
        /// </summary>
        private void SensorOnDepthFrameReasy(object sender, DepthImageFrameReadyEventArgs depthImageFrameReadyEventArgs)
        {
            using (var frame = depthImageFrameReadyEventArgs.OpenDepthImageFrame())
            {
                // Bug fix: OpenDepthImageFrame() returns null when no frame is
                // available; the original dereferenced it unconditionally and
                // crashed with a NullReferenceException on dropped frames.
                if (frame == null)
                {
                    return;
                }

                //ImageCanvas.Background = new ImageBrush(frame.ToBitmapSource());
                var depthImagePixels = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];
                frame.CopyDepthImagePixelDataTo(depthImagePixels);

                var colorPixels = new byte[4 * sensor.DepthStream.FramePixelDataLength];
                for (int i = 0; i < colorPixels.Length; i += 4)
                {
                    // PlayerIndex != 0 means this depth pixel lies on a tracked player.
                    if (depthImagePixels[i / 4].PlayerIndex != 0)
                    {
                        // Byte +1 of the 4-byte pixel — presumably the green channel
                        // of a BGRA layout; confirm against ToBitmapSource().
                        colorPixels[i + 1] = 255;
                    }
                }

                ImageCanvas.Background = new ImageBrush(colorPixels.ToBitmapSource(640, 480));
            }
        }
示例#39
0
        // Copies one depth frame into a grayscale render target and refreshes the
        // color→depth coordinate map texture. Disposes the frame when done.
        void depthFrameSetUp(DepthImageFrame frame, DepthImagePixel[] depthPixels, Color[] depthArray, RenderTarget2D depthTarget)
        {
            depthFramyBusy = true;
            using (frame)
            {
                if (frame != null)
                {
                    //Console.WriteLine("Has frame");
                    frame.CopyDepthImagePixelDataTo(depthPixels);

                    for (int i = 0; i < depthPixels.Length; i++)
                    {

                        // Known, in-range depths keep their value; everything else is 0...
                        int b = (depthPixels[i].Depth >= frame.MinDepth && depthPixels[i].Depth <= frame.MaxDepth && depthPixels[i].IsKnownDepth) ? depthPixels[i].Depth : 0;
                        // ...except too-far pixels, which clamp to MaxDepth instead of 0.
                        if (depthPixels[i].Depth >= frame.MaxDepth)
                        {
                            b = frame.MaxDepth;
                        }
                        // Normalize into [0,1] for a grayscale color.
                        // NOTE(review): when b == 0 (unknown/too-near) f goes negative —
                        // presumably clamped by the Color constructor; confirm.
                        float f = (float)((float)b - frame.MinDepth) / (float)(frame.MaxDepth - frame.MinDepth);

                        depthArray[i] = new Color(f, f, f, 1);
                    }

                    depthTarget.SetData(depthArray);
                    // Map every color pixel to its depth-space coordinate, then encode
                    // the coordinates as UV values in the R/G channels of a texture.
                    DepthImagePoint[] dip = new DepthImagePoint[kinect.ColorStream.FrameWidth * kinect.ColorStream.FrameHeight];
                    kinect.CoordinateMapper.MapColorFrameToDepthFrame(kinect.ColorStream.Format,
                        kinect.DepthStream.Format, depthPixels, dip);
                    for (int i = 0; i < depthCoordArray.Length; i++)
                    {
                        depthCoordArray[i] = new Color((float)dip[i].X / (float)kinect.ColorStream.FrameWidth, (float)dip[i].Y / (float)kinect.ColorStream.FrameHeight, 0, 1);
                    }
                    depthCoordMap.SetData(depthCoordArray);

                }
                else
                {

                }
            }
            depthFramyBusy = false;
        }
示例#40
0
        /// <summary>
        /// Snapshots all metadata and depth samples from a sensor frame so the data
        /// outlives the (disposable) <see cref="DepthImageFrame"/>.
        /// </summary>
        public TDepthFrame(DepthImageFrame sensorFrame)
        {
            //TODO This can be done better
            int length = sensorFrame.PixelDataLength;

            var rawPixels = new DepthImagePixel[length];
            sensorFrame.CopyDepthImagePixelDataTo(rawPixels);

            // Keep only the raw depth shorts; player-index bits are dropped.
            var depths = new short[length];
            for (int i = 0; i < length; i++)
            {
                depths[i] = rawPixels[i].Depth;
            }

            DepthData = depths;

            PixelDataLength = length;
            BytesPerPixel = sensorFrame.BytesPerPixel;
            FrameNumber = sensorFrame.FrameNumber;
            Width = sensorFrame.Width;
            Height = sensorFrame.Height;
            Timestamp = sensorFrame.Timestamp;

            MinDepth = sensorFrame.MinDepth;
            MaxDepth = sensorFrame.MaxDepth;
        }
示例#41
0
        /// <summary>
        /// Scans the current depth buffer for the nearest known pixel beyond
        /// <c>minDepth</c> and stores it in <c>closestPoint</c>.
        /// </summary>
        /// <param name="pixelDataLenght">Number of pixels to examine in <c>imagePixelData</c>.</param>
        private void findTheClosestPoint(int pixelDataLenght)
        {
            for (int i = 0; i < pixelDataLenght; i++)
            {
                if (this.imagePixelData[i].IsKnownDepth == true)
                {
                    this.imagePixelList.Add(this.imagePixelData[i]);
                }
            }

            // Bug fix: the previous version compared each pixel against
            // this.closestPoint.Depth, which starts at 0 (default DepthImagePixel,
            // see the constructor) and was never reset, so no pixel could ever
            // become the new closest. Search against a fresh per-call minimum.
            short bestDepth = short.MaxValue;
            foreach (DepthImagePixel pixle in imagePixelList)
            {
                if (pixle.Depth > minDepth && pixle.Depth < bestDepth)
                {
                    bestDepth = pixle.Depth;
                    this.closestPoint = pixle;
                }
            }

            Console.WriteLine("The closest point depth is: " + this.closestPoint.Depth + " ( " + this.counter + " )");
            this.counter++;
            this.imagePixelList.Clear();
        }
        /// <summary>
        /// Converts to grayscale, in place, every color pixel whose depth reading is
        /// closer than <paramref name="distanciaMaxima"/>. Disposes <paramref name="quadro"/>.
        /// </summary>
        private void ReconhecerDistancia(DepthImageFrame quadro, byte[] bytesImagem, int distanciaMaxima)
        {
            if (quadro == null || bytesImagem == null) return;

            using (quadro)
            {
                var imagemProfundidade = new DepthImagePixel[quadro.PixelDataLength];
                quadro.CopyDepthImagePixelDataTo(imagemProfundidade);

                for (int indice = 0; indice < imagemProfundidade.Length; indice++)
                {
                    if (imagemProfundidade[indice].Depth >= distanciaMaxima)
                    {
                        continue;
                    }

                    // Grayscale: paint all three channels with the brightest one.
                    int indiceImageCores = indice * 4;
                    byte maiorValorCor = Math.Max(bytesImagem[indiceImageCores], Math.Max(bytesImagem[indiceImageCores + 1], bytesImagem[indiceImageCores + 2]));

                    bytesImagem[indiceImageCores] = maiorValorCor;
                    bytesImagem[indiceImageCores + 1] = maiorValorCor;
                    bytesImagem[indiceImageCores + 2] = maiorValorCor;
                }
            }
        }
示例#43
0
        /// <summary>
        /// Builds a 32bpp grayscale bitmap from a depth frame: each pixel's intensity
        /// is the low byte of its depth; depths outside the reliable range are black.
        /// </summary>
        private Bitmap DepthImageFrameToBitmap(DepthImageFrame depthFrame)
        {
            var depthPixels = new DepthImagePixel[depthFrame.PixelDataLength];
            depthFrame.CopyDepthImagePixelDataTo(depthPixels);

            // Reliable depth window for this frame; anything outside renders black.
            int minDepth = depthFrame.MinDepth;
            int maxDepth = depthFrame.MaxDepth;

            var colorPixels = new byte[depthFrame.PixelDataLength * 4];
            int write = 0;
            foreach (DepthImagePixel pixel in depthPixels)
            {
                short depth = pixel.Depth;

                // Keeping the least-significant byte preserves detail at the cost of
                // intensity wrap-around. A lookup table would be faster in production
                // code — see the KinectExplorer KinectDepthViewer sample.
                byte intensity = (byte)(depth >= minDepth && depth <= maxDepth ? depth : 0);

                colorPixels[write++] = intensity; // blue
                colorPixels[write++] = intensity; // green
                colorPixels[write++] = intensity; // red
                write++;                          // fourth byte unused in 32bppRgb
            }

            return ArrayToBitmap(colorPixels, depthFrame.Width, depthFrame.Height, PixelFormat.Format32bppRgb);
        }
        /// <summary>
        /// Process data from one Kinect depth frame.
        /// </summary>
        /// <param name="depthData">
        /// Kinect depth data.
        /// </param>
        /// <param name="depthFrame">
        /// <see cref="DepthImageFrame"/> from which we obtained depth data.
        /// </param>
        /// <exception cref="ArgumentNullException">Either argument is null.</exception>
        public override void ProcessDepth(DepthImagePixel[] depthData, DepthImageFrame depthFrame)
        {
            // nameof keeps the parameter names refactor-safe; the runtime strings
            // are identical to the previous literals.
            if (depthData == null)
            {
                throw new ArgumentNullException(nameof(depthData));
            }

            if (depthFrame == null)
            {
                throw new ArgumentNullException(nameof(depthFrame));
            }

            // Forward to the background-removal stream only when it is active.
            if (this.backgroundRemovalStreamIsEnabled)
            {
                this.backgroundRemovalStream.ProcessDepth(depthData, depthFrame.Timestamp);
            }
        }
        /// <summary>
        /// Process depth data to obtain user viewer image.
        /// </summary>
        /// <param name="depthData">
        /// Kinect depth data.
        /// </param>
        /// <param name="depthFrame">
        /// <see cref="DepthImageFrame"/> from which we obtained depth data.
        /// </param>
        /// <remarks>
        /// NOTE(review): async void means exceptions thrown after the await cannot
        /// be observed by callers; confirm SendTwoPartStreamMessageAsync failures
        /// are acceptable to lose for this fire-and-forget path.
        /// </remarks>
        private async void ProcessUserViewerImageAsync(DepthImagePixel[] depthData, DepthImageFrame depthFrame)
        {
            if (this.userViewerIsEnabled)
            {
                if (this.isProcessingUserViewerImage)
                {
                    // Re-entered ProcessUserViewerImageAsync while a previous image is already being processed.
                    // Just ignore new depth frames until the current one finishes processing.
                    return;
                }

                this.isProcessingUserViewerImage = true;

                try
                {
                    this.userViewerColorizer.ColorizeDepthPixels(depthData, depthFrame.Width, depthFrame.Height);

                    // NOTE(review): 192x320 is hard-coded — presumably it matches the
                    // colorizer's output dimensions; verify against its configuration.
                    this.userViewerStreamMessage.timestamp = depthFrame.Timestamp;
                    this.userViewerStreamMessage.width = 192;
                    this.userViewerStreamMessage.height = 320;
                    this.userViewerStreamMessage.bufferLength = this.userViewerColorizer.Buffer.Length;

                    await this.ownerContext.SendTwoPartStreamMessageAsync(this.userViewerStreamMessage, this.userViewerColorizer.Buffer);
                }
                finally
                {
                    // Always clear the re-entrancy flag, even when sending failed.
                    this.isProcessingUserViewerImage = false;
                }
            }
        }
示例#46
0
        // Handle Privacy Screen
        //
        // Blacks out every color pixel whose depth lies outside a band around the
        // tracked "privacy" skeleton, then displays the masked image.
        // NOTE(review): fields s (chosen skeleton) and ks (sensor) are set elsewhere —
        // presumably ChoosePrivacySkeleton() assigns s; confirm before relying on it.
        private void KinectSensorOnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            var colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
            var depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
            //var skeltImageFrame = allFramesReadyEventArgs.OpenSkeletonFrame();
            try
            {
                using (var skeletonFrame = allFramesReadyEventArgs.OpenSkeletonFrame())
                {
                    if (null != skeletonFrame)
                    {
                        skeletonFrame.CopySkeletonDataTo(this.skeletons);
                        ChoosePrivacySkeleton();
                    }
                }
                {
                    // Bail out when either frame was dropped; the finally block
                    // still disposes whichever frames were opened.
                    if (colorImageFrame == null)
                    {
                        return;
                    }
                    if (depthImageFrame == null)
                    {
                        return;
                    }
                    // if (skeltImageFrame == null)
                    //    {
                    //      return;
                    //   }
              //      Console.WriteLine(s);
                    depthRange = 300;
                    if (s != null)
                    {
                        // Find the nearest and farthest joint depths among joints that
                        // map to valid color coordinates. min starts at 50 (meters),
                        // a sentinel larger than any real joint depth.
                        double min= 50;
                        double max = 0;
                        depthRange = 300;
                        foreach (Joint joint in s.Joints)
                        {
                            ColorImagePoint point = ks.CoordinateMapper.MapSkeletonPointToColorPoint(joint.Position, ColorImageFormat.RgbResolution640x480Fps30);
                            // The huge bounds reject the sentinel coordinates the mapper
                            // returns for joints that cannot be mapped.
                            if (point.X > -1000000 && point.X < 1000000 && point.Y > -1000000 && point.Y < 1000000)
                            {
                                Joint j = joint;
                                double depth = joint.Position.Z;

                                if (depth < min)
                                {
                                    min = depth;
                                }
                                if (depth > max)
                                {
                                    max = depth;
                                }
                            }
                        }
                        // Meters → millimeters, widened by offsets (800/100/50 mm).
                        // NOTE(review): magic numbers — appear hand-tuned; consider
                        // lifting into named constants with their derivation.
                        currentDepth = min * 1000 - 800 - 100 - 50;
                        depthRange = (int)(max * 1000 - 800 - 100 - currentDepth);
                    /*    Joint jointHead = s.Joints[JointType.Head];
                        Joint jointCenter = s.Joints[JointType.ShoulderCenter];
                        ColorImagePoint pointHead = ks.CoordinateMapper.MapSkeletonPointToColorPoint(jointHead.Position, ColorImageFormat.RgbResolution640x480Fps30);
                        ColorImagePoint pointCenter = ks.CoordinateMapper.MapSkeletonPointToColorPoint(jointCenter.Position, ColorImageFormat.RgbResolution640x480Fps30);
                        Console.WriteLine(jointHead.Position.Z * 1000);
                        if (pointHead.X > -1000000 && pointHead.X < 1000000 && pointHead.Y > -1000000 && pointHead.Y < 1000000)
                        {
                            currentDepth = jointHead.Position.Z * 1000 - 700 - 100;
                            depthRange = 200;
                        }
                        else if (pointCenter.X > -1000000 && pointCenter.X < 1000000 && pointCenter.Y > -1000000 && pointCenter.Y < 1000000)
                        {
                            currentDepth = jointCenter.Position.Z * 1000 - 700 - 100;
                            depthRange = 200;
                        }*/
                    }

                    DepthImagePixel[] depthPixels = new DepthImagePixel[depthImageFrame.PixelDataLength];
                    depthImageFrame.CopyDepthImagePixelDataTo(depthPixels);
                    int minDepth = depthImageFrame.MinDepth;
                    int maxDepth = depthImageFrame.MaxDepth;
                    // Color→depth scale factors (integer division; assumes the color
                    // resolution is an exact multiple of the depth resolution).
                    var ratio = colorImageFrame.PixelDataLength / depthImageFrame.PixelDataLength;
                    var heightC = colorImageFrame.Height;
                    var heightD = depthImageFrame.Height;
                    var LengthC = colorImageFrame.PixelDataLength;
                    var LengthD = depthPixels.Length;
                    var ratH = colorImageFrame.Height / depthImageFrame.Height;
                    var ratW = colorImageFrame.Width / depthImageFrame.Width;

                    // Make a copy of the color frame for displaying.
                    var haveNewFormat = this.currentColorImageFormat != colorImageFrame.Format;
                    if (haveNewFormat)
                    {
                        // (Re)allocate the pixel buffer and bitmap on format change.
                        this.currentColorImageFormat = colorImageFrame.Format;
                        this.colorImageData = new byte[colorImageFrame.PixelDataLength];

                        this.colorImageWritableBitmap = new WriteableBitmap(
                            colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                        ColorImage.Source = this.colorImageWritableBitmap;
                    }

                    colorImageFrame.CopyPixelDataTo(this.colorImageData);
                    // Freshly allocated, so the masked image starts out all black.
                    newColorImageData = new byte[this.colorImageData.Length];
                    int tempMinDepth = (int)(minDepth + currentDepth);
                    int tempMaxDepth = (int)(minDepth + currentDepth + depthRange);
                    for (int i = 0; i < colorImageFrame.Width; ++i)
                    {
                        int srcX = i / ratW;
                        for (int j = 0; j < colorImageFrame.Height; ++j)
                        {
                            int srcY = j / ratH;
                            // NOTE(review): the +2 column / -15 row offsets look like a
                            // hand-tuned registration fix between the color and depth
                            // images — confirm against CoordinateMapper-based mapping.
                            int srcPixel = srcX + 2 + ((srcY - 15) * depthImageFrame.Width);
                            int tgtPixel = (i + (j * colorImageFrame.Width));
                            int l = depthPixels.Length;
                            
                            if (srcPixel >= 0 && srcPixel < l)
                            {
                                //      currentDepth = currentDepth + .00001;
                                short depth = depthPixels[(int)srcPixel].Depth;
                                /*
                                if (depth < tempMinDepth)
                                {
                                    //changePixel(tgtPixel, 0);
                                    int index = tgtPixel * 4;
                                    this.colorImageData[index++] = 0;
                                    this.colorImageData[index++] = 0;
                                    this.colorImageData[index++] = 0;
                                    //changePixel(tgtPixel, new byte[]{255, 255, 255});
                                }
                                //else if (depth > maxDepth)
                                else if (depth > tempMaxDepth)
                                {
                                   // changePixel(tgtPixel, 0);
                                    int index = tgtPixel * 4;
                                    this.colorImageData[index++] = 0;
                                    this.colorImageData[index++] = 0;
                                    this.colorImageData[index++] = 0;
                                }
                                else
                                {
                                    //changePixel(tgtPixel, 0);
                                }
                                if (currentDepth + depthRange >= maxDepth + 300)
                                {
                                    currentDepth = minDepth;
                                }
                                */
                                // Copy the BGR bytes for pixels inside the privacy band;
                                // pixels outside stay black. C# evaluates the LHS index
                                // before the RHS index++, so each line copies slot N to
                                // slot N and then advances.
                                if (depth > tempMinDepth && depth < tempMaxDepth)
                                {
                                    int index = tgtPixel * 4;
                                    newColorImageData[index] = this.colorImageData[index++];
                                    newColorImageData[index] = this.colorImageData[index++];
                                    newColorImageData[index] = this.colorImageData[index++];
                                }
                            }
                        }
                    }
                    /*
                    this.colorImageWritableBitmap.WritePixels(
                        new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                        this.colorImageData,
                        colorImageFrame.Width * Bgr32BytesPerPixel,
                        0);
                    // Write the pixel data into our bitmap
                    this.foregroundBitmap.WritePixels(
                        new Int32Rect(0, 0, this.foregroundBitmap.PixelWidth, this.foregroundBitmap.PixelHeight),
                        backgroundRemovedFrame.GetRawPixelData(),
                        this.foregroundBitmap.PixelWidth * sizeof(int),
                        0);
                    */
                    this.colorImageWritableBitmap.WritePixels(
                        new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                        this.newColorImageData,
                        colorImageFrame.Width * sizeof(int),
                        0);
                }
            }
            finally
            {
                // Frames opened outside using blocks must be disposed manually.
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }
                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }
                //    if (skeltImageFrame != null)
                //    {
                //        skeltImageFrame.Dispose();
                //    }
            }
          //  Console.WriteLine(s);
         //   ChoosePrivacySkeleton();
        }
示例#47
0
 /// <summary>
 /// Captures the depth and RGB buffers that accompany a depth-data change event.
 /// </summary>
 /// <param name="depthData">Raw depth pixels for the frame.</param>
 /// <param name="rgbData">Matching RGB bytes for the frame.</param>
 public DepthDataChangeEventArgs(DepthImagePixel[] depthData, byte[] rgbData)
 {
     this.rgbData = rgbData;
     this.depthData = depthData;
 }
        /// <summary>
        /// Maps every 640x480 color pixel to its depth sample and converts to
        /// grayscale, in place, those closer than <paramref name="distanciaMaxima"/>.
        /// </summary>
        private void ReconhecerProfundidade(byte[] bytesImagem, int distanciaMaxima, DepthImagePixel[] imagemProfundidade)
        {
            var pontosImagemProfundidade = new DepthImagePoint[640 * 480];
            kinect.CoordinateMapper.MapColorFrameToDepthFrame(kinect.ColorStream.Format, kinect.DepthStream.Format, imagemProfundidade, pontosImagemProfundidade);

            for (int i = 0; i < pontosImagemProfundidade.Length; i++)
            {
                DepthImagePoint point = pontosImagemProfundidade[i];
                // Same short-circuit order as before: IsKnownPoint is only
                // consulted for points already inside the distance cutoff.
                if (point.Depth >= distanciaMaxima || !KinectSensor.IsKnownPoint(point))
                {
                    continue;
                }

                // Grayscale: paint all three channels with the brightest one.
                var pixelDataIndex = i * 4;
                byte maiorValorCor = Math.Max(bytesImagem[pixelDataIndex], Math.Max(bytesImagem[pixelDataIndex + 1], bytesImagem[pixelDataIndex + 2]));

                bytesImagem[pixelDataIndex] = maiorValorCor;
                bytesImagem[pixelDataIndex + 1] = maiorValorCor;
                bytesImagem[pixelDataIndex + 2] = maiorValorCor;
            }
        }
示例#49
0
        // Caches pixel, depth and skeleton data from all three streams, then
        // notifies listeners via FireAllFramesDispatched().
        private void AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
                {
                    if (colorImageFrame != null)
                    {
                        // Lazily allocate the pixel buffer on the first frame.
                        if (ColorPixels == null)
                            ColorPixels = new byte[colorImageFrame.PixelDataLength];
                        colorImageFrame.CopyPixelDataTo(ColorPixels);
                        // NOTE(review): this stores a reference to a frame that the
                        // using block disposes on exit — consumers of ColorImageFrame
                        // will see a disposed object; confirm they only read the
                        // cached arrays, or clone the frame instead.
                        ColorImageFrame = colorImageFrame;
                    }
                }

                using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
                {
                    if (depthImageFrame != null)
                    {
                        if (DepthImagePixels == null)
                            DepthImagePixels = new DepthImagePixel[depthImageFrame.PixelDataLength];
                        depthImageFrame.CopyDepthImagePixelDataTo(DepthImagePixels);
                        if (DepthPixels == null)
                            DepthPixels = new short[depthImageFrame.PixelDataLength];
                        depthImageFrame.CopyPixelDataTo(DepthPixels);
                        // NOTE(review): same disposed-reference concern as
                        // ColorImageFrame above.
                        DepthImageFrame = depthImageFrame;
                        // Invalidate any cached face-tracking frame for this new depth data.
                        _faceFrame = null;
                    }
                }

                using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
                {
                    if (skeletonFrame != null)
                    {
                        if (Skeletons == null)
                            Skeletons = new Skeleton[skeletonFrame.SkeletonArrayLength];
                        skeletonFrame.CopySkeletonDataTo(Skeletons);
                        //CorrectRoomCoords();
                    }
                }

            FireAllFramesDispatched();
        }
示例#50
0
        /// <summary>
        /// AllFramesReady handler: processes only every <c>gap</c>-th frame, reads the
        /// depth at the tracked (x, y) colour pixel and converts the pixel offsets
        /// into approximate real-world coordinates (x_actual, y_actual), then runs
        /// skeleton processing.
        /// </summary>
        private void run(object sender, AllFramesReadyEventArgs e)
        {
            using (ColorImageFrame frame_colour = e.OpenColorImageFrame())
            using (DepthImageFrame frame_depth = e.OpenDepthImageFrame())
            using (SkeletonFrame frame_skel = e.OpenSkeletonFrame())
            {
                frames++;

                // Throttle: handle one frame out of every 'gap'; skipped frames only
                // flag takeFrame for the next pass.
                if (frames % gap != 0)
                {
                    takeFrame = true;
                    return;
                }
                takeFrame = false;

                if (null != frame_colour)
                {
                    byte[] rawColorImage = new byte[frame_colour.PixelDataLength];
                    frame_colour.CopyPixelDataTo(rawColorImage);
                    ProcessFrame(rawColorImage);

                    if (null != frame_depth)
                    {
                        // Depth (mm) at the tracked pixel; 640 is the depth-frame width.
                        DepthImagePixel[] depthImage = new DepthImagePixel[frame_depth.PixelDataLength];
                        frame_depth.CopyDepthImagePixelDataTo(depthImage);
                        depth = depthImage[x + (y * 640)].Depth;
                        lbl_depth.Content = depth;

                        // Approximate metric position from pixel offsets relative to the
                        // image centre (320, 240); constants are empirical calibration.
                        // NOTE(review): Math.Tan takes radians and 21.5 looks like degrees;
                        // the -0.155 offset below may be compensating — confirm before
                        // changing the formula.
                        double x_pos = 0.00425 * (this.x - 320);
                        double y_pos = Math.Round((2.2 * 2 * Math.Tan(21.5) * (this.y - 240) * -1) / 480, 4);
                        double y_pos2 = (y_pos * -1) - 0.155;
                        this.x_actual = x_pos;
                        this.y_actual = y_pos2;
                    }
                }

                processSkeleton(frame_skel);
            }
        }
示例#51
0
        /// <summary>
        /// AllFramesReady handler that builds a player "green screen" opacity mask:
        /// copies the depth and colour frames, maps depth pixels into colour space,
        /// and marks (value 33) every mask byte covered by a tracked player.
        /// Returns a two-element array: [0] = mask bytes, [1] = raw colour pixels.
        /// Both elements are null when no colour frame arrived this tick.
        /// </summary>
        private byte[][] SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            bool depthReceived = false;
            bool colorReceived = false;

            DepthImagePixel[] depthPixels;
            byte[] colorPixels;
            ColorImagePoint[] colorCoordinates;
            int colorToDepthDivisor;
            byte[] greenScreenPixelData;

            // Allocate space to put the color pixels we'll create
            // (fresh buffers every call — one per stream plus the mask).
            depthPixels = new DepthImagePixel[this.kinectSensor.DepthStream.FramePixelDataLength];
            colorPixels = new byte[this.kinectSensor.ColorStream.FramePixelDataLength];
            greenScreenPixelData = new byte[this.kinectSensor.DepthStream.FramePixelDataLength];
            colorCoordinates = new ColorImagePoint[this.kinectSensor.DepthStream.FramePixelDataLength];

            int colorWidth = this.kinectSensor.ColorStream.FrameWidth;
            int colorHeight = this.kinectSensor.ColorStream.FrameHeight;
            // Scale factor from colour resolution down to the 640-wide depth frame.
            colorToDepthDivisor = colorWidth / 640;

            byte[][] results = new byte[2][]; // kinectSensor.DepthStream.FramePixelDataLength];

            DepthImageFormat DepthFormat = DepthImageFormat.Resolution640x480Fps30;
            ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;

            using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
            {
                if (null != depthFrame)
                {
                    // Copy the pixel data from the image to a temporary array
                    depthFrame.CopyDepthImagePixelDataTo(depthPixels);
                    depthReceived = true;
                }
            }

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (null != colorFrame)
                {
                    // Copy the pixel data from the image to a temporary array
                    this.outputColorBitmap = new WriteableBitmap(640, 480, 96, 96, PixelFormats.Bgr32, null);
                    colorFrame.CopyPixelDataTo(colorPixels);
                    colorReceived = true;
                }
            }

            if (true == depthReceived)
            {
                this.kinectSensor.CoordinateMapper.MapDepthFrameToColorFrame(
                    DepthFormat,
                    depthPixels,
                    ColorFormat,
                    colorCoordinates);

                Array.Clear(greenScreenPixelData, 0, greenScreenPixelData.Length);

                // loop over each row and column of the depth
                for (int y = 0; y < 480; ++y)
                {
                    for (int x = 0; x < 640; ++x)
                    {
                        // calculate index into depth array
                        int depthIndex = x + (y * 640);

                        DepthImagePixel depthPixel = depthPixels[depthIndex];

                        int player = depthPixel.PlayerIndex;

                        // if we're tracking a player for the current pixel, do green screen
                        if (player > 0)
                        {
                            // retrieve the depth to color mapping for the current depth pixel
                            ColorImagePoint colorImagePoint = colorCoordinates[depthIndex];

                            // scale color coordinates to depth resolution
                            int colorInDepthX = colorImagePoint.X / colorToDepthDivisor;
                            int colorInDepthY = colorImagePoint.Y / colorToDepthDivisor;

                            // make sure the depth pixel maps to a valid point in color space
                            // (x is tested > 0, not >= 0, so the "- 1" write below stays in range)
                            if (colorInDepthX > 0 && colorInDepthX < 640 && colorInDepthY >= 0 && colorInDepthY < 480)
                            {
                                // calculate index into the green screen pixel array
                                int greenScreenIndex = colorInDepthX + (colorInDepthY * 640);

                                // set opaque
                                greenScreenPixelData[greenScreenIndex] = 33;

                                // compensate for depth/color not corresponding exactly by setting the pixel
                                // to the left to opaque as well
                                greenScreenPixelData[greenScreenIndex - 1] = 33;
                            }
                        }
                    }
                }
            }

            if (true == colorReceived)
            {
                // Write the pixel data into our bitmap
                /*
                this.outputColorBitmap.WritePixels(
                new Int32Rect(0, 0, this.outputColorBitmap.PixelWidth, this.outputColorBitmap.PixelHeight),
                colorPixels,
                this.outputColorBitmap.PixelWidth * sizeof(int),
                0);

                if (playerOpacityMaskImage == null)
                {
                    playerOpacityMaskImage = new WriteableBitmap(
                        640,
                        480,
                        96,
                        96,
                        PixelFormats.Bgra32,
                        null);

                    results[0] = playerOpacityMaskImage;
                }

                playerOpacityMaskImage.WritePixels(
                    new Int32Rect(0, 0, 640, 480),
                    greenScreenPixelData,
                    640 * ((playerOpacityMaskImage.Format.BitsPerPixel + 7) / 8),
                    0);
                */
                results[0] = greenScreenPixelData; // playerOpacityMaskImage
                results[1] = colorPixels;
                return results;
            }

            return results;
        }
        /// <summary>
        /// Converts a depth frame into a grey-scale Bgr32 colour buffer: each depth
        /// value is passed through a per-row transform (TwoD_intensityTable) and then
        /// through the intensity lookup table.
        /// </summary>
        /// <param name="depthFrame">Source depth pixels.</param>
        /// <param name="colorFrame">Destination buffer; must be depthFrame.Length * Bgr32BytesPerPixel bytes.</param>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the buffer lengths are not correlated.
        /// </exception>
        public void TransformAndConvertDepthFrame(DepthImagePixel[] depthFrame,byte[] colorFrame)
        {
            // Test that the buffer lengths are appropriately correlated, which allows us to use only one
            // value as the loop condition.
            if ((depthFrame.Length * Bgr32BytesPerPixel) != colorFrame.Length)
            {
                throw new InvalidOperationException();
            }

            // Get intensity map.
            byte[] mappingTable = this.intensityTable;

            // Process data.
            for (int depthIndex = 0, colorIndex = 0;
                colorIndex < colorFrame.Length;
                depthIndex++, colorIndex += Bgr32BytesPerPixel)
            {
                try
                {
                    short depth = depthFrame[depthIndex].Depth;

                    // Per-row transform; depthIndex / 640 is the row for a 640-wide frame.
                    depth = TwoD_intensityTable[depthIndex / 640, depth];

                    // Look up in intensity table.
                    byte color = mappingTable[(ushort)depth];

                    // Write color pixel to buffer
                    colorFrame[colorIndex + RedIndex] = color;
                    colorFrame[colorIndex + GreenIndex] = color;
                    colorFrame[colorIndex + BlueIndex] = color;
                }
                catch (IndexOutOfRangeException)
                {
                    // BUGFIX: was catch (Exception), which silently swallowed every
                    // failure. Only out-of-range depth values (outside the lookup
                    // tables) are expected here; skip the pixel and continue.
                }
            }
        }
 //
 // Summary:
 //     Tests whether the DepthImagePixel has a known value.
 //
 // Parameters:
 //   depthImagePixel:
 //     The DepthImagePixel to test.
 //
 // Returns:
 //     Returns true if the DepthImagePixel has a known value, false otherwise.
 //
 // NOTE(review): body-less declaration — this looks like a metadata stub copied
 // from the Kinect SDK reference assembly. It will not compile inside an
 // ordinary class; confirm whether it belongs in this file.
 public static bool IsKnownPoint(DepthImagePixel depthImagePixel);
示例#54
0
        /// <summary>
        /// Scans a colour frame for a barcode, maps the barcode centre into
        /// depth-space coordinates and records it in code_points.
        /// Returns the decoded code number, or -1 when nothing was found.
        /// </summary>
        int find_code(ColorImageFrame colorFrame, DepthImageFrame depthFrame)
        {
            if (colorFrame == null)
            {
                return -1;
            }

            // Decode the colour frame.
            ZXing.Kinect.BarcodeReader barcodeReader = new ZXing.Kinect.BarcodeReader();
            var decoded = barcodeReader.Decode(colorFrame);
            if (decoded == null)
            {
                return -1;
            }

            string text = decoded.Text;
            int codeNumber = Convert.ToInt32(text);

            // Barcode centre = midpoint between the two opposite result points.
            double centerX = decoded.ResultPoints[0].X + 0.5 * (decoded.ResultPoints[2].X - decoded.ResultPoints[0].X);
            double centerY = decoded.ResultPoints[0].Y + 0.5 * (decoded.ResultPoints[2].Y - decoded.ResultPoints[0].Y);

            code_size = new Point(
                (decoded.ResultPoints[2].X - decoded.ResultPoints[0].X),
                (decoded.ResultPoints[2].Y - decoded.ResultPoints[0].Y));

            // Must mirror the coordinate here -- the depth frame comes in mirrored.
            centerX = 640 - centerX;

            // Map the colour frame onto the depth frame.
            DepthImagePixel[] rawDepth = new DepthImagePixel[depthFrame.PixelDataLength];
            depthFrame.CopyDepthImagePixelDataTo(rawDepth);
            DepthImagePoint[] mappedPoints = new DepthImagePoint[sensor.DepthStream.FramePixelDataLength];
            sensor.CoordinateMapper.MapColorFrameToDepthFrame(sensor.ColorStream.Format, sensor.DepthStream.Format, rawDepth, mappedPoints);

            // Get the point in the depth frame at the centre of the barcode.
            int colorIndex = (int)centerY * 640 + (int)centerX;
            DepthImagePoint mapped = mappedPoints[colorIndex];
            Point depthPoint = new Point(mapped.X, mapped.Y);
            code_points[codeNumber] = depthPoint;

            Console.WriteLine("Found code " + codeNumber + " at (" + centerX + ", " + centerY + ") in color coordinates.");
            Console.WriteLine("Translated to (" + depthPoint.X + ", " + depthPoint.Y + ") in depth coordinates.");
            return codeNumber;
        }
        /// <summary>
        /// Dispatches a depth frame, according to the toggle buttons, to either the
        /// interaction stream (draw mode) or the grey-scale depth highlighter.
        /// Disposes the frame when done; no-op when either argument is null.
        /// </summary>
        private void FuncoesProfundidade(DepthImageFrame quadro, byte[] bytesImagem, int distanciaMaxima)
        {
            if (quadro == null || bytesImagem == null)
            {
                return;
            }

            using (quadro)
            {
                DepthImagePixel[] pixels = new DepthImagePixel[quadro.PixelDataLength];
                quadro.CopyDepthImagePixelDataTo(pixels);

                if (btnDesenhar.IsChecked)
                {
                    fluxoInteracao.ProcessDepth(pixels, quadro.Timestamp);
                }
                else if (btnEscalaCinza.IsChecked)
                {
                    ReconhecerProfundidade(bytesImagem, distanciaMaxima, pixels);
                }
            }
        }
示例#56
0
        /// <summary>
        /// Converts a Kinect depth frame in unsigned short format to a depth frame in
        /// float format representing distance from the camera in meters (parallel to
        /// the optical center axis). <paramref name="depthImageData"/> and
        /// <paramref name="depthFloatFrame"/> must share the same pixel resolution,
        /// equal to <paramref name="depthImageDataWidth"/> x <paramref name="depthImageDataHeight"/>.
        /// Pixels closer than <paramref name="minDepthClip"/> are returned as 0 and
        /// ignored in processing; pixels beyond <paramref name="maxDepthClip"/> are set
        /// to 1000 to signify a valid ray with depth beyond the threshold. The far-distance
        /// flag matters when the camera is static: it lets voxels closer to the camera
        /// along the ray be culled instead of persisting. When reconstructing large
        /// real-world volumes, use large maxDepthClip distances, otherwise voxels beyond
        /// the threshold are removed as the camera moves.
        /// </summary>
        /// <param name="depthImageData">Extended-depth texture of a Kinect depth image.</param>
        /// <param name="depthImageDataWidth">Width of the depth image data.</param>
        /// <param name="depthImageDataHeight">Height of the depth image data.</param>
        /// <param name="depthFloatFrame">Pre-allocated float frame that receives the depth values.</param>
        /// <param name="minDepthClip">Minimum depth threshold in meters; must be >= 0. Pixels below become invalid (0).</param>
        /// <param name="maxDepthClip">Maximum depth threshold in meters; must be > 0. Pixels above become invalid (1000).</param>
        /// <param name="mirrorDepth">Whether to horizontally mirror the input depth image.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when <paramref name="depthImageData"/> or <paramref name="depthFloatFrame"/> is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when the width/height parameters do not match depthFloatFrame's
        /// dimensions, or when a clip threshold is less than zero.
        /// </exception>
        /// <exception cref="OutOfMemoryException">Thrown if a CPU memory allocation failed.</exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not
        /// connected, a GPU memory allocation failed or the call failed for an unknown reason.
        /// </exception>
        public static void DepthToDepthFloatFrame(
            DepthImagePixel[] depthImageData,
            int depthImageDataWidth,
            int depthImageDataHeight,
            FusionFloatImageFrame depthFloatFrame,
            float minDepthClip,
            float maxDepthClip,
            bool mirrorDepth)
        {
            if (depthImageData == null)
            {
                throw new ArgumentNullException("depthImageData");
            }

            if (depthFloatFrame == null)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            // Delegate the conversion to the native Fusion runtime; any failure HRESULT
            // is surfaced as the documented managed exception.
            ExceptionHelper.ThrowIfFailed(NativeMethods.NuiFusionDepthToDepthFloatFrame(
                depthImageData,
                (uint)depthImageDataWidth,
                (uint)depthImageDataHeight,
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                minDepthClip,
                maxDepthClip,
                mirrorDepth));
        }
示例#57
0
        /// <summary>
        /// Siavash
        ///
        /// Scans the current depth buffer (sampling every other pixel) for the
        /// closest known-depth point beyond minDepth. A candidate replaces
        /// this.closestPoint only when at least 3 of its 4 neighbours (left, right,
        /// up, down) have a similar depth (within NEIGHBOR_CUTOFF), filtering out
        /// isolated noise; otherwise the nearest rejected candidate is remembered
        /// in badPoint (-1 when none).
        /// </summary>
        /// <param name="pixelDataLenght">Number of pixels in imagePixelData.</param>
        /// <param name="windowWidth">Frame width in pixels.</param>
        /// <param name="windowHeight">Frame height in pixels.</param>
        private void findTheClosestPoint(int pixelDataLenght, int windowWidth, int windowHeight)
        {
            int i = 0;
            int closestBadPoint = -1;
            int closestBadPointDepth = 10000000;
            for (i = 0; i < pixelDataLenght; )
            {
                if (this.imagePixelData[i].IsKnownDepth == true)
                {
                    short currentDepth = this.imagePixelData[i].Depth;
                    if (currentDepth > minDepth && currentDepth < this.closestPoint.Depth)
                    {
                        // Neighbour indices, clamped to i itself at the frame edges.
                        int leftIdx = i > 0 ? i - 1 : i;
                        // BUGFIX: was "i < pixelDataLenght + 1", which is always true and
                        // indexed one past the end of the buffer at the last pixel.
                        int rightIdx = i < pixelDataLenght - 1 ? i + 1 : i;
                        // BUGFIX: was "i > windowWidth"; i == windowWidth also has a valid
                        // neighbour above it (index 0).
                        int upIdx = i >= windowWidth ? i - windowWidth : i;
                        int downIdx = i < windowHeight * windowWidth - windowWidth ? i + windowWidth : i;

                        // BUGFIX: the original tested leftIdx four times; test each
                        // neighbour exactly once.
                        int goodCount = 0;
                        if (Math.Abs(this.imagePixelData[leftIdx].Depth - currentDepth) < NEIGHBOR_CUTOFF) goodCount++;
                        if (Math.Abs(this.imagePixelData[rightIdx].Depth - currentDepth) < NEIGHBOR_CUTOFF) goodCount++;
                        if (Math.Abs(this.imagePixelData[upIdx].Depth - currentDepth) < NEIGHBOR_CUTOFF) goodCount++;
                        if (Math.Abs(this.imagePixelData[downIdx].Depth - currentDepth) < NEIGHBOR_CUTOFF) goodCount++;

                        if (goodCount >= 3)
                        {
                            this.closestPoint = this.imagePixelData[i];
                            this.pixelIndex = i;
                        }
                        else
                        {
                            if (currentDepth < closestBadPointDepth)
                            {
                                closestBadPoint = i;
                                closestBadPointDepth = currentDepth;
                            }
                        }
                    }
                }
                i = i + 2;
            }
            badPoint = closestBadPoint;
        }
示例#58
0
        /// <summary>
        /// Converts an array of DepthImagePixels into a byte array in Bgr32 format.
        /// Pixel intensity represents depth; colors indicate players.
        /// </summary>
        /// <param name="depthFrame">The depth buffer to convert.</param>
        /// <param name="minDepth">The minimum reliable depth for this frame.</param>
        /// <param name="maxDepth">The maximum reliable depth for this frame.</param>
        /// <param name="depthTreatment">The depth treatment to apply.</param>
        /// <param name="colorFrame">The buffer to fill with color pixels.</param>
        public void ConvertDepthFrame(
            DepthImagePixel[] depthFrame,
            int minDepth,
            int maxDepth,
            KinectDepthTreatment depthTreatment,
            byte[] colorFrame)
        {
            // A Bgr32 output holds exactly Bgr32BytesPerPixel bytes per depth pixel;
            // verifying that up front lets one index drive the loop below.
            if (colorFrame.Length != depthFrame.Length * Bgr32BytesPerPixel)
            {
                throw new InvalidOperationException();
            }

            ColorMapping[] lookup = GetColorMappingTable(minDepth, maxDepth, depthTreatment);

            int outIndex = 0;
            for (int inIndex = 0; inIndex < depthFrame.Length; inIndex++, outIndex += Bgr32BytesPerPixel)
            {
                short depthValue = depthFrame[inIndex].Depth;
                ColorMapping mapped = lookup[(ushort)depthValue];

                int playerIndex = depthFrame[inIndex].PlayerIndex;

                // Shift each channel's intensity according to the player owning the pixel.
                colorFrame[outIndex + RedIndex] = (byte)(mapped.R >> IntensityShiftByPlayerR[playerIndex]);
                colorFrame[outIndex + GreenIndex] = (byte)(mapped.G >> IntensityShiftByPlayerG[playerIndex]);
                colorFrame[outIndex + BlueIndex] = (byte)(mapped.B >> IntensityShiftByPlayerB[playerIndex]);
            }
        }
示例#59
0
        /// <summary>
        /// Converts a Kinect depth frame in unsigned short format to a depth frame in
        /// float format representing distance from the camera in meters (parallel to
        /// the optical center axis). <paramref name="depthImageData"/> and
        /// <paramref name="depthFloatFrame"/> must share the same pixel resolution.
        /// This version of the function runs on the GPU.
        /// </summary>
        /// <param name="depthImageData">The source depth data.</param>
        /// <param name="depthFloatFrame">A depth float frame, to be filled with depth.</param>
        /// <param name="minDepthClip">The minimum depth threshold. Values below this will be set to 0.</param>
        /// <param name="maxDepthClip">The maximum depth threshold. Values above this will be set to 1000.</param>
        /// <param name="mirrorDepth">Set true to mirror depth, false so the image appears correct if viewing
        /// the Kinect camera from behind.</param>
        /// <exception cref="ArgumentNullException">
        /// Thrown when <paramref name="depthImageData"/> or
        /// <paramref name="depthFloatFrame"/> is null.
        /// </exception>
        /// <exception cref="ArgumentException">
        /// Thrown when <paramref name="depthImageData"/> or
        /// <paramref name="depthFloatFrame"/> is an incorrect image size, or the
        /// kernelWidth is an incorrect size.
        /// </exception>
        /// <exception cref="InvalidOperationException">
        /// Thrown when the Kinect Runtime could not be accessed, the device is not
        /// connected or the call failed for an unknown reason.
        /// </exception>
        /// <remarks>
        /// The min and max depth clip values enable clipping of the input data, e.g.
        /// to isolate particular objects or surfaces to be reconstructed. Pixels inside
        /// minDepthClip are returned as 0 and ignored in processing; pixels beyond
        /// maxDepthClip are set to 1000 to signify a valid depth ray beyond the set
        /// threshold. The far-distance flag matters when the camera is static or moves
        /// little, as it lets any voxels closer to the camera along the ray be culled
        /// instead of persisting. When reconstructing large real-world volumes, be
        /// sure to set large maxDepthClip distances, otherwise voxels beyond the
        /// threshold are removed from view as the camera moves around.
        /// </remarks>
        public void DepthToDepthFloatFrame(
            DepthImagePixel[] depthImageData,
            FusionFloatImageFrame depthFloatFrame,
            float minDepthClip,
            float maxDepthClip,
            bool mirrorDepth)
        {
            if (depthImageData == null)
            {
                throw new ArgumentNullException("depthImageData");
            }

            if (depthFloatFrame == null)
            {
                throw new ArgumentNullException("depthFloatFrame");
            }

            // Buffer size in bytes: each DepthImagePixel is two ushorts, i.e. sizeof(int).
            ExceptionHelper.ThrowIfFailed(volume.DepthToDepthFloatFrame(
                depthImageData,
                (uint)depthImageData.Length * sizeof(int),
                FusionImageFrame.ToHandleRef(depthFloatFrame),
                minDepthClip,
                maxDepthClip,
                mirrorDepth));
        }
        /// <summary>
        /// Enables the sensor's 640x480 depth stream, binds a Bgr32 WriteableBitmap to
        /// the given Image control, and registers a DepthFrameReady handler that renders
        /// each depth frame as a grey-scale image (and, while recording, enqueues a
        /// rendered bitmap of VideoView). The handler closes over the buffers allocated
        /// below, so they are reused for every frame.
        /// </summary>
        private void depth(KinectSensor sensor, Image Image)
        {
            DepthImagePixel[] depthPixels;
            byte[] colorPixels;
            WriteableBitmap colorBitmap;
            // Turn on the depth stream to receive depth frames
            sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);

            // Allocate space to put the depth pixels we'll receive
            depthPixels = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];

            // Allocate space to put the color pixels we'll create
            colorPixels = new byte[sensor.DepthStream.FramePixelDataLength * sizeof(int)];

            // This is the bitmap we'll display on-screen
            colorBitmap = new WriteableBitmap(sensor.DepthStream.FrameWidth, sensor.DepthStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

            // Set the image we display to point to the bitmap where we'll put the image data
            Image.Source = colorBitmap;

            sensor.DepthFrameReady += (o, arg) =>
            {
                using (DepthImageFrame depthFrame = arg.OpenDepthImageFrame())
                {
                    if (depthFrame == null)
                        return;
                    // Copy the pixel data from the image to a temporary array
                    depthFrame.CopyDepthImagePixelDataTo(depthPixels);

                    // Get the min and max reliable depth for the current frame
                    int minDepth = depthFrame.MinDepth;
                    int maxDepth = depthFrame.MaxDepth;

                    // Convert the depth to RGB
                    int colorPixelIndex = 0;
                    for (int i = 0; i < depthPixels.Length; ++i)
                    {
                        // Get the depth for this pixel
                        short depth = depthPixels[i].Depth;

                        // To convert to a byte, we're discarding the most-significant
                        // rather than least-significant bits.
                        // We're preserving detail, although the intensity will "wrap."
                        // Values outside the reliable depth range are mapped to 0 (black).

                        // Note: Using conditionals in this loop could degrade performance.
                        // Consider using a lookup table instead when writing production code.
                        // See the KinectDepthViewer class used by the KinectExplorer sample
                        // for a lookup table example.
                        byte intensity = (byte)(depth >= minDepth && depth <= maxDepth ? depth : 0);

                        // Write out blue byte
                        colorPixels[colorPixelIndex++] = intensity;

                        // Write out green byte
                        colorPixels[colorPixelIndex++] = intensity;

                        // Write out red byte
                        colorPixels[colorPixelIndex++] = intensity;

                        // We're outputting BGR, the last byte in the 32 bits is unused so skip it
                        // If we were outputting BGRA, we would write alpha here.
                        ++colorPixelIndex;
                    }
                    // Write the pixel data into our bitmap
                    colorBitmap.WritePixels(
                        new Int32Rect(0, 0, colorBitmap.PixelWidth, colorBitmap.PixelHeight),
                        colorPixels,
                        colorBitmap.PixelWidth * sizeof(int),
                        0);
                }
                // While recording, capture the rendered view for the output video queue.
                if (frames != null && record)
                    frames.Enqueue(RenderBitmap(VideoView));
            };
        }