Example #1
        /// <summary>
        /// Creates a DepthCorrectionMap from an uncorrected raw image and the desired TableDistance
        /// </summary>
        /// <param name="uncorrectedImage"></param>
        /// <param name="averageTableDistance"></param>
        /// <returns></returns>
        public DepthCorrectionMap CreateDepthCorrectionMap(DepthImage uncorrectedImage, int averageTableDistance)
        {
            DepthCorrectionMap correctionMap = new DepthCorrectionMap(uncorrectedImage.Width, uncorrectedImage.Height);
            int lastValue = 0;

            for (int x = 0; x < uncorrectedImage.Width; x++)
            {
                for (int y = 0; y < uncorrectedImage.Height; y++)
                {
                    if (uncorrectedImage.Data[x, y] > 0)
                    {
                        //Correctly recognized point: calculate correction value
                        correctionMap.CorrectionData[x, y] = averageTableDistance - uncorrectedImage.Data[x, y];
                        lastValue = averageTableDistance - uncorrectedImage.Data[x, y];
                    }
                    else
                    {
                        //Unrecognized point (reading = 0): use the most recent correction value as an approximation
                        correctionMap.CorrectionData[x, y] = lastValue;
                    }
                }
            }

            return correctionMap;
        }
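A minimal calibration sketch of how such a map might be created from a frame of the empty table. It reuses the DepthImage(int[], int, int) constructor and the DepthMapPreprocessor seen in Example #9; that CreateDepthCorrectionMap lives on DepthMapPreprocessor and that DefaultCorrectionMap has a setter are assumptions, not confirmed by this excerpt.

        // Sketch only: the method name and the settings access marked below are assumptions.
        public void CalibrateFromEmptyTable(int[] depthFrame, int width, int height)
        {
            // Raw frame of the empty table (constructor as used in Example #9)
            DepthImage raw = new DepthImage(depthFrame, width, height);

            // Assumption: CreateDepthCorrectionMap is a member of DepthMapPreprocessor
            DepthMapPreprocessor dmp = new DepthMapPreprocessor();
            DepthCorrectionMap map = dmp.CreateDepthCorrectionMap(raw, SettingsManager.RecognitionSet.TableDistance);

            // Assumption: DefaultCorrectionMap is settable; Example #9 only reads it
            SettingsManager.PreprocessingSet.DefaultCorrectionMap = map;
        }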
Example #2
        /// <summary>
        /// Applies a correctionMap to a raw DepthImage
        /// </summary>
        /// <param name="rawImage"></param>
        /// <param name="correctionMap"></param>
        /// <returns></returns>
        public DepthImage ApplyDepthCorrection(DepthImage rawImage, DepthCorrectionMap correctionMap)
        {
            if (rawImage.Width != correctionMap.Width || rawImage.Height != correctionMap.Height)
            {
                throw new ArgumentException("Image size does not match the correction map size");
            }

            //calculate the values for the section that is not involved in border cutting
            int BorderCutXmin = 0 + correctionMap.CutOffLeft;
            int BorderCutXmax = rawImage.Width - 1 - correctionMap.CutOffRight;
            int BorderCutYmin = 0 + correctionMap.CutOffTop;
            int BorderCutYmax = rawImage.Height - 1 - correctionMap.CutOffBOttom;

            //Apply the Depth Correction
            for (int x=0; x < rawImage.Width; x++)
            {
                for (int y = 0; y < rawImage.Height; y++)
                {
                    if (rawImage.Data[x,y] > 0) //Only if there wasn't a reading error
                        rawImage.Data[x, y] = rawImage.Data[x, y] + correctionMap.CorrectionData[x, y];

                    //Coordinates outside the CutOffBorder?
                    if (!((BorderCutXmin <= x) && (x <= BorderCutXmax) && (BorderCutYmin <= y) && (y <= BorderCutYmax)))
                        rawImage.Data[x, y] = 0;
                }
            }

            return rawImage;
        }
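A quick worked check of the border arithmetic above, assuming a 320x240 depth frame and a cut-off of 10 pixels on every side (the numbers are only for illustration):

            // Width = 320, CutOffLeft = CutOffRight = 10:
            //   BorderCutXmin = 0 + 10       = 10
            //   BorderCutXmax = 320 - 1 - 10 = 309   -> valid x range: 10..309
            // Height = 240, CutOffTop = CutOffBOttom = 10:
            //   BorderCutYmin = 0 + 10       = 10
            //   BorderCutYmax = 240 - 1 - 10 = 229   -> valid y range: 10..229
            // Every pixel outside these ranges is zeroed by the loop above.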
Example #3
        public RecognitionDataPacket()
        {
            rawDepthImage = null;
            correctedDepthImage = null;
            TableObjects = new List<TableObject>();
            HandObj = null;
            objectmap = null;
            neighbourmap = null;

            bmpCorrectedDepth = null;
            bmpRawDepth = null;
            bmpVideoFrame = null;

            RecognitionDuration = 0;
        }
Example #4
        /// <summary>
        /// Creates a boolmap from a depth image. Every pixel with a height > 0 is set to true
        /// </summary>
        /// <param name="source">the depth image to convert</param>
        /// <returns>the resulting bool[,] map</returns>
        private bool[,] CreateBoolMap(DepthImage source)
        {
            bool[,] boolmap = new bool[source.Width,source.Height];

            for (int y = 0; y < source.Height; y++)
            {
                for (int x = 0; x < source.Width; x++)
                {
                    if (source.Data[x,y] > 0)
                    {
                        boolmap[x, y] = true;
                    }
                    else
                    {
                        boolmap[x, y] = false;
                    }
                }
            }

            return boolmap;
        }
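Since the comparison already yields the boolean, the inner if/else can be collapsed; a behaviour-equivalent one-liner for the loop body:

                    //Behaviour-equivalent to the if/else above
                    boolmap[x, y] = source.Data[x, y] > 0;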
Example #5
        public List<TableObject> SeperateObjects(ref DepthImage image, out bool[,] boolmap_object, out int[,,] neighbourmap)
        {
            //Create two separate depth images: remove the objects (for hand recognition) and remove the hand (for object recognition)
            DepthImage HandImage = image.Clone();
            DepthImage ObjectImage = image.Clone();
            SplitUpDepthImages(ref HandImage, ref ObjectImage);

            //For object recognition, create a BooleanXY-Map (true for all objects with a height > 0)
            bool[,] boolmap_hand = CreateBoolMap(HandImage);
            boolmap_object = CreateBoolMap(ObjectImage);

            if (SettingsManager.RecognitionSet.SaveDebugMaps)
            {
                Bitmap dm_obj = MapVisualizer.VisualizeDepthImage(ObjectImage, false, false);
                dm_obj.Save("ObjectDepthImage.bmp");
                Bitmap dm_h = MapVisualizer.VisualizeDepthImage(HandImage, false, false);
                dm_h.Save("HandDepthImage.bmp");
                Bitmap bobj = MapVisualizer.VisualizeBoolMap(boolmap_hand, image.Width, image.Height);
                bobj.Save("boolmap_hand.bmp");
                Bitmap bmpo = MapVisualizer.VisualizeBoolMap(boolmap_object, image.Width, image.Height);
                bmpo.Save("boolmap_object.bmp");
                Bitmap ci = MapVisualizer.VisualizeDepthImage(image, false, false);
                ci.Save("normalized_depthimage.bmp");
            }

            //Run the Hand Recognition
            HandRecognizer hrec = new HandRecognizer();
            HandObject hand = hrec.RecognizeHands(ref boolmap_hand, image.Width, image.Height);

            //Run the object recognition
            ObjectRecognizer orec = new ObjectRecognizer();
            List<TableObject> tableObjects = orec.RecognizeObjects(boolmap_object, image, out neighbourmap);

            //If there is a hand, add it to the TableObjects
            if (hand != null)
                tableObjects.Add(hand);

            //Return the ObjectList
            return tableObjects;
        }
Example #6
        private void b_aktualisieren_Click(object sender, RoutedEventArgs e)
        {
            if (_tmgr.LastKinectDepthFrame == null)
            {
                MessageBox.Show("Kinect muss zuerst initialisiert werden!", "Fehler!");
                return;
            }

            int width, height;
            SettingsManager.KinectSet.GetDepthResolution(out width, out height);
            _dimage = new DepthImage(_tmgr.LastKinectDepthFrame, width, height);

            Bitmap bmp = MapVisualizer.VisualizeDepthImage(_dimage,false,false);
            image.Source = bmp.ToWpfBitmap();

            s_cutoff_down.Value = SettingsManager.PreprocessingSet.DefaultCorrectionMap.CutOffBOttom;
            s_cutoff_left.Value = SettingsManager.PreprocessingSet.DefaultCorrectionMap.CutOffLeft;
            s_cutoff_right.Value = SettingsManager.PreprocessingSet.DefaultCorrectionMap.CutOffRight;
            s_cutoff_top.Value = SettingsManager.PreprocessingSet.DefaultCorrectionMap.CutOffTop;

            l_höhe.Text = SettingsManager.RecognitionSet.TableDistance.ToString();
        }
Example #7
        /// <summary>
        /// Recognizes the Objects via a depth image and a prepared boolmap
        /// </summary>
        /// <param name="prepared_boolmap">a prepared boolmap, with only objects marked as true</param>
        /// <param name="image">the depth image, used to calculate the height of the recognized objects</param>
        /// <param name="neighbourmap">the calculated neighbourmap (see CreateNeighbourMap)</param>
        /// <returns>the list of recognized TableObjects</returns>
        public List<TableObject> RecognizeObjects(bool[,] prepared_boolmap, DepthImage image, out int[,,] neighbourmap)
        {
            //Check in a raster whether there are any true pixels
            List<TPoint> TrueRasterPoints = GetTrueRasterPoints(prepared_boolmap, image);

            //Create a neighbourmap - an int[x,y,n] array, where the int stores the count of the whitepixel-neighbours of the object (n=0) and the size of the circle/rect used
            //to count these neighbours (n=1). The circle/rect is scaled up until a defined percentage of the pixels is false, meaning no part of the object
            neighbourmap = CreateNeighbourMap(prepared_boolmap, image, TrueRasterPoints);

            //DEBUG
            if (SettingsManager.RecognitionSet.SaveDebugMaps)
            {
                Bitmap bmp = MapVisualizer.VisualizeNeighbourMap(neighbourmap, image.Width, image.Height);
                bmp.Save("neigbourmap.bmp");
            }

            //Now select the ObjectCenters (maximum neighbour values on the neighbourmap)
            List<ObjectPoint> pointlist = SelectObjectCenters(neighbourmap, image.Width, image.Height);

            //Create TableObjects from the ObjectCenters List
            return GenerateTableObjects(pointlist, image);
        }
Example #8
        public DepthImage NormalizeHeights(DepthImage source)
        {
            for (int y = 0; y < source.Height; y++)
            {
                for (int x = 0; x < source.Width; x++)
                {
                    int height = source.Data[x, y];

                    //Normalize Height (Calculate the object height in respect to the table surface)
                    height = SettingsManager.RecognitionSet.TableDistance - height;

                    //If the height is within the range of the table surface, set it to 0;
                    //also set it to 0 if the height is negative ("under" the table)
                    if (height < 0 || height < SettingsManager.RecognitionSet.TableDistanceRange)
                        height = 0;

                    //If the height is too high (above the maximal hand height), set it to 0
                    if (height > SettingsManager.RecognitionSet.HandMaximalHeight)
                        height = 0;

                    source.Data[x, y] = height;
                }
            }
            return source;
        }
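A worked example of the normalization, with assumed settings values (TableDistance = 1000 mm, TableDistanceRange = 15 mm, HandMaximalHeight = 400 mm; the numbers are illustrative, not taken from the project):

        // raw reading 1000 mm (table surface) -> height = 1000 - 1000 =   0 -> stays 0
        // raw reading  990 mm (sensor noise)  -> height = 1000 -  990 =  10 -> within range (< 15), set to 0
        // raw reading  950 mm (flat object)   -> height = 1000 -  950 =  50 -> kept as 50
        // raw reading  500 mm (too high)      -> height = 1000 -  500 = 500 -> above 400, set to 0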
Example #9
        private void DoRecognitionWork(object data)
        {
            object[] dataArray = (object[]) data;
            PlanarImage pimg = (PlanarImage) dataArray[0];
            int[] deptharray = (int[]) dataArray[1];
            Bitmap colorFrame = (Bitmap) dataArray[2];

            RecognitionDataPacket rpacket = new DataStructures.RecognitionDataPacket();
            DateTime dtBegin = DateTime.Now;

            //Create DepthImage
            DepthImage dimg = new DepthImage(deptharray, pimg.Width, pimg.Height);
            rpacket.rawDepthImage = dimg.Clone();

            //Correct the image
            DepthMapPreprocessor dmp = new DepthMapPreprocessor();
            dimg = dmp.ApplyDepthCorrection(dimg, SettingsManager.PreprocessingSet.DefaultCorrectionMap);
            dimg = dmp.NormalizeHeights(dimg);

            ObjectSeperator objectSeperator = new ObjectSeperator();

            //Separate objects
            bool[,] boolmap_object;
            int[,,] neighbourmap;
            List<TableObject> objects = objectSeperator.SeperateObjects(ref dimg, out boolmap_object, out neighbourmap);

            //if supplied, extract the relevant bitmap parts from the ColorFrame
            if (colorFrame != null)
            {
                ObjectVideoBitmapAssigner ovba = new ObjectVideoBitmapAssigner();
                ovba.AssignVideoBitmap(objects, colorFrame);
            }

            //Extract hand object from table objects
            if (objects.Where( o => o.GetType() == typeof(HandObject)).Count() > 0)
            {
                rpacket.HandObj = (HandObject)objects.Where(o => o.GetType() == typeof (HandObject)).ToArray()[0];
            }

            //Fill DataPacket with Data
            rpacket.correctedDepthImage = dimg;
            rpacket.TableObjects = objects;
            rpacket.objectmap = boolmap_object;
            rpacket.neighbourmap = neighbourmap;
            rpacket.bmpVideoFrame = colorFrame;

            TimeSpan ts = DateTime.Now - dtBegin;
            rpacket.RecognitionDuration = (int)Math.Round(ts.TotalMilliseconds);

            if (SettingsManager.RecognitionSet.SaveDebugMaps)
            {
                Bitmap bmp = MapVisualizer.VisualizeDepthImage(rpacket.rawDepthImage);
                bmp.Save("rawDepthImage.bmp");
            }

            //Event
            OnRecognitionFinished(rpacket);
        }
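The hand extraction above runs the same Where query twice; a behaviour-equivalent LINQ sketch with a single enumeration (note that OfType also matches subclasses of HandObject, which is normally the intent):

            //Extract hand object from table objects with a single query
            rpacket.HandObj = objects.OfType<HandObject>().FirstOrDefault();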
Example #10
        private void SplitUpDepthImages(ref DepthImage HandImage, ref DepthImage objectImage)
        {
            for (int x=0;x<HandImage.Width;x++)
            {
                for(int y=0;y<HandImage.Height;y++)
                {
                    int height = HandImage.Data[x, y];

                    //Keep only heights within the hand band (above ObjectMaximalHeight, up to HandMaximalHeight)
                    if (height > SettingsManager.RecognitionSet.HandMaximalHeight || height <= SettingsManager.RecognitionSet.ObjectMaximalHeight)
                        HandImage.Data[x, y] = 0;

                    //Keep only heights up to ObjectMaximalHeight in the object image
                    if (objectImage.Data[x, y] > SettingsManager.RecognitionSet.ObjectMaximalHeight)
                        objectImage.Data[x, y] = 0;
                }
            }
        }
Example #11
        public static Bitmap VisualizeDepthImage(DepthImage img, bool invert=false, bool printDepthValues = false)
        {
            //Calculate the smallest non-zero value (a value of 0 marks a reading error)
            int[] RawData = img.RawData;

            int min = 0;
            for (int i = 0; i < RawData.Length; i++)
            {
                if (RawData[i] != 0 && (min == 0 || RawData[i] < min))
                    min = RawData[i];
            }

            //Calculate max
            int max = RawData[0];
            for (int i = 1; i < RawData.Length; i++)
            {
                if (RawData[i] > max)
                    max = RawData[i];
            }

            //Avoid a division by zero for completely flat or empty images
            if (max <= min)
                max = min + 1;

            //The range between min and max has to fit into 255 grayscale levels
            double scale_multiplicator = 255.0 / (max - min);

            //Convert each pixel
            Bitmap grayscale = new Bitmap(img.Width, img.Height);

            for (var y = 0; y < img.Height; y++)
            {
                for (var x = 0; x < img.Width; x++)
                {
                    int value = img.Data[x, y];
                    int rgb_value = (int)Math.Round((value - min) * scale_multiplicator);

                    if (rgb_value < 0)
                        rgb_value = 0;
                    if (rgb_value > 255)
                        rgb_value = 255;

                    if (invert)
                        rgb_value = 255 - rgb_value;

                    Color col = Color.FromArgb(rgb_value, rgb_value, rgb_value);
                    grayscale.SetPixel(x, y, col);
                }
            }

            //Add information if desired
            if (printDepthValues)
            {
                /* Graphics g = Graphics.FromImage(grayscale);
                g.DrawString("min: " + min.ToString() + "mm max: " + max.ToString() + "mm", new Font("Tahoma", 10),
                             Brushes.Red, new PointF(0, 0));
                return new Bitmap(img.Width, img.Height, g);
                 * */
                //TODO: the resulting image turns black, fix the bug
            }

            return grayscale;
        }
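The disabled printDepthValues block most likely turns black because it returns a new, empty Bitmap instead of the one that was drawn on. A minimal sketch of a fix, drawing onto the existing grayscale bitmap and returning it (untested, as an assumption about the intended output):

            if (printDepthValues)
            {
                using (Graphics g = Graphics.FromImage(grayscale))
                using (Font font = new Font("Tahoma", 10))
                {
                    g.DrawString("min: " + min + "mm max: " + max + "mm",
                                 font, Brushes.Red, new PointF(0, 0));
                }
            }

            return grayscale;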
Example #12
        private List<TableObject> GenerateTableObjects(List<ObjectPoint> pointlist, DepthImage image)
        {
            List<TableObject> tableobjects = new List<TableObject>();

            foreach (ObjectPoint op in pointlist)
            {
                TableObject tobj = new TableObject();

                tobj.Center = new TPoint(op.X, op.Y, TPoint.PointCreationType.depth);
                tobj.CenterDefined = true;
                tobj.Radius = op.RectSize;
                tobj.Height = image.Data[op.X, op.Y];

                tableobjects.Add(tobj);
            }

            return tableobjects;
        }
Example #13
        /// <summary>
        /// Check in a raster whether there are any true pixels
        /// </summary>
        /// <param name="prepared_boolmap"></param>
        /// <param name="image"></param>
        /// <returns></returns>
        private static List<TPoint> GetTrueRasterPoints(bool[,] prepared_boolmap, DepthImage image)
        {
            List<TPoint> TrueRasterPoints = new List<TPoint>();

            for (int x = 0; x < image.Width; x = x + SettingsManager.RecognitionSet.ObjectRecognitionGridSpacing)
            {
                for (int y = 0; y < image.Height; y = y + SettingsManager.RecognitionSet.ObjectRecognitionGridSpacing)
                {
                    if (prepared_boolmap[x, y])
                        TrueRasterPoints.Add(new TPoint(x, y, TPoint.PointCreationType.depth));
                }
            }
            return TrueRasterPoints;
        }
Example #14
        /// <summary>
        /// Creates a neighbourmap - an int[x,y,n] array, where the int stores the count of the white-pixel neighbours of the object (n=0) and the size of the circle/rect used
        /// to count these neighbours (n=1). The circle/rect is scaled up until a defined percentage of the pixels is false, meaning no part of the object. The neighbourmap is only created around true raster points, for efficiency.
        /// </summary>
        /// <param name="boolmap">The boolmap</param>
        /// <param name="image">Depth Image</param>
        /// <param name="rasterpoints">The calculated Rasterpoints</param>
        /// <returns></returns>
        private static int[,,] CreateNeighbourMap(bool[,] boolmap, DepthImage image, List<TPoint> rasterpoints)
        {
            int[,,] neighbourmap = new int[image.Width,image.Height,2];

            //Do this for each region around the true rasterpoints
            foreach (TPoint rasterpoint in rasterpoints)
            {
                //Calculate the area: one grid cell starting at the point's position
                int xmin = rasterpoint.DepthX;
                int ymin = rasterpoint.DepthY;

                int xmax = rasterpoint.DepthX + SettingsManager.RecognitionSet.ObjectRecognitionGridSpacing;
                if (xmax >= image.Width)
                    xmax = image.Width - 1;

                int ymax = rasterpoint.DepthY + SettingsManager.RecognitionSet.ObjectRecognitionGridSpacing;
                if (ymax >= image.Height)
                    ymax = image.Height - 1;

                TRectangle area = new TRectangle(xmin, ymin, xmax, ymax);

                //Now calculate the values for every point in the area
                CalculateNeigbourValues(boolmap, area, ref neighbourmap, image.Width, image.Height);
            }

            return neighbourmap;
        }
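CalculateNeigbourValues itself is not part of this excerpt. Purely as an illustration of the algorithm described in the summary (grow a rect around each true pixel until a defined percentage of its pixels is false, then store the white-pixel count and the rect size), here is a hypothetical sketch; the threshold value and the plain int bounds instead of the real TRectangle members are assumptions:

        // Hypothetical sketch of the missing helper; not the project's implementation.
        private static void CalculateNeigbourValuesSketch(bool[,] boolmap, int xmin, int ymin, int xmax, int ymax,
                                                          ref int[,,] neighbourmap, int width, int height)
        {
            const double maxFalsePercentage = 30.0; //assumed threshold

            for (int x = xmin; x <= xmax; x++)
            {
                for (int y = ymin; y <= ymax; y++)
                {
                    if (!boolmap[x, y])
                        continue;

                    int rectSize = 0;
                    int trueCount = 1;

                    //Grow the rect until too many of its pixels lie outside the object
                    while (rectSize < width)
                    {
                        int total = 0;
                        int count = 0;

                        //Count true pixels in the next larger rect around (x, y)
                        for (int px = x - rectSize - 1; px <= x + rectSize + 1; px++)
                        {
                            for (int py = y - rectSize - 1; py <= y + rectSize + 1; py++)
                            {
                                if (px < 0 || py < 0 || px >= width || py >= height)
                                    continue;

                                total++;
                                if (boolmap[px, py])
                                    count++;
                            }
                        }

                        double falsePercentage = 100.0 * (total - count) / total;
                        if (falsePercentage > maxFalsePercentage)
                            break;

                        rectSize++;
                        trueCount = count;
                    }

                    //n=0: white-pixel neighbour count, n=1: rect size used to count them
                    neighbourmap[x, y, 0] = trueCount;
                    neighbourmap[x, y, 1] = rectSize;
                }
            }
        }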