Example #1
        public object Clone()
        {
            //Build a copy of this packet, cloning each reference-type member that is set
            RecognitionDataPacket p = new RecognitionDataPacket();
            if (correctedDepthImage != null)
                p.correctedDepthImage = correctedDepthImage.Clone();
            if (rawDepthImage != null)
                p.rawDepthImage = rawDepthImage.Clone();
            if (TableObjects != null)
                p.TableObjects = CloneTO();
            if (neighbourmap != null)
                p.neighbourmap = (int[,,])neighbourmap.Clone();
            if (objectmap != null)
                p.objectmap = (bool[,])objectmap.Clone();
            if (bmpVideoFrame != null)
                p.bmpVideoFrame = (Bitmap)bmpVideoFrame.Clone();
            if (bmpRawDepth != null)
                p.bmpRawDepth = (Bitmap)bmpRawDepth.Clone();
            if (bmpCorrectedDepth != null)
                p.bmpCorrectedDepth = (Bitmap)bmpCorrectedDepth.Clone();
            if (HandObj != null)
                p.HandObj = (HandObject)HandObj.Clone();
            p.RecognitionDuration = this.RecognitionDuration;
            return p;
        }
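
A minimal usage sketch, assuming a caller that already holds a populated packet (the `packet` and `snapshot` variables are illustrative, not part of the original source). Clone returns object, so the result needs a cast back to RecognitionDataPacket:

        //Hypothetical caller: snapshot the packet before handing it to the UI,
        //so later recognition runs cannot mutate what the UI is showing.
        RecognitionDataPacket snapshot = (RecognitionDataPacket)packet.Clone();
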
        public bool Init()
        {
            //Stop everything
            Stop();

            //init the kinect
            _kinectController = new KinectController();

            try
            {
                if (!_kinectController.Init())
                    return false;

                _kinectController.OnDepthFrame += new KinectController.DepthFrameHandler(_kinectController_OnDepthFrame);
                _kinectController.OnVideoFrame += new KinectController.VideoFrameHandler(_kinectController_OnVideoFrame);
            }
            catch (Exception)
            {
                return false;
            }

            //init settings
            int width = 0;
            int height = 0;
            SettingsManager.KinectSet.GetDepthResolution(out width, out height);

            //init the other classes
            PositionMapper.AssignKinectController(_kinectController);
            _rthread = new RecognitionThread();
            _rthread.OnRecognitionFinished += new RecognitionThread.RecognitionFinished(_rthread_OnRecognitionFinished);
            Forms = new FormSupplier(this);
            _lastReconPacket = new RecognitionDataPacket();

            //Everything worked, return true
            return true;
        }
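
A hedged sketch of how Init might be driven at startup (the owning class name RecognitionCore and the surrounding calling code are assumptions; only the Init member is taken from the original):

        //Hypothetical startup code; replace RecognitionCore with the class that owns Init().
        var core = new RecognitionCore();
        if (!core.Init())
        {
            //Init returns false if the Kinect could not be initialized or its events wired up
            Console.WriteLine("Kinect initialization failed.");
            return;
        }
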
        void _rthread_OnRecognitionFinished(RecognitionDataPacket result)
        {
            //Save recognition data
            lock (lock_reconpacket)
            {
                _lastReconPacket = result;
            }

            //set the running property to false
            _recognitionThreadrunning = false;

            //raise event
            OnNewRecognitionPacket();
        }
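
Because _lastReconPacket is replaced on the recognition thread inside lock (lock_reconpacket), readers on other threads should take the same lock. A sketch of such an accessor, assuming it lives in the same class (the property itself is not part of the original source):

        //Hypothetical accessor: read the latest packet under the same lock
        //that _rthread_OnRecognitionFinished uses when writing it.
        public RecognitionDataPacket LastPacket
        {
            get
            {
                lock (lock_reconpacket)
                {
                    return _lastReconPacket;
                }
            }
        }
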
        private void DoRecognitionWork(object data)
        {
            object[] dataArray = (object[]) data;
            PlanarImage pimg = (PlanarImage) dataArray[0];
            int[] deptharray = (int[]) dataArray[1];
            Bitmap colorFrame = (Bitmap) dataArray[2];

            RecognitionDataPacket rpacket = new DataStructures.RecognitionDataPacket();
            DateTime dtBegin = DateTime.Now;

            //Create DepthImage
            DepthImage dimg = new DepthImage(deptharray, pimg.Width, pimg.Height);
            rpacket.rawDepthImage = dimg.Clone();

            //Correct the image
            DepthMapPreprocessor dmp = new DepthMapPreprocessor();
            dimg = dmp.ApplyDepthCorrection(dimg, SettingsManager.PreprocessingSet.DefaultCorrectionMap);
            dimg = dmp.NormalizeHeights(dimg);

            ObjectSeperator objectSeperator = new ObjectSeperator();

            //Separate objects
            bool[,] boolmap_object;
            int[,,] neighbourmap;
            List<TableObject> objects = objectSeperator.SeperateObjects(ref dimg, out boolmap_object, out neighbourmap);

            //if supplied, extract the relevant bitmap parts from the ColorFrame
            if (colorFrame != null)
            {
                ObjectVideoBitmapAssigner ovba = new ObjectVideoBitmapAssigner();
                ovba.AssignVideoBitmap(objects, colorFrame);
            }

            //Extract hand object from table objects
            TableObject hand = objects.FirstOrDefault(o => o.GetType() == typeof(HandObject));
            if (hand != null)
                rpacket.HandObj = (HandObject)hand;

            //Fill DataPacket with Data
            rpacket.correctedDepthImage = dimg;
            rpacket.TableObjects = objects;
            rpacket.objectmap = boolmap_object;
            rpacket.neighbourmap = neighbourmap;
            rpacket.bmpVideoFrame = colorFrame;

            TimeSpan ts = DateTime.Now - dtBegin;
            rpacket.RecognitionDuration = (int)Math.Round(ts.TotalMilliseconds);

            if (SettingsManager.RecognitionSet.SaveDebugMaps)
            {
                Bitmap bmp = MapVisualizer.VisualizeDepthImage(rpacket.rawDepthImage);
                bmp.Save("rawDepthImage.bmp");
            }

            //Event
            OnRecognitionFinished(rpacket);
        }
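
DoRecognitionWork expects its object argument to be an object[] ordered as PlanarImage, int[], Bitmap. A sketch of how a caller might pack and dispatch that array (the ThreadPool dispatch and the local variable names are assumptions; the original may start the work through RecognitionThread instead):

        //Hypothetical dispatch (requires using System.Threading):
        //pack the inputs in the order DoRecognitionWork unpacks them.
        object[] data = new object[] { planarImage, depthArray, videoBitmap };
        ThreadPool.QueueUserWorkItem(DoRecognitionWork, data);
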