Example #1
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            depthFrame = e.OpenDepthImageFrame();
            colorFrame = e.OpenColorImageFrame();

            if (depthFrame != null && colorFrame != null)
            {
                depthFrame.CopyPixelDataTo(depthPixels);
                colorFrame.CopyPixelDataTo(colorPixels);

                Image<Bgr, Byte> convertImage = colorFrame.ToOpenCVImage<Bgr, Byte>();

                depthBitmap.WritePixels(new Int32Rect(0, 0, sensor.DepthStream.FrameWidth, sensor.DepthStream.FrameHeight), depthPixels, depthBitmapStride, 0);

                if (depthPixels != null && isCombineDepthToColor)
                {
                    RangeFilter();
                }

                colorBitmap.WritePixels(new Int32Rect(0, 0, sensor.ColorStream.FrameWidth, sensor.ColorStream.FrameHeight), colorPixels, colorBitmapStride, 0);
                colorImageViewer.Source = colorBitmap;
                depthImageViewer.Source = depthBitmap;

                // Dispose the frames so the sensor can continue delivering new ones
                depthFrame.Dispose();
                colorFrame.Dispose();
            }
        }
 void Record(ColorImageFrame image)
 {
     if (!_isRecording)
     {
         _fileName    = string.Format("{0}{1}{2}", _baseDirectory, DateTime.Now.ToString("MMddyyyyHmmss"), ".avi");
         _isRecording = true;
     }
     _videoArray.Add(image.ToOpenCVImage<Rgb, Byte>());
 }
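Every snippet on this page calls a ToOpenCVImage<TColor, TDepth>() extension method that is defined elsewhere in the respective projects. As a point of reference, a minimal sketch of such an extension is shown below; it assumes the common approach of copying the frame's 32-bit BGRA pixel data into a System.Drawing.Bitmap and handing that to Emgu CV's Image constructor. The class and helper names (EmguImageExtensions, ToBitmap) are illustrative, not taken from the examples.

using System.Drawing;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;
using Emgu.CV;
using Microsoft.Kinect;

public static class EmguImageExtensions
{
    // Copies the frame's 32-bit BGRA pixel data into a System.Drawing.Bitmap.
    // Assumes the bitmap stride equals Width * 4 (true for the standard 640x480 color stream).
    public static Bitmap ToBitmap(this ColorImageFrame frame)
    {
        byte[] pixelData = new byte[frame.PixelDataLength];
        frame.CopyPixelDataTo(pixelData);

        Bitmap bitmap = new Bitmap(frame.Width, frame.Height, PixelFormat.Format32bppRgb);
        BitmapData data = bitmap.LockBits(
            new Rectangle(0, 0, bitmap.Width, bitmap.Height),
            ImageLockMode.WriteOnly,
            bitmap.PixelFormat);
        Marshal.Copy(pixelData, 0, data.Scan0, pixelData.Length);
        bitmap.UnlockBits(data);
        return bitmap;
    }

    // Wraps the frame in an Emgu CV Image so OpenCV routines can operate on it.
    public static Image<TColor, TDepth> ToOpenCVImage<TColor, TDepth>(this ColorImageFrame frame)
        where TColor : struct, IColor
        where TDepth : new()
    {
        return new Image<TColor, TDepth>(frame.ToBitmap());
    }
}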
Example #3
        private void PollColorImageStream()
        {
            if (this._Kinect == null)
            {
                // no kinect
            }

            else
            {
                try
                {
                    using (ColorImageFrame frame = this._Kinect.ColorStream.OpenNextFrame(valDelayOpenNextFrame))
                    {
                        if (frame != null)
                        {
                            frame.CopyPixelDataTo(this._ColorImagePixelData);
                            if (!lowResource)
                            {
                                this.ColorImageElement.Dispatcher.BeginInvoke(new Action(() =>
                                {
                                    this._ColorImageBitmap.WritePixels(this._ColorImageBitmapRect, this._ColorImagePixelData, this._ColorImageStride, 0);
                                }));
                            }
                            // Buffer frames so that SaveVideo() can later write them out through OpenCV
                            if (currentFrame % frameAcceptance == 0) // only keep every frameAcceptance-th frame
                            {
                                _videoArray.Add(frame.ToOpenCVImage<Rgb, Byte>());
                            }
                            }
                            if (_videoArray.Count() > recordLength / frameAcceptance) // Frame limiter (ideally 4x where x is length of event)
                            {
                                vidCounter++;
                                SaveVideo();
                                _videoArray.Clear();
                            }
                        }
                    }
                }
                catch (Exception ex)
                {
                    //report error?
                }
            }
        }
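Example #3 buffers frames into _videoArray and periodically calls SaveVideo(), whose body is not shown here. Below is a minimal sketch of such a helper, assuming _videoArray is a List<Image<Rgb, Byte>> and the clip is written with Emgu CV's VideoWriter; the file name pattern and the 30 fps rate are illustrative assumptions.

using System.Collections.Generic;
using Emgu.CV;
using Emgu.CV.Structure;

// Hypothetical helper: writes the buffered frames to an AVI file and relies on
// the caller (PollColorImageStream) to clear _videoArray afterwards.
private void SaveVideo()
{
    if (_videoArray.Count == 0)
    {
        return;
    }

    int width = _videoArray[0].Width;
    int height = _videoArray[0].Height;
    string fileName = string.Format("{0}clip_{1}.avi", _baseDirectory, vidCounter);

    // 30 fps, color output; the codec is left at the VideoWriter default for simplicity.
    using (VideoWriter writer = new VideoWriter(fileName, 30, width, height, true))
    {
        foreach (Image<Rgb, Byte> frame in _videoArray)
        {
            writer.WriteFrame(frame);
        }
    }
}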
Example #4
        /// <summary>
        /// Event handler for Kinect sensor's ColorFrameReady event
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void SensorColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
        {
            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                if (colorFrame != null)
                {
                    // Copy the pixel data from the image to a temporary array
                    colorFrame.CopyPixelDataTo(this.colorPixels);
                    Image<Bgr, Byte>  image2     = colorFrame.ToOpenCVImage<Bgr, Byte>(); // convert the Kinect color frame to an Emgu CV image
                    Image<Gray, Byte> grayFrame  = image2.Convert<Gray, Byte>();
                    Image<Gray, Byte> cannyFrame = grayFrame.Canny(new Gray(100), new Gray(60));
                    //image1.Source = cannyFrame.ToBitmapSource(); // convert the Emgu image to a BitmapSource for display
                    if (ck == true)
                    {
                        ck = false;
                        if (goodsRecogSys != null)
                        {
                            goodsRecogSys.SetupInputImage(image2);
                        }
                        else
                        {
                            goodsRecogSys = new GoodsRecognition(image2);
                        }

                        string goodData = goodsRecogSys.RunRecognition(true);
                        if (synthesizer.State != SynthesizerState.Speaking)
                        {
                            synthesizer.SpeakAsync(goodData);
                        }
                    }
                    // Write the pixel data into our bitmap
                    this.colorBitmap.WritePixels(
                        new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                        this.colorPixels,
                        this.colorBitmap.PixelWidth * sizeof(int),
                        0);
                }
            }
        }
        private void Pulse()
        {
            using (ColorImageFrame imageFrame = _kinectSensor.ColorStream.OpenNextFrame(200))
            {
                if (imageFrame == null)
                {
                    return;
                }

                using (Image<Bgr, byte> image = imageFrame.ToOpenCVImage<Bgr, byte>())
                    using (MemStorage storage = new MemStorage()) //create storage for motion components
                    {
                        if (_forgroundDetector == null)
                        {
                            _forgroundDetector = new BGStatModel<Bgr>(image
                                                                       , Emgu.CV.CvEnum.BG_STAT_TYPE.GAUSSIAN_BG_MODEL);
                        }

                        _forgroundDetector.Update(image);

                        //update the motion history
                        _motionHistory.Update(_forgroundDetector.ForgroundMask);

                        //get a copy of the motion mask and enhance its color
                        double[] minValues, maxValues;
                        System.Drawing.Point[] minLoc, maxLoc;
                        _motionHistory.Mask.MinMax(out minValues, out maxValues
                                                   , out minLoc, out maxLoc);
                        Image<Gray, Byte> motionMask = _motionHistory.Mask
                                                        .Mul(255.0 / maxValues[0]);

                        //create the motion image
                        Image<Bgr, Byte> motionImage = new Image<Bgr, byte>(motionMask.Size);
                        motionImage[0] = motionMask;

                        //Threshold to define a motion area
                        //reduce the value to detect smaller motion
                        double minArea = 100;

                        storage.Clear(); //clear the storage
                        Seq<MCvConnectedComp> motionComponents = _motionHistory.GetMotionComponents(storage);
                        bool isMotionDetected = false;
                        //iterate through each of the motion components
                        for (int c = 0; c < motionComponents.Count(); c++)
                        {
                            MCvConnectedComp comp = motionComponents[c];
                            //reject components whose area is too small
                            if (comp.area < minArea)
                            {
                                continue;
                            }

                            OnDetection();
                            isMotionDetected = true;
                            break;
                        }
                        if (isMotionDetected == false)
                        {
                            OnDetectionStopped();
                            this.Dispatcher.Invoke(new Action(() => rgbImage.Source = null));
                            StopRecording();
                            return;
                        }

                        this.Dispatcher.Invoke(
                            new Action(() => rgbImage.Source = imageFrame.ToBitmapSource())
                            );
                        Record(imageFrame);
                    }
            }
        }
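Example #4 (in its commented-out line) and the Pulse() method above also rely on ToBitmapSource() extensions to display Emgu CV images and Kinect color frames in a WPF Image control. A minimal sketch is given below, assuming the usual HBITMAP interop route and reusing the hypothetical ToBitmap() helper sketched after Example #1; the class name is illustrative.

using System;
using System.Runtime.InteropServices;
using System.Windows;
using System.Windows.Interop;
using System.Windows.Media.Imaging;
using Emgu.CV;
using Microsoft.Kinect;

public static class BitmapSourceExtensions
{
    [DllImport("gdi32")]
    private static extern int DeleteObject(IntPtr handle);

    // Converts a GDI+ bitmap into a WPF BitmapSource, releasing the interop HBITMAP afterwards.
    public static BitmapSource ToBitmapSource(this System.Drawing.Bitmap bitmap)
    {
        IntPtr hBitmap = bitmap.GetHbitmap();
        try
        {
            return Imaging.CreateBitmapSourceFromHBitmap(
                hBitmap,
                IntPtr.Zero,
                Int32Rect.Empty,
                BitmapSizeOptions.FromEmptyOptions());
        }
        finally
        {
            DeleteObject(hBitmap);
        }
    }

    // Emgu CV images expose their pixel data as a System.Drawing.Bitmap via IImage.Bitmap.
    public static BitmapSource ToBitmapSource(this IImage image)
    {
        using (System.Drawing.Bitmap bitmap = image.Bitmap)
        {
            return bitmap.ToBitmapSource();
        }
    }

    // Kinect color frames go through the hypothetical ToBitmap() helper sketched earlier.
    public static BitmapSource ToBitmapSource(this ColorImageFrame frame)
    {
        using (System.Drawing.Bitmap bitmap = frame.ToBitmap())
        {
            return bitmap.ToBitmapSource();
        }
    }
}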