Example #1
        private KinectHelper(TransformSmoothParameters tsp, bool near = false, 
                             ColorImageFormat colorFormat = ColorImageFormat.RgbResolution1280x960Fps12, 
                             DepthImageFormat depthFormat = DepthImageFormat.Resolution640x480Fps30)
        {
            _kinectSensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);

            if (_kinectSensor == null)
            {
                throw new Exception("No Kinect sensor found.");
            }
            if (near)
            {
                _kinectSensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                _kinectSensor.DepthStream.Range = DepthRange.Near;
                _kinectSensor.SkeletonStream.EnableTrackingInNearRange = true;
            }

            DepthImageFormat = depthFormat;
            ColorImageFormat = colorFormat;

            _kinectSensor.SkeletonStream.Enable(tsp);
            _kinectSensor.ColorStream.Enable(colorFormat);
            _kinectSensor.DepthStream.Enable(depthFormat);
            _kinectSensor.AllFramesReady += AllFramesReady;

            _kinectSensor.Start();
            _faceTracker = new FaceTracker(_kinectSensor);
        }
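This constructor is private, so a real caller would go through whatever factory or singleton accessor the class exposes. A minimal usage sketch, assuming such an accessor exists; the smoothing values are illustrative, not tuned:

        // Illustrative smoothing parameters (placeholder values, not tuned).
        var smoothing = new TransformSmoothParameters
        {
            Smoothing = 0.5f,
            Correction = 0.5f,
            Prediction = 0.5f,
            JitterRadius = 0.05f,
            MaxDeviationRadius = 0.04f
        };
        // Hypothetical accessor; the private constructor suggests a singleton or factory.
        // var helper = KinectHelper.Create(smoothing, near: true);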
Example #2

        public void ProcessData(KinectSensor kinectSensor, ColorImageFormat colorImageFormat,
                                 byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage,
                                 Skeleton[] skeletons, int skeletonFrameNumber)
        {
            if (skeletons == null)
            {
                return;
            }

            // Update the list of trackers and the trackers with the current frame information
            foreach (Skeleton skeleton in skeletons)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of every skeleton, tracked or untracked.
                    if (!this._trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this._trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this._trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(kinectSensor, colorImageFormat, colorImage, depthImageFormat,
                                                         depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrameNumber;
                    }
                }
            }

            RemoveOldTrackers(skeletonFrameNumber);
        }
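RemoveOldTrackers is called above but not shown. A plausible sketch in the spirit of the Microsoft FaceTrackingBasics sample this code follows; the MaxMissedFrames constant and the Dispose call are assumptions:

        private void RemoveOldTrackers(int currentFrameNumber)
        {
            var trackersToRemove = new List<int>();

            foreach (var tracker in this._trackedSkeletons)
            {
                // Drop trackers whose skeleton has not been seen for a while.
                uint missedFrames = (uint)currentFrameNumber - (uint)tracker.Value.LastTrackedFrame;
                if (missedFrames > MaxMissedFrames) // hypothetical constant, e.g. 100
                {
                    trackersToRemove.Add(tracker.Key);
                }
            }

            foreach (int trackingId in trackersToRemove)
            {
                this._trackedSkeletons[trackingId].Dispose(); // assumes SkeletonFaceTracker is IDisposable
                this._trackedSkeletons.Remove(trackingId);
            }
        }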
Example #3

        /////////////////////////////////////////
        // CONSTRUCTOR(S)/DESTRUCTOR(S)
        /////////////////////////////////////////
        public KinectManager(ColorImageFormat p_colour_format,
                             DepthImageFormat p_depth_format,
                             KinectGame_WindowsXNA p_game)
        {
            // Initialise the Kinect selector...
            this.colour_image_format = p_colour_format;
            this.depth_image_format = p_depth_format;
            this.root_game = p_game;

            this.colour_stream = null;
            this.depth_stream = null;
            this.skeleton_stream = null;

            this.debug_video_stream_dimensions = new Vector2(200, 150);

            status_map = new Dictionary<KinectStatus, string>();
            KinectSensor.KinectSensors.StatusChanged += this.KinectSensorsStatusChanged; // handler function for changes in the Kinect system
            this.DiscoverSensor();

            this.status_map.Add(KinectStatus.Undefined, "UNKNOWN STATUS MESSAGE");
            this.status_map.Add(KinectStatus.Connected, "Connected.");
            this.status_map.Add(KinectStatus.DeviceNotGenuine, "Detected device is not genuine!");
            this.status_map.Add(KinectStatus.DeviceNotSupported, "Detected device is not supported!");
            this.status_map.Add(KinectStatus.Disconnected, "Disconnected/Device required!");
            this.status_map.Add(KinectStatus.Error, "Error in Kinect sensor!");
            this.status_map.Add(KinectStatus.Initializing, "Initialising Kinect sensor...");
            this.status_map.Add(KinectStatus.InsufficientBandwidth, "Insufficient bandwidth for Kinect sensor!");
            this.status_map.Add(KinectStatus.NotPowered, "Detected device is not powered!");
            this.status_map.Add(KinectStatus.NotReady, "Detected device is not ready!");

            // Load the status message font:
            this.msg_font = this.root_game.Content.Load<SpriteFont>("Fonts/Segoe16");
            this.msg_label_pos = new Vector2(4.0f, 2.0f);
        }
Example #4
        public VideoShot(ColorImageProcesser processer, MainWindow window, int videoNum,
            KinectSensor kinectDevice,
            int dWidth, int dHeight,
            int cWidth, int cHeight,
            DepthImageFormat dImageFormat, ColorImageFormat cImageFormat)
        {
            parentProcesser = processer;
            videoName = PadLeft(videoNum);
            _windowUI = window;
            _kinectDevice = kinectDevice;

            depthFrameWidth = dWidth;
            depthFrameHeight = dHeight;

            colorFrameWidth = cWidth;
            colorFrameHeight = cHeight;

            depthFrameStride = depthFrameWidth * BytesPerPixel;
            colorFrameStride = colorFrameWidth * BytesPerPixel;

            depthImageFormat = dImageFormat;
            colorImageFormat = cImageFormat;

            screenHeight = SystemParameters.PrimaryScreenHeight;
            screenWidth = SystemParameters.PrimaryScreenWidth;

            Start();
        }
Example #5
 public CoordinateConverter(IEnumerable<byte> kinectParams, ColorImageFormat cif,
                            DepthImageFormat dif)
 {
     mapper = new CoordinateMapper(kinectParams);
     this.cif = cif;
     this.dif = dif;
 }
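A minimal usage sketch, assuming the serialized mapper parameters come from a live Kinect v1 sensor (CoordinateMapper.ColorToDepthRelationalParameters, available in SDK 1.6 and later):

 var sensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);
 IEnumerable<byte> kinectParams = sensor.CoordinateMapper.ColorToDepthRelationalParameters;
 var converter = new CoordinateConverter(kinectParams,
                                         ColorImageFormat.RgbResolution640x480Fps30,
                                         DepthImageFormat.Resolution640x480Fps30);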
Example #6
        /// <summary>
        /// Constructor
        /// </summary>
        public MainWindow()
        {
            InitializeComponent();

            // Init Kinect sensor
            this.kinect = KinectSensor.GetDefault();

            if (kinect == null)
            {
                this.showCloseDialog("Kinect is not connected or unavailable. The application will now exit.");
                return;
            }

            this.colorImageFormat = ColorImageFormat.Bgra;
            this.colorFrameDescription = this.kinect.ColorFrameSource.CreateFrameDescription(this.colorImageFormat);
            this.colorFrameReader = this.kinect.ColorFrameSource.OpenReader();
            this.colorFrameReader.FrameArrived += ColorFrameReader_FrameArrived;
            bodyFrameReader = kinect.BodyFrameSource.OpenReader();
            bodyFrameReader.FrameArrived += bodyFrameReader_FrameArrived;

            this.kinect.Open();
            this.bodies = new Body[kinect.BodyFrameSource.BodyCount];

            KinectRegion.SetKinectRegion(this, kinectRegion);
            this.kinectRegion.KinectSensor = KinectSensor.GetDefault();

            this.isTraining = false;
        }
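showCloseDialog is not part of the snippet; a minimal sketch of what it presumably does (name taken from the call above, behavior assumed):

        private void showCloseDialog(string message)
        {
            // Show the message, then shut the window down.
            MessageBox.Show(message);
            this.Close();
        }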
Example #7

 public SkeletalTracker(Rectangle fullscreen, CoordinateMapper coordinateMapper, ColorImageFormat colorFormat)
 {
     _coordinateMapper = coordinateMapper;
     _colorFormat = colorFormat;
     _fullscreen = fullscreen;
     _aspectRatio = _fullscreen.Width / (double)_fullscreen.Height;
 }
Example #8
        /// <summary>
        /// Initializes a new instance of the FaceTracker class from a reference to the Kinect device.
        /// </summary>
        /// <param name="sensor">Reference to the Kinect sensor instance</param>
        public FaceTracker(KinectSensor sensor)
        {
            if (sensor == null)
            {
                throw new ArgumentNullException("sensor");
            }

            if (!sensor.ColorStream.IsEnabled)
            {
                throw new InvalidOperationException("Color stream is not enabled yet.");
            }

            if (!sensor.DepthStream.IsEnabled)
            {
                throw new InvalidOperationException("Depth stream is not enabled yet.");
            }

            this.operationMode = OperationMode.Kinect;
            this.coordinateMapper = sensor.CoordinateMapper;
            this.initializationColorImageFormat = sensor.ColorStream.Format;
            this.initializationDepthImageFormat = sensor.DepthStream.Format;

            var newColorCameraConfig = new CameraConfig(
                (uint)sensor.ColorStream.FrameWidth,
                (uint)sensor.ColorStream.FrameHeight,
                sensor.ColorStream.NominalFocalLengthInPixels,
                FaceTrackingImageFormat.FTIMAGEFORMAT_UINT8_B8G8R8X8);
            var newDepthCameraConfig = new CameraConfig(
                (uint)sensor.DepthStream.FrameWidth,
                (uint)sensor.DepthStream.FrameHeight,
                sensor.DepthStream.NominalFocalLengthInPixels,
                FaceTrackingImageFormat.FTIMAGEFORMAT_UINT16_D13P3);
            this.Initialize(newColorCameraConfig, newDepthCameraConfig, IntPtr.Zero, IntPtr.Zero, this.DepthToColorCallback);
        }
Example #9

        private void KinectSensorOnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            using (var colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame())
            {
                if (colorImageFrame == null)
                {
                    return;
                }

                // Make a copy of the color frame for displaying.
                var haveNewFormat = this.currentColorImageFormat != colorImageFrame.Format;
                if (haveNewFormat)
                {
                    this.currentColorImageFormat = colorImageFrame.Format;
                    this.colorImageData = new byte[colorImageFrame.PixelDataLength];
                    this.colorImageWritableBitmap = new WriteableBitmap(
                        colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                    ColorImage.Source = this.colorImageWritableBitmap;
                }

                colorImageFrame.CopyPixelDataTo(this.colorImageData);
                this.colorImageWritableBitmap.WritePixels(
                    new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                    this.colorImageData,
                    colorImageFrame.Width * Bgr32BytesPerPixel,
                    0);

            }
        }
Example #10

        public MainWindow()
        {
            InitializeComponent();
            try
            {
                // Get hold of the Kinect itself; as I recall, this ends up false (IsOpen) when nothing is connected
                this.kinect = KinectSensor.GetDefault();
                // Specify the format (RGB, etc.) of the images to read, and configure the reader that reads them
                this.colorImageFormat = ColorImageFormat.Bgra;
                this.colorFrameDescription = this.kinect.ColorFrameSource.CreateFrameDescription(this.colorImageFormat);
                this.colorFrameReader = this.kinect.ColorFrameSource.OpenReader();
                this.colorFrameReader.FrameArrived += colorFrameReader_FrameArrived;
                this.kinect.Open(); // Start the Kinect!
                if (!kinect.IsOpen)
                {
                    this.errorLog.Visibility = Visibility.Visible;
                    this.errorLog.Content = "Kinect not found! Too bad!";
                    throw new Exception("The Kinect could not be found!!!");
                }
                // Create an array to hold the bodies
                bodies = new Body[kinect.BodyFrameSource.BodyCount];

                // Open the body frame reader
                bodyFrameReader = kinect.BodyFrameSource.OpenReader();
                bodyFrameReader.FrameArrived += bodyFrameReader_FrameArrived;
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                Close();
            }
        }
Example #11
        /// <summary>
        /// Initializes a new instance of the KinectChooser class.
        /// </summary>
        /// <param name="game">The related game object.</param>
        /// <param name="colorFormat">The desired color image format.</param>
        /// <param name="depthFormat">The desired depth image format.</param>
        public KinectChooser(Game game, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
            : base(game)
        {
            this.colorImageFormat = colorFormat;
            this.depthImageFormat = depthFormat;

            this.nearMode = false;
            this.seatedMode = false;
            this.SimulateMouse = false;

            if (!Game1.SIMULATE_NO_KINECT)
            {
                KinectSensor.KinectSensors.StatusChanged += this.KinectSensors_StatusChanged;
                this.DiscoverSensor();
            }

            this.statusMap.Add(KinectStatus.Undefined, "Not connected, or in use");
            this.statusMap.Add(KinectStatus.Connected, string.Empty);
            this.statusMap.Add(KinectStatus.DeviceNotGenuine, "Device Not Genuine");
            this.statusMap.Add(KinectStatus.DeviceNotSupported, "Device Not Supported");
            this.statusMap.Add(KinectStatus.Disconnected, "Required");
            this.statusMap.Add(KinectStatus.Error, "Error");
            this.statusMap.Add(KinectStatus.Initializing, "Initializing...");
            this.statusMap.Add(KinectStatus.InsufficientBandwidth, "Insufficient Bandwidth");
            this.statusMap.Add(KinectStatus.NotPowered, "Not Powered");
            this.statusMap.Add(KinectStatus.NotReady, "Not Ready");
        }
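DiscoverSensor is called by several of these choosers but never shown. A plausible sketch modeled on the XNA KinectChooser sample; the Sensor and LastStatus members are assumptions:

        private void DiscoverSensor()
        {
            this.Sensor = KinectSensor.KinectSensors.FirstOrDefault();
            this.LastStatus = this.Sensor != null ? this.Sensor.Status : KinectStatus.Disconnected;

            if (this.Sensor != null && this.Sensor.Status == KinectStatus.Connected)
            {
                try
                {
                    this.Sensor.ColorStream.Enable(this.colorImageFormat);
                    this.Sensor.DepthStream.Enable(this.depthImageFormat);
                    this.Sensor.SkeletonStream.Enable();
                    this.Sensor.Start();
                }
                catch (System.IO.IOException)
                {
                    // The sensor is already in use by another application.
                    this.Sensor = null;
                }
            }
        }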
Example #12

        protected override void OnKinectSensorChanged(object sender, KinectSensorManagerEventArgs<KinectSensor> args)
        {
            if (null == args)
            {
                throw new ArgumentNullException("args");
            }

            if (null != args.OldValue)
            {
                args.OldValue.ColorFrameReady -= this.ColorImageReady;

                if (!this.RetainImageOnSensorChange)
                {
                    kinectColorImage.Source = null;
                    this.lastImageFormat = ColorImageFormat.Undefined;
                }
            }

            if ((null != args.NewValue) && (KinectStatus.Connected == args.NewValue.Status))
            {
                ResetFrameRateCounters();

                if (ColorImageFormat.RawYuvResolution640x480Fps15 == args.NewValue.ColorStream.Format)
                {
                    throw new NotImplementedException("RawYuv conversion is not yet implemented.");
                }
                else
                {
                    args.NewValue.ColorFrameReady += this.ColorImageReady;
                }
            }
        }
Example #13

        private void KinectSensorOnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            using (var colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame())
            {
                if (colorImageFrame == null)
                {
                    return;
                }

                // Make a copy of the color frame for displaying.
                var haveNewFormat = this.currentColorImageFormat != colorImageFrame.Format;
                if (haveNewFormat)
                {
                    this.currentColorImageFormat = colorImageFrame.Format;
                    this.colorImageData = new byte[colorImageFrame.PixelDataLength];
                    this.colorImageWritableBitmap = new WriteableBitmap(
                        colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                    ColorImage.Source = this.colorImageWritableBitmap;
                }

                colorImageFrame.CopyPixelDataTo(this.colorImageData);
                this.colorImageWritableBitmap.WritePixels(
                    new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                    this.colorImageData,
                    colorImageFrame.Width * Bgr32BytesPerPixel,
                    0);

                /*
                double length = Point.Subtract(faceTrackingViewer.ppLeft, faceTrackingViewer.ppRight).Length;
                tB.Text = length.ToString();
                if (length > 19)
                {
                    elp.Fill = Brushes.Red;
                }
                else
                {
                    elp.Fill = Brushes.Green;
                }
                */

                double mouthWidth = faceTrackingViewer.mouthWidth;
                double noseWidth = faceTrackingViewer.noseWidth;

                double threshold = noseWidth * modifyValue;

                tBMouthDistance.Text = mouthWidth.ToString();
                tbThreshold.Text = threshold.ToString();

                if (mouthWidth > threshold)
                {
                    elp.Fill = Brushes.Red;
                }
                else
                {
                    elp.Fill = Brushes.Green;
                }
            }
        }
Example #14
 /// <summary>
 /// Map PointSkeleton3D to Point2D
 /// </summary>
 /// <param name="pointSkeleton3D">3D skeleton-space point to map</param>
 /// <param name="colorImageFormat">Color image format used for the mapping</param>
 /// <returns>The corresponding 2D point in color-image space</returns>
 public Point2D MapSkeletonPointToColorPoint(PointSkeleton3D pointSkeleton3D, ColorImageFormat colorImageFormat)
 {
     SkeletonPoint point = new SkeletonPoint();
     point.X = pointSkeleton3D.X;
     point.Y = pointSkeleton3D.Y;
     point.Z = pointSkeleton3D.Z;
     ColorImagePoint imgPoint = mapper.MapSkeletonPointToColorPoint(point, colorImageFormat);
     return new Point2D(imgPoint.X, imgPoint.Y);
 }
Example #15
 /// <summary>
 /// Map a PointSkeleton3D list to a Point2D list
 /// </summary>
 /// <param name="pointSkeleton3D">3D skeleton-space points to map</param>
 /// <param name="colorImageFormat">Color image format used for the mapping</param>
 /// <returns>The corresponding 2D points in color-image space</returns>
 public List<Point2D> MapSkeletonPointsToColorPoints(List<PointSkeleton3D> pointSkeleton3D, ColorImageFormat colorImageFormat)
 {
     List<Point2D> ret = new List<Point2D>();
     foreach (var element in pointSkeleton3D)
     {
         ret.Add(MapSkeletonPointToColorPoint(element, colorImageFormat));
     }
     return ret;
 }
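A short usage sketch: projecting two illustrative camera-space joint positions (in metres) into 640x480 color space. PointSkeleton3D is this project's own type; settable X/Y/Z coordinates are assumed here:

 var joints = new List<PointSkeleton3D>
 {
     new PointSkeleton3D { X = 0.1f, Y = 0.4f, Z = 1.8f },
     new PointSkeleton3D { X = -0.2f, Y = 0.1f, Z = 1.9f }
 };
 List<Point2D> projected = converter.MapSkeletonPointsToColorPoints(
     joints, ColorImageFormat.RgbResolution640x480Fps30);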
Example #16
        public Kinect(Game game, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
            : base(game)
        {
            this.colorImageFormat = colorFormat;
            this.depthImageFormat = depthFormat;

            KinectSensor.KinectSensors.StatusChanged += this.KinectSensors_StatusChanged;
            this.DiscoverSensor();
        }
Example #17
 public static int GetHeight(ColorImageFormat format)
 {
     switch (format)
     {
         case ColorImageFormat.RgbResolution1280x960Fps12:
             return 960;
         case ColorImageFormat.RawYuvResolution640x480Fps15:
         case ColorImageFormat.RgbResolution640x480Fps30:
         case ColorImageFormat.YuvResolution640x480Fps15:
             return 480;
         default:
             return 480; // every other Kinect v1 color format is 480 pixels tall
     }
 }
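The matching width lookup is not shown; a hypothetical companion following the same pattern:

 public static int GetWidth(ColorImageFormat format)
 {
     switch (format)
     {
         case ColorImageFormat.RgbResolution1280x960Fps12:
             return 1280;
         default:
             return 640; // every other Kinect v1 color format is 640 pixels wide
     }
 }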
Example #18

        public MainWindow()
        {
            InitializeComponent();

            CurrentColorImageFormat = ColorImageFormat.Undefined;
            Bgr32BytesPerPixel = (PixelFormats.Bgr32.BitsPerPixel + 7) / 8;

            // Create the sensor chooser before the binding that uses it as its Source.
            sensorChooser = new KinectSensorChooser();
            faceTrackingViewerBinding = new Binding("Kinect") { Source = sensorChooser };
            sensorChooser.KinectChanged += SensorChooserOnKinectChanged;
            sensorChooser.Start();

            faceTrackingViewer.SetBinding(FaceTrackingViewer.KinectProperty, faceTrackingViewerBinding);
            GetDirectory();
        }
Example #19
        protected override void OnKinectChanged(KinectSensor oldKinectSensor, KinectSensor newKinectSensor)
        {
            if (oldKinectSensor != null) {
                oldKinectSensor.ColorFrameReady -= ColorImageReady;
                kinectColorImage.Source = null;
                lastImageFormat = ColorImageFormat.Undefined;
            }

            if (newKinectSensor != null && newKinectSensor.Status == KinectStatus.Connected) {
                ResetFrameRateCounters();

                if (newKinectSensor.ColorStream.Format == ColorImageFormat.RawYuvResolution640x480Fps15) {
                    throw new NotImplementedException("RawYuv conversion is not yet implemented.");
                }
                else {
                    newKinectSensor.ColorFrameReady += ColorImageReady;
                }
            }
        }
Example #20
        /// <summary>
        /// Initializes a new instance of the KinectChooser class.
        /// </summary>
        /// <param name="game">The related game object.</param>
        /// <param name="colorFormat">The desired color image format.</param>
        /// <param name="depthFormat">The desired depth image format.</param>
        public KinectChooser(Game game, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
            : base(game)
        {
            this.colorImageFormat = colorFormat;
            this.depthImageFormat = depthFormat;

            KinectSensor.KinectSensors.StatusChanged += this.KinectSensors_StatusChanged;
            this.DiscoverSensor();

            this.statusMap.Add(KinectStatus.Connected, string.Empty);
            this.statusMap.Add(KinectStatus.DeviceNotGenuine, "Device Not Genuine");
            this.statusMap.Add(KinectStatus.DeviceNotSupported, "Device Not Supported");
            this.statusMap.Add(KinectStatus.Disconnected, "Required");
            this.statusMap.Add(KinectStatus.Error, "Error");
            this.statusMap.Add(KinectStatus.Initializing, "Initializing...");
            this.statusMap.Add(KinectStatus.InsufficientBandwidth, "Insufficient Bandwidth");
            this.statusMap.Add(KinectStatus.NotPowered, "Not Powered");
            this.statusMap.Add(KinectStatus.NotReady, "Not Ready");
        }
Example #21
 public KinectDevice()
 {
     // Kinect setup
     this.kinect = KinectSensor.GetDefault();
     // Configuration and event handlers
     // Color image
     #region
     this.colorImageFormat = ColorImageFormat.Bgra;
     this.colorFrameDescription = this.kinect.ColorFrameSource.CreateFrameDescription(this.colorImageFormat);
     this.colorFrameReader = this.kinect.ColorFrameSource.OpenReader();
     this.colorFrameReader.FrameArrived += ColorFrame_Arrived;
     this.colors = new byte[this.colorFrameDescription.Width
                                    * this.colorFrameDescription.Height
                                    * this.colorFrameDescription.BytesPerPixel];
     #endregion
     // Skeleton (body) data
     #region
     this.bodyFrameReader = this.kinect.BodyFrameSource.OpenReader();
     this.bodyFrameReader.FrameArrived += BodyFrame_Arrived;
     #endregion
     // Depth data
     #region
     this.depthFrameReader = this.kinect.DepthFrameSource.OpenReader();
     this.depthFrameReader.FrameArrived += DepthFrame_Arrived;
     this.depthFrameDescription = this.kinect.DepthFrameSource.FrameDescription;
     this.depthBuffer = new ushort[this.depthFrameDescription.LengthInPixels];
     #endregion
     // Body index
     #region
     this.bodyIndexFrameDes = this.kinect.BodyIndexFrameSource.FrameDescription;
     this.bodyIndexFrameReader = this.kinect.BodyIndexFrameSource.OpenReader();
     this.bodyIndexFrameReader.FrameArrived += this.BodyIndexFrame_Arrived;
     this.bodyIndexBuffer = new byte[this.bodyIndexFrameDes.Width *
                                         this.bodyIndexFrameDes.Height * this.bodyIndexFrameDes.BytesPerPixel];
     #endregion
     // Start the Kinect
     this.package = new ShadowPackage();
     this.imageWidth = this.bodyIndexFrameDes.Width;
     this.imageHeight = this.bodyIndexFrameDes.Height;
     this.imageBytePerPixel = (int)this.bodyIndexFrameDes.BytesPerPixel;
     this.kinectImage = new Mat(this.imageHeight, this.imageWidth, MatType.CV_8UC1);
     this.kinect.Open();
 }
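The frame handlers wired up above are not shown. A minimal sketch of ColorFrame_Arrived that fills the pre-allocated colors buffer (handler and field names match the constructor; the body is an assumption):

 private void ColorFrame_Arrived(object sender, ColorFrameArrivedEventArgs e)
 {
     using (ColorFrame frame = e.FrameReference.AcquireFrame())
     {
         if (frame == null)
         {
             return;
         }
         // Convert straight into the buffer sized in the constructor.
         frame.CopyConvertedFrameDataToArray(this.colors, this.colorImageFormat);
     }
 }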
Example #22
        private bool video_is_exist; // whether a frame could be acquired

        #endregion Fields

        #region Constructors

        public VideoFrameClass(KinectSensor sensor, ColorImageFormat imgFormat)
        {
            this.kinectSensor = sensor;

            switch (imgFormat)
            {
                case ColorImageFormat.RgbResolution640x480Fps30:
                    this.FrameWidth = 640;
                    this.FrameHeight = 480;
                    break;
                case ColorImageFormat.RgbResolution1280x960Fps12:
                    this.FrameWidth = 1280;
                    this.FrameHeight = 960;
                    break;
                default:
                    throw new FormatException();
            }

            this.AllocateMemory();
        }
Example #23
        /// <summary>
        /// Get the color image size from the color image format.
        /// </summary>
        /// <param name="imageFormat">The color image format.</param>
        /// <returns>The width and height of the color image format.</returns>
        public static Size GetColorSize(ColorImageFormat imageFormat)
        {
            switch (imageFormat)
            {
                case ColorImageFormat.InfraredResolution640x480Fps30:
                case ColorImageFormat.RawBayerResolution640x480Fps30:
                case ColorImageFormat.RawYuvResolution640x480Fps15:
                case ColorImageFormat.RgbResolution640x480Fps30:
                case ColorImageFormat.YuvResolution640x480Fps15:
                    return new Size(640, 480);

                case ColorImageFormat.RawBayerResolution1280x960Fps12:
                case ColorImageFormat.RgbResolution1280x960Fps12:
                    return new Size(1280, 960);

                case ColorImageFormat.Undefined:
                    return new Size(0, 0);
            }

            throw new ArgumentOutOfRangeException("imageFormat");
        }
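A short usage sketch: sizing a 32-bit BGRA pixel buffer from the stream's current format (assumes a Kinect v1 sensor whose color stream is already enabled):

        Size size = GetColorSize(sensor.ColorStream.Format);
        var pixels = new byte[(int)size.Width * (int)size.Height * 4]; // 4 bytes per BGRA pixel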
Example #24

        /// <summary>
        /// Selects the color data from the color stream.
        /// </summary>
        /// <param name="source">The source observable.</param>
        /// <returns>An observable sequence of colorData.</returns>
        public static IObservable<byte[]> SelectColorData(this IObservable<ColorFrameArrivedEventArgs> source, byte[] frameData, ColorImageFormat colorImageFormat = ColorImageFormat.Bgra)
        {
            if (source == null) throw new ArgumentNullException("source");

            return source.Select(_ =>
            {
                using (var frame = _.FrameReference.AcquireFrame())
                {
                    if (frame == null) return frameData;

                    if (frame.RawColorImageFormat == ColorImageFormat.Bgra)
                    {
                        frame.CopyRawFrameDataToArray(frameData);
                    }
                    else
                    {
                        frame.CopyConvertedFrameDataToArray(frameData, colorImageFormat);
                    }

                    return frameData;
                }
            });
        }
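A hedged sketch of wiring this extension to a Kinect v2 color reader through Rx; the FromEventPattern bridge is an assumption about how the surrounding library is consumed:

        var sensor = KinectSensor.GetDefault();
        var desc = sensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);
        var frameData = new byte[desc.LengthInPixels * desc.BytesPerPixel];
        var reader = sensor.ColorFrameSource.OpenReader();

        Observable.FromEventPattern<ColorFrameArrivedEventArgs>(reader, "FrameArrived")
                  .Select(ep => ep.EventArgs)
                  .SelectColorData(frameData)
                  .Subscribe(data => { /* consume the BGRA bytes */ });

        sensor.Open();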
Example #25

        public ColorImageProcesser(MainWindow window, KinectSensor kinectDevice,
            int depthDataLength, int colorDataLength,
            int dWidth, int dHeight,
            int cWidth, int cHeight,
            DepthImageFormat dImageFormat, ColorImageFormat cImageFormat)
        {
            _windowUI = window;
            _kinectDevice = kinectDevice;

            depthPixelData = new short[depthDataLength];
            colorPixelData = new byte[colorDataLength];

            depthFrameWidth = dWidth;
            depthFrameHeight = dHeight;

            colorFrameWidth = cWidth;
            colorFrameHeight = cHeight;

            depthFrameStride = depthFrameWidth * BytesPerPixel;
            colorFrameStride = colorFrameWidth * BytesPerPixel;

            depthImageFormat = dImageFormat;
            colorImageFormat = cImageFormat;
        }
Example #26
        private KinectHelper(TransformSmoothParameters tsp, bool near, ColorImageFormat colorFormat, DepthImageFormat depthFormat)
        {
            _kinectSensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);

            if (_kinectSensor == null)
            {
                throw new Exception("No Kinect sensor found.");
            }
            if (near)
            {
                _kinectSensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                _kinectSensor.DepthStream.Range = DepthRange.Near;
                _kinectSensor.SkeletonStream.EnableTrackingInNearRange = true;
            }
            _kinectSensor.SkeletonStream.Enable(tsp);
            _kinectSensor.ColorStream.Enable(colorFormat);
            _kinectSensor.DepthStream.Enable(depthFormat);
            _kinectSensor.AllFramesReady += AllFramesReady;

            _kinectSensor.Start();

            DepthImageFormat = depthFormat;
            ColorImageFormat = colorFormat;
        }
Example #27
        /// <summary>
        /// Initializes a new instance of the MainWindow class.
        /// </summary>
        public MainWindow()
        {
            // one sensor is currently supported
            this.kinectSensor = KinectSensor.GetDefault();

            // get the coordinate mapper
            this.coordinateMapper = this.kinectSensor.CoordinateMapper;

            // get the depth (display) extents
            FrameDescription frameDescription = this.kinectSensor.DepthFrameSource.FrameDescription;

            // get size of joint space
            this.displayWidth  = frameDescription.Width;
            this.displayHeight = frameDescription.Height;

            // open the reader for the body frames
            //this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();
            this.MSreader = this.kinectSensor.OpenMultiSourceFrameReader(FrameSourceTypes.Body | FrameSourceTypes.Color);

            var  fd        = kinectSensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);
            uint frameSize = fd.BytesPerPixel * fd.LengthInPixels;

            colorData   = new byte[frameSize];
            format      = ColorImageFormat.Bgra;
            imageSerial = 0;

            // this.MSreader = this.kinectSensor.OpenMultiSourceFrameReader(FrameSourceTypes.Body | FrameSourceTypes.Color | FrameSourceTypes.Infrared);
            // a bone defined as a line between two joints
            // this.reader = this.kinectSensor.ColorFrameSource.OpenReader();

            this.bones = new List <Tuple <JointType, JointType> >();

            // Torso
            this.bones.Add(new Tuple <JointType, JointType>(JointType.Head, JointType.Neck));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.Neck, JointType.SpineShoulder));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.SpineShoulder, JointType.SpineMid));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.SpineMid, JointType.SpineBase));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.SpineShoulder, JointType.ShoulderRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.SpineShoulder, JointType.ShoulderLeft));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.SpineBase, JointType.HipRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.SpineBase, JointType.HipLeft));

            // Right Arm
            this.bones.Add(new Tuple <JointType, JointType>(JointType.ShoulderRight, JointType.ElbowRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.ElbowRight, JointType.WristRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.WristRight, JointType.HandRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.HandRight, JointType.HandTipRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.WristRight, JointType.ThumbRight));

            // Left Arm
            this.bones.Add(new Tuple <JointType, JointType>(JointType.ShoulderLeft, JointType.ElbowLeft));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.ElbowLeft, JointType.WristLeft));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.WristLeft, JointType.HandLeft));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.HandLeft, JointType.HandTipLeft));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.WristLeft, JointType.ThumbLeft));

            // Right Leg
            this.bones.Add(new Tuple <JointType, JointType>(JointType.HipRight, JointType.KneeRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.KneeRight, JointType.AnkleRight));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.AnkleRight, JointType.FootRight));

            // Left Leg
            this.bones.Add(new Tuple <JointType, JointType>(JointType.HipLeft, JointType.KneeLeft));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.KneeLeft, JointType.AnkleLeft));
            this.bones.Add(new Tuple <JointType, JointType>(JointType.AnkleLeft, JointType.FootLeft));

            // populate body colors, one for each BodyIndex
            this.bodyColors = new List <Pen>();

            this.bodyColors.Add(new Pen(Brushes.Red, 6));
            this.bodyColors.Add(new Pen(Brushes.Orange, 6));
            this.bodyColors.Add(new Pen(Brushes.Green, 6));
            this.bodyColors.Add(new Pen(Brushes.Blue, 6));
            this.bodyColors.Add(new Pen(Brushes.Indigo, 6));
            this.bodyColors.Add(new Pen(Brushes.Violet, 6));

            // set IsAvailableChanged event notifier
            this.kinectSensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;

            // open the sensor
            this.kinectSensor.Open();

            // set the status text
            this.StatusText = this.kinectSensor.IsAvailable ? Properties.Resources.RunningStatusText
                                                            : Properties.Resources.NoSensorStatusText;

            // Create the drawing group we'll use for drawing
            this.drawingGroup = new DrawingGroup();

            // Create an image source that we can use in our image control
            this.imageSource = new DrawingImage(this.drawingGroup);

            // use the window object as the view model in this simple example
            this.DataContext = this;

            //StartButton.Click += StartClicked;

            // initialize the components (controls) of the window
            this.InitializeComponent();
        }
Example #28
        /// <summary>
        /// Starts face tracking from Kinect input data. Track() detects a face
        /// based on the passed parameters, then identifies characteristic
        /// points and begins tracking. The first call to this API is more
        /// expensive, but if the tracking succeeds then subsequent calls reuse
        /// the tracking information generated by the first call and are faster,
        /// until a tracking failure happens.
        /// </summary>
        /// <param name="colorImageFormat">Format of the colorImage array</param>
        /// <param name="colorImage">Input color image frame retrieved from the Kinect sensor</param>
        /// <param name="depthImageFormat">Format of the depthImage array</param>
        /// <param name="depthImage">Input depth image frame retrieved from the Kinect sensor</param>
        /// <param name="skeletonOfInterest">Input skeleton to track. Head and shoulder joints in the skeleton are used to calculate the head vector</param>
        /// <param name="regionOfInterest">Region of interest in the passed video frame where the face tracker should search for a face to initiate tracking.
        /// Passing Rectangle.Empty (default) causes the entire frame to be searched.</param>
        /// <returns>Returns computed face tracking results for this image frame</returns>
        private FaceTrackFrame Track(
            ColorImageFormat colorImageFormat,
            byte[] colorImage,
            DepthImageFormat depthImageFormat,
            short[] depthImage,
            Skeleton skeletonOfInterest,
            Rect regionOfInterest)
        {
            this.totalTracks++;
            this.trackStopwatch.Start();

            if (this.operationMode != OperationMode.Kinect)
            {
                throw new InvalidOperationException(
                          "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
            }

            if (colorImage == null)
            {
                throw new ArgumentNullException("colorImage");
            }

            if (depthImage == null)
            {
                throw new ArgumentNullException("depthImage");
            }

            if (colorImageFormat != this.initializationColorImageFormat)
            {
                throw new InvalidOperationException("Color image frame format different from initialization");
            }

            if (depthImageFormat != this.initializationDepthImageFormat)
            {
                throw new InvalidOperationException("Depth image frame format different from initialization");
            }

            if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("colorImage", "Color image data size is needs to match initialization configuration.");
            }

            if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("depthImage", "Depth image data size is needs to match initialization configuration.");
            }

            int        hr;
            HeadPoints headPointsObj = null;

            Vector3DF[] headPoints = GetHeadPointsFromSkeleton(skeletonOfInterest);

            if (headPoints != null && headPoints.Length == 2)
            {
                headPointsObj = new HeadPoints {
                    Points = headPoints
                };
            }

            this.copyStopwatch.Start();
            this.colorFaceTrackingImage.CopyFrom(colorImage);
            this.depthFaceTrackingImage.CopyFrom(depthImage);
            this.copyStopwatch.Stop();

            var sensorData = new SensorData(this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
            FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

            this.startOrContinueTrackingStopwatch.Start();
            if (this.trackSucceeded)
            {
                hr = this.faceTrackerInteropPtr.ContinueTracking(ref faceTrackSensorData, headPointsObj, this.frame.ResultPtr);
            }
            else
            {
                hr = this.faceTrackerInteropPtr.StartTracking(
                    ref faceTrackSensorData, ref regionOfInterest, headPointsObj, this.frame.ResultPtr);
            }

            this.startOrContinueTrackingStopwatch.Stop();

            this.trackSucceeded = hr == (int)ErrorCode.Success && this.frame.Status == ErrorCode.Success;
            this.trackStopwatch.Stop();

            if (this.trackSucceeded)
            {
                ++this.totalSuccessTracks;
                this.totalSuccessTrackMs      += this.trackStopwatch.ElapsedMilliseconds - this.lastSuccessTrackElapsedMs;
                this.lastSuccessTrackElapsedMs = this.trackStopwatch.ElapsedMilliseconds;
            }

            return(this.frame);
        }
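In the Microsoft.Kinect.Toolkit.FaceTracking source that this private overload mirrors, the public entry point simply forwards with an empty region of interest. A sketch of that wrapper (signature assumed):

        public FaceTrackFrame Track(
            ColorImageFormat colorImageFormat,
            byte[] colorImage,
            DepthImageFormat depthImageFormat,
            short[] depthImage,
            Skeleton skeletonOfInterest)
        {
            return this.Track(
                colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest, Rect.Empty);
        }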
Example #29
 private static extern IntPtr Windows_Kinect_ColorFrameSource_CreateFrameDescription(IntPtr pNative, ColorImageFormat format);
Example #30

        /// <summary>
        /// Map a PointSkeleton3D list to a Point2D list
        /// </summary>
        /// <param name="pointSkeleton3D">3D skeleton-space points to map</param>
        /// <param name="colorImageFormat">Color image format used for the mapping</param>
        /// <returns>The corresponding 2D points in color-image space</returns>
        public List <Point2D> MapSkeletonPointsToColorPoints(List <PointSkeleton3D> pointSkeleton3D, ColorImageFormat colorImageFormat)
        {
            List <Point2D> ret = new List <Point2D>();

            foreach (var element in pointSkeleton3D)
            {
                ret.Add(MapSkeletonPointToColorPoint(element, colorImageFormat));
            }
            return(ret);
        }
Example #31
        void nui_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            ColorImageFrame cf = e.OpenColorImageFrame();
            DepthImageFrame df = e.OpenDepthImageFrame();
            SkeletonFrame   sf = e.OpenSkeletonFrame();

            // The frames are IDisposable; release whatever was opened before bailing out.
            if (cf == null || df == null || sf == null)
            {
                if (cf != null) cf.Dispose();
                if (df != null) df.Dispose();
                if (sf != null) sf.Dispose();
                return;
            }

            byte[] ImageBits = new byte[cf.PixelDataLength];
            cf.CopyPixelDataTo(ImageBits);

            BitmapSource src = null;

            src = BitmapSource.Create(cf.Width, cf.Height,
                                      96, 96, PixelFormats.Bgr32, null,
                                      ImageBits,
                                      cf.Width * cf.BytesPerPixel);
            image2.Source = src;



            // Check for image format changes.  The FaceTracker doesn't
            // deal with that so we need to reset.
            if (this.depthImageFormat != df.Format)
            {
                this.ResetFaceTracking();
                this.depthImage       = null;
                this.depthImageFormat = df.Format;
            }

            if (this.colorImageFormat != cf.Format)
            {
                this.ResetFaceTracking();
                this.colorImage       = null;
                this.colorImageFormat = cf.Format;
            }

            // Create any buffers to store copies of the data we work with
            if (this.depthImage == null)
            {
                this.depthImage = new short[df.PixelDataLength];
            }

            if (this.colorImage == null)
            {
                this.colorImage = new byte[cf.PixelDataLength];
            }

            // Get the skeleton information
            if (this.skeletonData == null || this.skeletonData.Length != sf.SkeletonArrayLength)
            {
                this.skeletonData = new Skeleton[sf.SkeletonArrayLength];
            }

            cf.CopyPixelDataTo(this.colorImage);
            df.CopyPixelDataTo(this.depthImage);
            sf.CopySkeletonDataTo(this.skeletonData);

            foreach (Skeleton skeleton in this.skeletonData)
            {
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of every skeleton, tracked or untracked.
                    if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        skeletonFaceTracker.OnFrameReady(nui2, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = sf.FrameNumber;
                    }
                }
            }



            using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
            {
                if (depthImageFrame != null)
                {
                    foreach (Skeleton sd in skeletonData)
                    {
                        if (sd.TrackingState == SkeletonTrackingState.Tracked)
                        {
                            Joint joint = sd.Joints[JointType.Head];

                            DepthImagePoint depthPoint;

                            //                            CoordinateMapper coordinateMapper = new CoordinateMapper(nui);
                            //                            depthPoint = coordinateMapper.MapSkeletonPointToDepthPoint(joint.Position, DepthImageFormat.Resolution320x240Fps30);

                            depthPoint = depthImageFrame.MapFromSkeletonPoint(joint.Position);

                            System.Windows.Point point = new System.Windows.Point((int)(image2.ActualWidth * depthPoint.X
                                                                                        / depthImageFrame.Width),
                                                                                  (int)(image2.ActualHeight * depthPoint.Y
                                                                                        / depthImageFrame.Height));


                            Canvas.SetLeft(ellipse1, (point.X) - ellipse1.Width);
                            Canvas.SetTop(ellipse1, (point.Y) - ellipse1.Height);

                            App thisApp = App.Current as App;

                            Canvas.SetLeft(rect2, thisApp.m_dbX - rect2.Width);
                            Canvas.SetTop(rect2, thisApp.m_dbY - rect2.Height);

                            double GapX, GapY;
                            GapX = point.X - (thisApp.m_dbX - 2);
                            GapY = point.Y - (thisApp.m_dbY - 2);

                            int siteX = 999, siteY = 999;

                            if (GapX < 30 && GapX > -30)
                            {
                                siteX = 1;
                            }
                            else if (GapX >= 30)
                            {
                                siteX = 0;
                            }
                            else if (GapX <= -30)
                            {
                                siteX = 2;
                            }

                            if (GapY >= -40)
                            {
                                siteY = 0;
                            }
                            else if (GapY < -40 && GapY > -60)
                            {
                                siteY = 1;
                            }
                            else if (GapY <= -60)
                            {
                                siteY = 2;
                            }

                            int site;
                            site = siteX + (siteY * 3);
                            if (site == 0)
                            {
                                text2.Text = "Upper-left";
                            }
                            else if (site == 1)
                            {
                                text2.Text = "Up";
                            }
                            else if (site == 2)
                            {
                                text2.Text = "Upper-right";
                            }
                            else if (site == 3)
                            {
                                text2.Text = "Left";
                            }
                            else if (site == 4)
                            {
                                text2.Text = "Center";
                            }
                            else if (site == 5)
                            {
                                text2.Text = "Right";
                            }
                            else if (site == 6)
                            {
                                text2.Text = "Lower-left";
                            }
                            else if (site == 7)
                            {
                                text2.Text = "Down";
                            }
                            else if (site == 8)
                            {
                                text2.Text = "Lower-right";
                            }

                            thisApp.nowsite = site;

                            /*
                             *
                             * rect4.X = facePoints[i].X - 2;
                             * rect4.Y = facePoints[i].Y - 2;
                             * rect4.Width = 4;
                             * rect4.Height = 4;
                             */
                        }
                    }
                }
            }

            cf.Dispose();
            df.Dispose();
            sf.Dispose();
        }
Example #32
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            public void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                //No Touchy
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;
                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    return;
                }
                if (faceTracker == null)
                {
                    faceTracker = new FaceTracker(kinectSensor);
                }
                frame = this.faceTracker.Track(
                    colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);
                this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                if (this.lastFaceTrackSucceeded)
                {
                    if (faceTriangles == null)
                    {
                        faceTriangles = frame.GetTriangles();
                    }
                    this.facePoints = frame.GetProjected3DShape();



                    //Touchy

                    //Assign Reference points
                    this.absfacePoints = frame.Get3DShape();
                    leftForehead       = this.absfacePoints[FeaturePoint.TopLeftForehead];
                    rightForehead      = this.absfacePoints[FeaturePoint.TopRightForehead];
                    jaw           = this.absfacePoints[FeaturePoint.BottomOfChin];
                    faceRotationX = frame.Rotation.X;
                    faceRotationY = frame.Rotation.Y;
                    faceRotationZ = frame.Rotation.Z;

                    //Calculate Reference Points
                    foreheadReferencePointX = ((rightForehead.X - leftForehead.X) / 2);
                    foreheadReferencePointY = ((rightForehead.Y - leftForehead.Y) / 2);
                    foreheadReferencePointZ = ((rightForehead.Z - leftForehead.Z) / 2);

                    //Set Animation Units
                    AUCoeff        = frame.GetAnimationUnitCoefficients();
                    jawLowererAU   = AUCoeff[AnimationUnit.JawLower];
                    lipStretcherAU = AUCoeff[AnimationUnit.LipStretcher];
                    browRaiserAU   = AUCoeff[AnimationUnit.BrowRaiser];
                    setJawData(jaw.Y, leftForehead.Y, rightForehead.Y, jawLowererAU, lipStretcherAU);

                    rotations = new float[5];
                    //set up matlab
                    matlab = new MLApp.MLApp();
                    matlab.Execute(@"cd C:\Users\Bala\Documents\MATLAB");
                    result = null;

                    //get rotation values
                    rotations[0] = faceRotationX;
                    rotations[1] = faceRotationY;
                    rotations[2] = faceRotationZ;
                    rotations[3] = jawLowererAU;
                    rotations[4] = lipStretcherAU;
                    //Set up GlovePie
                    OscPacket.LittleEndianByteOrder = false;
                    IPEndPoint myapp    = new IPEndPoint(IPAddress.Loopback, 1944);
                    IPEndPoint glovepie = new IPEndPoint(IPAddress.Loopback, 1945);
                    Console.WriteLine(browRaiserAU);

                    matlab.Feval("nnW", 1, out result, rotations[0]);
                    object[] resW = result as object[];
                    nnoutput = (int)((float)resW[0] + 0.5f);
                    if (nnoutput == 1)
                    {
                        commandtoSend = 1;
                    }
                    else
                    {
                        result = null;
                        matlab.Feval("nnA", 1, out result, rotations[1]);
                        object[] resA = result as object[];
                        nnoutput = (int)((float)resA[0] + 0.5f);
                        if (nnoutput == 1)
                        {
                            commandtoSend = 2;
                        }
                        else
                        {
                            result = null;
                            matlab.Feval("nnS", 1, out result, rotations[0]);
                            object[] resS = result as object[];
                            nnoutput = (int)((float)resS[0] + 0.5f);
                            if (nnoutput == 1)
                            {
                                commandtoSend = 3;
                            }
                            else
                            {
                                result = null;
                                matlab.Feval("nnd", 1, out result, rotations[1]);
                                object[] resD = result as object[];
                                nnoutput = (int)((float)resD[0] + 0.5f);
                                if (nnoutput == 1)
                                {
                                    commandtoSend = 4;
                                }
                                else
                                {
                                    result = null;
                                    matlab.Feval("nnLC", 1, out result, rotations[2]);
                                    object[] resLC = result as object[];
                                    nnoutput = (int)((float)resLC[0] + 0.5f);
                                    if (nnoutput == 1)
                                    {
                                        commandtoSend = 5;
                                    }
                                    else
                                    {
                                        result = null;
                                        matlab.Feval("nnRC", 1, out result, rotations[2]);
                                        object[] resRC = result as object[];
                                        nnoutput = (int)((float)resRC[0] + 0.5f);
                                        if (nnoutput == 1)
                                        {
                                            commandtoSend = 6;
                                        }
                                        else
                                        {
                                            result = null;
                                            if (jawLowererAU > 0.7)
                                            {
                                                commandtoSend = 7;
                                            }

                                            /*
                                             * matlab.Feval("nnSpace", 1, out result, rotations[3]);
                                             * object[] resSpace = result as object[];
                                             * nnoutput = (int)((float)resSpace[0] + 0.5f);
                                             * if (nnoutput == 1)
                                             * {
                                             *  commandtoSend = 7;
                                             * }*/
                                            else
                                            {
                                                result = null;
                                                if (browRaiserAU > 0.4)
                                                {
                                                    commandtoSend = 8;
                                                }
                                                else
                                                {
                                                    result        = null;
                                                    commandtoSend = 0;
                                                }

                                                /*result = null;
                                                 * matlab.Feval("nnMiddle", 1, out result, lipStretcherAU);
                                                 * object[] resMiddle = result as object[];
                                                 * nnoutput = (int)((float)resMiddle[0] + 0.5f);
                                                 * if (nnoutput == 1)
                                                 * {
                                                 *  commandtoSend = 8;
                                                 * }
                                                 * else
                                                 * {
                                                 *  result = null;
                                                 *  commandtoSend = 0;
                                                 * }*/
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                    //Console.WriteLine("Iteration Complete");
                    switch (commandtoSend)
                    {
                    case 0:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        // Presumably "/move/space" should be released here as well,
                        // matching every other case in this switch.
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 1:
                        msg = new OscMessage(myapp, "/move/w", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 2:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 3:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 4:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 5:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 6:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 7:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 10.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 0.0f);
                        msg.Send(glovepie);
                        break;

                    case 8:
                        msg = new OscMessage(myapp, "/move/w", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/a", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/s", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/d", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/lc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/rc", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/space", 0.0f);
                        msg.Send(glovepie);
                        msg = new OscMessage(myapp, "/move/middle", 10.0f);
                        msg.Send(glovepie);
                        break;
                    }
                }
            }
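The eight cases above differ only in which OSC address receives 10.0f, so the whole switch can be collapsed into a small data-driven helper. A minimal sketch, assuming the same OscMessage type and the myapp/glovepie fields used in the snippet (the helper name is hypothetical):

        // Hypothetical replacement for the switch: command N presses the N-th
        // address (1-based) and releases all others; command 0 releases everything.
        private static readonly string[] MoveAddresses =
        {
            "/move/w", "/move/a", "/move/s", "/move/d",
            "/move/lc", "/move/rc", "/move/space", "/move/middle"
        };

        private void SendMoveCommand(int commandToSend)
        {
            for (int i = 0; i < MoveAddresses.Length; i++)
            {
                float value = (commandToSend == i + 1) ? 10.0f : 0.0f;
                OscMessage msg = new OscMessage(myapp, MoveAddresses[i], value);
                msg.Send(glovepie);
            }
        }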
        /// <summary>
        /// Selects the color data from the color stream.
        /// </summary>
        /// <param name="source">The source observable.</param>
        /// <param name="frameData">The buffer that receives each frame's pixel data; it is reused across frames.</param>
        /// <param name="colorImageFormat">The target format used when the raw frame is not already BGRA.</param>
        /// <returns>An observable sequence of colorData.</returns>
        public static IObservable <byte[]> SelectColorData(this IObservable <ColorFrameArrivedEventArgs> source, byte[] frameData, ColorImageFormat colorImageFormat = ColorImageFormat.Bgra)
        {
            if (source == null)
            {
                throw new ArgumentNullException("source");
            }

            return(source.Select(_ =>
            {
                using (var frame = _.FrameReference.AcquireFrame())
                {
                    if (frame == null)
                    {
                        return frameData;
                    }

                    if (frame.RawColorImageFormat == ColorImageFormat.Bgra)
                    {
                        frame.CopyRawFrameDataToArray(frameData);
                    }
                    else
                    {
                        frame.CopyConvertedFrameDataToArray(frameData, colorImageFormat);
                    }

                    return frameData;
                }
            }));
        }
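A possible call site for this extension, assuming Rx.NET (System.Reactive) and a Kinect v2 ColorFrameReader whose FrameArrived event is bridged with Observable.FromEventPattern; the sensor and reader variables are placeholders:

        // Hypothetical wiring: one BGRA buffer sized from the sensor's
        // frame description, reused for every frame in the sequence.
        FrameDescription description = sensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);
        byte[] buffer = new byte[description.Width * description.Height * (int)description.BytesPerPixel];

        IObservable <byte[]> colorData = Observable
            .FromEventPattern <ColorFrameArrivedEventArgs>(reader, "FrameArrived")
            .Select(evt => evt.EventArgs)
            .SelectColorData(buffer, ColorImageFormat.Bgra);

        colorData.Subscribe(pixels => { /* render or process the BGRA pixels */ });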
        private void ColorImageReady(object sender, ColorImageFrameReadyEventArgs e)
        {
            using (ColorImageFrame imageFrame = e.OpenColorImageFrame())
            {
                if (imageFrame != null)
                {
                    // We need to detect if the format has changed.
                    bool haveNewFormat = this.lastImageFormat != imageFrame.Format;
                    bool convertToRgb  = false;
                    int  bytesPerPixel = imageFrame.BytesPerPixel;

                    if (imageFrame.Format == ColorImageFormat.RawBayerResolution640x480Fps30 ||
                        imageFrame.Format == ColorImageFormat.RawBayerResolution1280x960Fps12)
                    {
                        convertToRgb  = true;
                        bytesPerPixel = 4;
                    }

                    if (haveNewFormat)
                    {
                        if (convertToRgb)
                        {
                            this.rawPixelData = new byte[imageFrame.PixelDataLength];
                            this.pixelData    = new byte[bytesPerPixel * imageFrame.Width * imageFrame.Height];
                        }
                        else
                        {
                            this.pixelData = new byte[imageFrame.PixelDataLength];
                        }
                    }

                    if (convertToRgb)
                    {
                        imageFrame.CopyPixelDataTo(this.rawPixelData);
                        ConvertBayerToRgb32(imageFrame.Width, imageFrame.Height);
                    }
                    else
                    {
                        imageFrame.CopyPixelDataTo(this.pixelData);
                    }

                    // A WriteableBitmap is a WPF construct that enables resetting the Bits of the image.
                    // This is more efficient than creating a new Bitmap every frame.
                    if (haveNewFormat)
                    {
                        PixelFormat format = PixelFormats.Bgr32;
                        if (imageFrame.Format == ColorImageFormat.InfraredResolution640x480Fps30)
                        {
                            format = PixelFormats.Gray16;
                        }

                        kinectColorImage.Visibility = Visibility.Visible;
                        this.outputImage            = new WriteableBitmap(
                            imageFrame.Width,
                            imageFrame.Height,
                            96,  // DpiX
                            96,  // DpiY
                            format,
                            null);

                        this.kinectColorImage.Source = this.outputImage;
                    }

                    this.outputImage.WritePixels(
                        new Int32Rect(0, 0, imageFrame.Width, imageFrame.Height),
                        this.pixelData,
                        imageFrame.Width * bytesPerPixel,
                        0);

                    this.lastImageFormat = imageFrame.Format;

                    UpdateFrameRate();
                }
            }
        }
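The ConvertBayerToRgb32 helper referenced above is not shown. Below is a minimal nearest-neighbour demosaic sketch that fills this.pixelData (Bgr32) from this.rawPixelData, assuming a GRBG Bayer layout (the actual sensor layout may differ); it is an illustration, not the toolkit's exact implementation:

        private void ConvertBayerToRgb32(int width, int height)
        {
            // Process the image as 2x2 Bayer cells: G R on the first row,
            // B G on the second. Width and height are even for both raw formats.
            for (int y = 0; y < height; y += 2)
            {
                for (int x = 0; x < width; x += 2)
                {
                    int row0 = (y * width) + x;
                    int row1 = row0 + width;

                    byte r = this.rawPixelData[row0 + 1];
                    byte b = this.rawPixelData[row1];
                    byte g = (byte)((this.rawPixelData[row0] + this.rawPixelData[row1 + 1]) / 2);

                    // Write the same colour to all four pixels of the cell.
                    int[] cell = { row0, row0 + 1, row1, row1 + 1 };
                    for (int c = 0; c < 4; c++)
                    {
                        int i = cell[c] * 4;
                        this.pixelData[i]     = b; // blue
                        this.pixelData[i + 1] = g; // green
                        this.pixelData[i + 2] = r; // red
                        this.pixelData[i + 3] = 0; // unused in Bgr32
                    }
                }
            }
        }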
        private void DecodeSkeletonData(AllFramesReadyEventArgs e, KinectSensor sensor)
        {
            #region GetImageFormat

            ColorImageFormat colorFormat = ColorImageFormat.Undefined;
            int colorWidth  = 0;
            int colorHeight = 0;

            DepthImageFormat depthFormat = DepthImageFormat.Undefined;
            int depthWidth  = 0;
            int depthHeight = 0;

            switch (this._imageType)
            {
            case ImageType.Color:
                // Retrieve the current color format, from the frame if present, and from the sensor if not.
                using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
                {
                    if (null != colorImageFrame)
                    {
                        colorFormat = colorImageFrame.Format;
                        colorWidth  = colorImageFrame.Width;
                        colorHeight = colorImageFrame.Height;
                    }
                    else if (null != sensor.ColorStream)
                    {
                        colorFormat = sensor.ColorStream.Format;
                        colorWidth  = sensor.ColorStream.FrameWidth;
                        colorHeight = sensor.ColorStream.FrameHeight;
                    }
                }

                break;

            case ImageType.Depth:
                // Retrieve the current depth format, from the frame if present, and from the sensor if not.
                using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
                {
                    if (null != depthImageFrame)
                    {
                        depthFormat = depthImageFrame.Format;
                        depthWidth  = depthImageFrame.Width;
                        depthHeight = depthImageFrame.Height;
                    }
                    else if (null != sensor.DepthStream)
                    {
                        depthFormat = sensor.DepthStream.Format;
                        depthWidth  = sensor.DepthStream.FrameWidth;
                        depthHeight = sensor.DepthStream.FrameHeight;
                    }
                }

                break;
            }

            #endregion

            // Clear the play canvas
            this.playField.Children.Clear();

            // Check every skeleton
            for (int skeletonSlot = 0; skeletonSlot < this._skeletonBuffer.Length; skeletonSlot++)
            {
                var skeleton = this._skeletonBuffer[skeletonSlot];

                #region Skeleton Position

                // Map points between skeleton and color/depth
                var jointMapping = this._jointMappings[skeletonSlot];
                jointMapping.Clear();

                try
                {
                    // Transform the data into the correct space
                    // For each joint, we determine the exact X/Y coordinates for the target view
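                    // Note: this loop always maps into colour space, so it assumes
                    // _imageType is ImageType.Color; in the Depth case above,
                    // colorWidth/colorHeight stay zero and the division below would fail.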
                    foreach (Joint joint in skeleton.Joints)
                    {
                        ColorImagePoint colorPoint = sensor.CoordinateMapper.MapSkeletonPointToColorPoint(joint.Position, colorFormat);

                        Point mappedPoint = new Point(
                            (int)(this._renderSize.Width * colorPoint.X / colorWidth),
                            (int)(this._renderSize.Height * colorPoint.Y / colorHeight));

                        jointMapping[joint.JointType] = new JointMapping
                        {
                            Joint       = joint,
                            MappedPoint = mappedPoint,
                            OriginPoint = colorPoint,
                        };
                    }
                }
                catch (UnauthorizedAccessException)
                {
                    // Kinect is no longer available.
                    return;
                }

                // Look up the center point
                Point centerPoint = PositionCalculator.Get2DPosition(
                    sensor,
                    this._imageType,
                    this._renderSize,
                    skeleton.Position,
                    colorFormat,
                    colorWidth,
                    colorHeight,
                    depthFormat,
                    depthWidth,
                    depthHeight);

                #endregion

                // Scale the skeleton thickness
                // 1.0 is the desired size at 640 width
                this._scaleFactor = this._renderSize.Width / colorWidth;

                // Displays a gradient near the edge of the display
                // where the skeleton is leaving the screen
                this.DrawClippedEdges(skeleton);

                switch (skeleton.TrackingState)
                {
                case SkeletonTrackingState.PositionOnly:
                {
                    // The skeleton is being tracked, but we only know the general position, and
                    // we do not know the specific joint locations.
                    this.DrawBodyCenter(centerPoint);
                }
                break;

                case SkeletonTrackingState.Tracked:
                {
                    // The skeleton is being tracked and the joint data is available for consumption.
                    this.DrawBody(skeleton, jointMapping);

                    // Track player
                    this.TrackPlayer(skeleton, jointMapping);
                }
                break;
                }
            }
        }
Example No. 36
 private static extern RootSystem.IntPtr Windows_Kinect_ColorFrame_CreateFrameDescription(
     RootSystem.IntPtr pNative, ColorImageFormat format);
Example No. 37
 private static extern void Windows_Kinect_ColorFrame_CopyConvertedFrameDataToArray(RootSystem.IntPtr pNative,
                                                                                    RootSystem.IntPtr frameData, int frameDataSize, ColorImageFormat colorFormat);
        private void KinectAllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            KinectSensor sensor = sender as KinectSensor;

            foreach (var skeletonCanvas in this.skeletonCanvases)
            {
                skeletonCanvas.Skeleton = null;
            }

            // Have we already been "shut down" by the user of this viewer,
            // or has the SkeletonStream been disabled since this event was posted?
            if ((null == this.KinectSensorManager) ||
                (null == sensor) ||
                (null == sensor.SkeletonStream) ||
                !sensor.SkeletonStream.IsEnabled)
            {
                return;
            }

            bool haveSkeletonData = false;
            long frameTimeStamp   = -1;

            using (SkeletonFrame skeletonFrame = e.OpenSkeletonFrame())
            {
                if (skeletonFrame != null)
                {
                    if ((this.skeletonData == null) || (this.skeletonData.Length != skeletonFrame.SkeletonArrayLength))
                    {
                        this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                    }

                    skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                    frameTimeStamp = skeletonFrame.Timestamp;

                    haveSkeletonData = true;
                }
            }

            int trackedIndex = -1;

            // find the first tracked skeleton and set trackedIndex accordingly
            // (skeletonData may still be null before the first skeleton frame arrives)
            for (int i = 0; this.skeletonData != null && i < this.skeletonData.Length; i++)
            {
                if (skeletonData[i].TrackingState.Equals(SkeletonTrackingState.Tracked))
                {
                    trackedIndex = i;
                    break;
                }
            }


            bool isFullyTracked = false;

            if (isMeasuring && trackedIndex > -1)
            {
                // check to see if the skeleton @ trackedIndex is fully tracked
                if (fullyTrackedMapping == null && IsFullyTracked(skeletonData[trackedIndex]))
                {
                    isFullyTracked = true;
                }

                SkeletonMeasurer measurer = new SkeletonMeasurer(skeletonData[trackedIndex]);
                measurer.determineMeasurements();
                AddMeasurementsToBuffer(measurer.TestMeasurements);

                skeletonBuffer.Add(ObjectCopier.Clone <Skeleton>(skeletonData[trackedIndex]));
                frameTimeStampBuffer.Add(frameTimeStamp);
            }

            if (haveSkeletonData)
            {
                ColorImageFormat colorFormat = ColorImageFormat.Undefined;
                int colorWidth  = 0;
                int colorHeight = 0;

                DepthImageFormat depthFormat = DepthImageFormat.Undefined;
                int depthWidth  = 0;
                int depthHeight = 0;

                switch (this.ImageType)
                {
                case ImageType.Color:
                    // Retrieve the current color format, from the frame if present, and from the sensor if not.
                    using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
                    {
                        if (null != colorImageFrame)
                        {
                            colorFormat = colorImageFrame.Format;
                            colorWidth  = colorImageFrame.Width;
                            colorHeight = colorImageFrame.Height;
                        }
                        else if (null != sensor.ColorStream)
                        {
                            colorFormat = sensor.ColorStream.Format;
                            colorWidth  = sensor.ColorStream.FrameWidth;
                            colorHeight = sensor.ColorStream.FrameHeight;
                        }
                    }

                    break;

                case ImageType.Depth:
                    // Retrieve the current depth format, from the frame if present, and from the sensor if not.
                    using (DepthImageFrame depthImageFrame = e.OpenDepthImageFrame())
                    {
                        if (null != depthImageFrame)
                        {
                            depthFormat = depthImageFrame.Format;
                            depthWidth  = depthImageFrame.Width;
                            depthHeight = depthImageFrame.Height;
                        }
                        else if (null != sensor.DepthStream)
                        {
                            depthFormat = sensor.DepthStream.Format;
                            depthWidth  = sensor.DepthStream.FrameWidth;
                            depthHeight = sensor.DepthStream.FrameHeight;
                        }
                    }

                    break;
                }

                for (int i = 0; i < this.skeletonData.Length && i < this.skeletonCanvases.Count; i++)
                {
                    var skeleton       = this.skeletonData[i];
                    var skeletonCanvas = this.skeletonCanvases[i];
                    var jointMapping   = this.jointMappings[i];

                    jointMapping.Clear();

                    try
                    {
                        // Transform the data into the correct space
                        // For each joint, we determine the exact X/Y coordinates for the target view
                        foreach (Joint joint in skeleton.Joints)
                        {
                            Point mappedPoint = Get2DPosition(
                                sensor,
                                this.ImageType,
                                this.RenderSize,
                                joint.Position,
                                colorFormat,
                                colorWidth,
                                colorHeight,
                                depthFormat,
                                depthWidth,
                                depthHeight);

                            jointMapping[joint.JointType] = new JointMapping
                            {
                                Joint       = joint,
                                MappedPoint = mappedPoint
                            };
                        }
                    }
                    catch (UnauthorizedAccessException)
                    {
                        // Kinect is no longer available.
                        return;
                    }

                    // Look up the center point
                    Point centerPoint = Get2DPosition(
                        sensor,
                        this.ImageType,
                        this.RenderSize,
                        skeleton.Position,
                        colorFormat,
                        colorWidth,
                        colorHeight,
                        depthFormat,
                        depthWidth,
                        depthHeight);

                    // Scale the skeleton thickness
                    // 1.0 is the desired size at 640 width
                    double scale = this.RenderSize.Width / 640;

                    skeletonCanvas.Skeleton      = skeleton;
                    skeletonCanvas.JointMappings = jointMapping;
                    skeletonCanvas.Center        = centerPoint;
                    skeletonCanvas.ScaleFactor   = scale;
                }

                if (isFullyTracked)
                {
                    fullyTrackedMapping = new Dictionary <JointType, JointMapping>();

                    foreach (JointType type in jointMappings[trackedIndex].Keys)
                    {
                        fullyTrackedMapping[type]             = new JointMapping();
                        fullyTrackedMapping[type].Joint       = jointMappings[trackedIndex][type].Joint;
                        fullyTrackedMapping[type].MappedPoint = jointMappings[trackedIndex][type].MappedPoint;
                    }
                }
            }
        }
Example No. 39
        private void Kinect_AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                        skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        // We want to keep a record of any skeleton, tracked or untracked.
                        if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                        {
                            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                        }

                        // Give each tracker the updated frame.
                        SkeletonFaceTracker skeletonFaceTracker;
                        if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                        {
                            skeletonFaceTracker.OnFrameReady(this.kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                            skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                            if (skeletonFaceTracker.lastFaceTrackSucceeded)
                            {
                                if (skeletonFaceTracker.LastTrackedFrame % 10 == 0)
                                {
                                    Out.Post(true, pipeline.GetCurrentTime());
                                }
                            }
                            else
                            {
                                if (skeletonFaceTracker.LastTrackedFrame % 10 == 0)
                                {
                                    Out.Post(false, pipeline.GetCurrentTime());
                                }
                            }
                        }
                    }
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                //this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
Example No. 40
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest, FaceRecognitionActivityWindow win)
            {
                if (CheckFace(kinectSensor, colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest))
                {
                    count++;
                }
                else
                {
                    count = 0;
                }
                if (count == 1)
                {
                    count        = 0;
                    currentState = (currentState + 1) % 3;
                    // highlight the next exercise


                    if (currentState == 0)
                    {
                        Color           tileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush1   = new SolidColorBrush(tileFill);
                        win.SadTile.Fill = brush1;


                        Color           focusTileFill = Color.FromRgb(96, 96, 96);
                        SolidColorBrush brush2        = new SolidColorBrush(focusTileFill);
                        win.HappyTile.Fill = brush2;

                        Color           secTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush3      = new SolidColorBrush(secTileFill);
                        win.AngryTile.Fill = brush3;

                        // display the large icon
                        win.ActivityImage.Source  = new BitmapImage(new Uri(@"happy_big.png"));
                        win.ActivityLabel.Content = "Happy";
                    }
                    else if (currentState == 1)
                    {
                        Color           tileFill = Color.FromRgb(96, 96, 96);
                        SolidColorBrush brush1   = new SolidColorBrush(tileFill);
                        win.SadTile.Fill = brush1;

                        Color           focusTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush2        = new SolidColorBrush(focusTileFill);
                        win.HappyTile.Fill = brush2;

                        Color           secTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush3      = new SolidColorBrush(secTileFill);
                        win.AngryTile.Fill = brush3;

                        // display the large icon
                        win.ActivityImage.Source  = new BitmapImage(new Uri(@"sad_big.png"));
                        win.ActivityLabel.Content = "Sad";
                    }
                    else if (currentState == 2)
                    {
                        Color           tileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush1   = new SolidColorBrush(tileFill);
                        win.SadTile.Fill = brush1;

                        Color           focusTileFill = Color.FromRgb(76, 76, 76);
                        SolidColorBrush brush2        = new SolidColorBrush(focusTileFill);
                        win.HappyTile.Fill = brush2;

                        Color           secTileFill = Color.FromRgb(96, 96, 96);
                        SolidColorBrush brush3      = new SolidColorBrush(secTileFill);
                        win.AngryTile.Fill = brush3;

                        // display the large icon
                        win.ActivityImage.Source  = new BitmapImage(new Uri(@"angry_big.png"));
                        win.ActivityLabel.Content = "Angry";
                    }


                    this.speech.SpeakAsync("Moving to next level");
                    // Notify to change face
                    Trace.WriteLine("Change state to: " + states[currentState]);
                }
            }
Example No. 41
 private static extern void Windows_Kinect_ColorFrame_CopyConvertedFrameDataToIntPtr(IntPtr pNative, IntPtr frameData, uint frameDataSize, ColorImageFormat colorFormat);
Example No. 42
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // if the current skeleton is not tracked, track it now
                    //kinectSensor.SkeletonStream.ChooseSkeletons(skeletonOfInterest.TrackingId);
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    // hack to make this face tracking detect the face even when it is not actually tracked
                    // <!>need to confirm if it works
                    //skeletonOfInterest.TrackingState = SkeletonTrackingState.Tracked;

                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);
                    //new Microsoft.Kinect.Toolkit.FaceTracking.Rect(skeletonOfInterest.Position.));

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }
                        if (faceTag == null)
                        {
                            // here call the face detection
                            faceTag = new FaceRecognizer().getFaceTag(this.colorImageBmp);

                            if (faceTag != null)
                            {
                                Global.StatusBarText.Text = "Found " + faceTag + "!";
                                if (Global.trackedPeople.ContainsKey(skeletonOfInterest))
                                {
                                    Global.trackedPeople[skeletonOfInterest] = faceTag;
                                }
                                else
                                {
                                    Global.trackedPeople.Add(skeletonOfInterest, faceTag);
                                }
                            }
                        }
                        this.facePoints = frame.GetProjected3DShape();
                        this.faceRect   = frame.FaceRect;
                    }
                }
            }
 public ConvertedColorImage(ColorFrame frame, ColorImageFormat colorImageFormat)
 {
     this.frame            = frame;
     this.colorImageFormat = colorImageFormat;
     Info = frame.FrameDescription.ToImageInfo(colorImageFormat);
 }
Example No. 44
        /// <summary>
        /// Execute startup tasks
        /// </summary>
        /// <param name="sender">object sending the event</param>
        /// <param name="e">event arguments</param>
        private void WindowLoaded(object sender, RoutedEventArgs e)
        {
            // Look through all sensors and start the first connected one.
            // This requires that a Kinect is connected at the time of app startup.
            // To make your app robust against plug/unplug,
            // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit (See components in Toolkit Browser).
            foreach (var potentialSensor in KinectSensor.KinectSensors)
            {
                if (potentialSensor.Status == KinectStatus.Connected)
                {
                    this.sensor = potentialSensor;
                    break;
                }
            }

            if (this.sensor != null)
            {
                // Turn on the skeleton, color, depth stream to receive skeleton frames
                TransformSmoothParameters smoothingParam = new TransformSmoothParameters
                {
                    Smoothing          = 0.5f,
                    Correction         = 0.1f,
                    Prediction         = 0.5f,
                    JitterRadius       = 0.1f,
                    MaxDeviationRadius = 0.1f
                };
                this.sensor.SkeletonStream.Enable(smoothingParam);

                //this.sensor.SkeletonStream.Enable();

                this.sensor.SkeletonStream.TrackingMode = SkeletonTrackingMode.Seated;
                this.checkBoxSeatedMode.SetCurrentValue(CheckBox.IsCheckedProperty, true);

                this.sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
                this.sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);

                this.RenderHeight     = 480;
                this.RenderWidth      = 640;
                this.depthImageFormat = this.sensor.DepthStream.Format;
                this.colorImageFormat = this.sensor.ColorStream.Format;

                // Create the drawing group we'll use for drawing
                this.drawingGroup = new DrawingGroup();
                this.drawingGroup.ClipGeometry = new RectangleGeometry(new Rect(0.0, 0.0, RenderWidth, RenderHeight));

                this.outputDrawingGroup = new DrawingGroup();
                this.outputDrawingGroup.ClipGeometry = new RectangleGeometry(new Rect(0.0, 0.0, RenderWidth, RenderHeight));

                // Display the drawing using our image control
                Image.Source = new DrawingImage(this.drawingGroup);
                // Allocate space to put the pixels we'll receive
                this.colorImage = new byte[this.sensor.ColorStream.FramePixelDataLength];
                // This is the bitmap we'll display on-screen
                this.colorBitmap      = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);
                this.blankColorBitmap = new WriteableBitmap(this.sensor.ColorStream.FrameWidth, this.sensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);

                OutputImage.Source = new DrawingImage(this.outputDrawingGroup);

                RoomSetting.SetCameraMatrix();

                RoomSetting.SetPlates();

                // Add an event handler to be called whenever there is new all frame data
                this.sensor.AllFramesReady += this.OnAllFramesReady;

                // Start the sensor!
                try
                {
                    this.sensor.Start();
                }
                catch (IOException)
                {
                    this.sensor = null;
                }
            }

            if (null == this.sensor)
            {
                this.statusBarText.Text = Properties.Resources.NoKinectReady;
            }
        }
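The startup comment above recommends KinectSensorChooser for robust plug/unplug handling. A minimal sketch of that approach, assuming Microsoft.Kinect.Toolkit is referenced (the stream formats mirror the ones enabled above; the method name is hypothetical):

        // Hypothetical robust startup via the toolkit's sensor chooser.
        private void StartWithSensorChooser()
        {
            KinectSensorChooser sensorChooser = new KinectSensorChooser();

            sensorChooser.KinectChanged += (s, args) =>
            {
                if (args.OldSensor != null)
                {
                    args.OldSensor.Stop();
                }

                if (args.NewSensor != null)
                {
                    args.NewSensor.SkeletonStream.Enable();
                    args.NewSensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
                    args.NewSensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
                    args.NewSensor.Start();
                }
            };

            sensorChooser.Start();
        }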
Example No. 45
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);
                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            //    only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        this.facePoints3DRaw = frame.Get3DShape();
                        this.facePoints      = frame.GetProjected3DShape();
                        animationUnitsRaw    = frame.GetAnimationUnitCoefficients();
                    }
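                    // Note: the reads below run even when TrackSuccessful is false,
                    // so they may pick up default or stale values on failed frames.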
                    x              = frame.Rotation.X;
                    y              = frame.Rotation.Y;
                    z              = frame.Rotation.Z;
                    facePointS3D   = this.facePoints3DRaw;
                    animationUnits = animationUnitsRaw;
                    //Debug.WriteLine(animationUnits[AnimationUnit.JawLower]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.BrowLower]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.BrowRaiser]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.LipCornerDepressor]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.LipRaiser]);
                    //Debug.WriteLine(animationUnits[AnimationUnit.LipStretcher]);
                    //Debug.WriteLine(frame.Translation.ToString());
                    //Debug.WriteLine(frame.Rotation.ToString());
                    //Debug.WriteLine(frame.Translation.X.ToString());
                    //Debug.WriteLine(frame.Translation.Y.ToString());
                    //Debug.WriteLine(frame.Translation.Z.ToString());
                }
            }
Example No. 46
 /// <summary>
 /// Maps every point in a depth frame to the corresponding location in a ColorImageFormat coordinate space.
 /// </summary>
 /// <param name="depthImageFormat">The depth format of the source.</param>
 /// <param name="depthPixelData">The depth frame pixel data, as retrieved from DepthImageFrame.CopyPixelDataTo.
 /// Must be equal in length to Width*Height of the depth format specified by depthImageFormat.</param>
 /// <param name="colorImageFormat">The desired target image format.</param>
 /// <param name="colorCoordinates">The ColorImagePoint array to receive the data. Each element will be the result of
 /// mapping the corresponding depthPixelDatum to the specified ColorImageFormat coordinate space.
 /// Must be equal in length to depthPixelData.</param>
 public void MapDepthFrameToColorFrame(DepthImageFormat depthImageFormat, short[] depthPixelData,
                                       ColorImageFormat colorImageFormat, ColorImagePoint[] colorCoordinates)
 {
     _kinectSensor.MapDepthFrameToColorFrame(depthImageFormat, depthPixelData, colorImageFormat, colorCoordinates);
 }
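A possible call site, assuming a wrapper instance named helper and a depth frame in the default 640x480 format (the buffer lengths follow the requirements documented above):

     // Hypothetical usage: map one depth frame's pixels into colour space.
     short[] depthPixels = new short[640 * 480];
     ColorImagePoint[] colorPoints = new ColorImagePoint[depthPixels.Length];

     depthFrame.CopyPixelDataTo(depthPixels);
     helper.MapDepthFrameToColorFrame(
         DepthImageFormat.Resolution640x480Fps30,
         depthPixels,
         ColorImageFormat.RgbResolution640x480Fps30,
         colorPoints);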
Example No. 47
        private void OnFrameReady(object sender, AllFramesReadyEventArgs e)
        {
            var imageWidth    = 0;
            var imageHeight   = 0;
            var bytesPerPixel = 0;
            var convertToRgb  = false;
            var haveNewFormat = false;

            using (var frame = e.OpenColorImageFrame())
            {
                if (frame == null)
                {
                    return;
                }

                imageWidth    = frame.Width;
                imageHeight   = frame.Height;
                bytesPerPixel = frame.BytesPerPixel;
                haveNewFormat = _lastImageFormat != frame.Format;

                if (frame.Format == ColorImageFormat.RawBayerResolution640x480Fps30 ||
                    frame.Format == ColorImageFormat.RawBayerResolution1280x960Fps12)
                {
                    convertToRgb  = true;
                    bytesPerPixel = 4;
                }

                if (haveNewFormat)
                {
                    _lastImageFormat = frame.Format;
                    _rawPixelData    = new byte[frame.PixelDataLength];
                    _pixelData       = new byte[frame.Width * frame.Height * bytesPerPixel];
                }

                if (convertToRgb)
                {
                    frame.CopyPixelDataTo(_rawPixelData);

                    ConvertBayerToRgb32(imageWidth, imageHeight, _rawPixelData, _pixelData);
                }
                else
                {
                    frame.CopyPixelDataTo(_pixelData);
                }

                View.Dispatcher.Invoke(() =>
                {
                    if (haveNewFormat)
                    {
                        _bitmap = new WriteableBitmap(imageWidth, imageHeight, 96, 96, PixelFormats.Bgr32,
                                                      null);

                        //View.ColorImage.Source = _bitmap;
                    }

                    _bitmap.WritePixels(new Int32Rect(0, 0, imageWidth, imageHeight), _pixelData, imageWidth * bytesPerPixel, 0);

                    var bitmapSource = BitmapSource.Create(imageWidth, imageHeight, 96, 96, PixelFormats.Bgr32, null, _pixelData, imageWidth * 4);

                    var b         = bitmapSource.ToBitmap();
                    var image     = new Image <Bgr, byte>(b);
                    var grayImage = image.Convert <Gray, byte>();

                    using (var storage = new MemStorage())
                    {
                        var contours = grayImage.FindContours(
                            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                            storage);

                        for (var i = 0; contours != null; contours = contours.HNext)
                        {
                            i++;

                            if ((contours.Area > Math.Pow(10, 2)) && (contours.Area < Math.Pow(1000, 2)))
                            {
                                var box = contours.GetMinAreaRect();
                                image.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                            }
                        }
                    }

                    View.ColorImage.Source = image.ToBitmapSource();
                });

                /*for (var y = 0; y < frame.Height; y++)
                 * {
                 *  for (var x = 0; x < frame.Width; x++)
                 *  {
                 *      var index = y * frame.Width + x;
                 *      var depth = _depthPixels[index].Depth;
                 *
                 *      if (depth > 600 && depth < 900)
                 *      {
                 *          PixelMaker.SetPixel(x, y, Colors.Gold);
                 *      }
                 *      else
                 *      {
                 *          PixelMaker.SetPixel(x, y, Colors.Transparent);
                 *      }
                 *  }
                 * }*/

                //PixelMaker.Draw();
            }
        }
Example No. 48
 /// <summary>
 /// Starts face tracking from Kinect input data. Track() detects a face
 /// based on the passed parameters, then identifies characteristic
 /// points and begins tracking. The first call to this API is more
 /// expensive, but if tracking succeeds, subsequent calls reuse the
 /// tracking information generated by the first call and are faster,
 /// until a tracking failure happens.
 /// </summary>
 /// <param name="colorImageFormat">
 /// format of the colorImage array
 /// </param>
 /// <param name="colorImage">
 /// Input color image frame retrieved from Kinect sensor
 /// </param>
 /// <param name="depthImageFormat">
 /// format of the depthImage array
 /// </param>
 /// <param name="depthImage">
 /// Input depth image frame retrieved from Kinect sensor
 /// </param>
 /// <returns>
 /// Returns computed face tracking results for this image frame
 /// </returns>
 public FaceTrackFrame Track(
     ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage)
 {
     return(this.Track(colorImageFormat, colorImage, depthImageFormat, depthImage, null, Rect.Empty));
 }
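A possible call site for this overload, assuming colour and depth buffers copied from the matching frames (as in the AllFramesReady handlers above):

     // Hypothetical usage: run the tracker on the current frame pair and
     // read results only when tracking succeeded.
     FaceTrackFrame faceFrame = faceTracker.Track(
         ColorImageFormat.RgbResolution640x480Fps30, colorPixels,
         DepthImageFormat.Resolution640x480Fps30, depthPixels);

     if (faceFrame.TrackSuccessful)
     {
         var shape = faceFrame.GetProjected3DShape();
     }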
Example No. 49
            private bool CheckFace(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return(false);
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        //getting the Animation Unit Coefficients
                        this.AUs = frame.GetAnimationUnitCoefficients();
                        var jawLowerer   = AUs[AnimationUnit.JawLower];
                        var browLower    = AUs[AnimationUnit.BrowLower];
                        var browRaiser   = AUs[AnimationUnit.BrowRaiser];
                        var lipDepressor = AUs[AnimationUnit.LipCornerDepressor];
                        var lipRaiser    = AUs[AnimationUnit.LipRaiser];
                        var lipStretcher = AUs[AnimationUnit.LipStretcher];
                        //set up file for output
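                        //NOTE: opening the StreamWriter without the append flag truncates data.txt, so this header overwrites the file on every tracked frame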
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                                                 (@"C:\Users\Public\data.txt"))
                        {
                            file.WriteLine("FaceTrack Data, started recording at " + DateTime.Now.ToString("HH:mm:ss tt"));
                        }

                        //here is the algorithm to test different facial features

                        //BrowLower is unreliable if the user wears glasses; it works without them

                        string state = "";

                        //surprised
                        if ((jawLowerer > 0.25 || jawLowerer < -0.25) && browLower < 0)
                        {
                            state = "surprised";
                        }
                        //smiling
                        if (lipStretcher > 0.4 || lipDepressor < 0)
                        {
                            state = "smiling";
                        }
                        //sad
                        if (browRaiser < 0 && lipDepressor > 0)
                        {
                            state = "sad";
                        }
                        //angry
                        if ((browLower > 0 && (jawLowerer > 0.25 || jawLowerer < -0.25)) ||
                            (browLower > 0 && lipDepressor > 0))
                        {
                            state = "angry";
                        }
                        //System.Diagnostics.Debug.WriteLine(browLower);

                        this.facePoints = frame.GetProjected3DShape();

                        if (states[currentState] == state)
                        {
                            Trace.WriteLine("Yo!");
                            return(true);
                        }
                    }
                }

                return(false);
            }
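The states array and currentState index consulted at the end of CheckFace are fields this snippet does not show; a plausible declaration, offered purely as an assumption about the surrounding class:

            // Hypothetical fields: the expressions being cycled through, and the one currently expected.
            private readonly string[] states = { "surprised", "smiling", "sad", "angry" };
            private int currentState = 0;

Under that reading, CheckFace returns true only when the detected expression matches the one the caller is currently waiting for.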
Ejemplo n.º 50
0
        public IEnumerable <WeightedRect> DetectFaces(ColorImageFormat colorImageFormat,
                                                      byte[] colorImage,
                                                      DepthImageFormat depthImageFormat,
                                                      short[] depthImage, Rect roi)
        {
            if (this.operationMode != OperationMode.Kinect)
            {
                throw new InvalidOperationException(
                          "Cannot use Track with Kinect input types when face tracker is initialized for tracking videos/images");
            }

            if (colorImage == null)
            {
                throw new ArgumentNullException("colorImage");
            }

            if (depthImage == null)
            {
                throw new ArgumentNullException("depthImage");
            }

            if (colorImageFormat != this.initializationColorImageFormat)
            {
                throw new InvalidOperationException("Color image frame format different from initialization");
            }

            if (depthImageFormat != this.initializationDepthImageFormat)
            {
                throw new InvalidOperationException("Depth image frame format different from initialization");
            }

            if (colorImage.Length != this.videoCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("colorImage", "Color image data size is needs to match initialization configuration.");
            }

            if (depthImage.Length != this.depthCameraConfig.FrameBufferLength)
            {
                throw new ArgumentOutOfRangeException("depthImage", "Depth image data size is needs to match initialization configuration.");
            }

            this.copyStopwatch.Start();
            this.colorFaceTrackingImage.CopyFrom(colorImage);
            this.depthFaceTrackingImage.CopyFrom(depthImage);
            this.copyStopwatch.Stop();

            var sensorData = new SensorData(this.colorFaceTrackingImage, this.depthFaceTrackingImage, DefaultZoomFactor, Point.Empty);
            FaceTrackingSensorData faceTrackSensorData = sensorData.FaceTrackingSensorData;

            int  hr;
            uint count = 4;

            WeightedRect[] rects = new WeightedRect[count];

            GCHandle handle = GCHandle.Alloc(rects, GCHandleType.Pinned);

            try
            {
                IntPtr rectsPtr = handle.AddrOfPinnedObject();
                hr = this.faceTrackerInteropPtr.DetectFaces(ref faceTrackSensorData, ref roi, rectsPtr, ref count);
            }
            finally
            {
                if (handle.IsAllocated)
                {
                    handle.Free();
                }
            }

            this.trackSucceeded = hr == (int)ErrorCode.Success;

            return(rects.Take((int)count));
        }
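The pinning dance around DetectFaces is a general interop pattern: the managed rects array is pinned so its address stays valid while native code fills it, and it is unpinned in a finally block no matter what. A self-contained sketch of the same pattern, independent of the Kinect types:

using System;
using System.Runtime.InteropServices;

class PinningSketch
{
    static void Main()
    {
        int[] buffer = new int[4];
        GCHandle handle = GCHandle.Alloc(buffer, GCHandleType.Pinned);
        try
        {
            IntPtr ptr = handle.AddrOfPinnedObject();
            Console.WriteLine(ptr); // a native call would receive this stable address
        }
        finally
        {
            if (handle.IsAllocated)
            {
                handle.Free(); // unpin, or the GC can never relocate or reclaim the array
            }
        }
    }
}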
Ejemplo n.º 51
0
        private void OnAllFramesReady(object sender, Microsoft.Kinect.AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.SkeletonData == null || this.SkeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.SkeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.SkeletonData);
                Skeleton activeSkeleton = null;
                activeSkeleton = (from skel in this.SkeletonData where skel.TrackingState == SkeletonTrackingState.Tracked select skel).FirstOrDefault();


                //Idea: separate the eye regions of the color image,
                //run a learning algorithm on the right and left eye,
                //and detect blinks on the separated regions.

                //colorImage is a one-dimensional array with 640 x 480 x 4 byte values (BGRX byte order)


                if (activeSkeleton != null)
                {
                    FaceTrackFrame currentFaceFrame = faceTracker.Track(this.colorImageFormat, colorImage, depthImageFormat, depthImage, activeSkeleton);
                    float          browRaiserValue  = currentFaceFrame.GetAnimationUnitCoefficients()[AnimationUnit.BrowRaiser];
                    float          browLowererValue = currentFaceFrame.GetAnimationUnitCoefficients()[AnimationUnit.BrowLower];
                    tbBrowLowerer.Text = browLowererValue.ToString();
                    tbBrowRaiser.Text  = browRaiserValue.ToString();
                    //Get relevant Points for blink detection
                    //Left eye
                    int    minX    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveOneFourthLeftEyelid].X);
                    int    minY    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveOneFourthLeftEyelid].Y);
                    int    maxX    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.BelowThreeFourthLeftEyelid].X);
                    int    maxY    = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.BelowThreeFourthLeftEyelid].Y);
                    Bitmap leftEye = EyeExtract(colorImageFrame, currentFaceFrame, minX, minY, maxX, maxY, false);
                    pbLeftEye.Image = leftEye;

                    //Right eye
                    minX = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveThreeFourthRightEyelid].X);
                    minY = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.AboveThreeFourthRightEyelid].Y);
                    maxX = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.OneFourthBottomRightEyelid].X);
                    maxY = (int)Math.Round(currentFaceFrame.GetProjected3DShape()[FeaturePoint.OneFourthBottomRightEyelid].Y);

                    Bitmap rightEye = EyeExtract(colorImageFrame, currentFaceFrame, minX, minY, maxX, maxY, true);
                    pbRightEye.Image = rightEye;

                    //Apply an edge filter to both eyes.
                    double dxRight;
                    double dyRight;
                    double dxLeft;
                    double dyLeft;
                    if (rightEye != null && leftEye != null)
                    {
                        Bitmap edgePicRight = Convolution(ConvertGrey(rightEye), true, out dxRight, out dyRight);
                        Bitmap edgePicLeft  = Convolution(ConvertGrey(leftEye), false, out dxLeft, out dyLeft);



                        //If Face is rotated, move Mouse
                        if (headRotationHistory.Count > filterLength && currentFaceFrame.TrackSuccessful)
                        {
                            int x = 0;
                            int y = 0;

                            //Method 1: no smoothing
                            //ScaleXY(currentFaceFrame.Rotation, out x, out y);
                            //MouseControl.Move(x, y);

                            ////Method 2: smoothing over the last x frames:
                            //int i = 0;
                            //Vector3DF rotationMedium = new Vector3DF();
                            //while (i < 10 && headRotationHistory.Count - 1 > i)
                            //{
                            //    i++;
                            //    rotationMedium.X += headRotationHistory[headRotationHistory.Count - 1 - i].X;
                            //    rotationMedium.Y += headRotationHistory[headRotationHistory.Count - 1 - i].Y;
                            //}
                            //rotationMedium.X = rotationMedium.X / i;
                            //rotationMedium.Y = rotationMedium.Y / i;
                            //ScaleXY(rotationMedium, out x, out y);
                            //MouseControl.Move(x, y);

                            //Method 3: Gaussian filter: weight the most recent frames more heavily.



                            Vector3DF rotationMedium = new Vector3DF();
                            rotationMedium.X = currentFaceFrame.Rotation.X * gaussFilter[0];
                            rotationMedium.Y = currentFaceFrame.Rotation.Y * gaussFilter[0];
                            int i = 0;
                            while (i < filterLength - 1)
                            {
                                i++;
                                rotationMedium.X += (headRotationHistory[headRotationHistory.Count - 1 - i].X * gaussFilter[i]);
                                rotationMedium.Y += (headRotationHistory[headRotationHistory.Count - 1 - i].Y * gaussFilter[i]);
                            }
                            rotationMedium.X = (float)(rotationMedium.X / gaussFactor);
                            rotationMedium.Y = (float)(rotationMedium.Y / gaussFactor);
                            ScaleXY(rotationMedium, out x, out y);

                            MouseControl.Move(x, y);
                            //Method 4: quadratic smoothing
                            //double deltaX = ((-currentFaceFrame.Rotation.Y) - (-headRotationHistory.Last().Y));
                            //double deltaY = ((-currentFaceFrame.Rotation.X) - (-headRotationHistory.Last().X));
                            //if (deltaX < 0)
                            //    deltaX = -Math.Pow(deltaX, 2) * 4;
                            //else
                            //    deltaX = Math.Pow(deltaX, 2) * 4;
                            //if (deltaY < 0)
                            //    deltaY = -Math.Pow(deltaY, 2) * 5;
                            //else
                            //    deltaY = Math.Pow(deltaY, 2) * 5;
                            //MouseControl.DeltaMove((int)Math.Round(deltaX, 0), (int)Math.Round(deltaY));
                        }

                        headRotationHistory.Add(currentFaceFrame.Rotation);
                        if (headRotationHistory.Count >= 100)
                        {
                            headRotationHistory.RemoveAt(0);
                        }
                    }
                }
            }
            catch (Exception)
            {
                // Ignore per-frame failures so one bad frame doesn't kill the
                // event handler; the next AllFramesReady event simply retries.
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
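Method 3 above weights the current rotation with gaussFilter[0] and each older history entry with a later coefficient, then divides by gaussFactor; neither field is shown in this snippet. One way such weights could be produced, as an assumption rather than the sample's actual code, is a sampled Gaussian normalised by its own sum (filterLength is the class field used above; sigma is a free parameter):

// Sketch: build filterLength weights that favour the most recent frames.
double sigma = 2.0;
double[] gaussFilter = new double[filterLength];
double gaussFactor = 0.0;
for (int i = 0; i < filterLength; i++)
{
    gaussFilter[i] = Math.Exp(-(i * i) / (2.0 * sigma * sigma));
    gaussFactor += gaussFilter[i]; // later used to normalise the weighted sum
}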
Ejemplo n.º 52
0
        public void ProcessFrame(KinectSensor sensor, byte[] colorImage, ColorImageFormat colorImageFormat, DepthImageFrame depthFrame, short[] depthImage, DepthImageFormat depthImageFormat, Skeleton[] skeletonData, SkeletonFrame skeletonFrame)
        {
            //Console.WriteLine("N: ---------");
            coordinates.Clear();
            int detectedFace          = 0;
            int trackedSkeletonsCount = 0;

            int playerIndex = -1;

            for (int i = 0; i < skeletonData.Length; i++)
            //foreach (Skeleton skeleton in skeletonData)
            {
                Skeleton skeleton = skeletonData[i];
                if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                    skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                {
                    // We want to keep a record of any skeleton, tracked or untracked.
                    if (!trackedSkeletons.ContainsKey(skeleton.TrackingId))
                    {
                        trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                    }

                    DepthImagePoint depthPoint = depthFrame.MapFromSkeletonPoint(skeleton.Joints[JointType.Head].Position);
                    ColorImagePoint colorPoint = depthFrame.MapToColorImagePoint(depthPoint.X, depthPoint.Y, colorImageFormat);

                    Coordinates2D c = new Coordinates2D();

                    playerIndex = i + 1;

                    c.X           = colorPoint.X;
                    c.Y           = colorPoint.Y;
                    c.Width       = 0;
                    c.Height      = 0;
                    c.LeftEyeX    = 0;
                    c.LeftEyeY    = 0;
                    c.RightEyeX   = 0;
                    c.RightEyeY   = 0;
                    c.PlayerIndex = playerIndex;

                    trackedSkeletonsCount++;

                    // Give each tracker the updated frame.
                    SkeletonFaceTracker skeletonFaceTracker;
                    if (!scannedIdentities.Contains(skeleton.TrackingId) &&
                        detectedFace < 1 &&
                        trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                    {
                        detectedFace++;
                        scannedIdentities.Add(skeleton.TrackingId);


                        skeletonFaceTracker.OnFrameReady(sensor, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                        skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        Coordinates2D?realCoords = skeletonFaceTracker.GetFaceCoordinates();
                        if (realCoords.HasValue)
                        {
                            c             = realCoords.Value;
                            c.PlayerIndex = playerIndex;
                        }
                    }

                    c.TrackingId = skeleton.TrackingId;
                    coordinates.Add(c);
                }
            }

            if (scannedIdentities.Count > 0 && scannedIdentities.Count >= trackedSkeletonsCount)
            {
                scannedIdentities.Clear();
                //Console.WriteLine("Clearing");
            }

            RemoveOldTrackers(skeletonFrame.FrameNumber);

            //if (coordinates.Count > 0)
            {
                int[] identities = new int[coordinates.Count];


                //  stopwatch.Reset();
                //  stopwatch.Start();
                double[] distances = new double[coordinates.Count * 8];
                this.ProcessImage(colorImage, GetWidth(colorImageFormat), GetHeight(colorImageFormat), depthImage, 640, 480, coordinates.ToArray(), identities, distances);
                //  stopwatch.Stop();
                //       foreach (int i in identities)
                //       {
                //           Console.WriteLine("Recognized: {0} (in {1} millis - {2} ticks)", i, stopwatch.ElapsedMilliseconds, stopwatch.ElapsedTicks);
                //       }
            }
        }
        /// <summary>
        /// Get the size for the given color image format.
        /// </summary>
        /// <param name="format">The color image format.</param>
        /// <returns>The width and height of the given image.</returns>
        private static Size GetColorImageSize(ColorImageFormat format)
        {
            try
            {
                var q = from item in BackgroundRemovalResolutions
                    where item.Key == format
                    select item.Value;

                return q.Single();
            }
            catch (InvalidOperationException)
            {
                throw new ArgumentException(Resources.UnsupportedColorFormat, "format");
            }
        }
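BackgroundRemovalResolutions is declared elsewhere in the class; given how the query above reads item.Key and item.Value, a plausible shape for it (an assumption, not the actual declaration) is:

        // Hypothetical lookup: each supported color format paired with its pixel size.
        private static readonly KeyValuePair<ColorImageFormat, Size>[] BackgroundRemovalResolutions =
        {
            new KeyValuePair<ColorImageFormat, Size>(ColorImageFormat.RgbResolution640x480Fps30, new Size(640, 480)),
            new KeyValuePair<ColorImageFormat, Size>(ColorImageFormat.RgbResolution1280x960Fps12, new Size(1280, 960)),
        };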
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame skeletonFrame = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked
                        || skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        // We want to keep a record of any skeleton, tracked or untracked.
                        if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                        {
                            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                        }

                        // Give each tracker the updated frame.
                        SkeletonFaceTracker skeletonFaceTracker;
                        if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                        {
                            skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                            skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        }
                    }
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
Ejemplo n.º 55
0
        private void Kinect_AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }


                // TODO look into using the Timestamp on each frame
                var time = pipeline.GetCurrentTime();

                var sharedColorImage = ImagePool.GetOrCreate(colorImageFrame.Width, colorImageFrame.Height, Imaging.PixelFormat.BGRX_32bpp);
                var sharedDepthImage = ImagePool.GetOrCreate(depthImageFrame.Width, depthImageFrame.Height, Imaging.PixelFormat.Gray_16bpp);

                colorImageFrame.CopyPixelDataTo(sharedColorImage.Resource.ImageData, (colorImageFrame.Width * colorImageFrame.Height * 4));
                this.ColorImage.Post(sharedColorImage, time);

                //depthImageFrame.CopyPixelDataTo(sharedDepthImage.Resource.ImageData, (depthImageFrame.Width * depthImageFrame.Height * 2));
                depthImageFrame.CopyPixelDataTo(sharedDepthImage.Resource.ImageData, depthImageFrame.PixelDataLength);
                this.DepthImage.Post(sharedDepthImage, time);


                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                this.Skeletons.Post(this.skeletonData.ToList(), time);
            }
            catch
            {
                // TODO: log and surface per-frame failures instead of silently ignoring them
            }
        }
Ejemplo n.º 56
0
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            Trace.WriteLine("allframesready");
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                Trace.WriteLine("try start");
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    Trace.WriteLine("return from frameready");
                    return;
                }
                Trace.WriteLine("wayaround null");

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                    Trace.WriteLine("depth image fromat no work");
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.ResetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                    Trace.WriteLine("color image fromat no work");
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }
                Trace.WriteLine("starting copying of data");

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                Trace.WriteLine("end copying of data");
                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    Trace.WriteLine("inside foreach loop");
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                        skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        Trace.WriteLine("iftracked");
                        // We want to keep a record of any skeleton, tracked or untracked.
                        if (!this.trackedSkeletons.ContainsKey(skeleton.TrackingId))
                        {
                            this.trackedSkeletons.Add(skeleton.TrackingId, new SkeletonFaceTracker());
                            Trace.WriteLine("add skeleton tracking");
                        }

                        // Give each tracker the updated frame.
                        SkeletonFaceTracker skeletonFaceTracker;
                        if (this.trackedSkeletons.TryGetValue(skeleton.TrackingId, out skeletonFaceTracker))
                        {
                            Trace.WriteLine("before");
                            skeletonFaceTracker.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton, this.activityWindow);
                            skeletonFaceTracker.LastTrackedFrame = skeletonFrame.FrameNumber;
                        }
                    }
                }

                this.RemoveOldTrackers(skeletonFrame.FrameNumber);

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
        /// <summary>
        /// Set the background removed color frame format.
        /// </summary>
        /// <param name="format">
        /// The given color image format.
        /// </param>
        /// <param name="forceEnable">
        /// Streams should be enabled even if the new color image format is the same as the old one.
        /// This is useful for the initial enabling of the stream.
        /// </param>
        private void UpdateBackgroundRemovalFrameFormat(ColorImageFormat format, bool forceEnable)
        {
            if (!forceEnable && (format == this.colorImageFormat))
            {
                // No work to do
                return;
            }

            if (this.sensor != null)
            {
                try
                {
                    this.sensor.ColorStream.Enable(format);
                    this.backgroundRemovalStream.Enable(format, DepthImageFormat.Resolution640x480Fps30);
                }
                catch (InvalidOperationException)
                {
                    // KinectSensor might enter an invalid state while enabling/disabling streams or stream features.
                    // E.g.: sensor might be abruptly unplugged.
                }
            }

            // Record the requested format. Note this runs even if enabling the streams failed above.
            this.colorImageFormat = format;
        }
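Because the early-out compares the requested format against the current one, the very first enable has to force its way through. A usage sketch, where newFormat is a placeholder for whatever format the caller selected:

            this.UpdateBackgroundRemovalFrameFormat(ColorImageFormat.RgbResolution640x480Fps30, true);  // initial enable: bypass the equality check
            this.UpdateBackgroundRemovalFrameFormat(newFormat, false);                                  // later switches: no-op when unchanged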
Ejemplo n.º 58
0
        private void AllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for changes in any of the data this function is receiving
                // and reset things appropriately.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.DestroyFaceTracker();
                    this.colorImage               = null;
                    this.colorImageFormat         = colorImageFrame.Format;
                    this.colorImageWritableBitmap = null;
                    this.ColorImage.Source        = null;
                    this.theMaterial.Brush        = null;
                }

                if (this.skeletonData != null && this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = null;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                if (this.colorImageWritableBitmap == null)
                {
                    this.colorImageWritableBitmap = new WriteableBitmap(
                        colorImageFrame.Width, colorImageFrame.Height, 96, 96, PixelFormats.Bgr32, null);
                    this.ColorImage.Source = this.colorImageWritableBitmap;
                    this.theMaterial.Brush = new ImageBrush(this.colorImageWritableBitmap)
                    {
                        ViewportUnits = BrushMappingMode.Absolute
                    };
                }

                if (this.skeletonData == null)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                // Copy data received in this event to our buffers.
                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);
                this.colorImageWritableBitmap.WritePixels(
                    new Int32Rect(0, 0, colorImageFrame.Width, colorImageFrame.Height),
                    this.colorImage,
                    colorImageFrame.Width * Bgr32BytesPerPixel,
                    0);

                // Find a skeleton to track.
                // First see if our old one is good.
                // When a skeleton is in PositionOnly tracking state, don't pick a new one
                // as it may become fully tracked again.
                Skeleton skeletonOfInterest =
                    this.skeletonData.FirstOrDefault(
                        skeleton =>
                        skeleton.TrackingId == this.trackingId &&
                        skeleton.TrackingState != SkeletonTrackingState.NotTracked);

                if (skeletonOfInterest == null)
                {
                    // Old one wasn't around.  Find any skeleton that is being tracked and use it.
                    skeletonOfInterest =
                        this.skeletonData.FirstOrDefault(
                            skeleton => skeleton.TrackingState == SkeletonTrackingState.Tracked);

                    if (skeletonOfInterest != null)
                    {
                        // This may be a different person so reset the tracker which
                        // could have tuned itself to the previous person.
                        if (this.faceTracker != null)
                        {
                            this.faceTracker.ResetTracking();
                        }

                        this.trackingId = skeletonOfInterest.TrackingId;
                    }
                }

                if (skeletonOfInterest != null && skeletonOfInterest.TrackingState == SkeletonTrackingState.Tracked)
                {
                    if (this.faceTracker == null)
                    {
                        try
                        {
                            this.faceTracker = new FaceTracker(this.Kinect);
                        }
                        catch (InvalidOperationException)
                        {
                            // During some shutdown scenarios the FaceTracker
                            // is unable to be instantiated.  Catch that exception
                            // and don't track a face.
                            Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                            this.faceTracker = null;
                        }
                    }

                    if (this.faceTracker != null)
                    {
                        FaceTrackFrame faceTrackFrame = this.faceTracker.Track(
                            this.colorImageFormat,
                            this.colorImage,
                            this.depthImageFormat,
                            this.depthImage,
                            skeletonOfInterest);

                        if (faceTrackFrame.TrackSuccessful)
                        {
                            if (!visited)
                            {
                                visited = true;
                                //counter.Text = "60 seconds";
                                aTimer.Interval = 1000;
                                aTimer.Tick    += new EventHandler(aTimer_Tick);
                                aTimer.Start();
                            }
                            if (saveModel)
                            {
                                saveDepthImagebmp(depthImageFrame);
                                saveColorImage(colorImageFrame.Width, colorImageFrame.Height, (colorImageFrame.Width * Bgr32BytesPerPixel));
                                saveFaceModel();
                            }
                        }
                    }
                }
                else
                {
                    this.trackingId = -1;
                }
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }
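The selection policy above keeps the current TrackingId alive while that skeleton is at least PositionOnly, and only adopts a new fully tracked skeleton (resetting the face tracker, which may have tuned itself to the previous person) once the old one disappears. That policy can be isolated into a helper; a sketch assuming System.Linq, with the tracker reset left to the caller:

        // Sketch: returns the skeleton to track, or null if none qualifies.
        private static Skeleton PickSkeletonToTrack(Skeleton[] skeletons, ref int trackingId, out bool personChanged)
        {
            personChanged = false;
            Skeleton current = skeletons.FirstOrDefault(
                s => s.TrackingId == trackingId && s.TrackingState != SkeletonTrackingState.NotTracked);

            if (current == null)
            {
                current = skeletons.FirstOrDefault(s => s.TrackingState == SkeletonTrackingState.Tracked);
                if (current != null)
                {
                    personChanged = true; // caller should reset its FaceTracker for the new person
                    trackingId = current.TrackingId;
                }
            }

            return current;
        }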
            /// <summary>
            /// Updates the face tracking information for this skeleton
            /// </summary>
            internal void OnFrameReady(KinectSensor kinectSensor, ColorImageFormat colorImageFormat, byte[] colorImage, DepthImageFormat depthImageFormat, short[] depthImage, Skeleton skeletonOfInterest)
            {
                this.skeletonTrackingState = skeletonOfInterest.TrackingState;

                if (this.skeletonTrackingState != SkeletonTrackingState.Tracked)
                {
                    // nothing to do with an untracked skeleton.
                    return;
                }

                if (this.faceTracker == null)
                {
                    try
                    {
                        this.faceTracker = new FaceTracker(kinectSensor);
                    }
                    catch (InvalidOperationException)
                    {
                        // During some shutdown scenarios the FaceTracker
                        // is unable to be instantiated.  Catch that exception
                        // and don't track a face.
                        Debug.WriteLine("AllFramesReady - creating a new FaceTracker threw an InvalidOperationException");
                        this.faceTracker = null;
                    }
                }

                if (this.faceTracker != null)
                {
                    FaceTrackFrame frame = this.faceTracker.Track(
                        colorImageFormat, colorImage, depthImageFormat, depthImage, skeletonOfInterest);

                    this.lastFaceTrackSucceeded = frame.TrackSuccessful;
                    if (this.lastFaceTrackSucceeded)
                    {
                        if (faceTriangles == null)
                        {
                            // only need to get this once.  It doesn't change.
                            faceTriangles = frame.GetTriangles();
                        }

                        //getting the Animation Unit Coefficients
                        this.AUs = frame.GetAnimationUnitCoefficients();
                        var jawLowerer = AUs[AnimationUnit.JawLower];
                        var browLower = AUs[AnimationUnit.BrowLower];
                        var browRaiser = AUs[AnimationUnit.BrowRaiser];
                        var lipDepressor = AUs[AnimationUnit.LipCornerDepressor];
                        var lipRaiser = AUs[AnimationUnit.LipRaiser];
                        var lipStretcher = AUs[AnimationUnit.LipStretcher];
                        //set up file for output
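                        //NOTE: this truncating open wipes data.txt on each tracked frame, discarding the appended entries written below; append mode would preserve them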
                        using (System.IO.StreamWriter file = new System.IO.StreamWriter
                            (@"C:\Users\Public\data.txt"))
                        {
                            file.WriteLine("FaceTrack Data, started recording at " + DateTime.Now.ToString("HH:mm:ss tt"));
                        }

                        //here is the algorithm to test different facial features

                        //BrowLower is unreliable if the user wears glasses; it works without them

                        //surprised
                        if ((jawLowerer > 0.25 || jawLowerer < -0.25) && browLower < 0)
                        {
                            System.Diagnostics.Debug.WriteLine("surprised");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": surprised");
                                file.WriteLine("JawLowerer: " + jawLowerer);
                                file.WriteLine("BrowLowerer: " + browLower);
                            }
                        }
                        //smiling
                        if (lipStretcher > 0.4 || lipDepressor < 0)
                        {
                            System.Diagnostics.Debug.WriteLine("Smiling");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": smiling");
                                file.WriteLine("LipStretcher: " + lipStretcher);
                            }
                        }
                        //kissing face
                        if (lipStretcher < -0.75)
                        {
                            System.Diagnostics.Debug.WriteLine("kissing face");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": kissing face");
                                file.WriteLine("LipStretcher: " + lipStretcher);
                            }
                        }
                        //sad
                        if (browRaiser < 0 && lipDepressor > 0)
                        {
                            System.Diagnostics.Debug.WriteLine("sad");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": sad");
                                file.WriteLine("LipCornerDepressor: " + lipDepressor);
                                file.WriteLine("OuterBrowRaiser: " + browRaiser);
                            }
                        }
                        //angry
                        if ((browLower > 0 && (jawLowerer > 0.25 || jawLowerer < -0.25)) ||
                            (browLower > 0 && lipDepressor > 0))
                        {
                            System.Diagnostics.Debug.WriteLine("angry");
                            using (System.IO.StreamWriter file = new System.IO.StreamWriter
                                (@"C:\Users\Public\data.txt", true))
                            {
                                file.WriteLine(DateTime.Now.ToString("HH:mm:ss tt") + ": angry");
                                file.WriteLine("LipCornerDepressor: " + lipDepressor);
                                file.WriteLine("BrowLowerer: " + browLower);
                                file.WriteLine("JawLowerer: " + jawLowerer);
                            }
                        }
                        //System.Diagnostics.Debug.WriteLine(browLower);

                        this.facePoints = frame.GetProjected3DShape();
                    }
                }
            }
Ejemplo n.º 60
0
        private void OnAllFramesReady(object sender, AllFramesReadyEventArgs allFramesReadyEventArgs)
        {
            ColorImageFrame colorImageFrame = null;
            DepthImageFrame depthImageFrame = null;
            SkeletonFrame   skeletonFrame   = null;

            try
            {
                colorImageFrame = allFramesReadyEventArgs.OpenColorImageFrame();
                depthImageFrame = allFramesReadyEventArgs.OpenDepthImageFrame();
                skeletonFrame   = allFramesReadyEventArgs.OpenSkeletonFrame();

                if (colorImageFrame == null || depthImageFrame == null || skeletonFrame == null)
                {
                    return;
                }

                // Check for image format changes.  The FaceTracker doesn't
                // deal with that so we need to reset.
                if (this.depthImageFormat != depthImageFrame.Format)
                {
                    this.resetFaceTracking();
                    this.depthImage       = null;
                    this.depthImageFormat = depthImageFrame.Format;
                }

                if (this.colorImageFormat != colorImageFrame.Format)
                {
                    this.resetFaceTracking();
                    this.colorImage       = null;
                    this.colorImageFormat = colorImageFrame.Format;
                }

                // Create any buffers to store copies of the data we work with
                if (this.depthImage == null)
                {
                    this.depthImage = new short[depthImageFrame.PixelDataLength];
                }

                if (this.colorImage == null)
                {
                    this.colorImage = new byte[colorImageFrame.PixelDataLength];
                }

                // Get the skeleton information
                if (this.skeletonData == null || this.skeletonData.Length != skeletonFrame.SkeletonArrayLength)
                {
                    this.skeletonData = new Skeleton[skeletonFrame.SkeletonArrayLength];
                }

                colorImageFrame.CopyPixelDataTo(this.colorImage);
                depthImageFrame.CopyPixelDataTo(this.depthImage);
                skeletonFrame.CopySkeletonDataTo(this.skeletonData);

                // Update the list of trackers and the trackers with the current frame information
                foreach (Skeleton skeleton in this.skeletonData)
                {
                    if (skeleton.TrackingState == SkeletonTrackingState.Tracked ||
                        skeleton.TrackingState == SkeletonTrackingState.PositionOnly)
                    {
                        try
                        {
                            this.trackedSkeleton.OnFrameReady(this.Kinect, colorImageFormat, colorImage, depthImageFormat, depthImage, skeleton);
                        }
                        catch (NullReferenceException)
                        {
                            //if face tracking is lost, this avoids a crash
                        }
                        this.trackedSkeleton.LastTrackedFrame = skeletonFrame.FrameNumber;
                    }
                }

                this.InvalidateVisual();
            }
            finally
            {
                if (colorImageFrame != null)
                {
                    colorImageFrame.Dispose();
                }

                if (depthImageFrame != null)
                {
                    depthImageFrame.Dispose();
                }

                if (skeletonFrame != null)
                {
                    skeletonFrame.Dispose();
                }
            }
        }