/// <summary>
/// Computes the HD face vertices for the frame's current alignment and draws each
/// one as a small ellipse onto the given <see cref="DrawingContext"/>, colored by body index.
/// </summary>
/// <param name="highDefinitionFaceFrame">Source HD face frame; its alignment result is refreshed here.</param>
/// <param name="context">Drawing context that receives the ellipses.</param>
/// <param name="useDepthSpace">Unused in the current implementation (points are always mapped to depth space) — kept for interface compatibility.</param>
/// <param name="bodyIndex">Body index used to pick the brush color.</param>
/// <param name="pointRadius">Radius, in pixels, of each drawn vertex.</param>
internal static void CopyToFrameToDrawingContext(this HighDefinitionFaceFrame highDefinitionFaceFrame, DrawingContext context, bool useDepthSpace = true, byte bodyIndex = 1, double pointRadius = 2F)
{
    var faceAlignment = new FaceAlignment();
    var coordinateMapper = highDefinitionFaceFrame.HighDefinitionFaceFrameSource.KinectSensor.CoordinateMapper;
    var brush = BodyIndexColor.GetBrushFromBodyIndex(bodyIndex);

    highDefinitionFaceFrame.GetAndRefreshFaceAlignmentResult(faceAlignment);

    // FaceModel is IDisposable; the original leaked one instance per frame.
    using (var faceModel = new FaceModel())
    {
        var vertices = faceModel.CalculateVerticesForAlignment(faceAlignment);
        for (int index = 0; index < vertices.Count; index++)
        {
            DepthSpacePoint point = coordinateMapper.MapCameraPointToDepthSpace(vertices[index]);

            // BUG FIX: the original used `return` here, which silently stopped
            // drawing ALL remaining vertices as soon as a single vertex failed
            // to map (CoordinateMapper yields -Infinity for unmappable points).
            // Skip only the unmappable vertex instead.
            if (float.IsInfinity(point.X) || float.IsInfinity(point.Y))
            {
                continue;
            }

            context.DrawEllipse(brush, null, point.GetPoint(), pointRadius, pointRadius);
        }
    }
}
/// <summary>
/// Window constructor: acquires the default Kinect sensor and wires up the body,
/// color, and (primary + secondary) HD face readers before opening the sensor.
/// </summary>
public MainWindow()
{
    InitializeComponent();

    _sensor = KinectSensor.GetDefault();
    if (_sensor == null)
    {
        return; // No Kinect attached — leave all readers unwired.
    }

    // Body stream (supplies tracking ids for the face sources).
    _bodySource = _sensor.BodyFrameSource;
    _bodyReader = _bodySource.OpenReader();
    _bodyReader.FrameArrived += BodyReader_FrameArrived;

    // Color stream.
    _colorReader = _sensor.ColorFrameSource.OpenReader();
    _colorReader.FrameArrived += ColorReader_FrameArrived;

    // Two independent HD face pipelines (primary and secondary person).
    _faceSource = new HighDefinitionFaceFrameSource(_sensor);
    _faceSourceSub = new HighDefinitionFaceFrameSource(_sensor);
    // _faceSource.TrackingIdLost += OnTrackingIdLost;
    _faceReader = _faceSource.OpenReader();
    _faceReaderSub = _faceSourceSub.OpenReader();
    _faceReader.FrameArrived += FaceReader_FrameArrived;
    _faceReaderSub.FrameArrived += FaceReaderSub_FrameArrived;

    _faceModel = new FaceModel();
    _faceAlignment = new FaceAlignment();
    _faceAlignmentSub = new FaceAlignment();

    // Start tracking!
    _sensor.Open();
}
/// <summary>
/// Verifies that the constructor stores the supplied face model and alignment
/// and exposes them through the corresponding properties.
/// </summary>
public void TestValid()
{
    // Arrange
    var expectedModel = new FaceModel();
    var expectedAlignment = new FaceAlignment();

    // Act
    var args = new HdFaceFrameResultEventArgs(1, expectedModel, expectedAlignment);

    // Assert
    Assert.AreEqual(expectedModel, args.FaceModel);
    Assert.AreEqual(expectedAlignment, args.FaceAlignment);
}
/// <summary>
/// Constructor.
/// </summary>
/// <param name="trackingId">Tracking Id of the body this face result belongs to.</param>
/// <param name="faceModel">Face Model; must not be null.</param>
/// <param name="faceAlignment">Face Alignment; must not be null.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="faceModel"/> or <paramref name="faceAlignment"/> is null.</exception>
public HdFaceFrameResultEventArgs(ulong trackingId, FaceModel faceModel, FaceAlignment faceAlignment)
{
    // nameof keeps the exception message correct if a parameter is renamed
    // (the original used brittle string literals).
    if (faceModel == null)
        throw new ArgumentNullException(nameof(faceModel));
    if (faceAlignment == null)
        throw new ArgumentNullException(nameof(faceAlignment));

    this.trackingId = trackingId;
    this.faceAlignment = faceAlignment;
    this.faceModel = faceModel;
}
/// <summary>
/// Initializes a new instance of the KinectFaceTrackingResult class from a set of Kinect face points.
/// </summary>
/// <param name="faceModel">Tracked face model; must not be null (vertices are computed from it).</param>
/// <param name="constructedFaceModel">Model produced by the face model builder.</param>
/// <param name="builderStatus">Collection status of the face model builder.</param>
/// <param name="faceAlignment">Alignment used to pose the model; must not be null.</param>
/// <param name="mapper">Coordinate mapper used to project camera-space vertices into color space.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="faceModel"/> or <paramref name="faceAlignment"/> is null.</exception>
public KinectFaceTrackingResult(FaceModel faceModel, FaceModel constructedFaceModel, FaceModelBuilderCollectionStatus builderStatus, FaceAlignment faceAlignment, CoordinateMapper mapper)
{
    // Fail fast with a descriptive exception instead of the
    // NullReferenceException CalculateVerticesForAlignment would raise below.
    if (faceModel == null)
        throw new ArgumentNullException(nameof(faceModel));
    if (faceAlignment == null)
        throw new ArgumentNullException(nameof(faceAlignment));

    this.FaceModel = faceModel;
    this.ConstructedFaceModel = constructedFaceModel;
    this.BuilderStatus = builderStatus;
    this.FaceAlignment = faceAlignment;

    var vertices = faceModel.CalculateVerticesForAlignment(faceAlignment);
    this.ColorSpaceFacePoints = this.FaceBoundaryPoints(vertices, mapper);

    // Calculate the face rectangle manually as the bounding box of the
    // projected boundary points.
    var rectX = this.ColorSpaceFacePoints.Min(point => point.X);
    var rectY = this.ColorSpaceFacePoints.Min(point => point.Y);
    var rectWidth = this.ColorSpaceFacePoints.Max(point => point.X) - rectX;
    var rectHeight = this.ColorSpaceFacePoints.Max(point => point.Y) - rectY;
    this.FaceRect = new System.Drawing.Rectangle(rectX, rectY, rectWidth, rectHeight);
}
/// <summary>
/// Window start-up: resets the recorder UI to its disabled state and wires up
/// every Kinect stream reader (body, color, depth, HD face, infrared).
/// </summary>
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    #region Interface
    // Default to the color view; recording controls stay disabled until a
    // session is possible.
    ColorRButton.IsChecked = true;
    RecordButton.IsEnabled = false;
    BodyCheckBox.IsEnabled = false;
    ColorCheckBox.IsEnabled = false;
    DepthCheckBox.IsEnabled = false;
    InfraredCheckBox.IsEnabled = false;
    FaceCheckBox.IsEnabled = false;
    AudioCheckBox.IsEnabled = false;
    #endregion

    _sensor = KinectSensor.GetDefault();
    if (_sensor == null)
    {
        return; // No sensor: the UI stays disabled.
    }

    _sensor.Open();

    _bodies = new List<CustomBody>();
    _faceSource = new HighDefinitionFaceFrameSource(_sensor);
    _faceModel = new FaceModel();
    _faceAlignment = new FaceAlignment();

    // One reader per stream; each forwards to its own FrameArrived handler.
    _bodyReader = _sensor.BodyFrameSource.OpenReader();
    _bodyReader.FrameArrived += _bodyReader_FrameArrived;

    _colorReader = _sensor.ColorFrameSource.OpenReader();
    _colorReader.FrameArrived += _colorReader_FrameArrived;

    _depthReader = _sensor.DepthFrameSource.OpenReader();
    _depthReader.FrameArrived += _depthReader_FrameArrived;

    _faceReader = _faceSource.OpenReader();
    _faceReader.FrameArrived += _faceReader_FrameArrived;

    _infraredReader = _sensor.InfraredFrameSource.OpenReader();
    _infraredReader.FrameArrived += _infraredReader_FrameArrived;
}
/// <summary>
/// Wires up the Kinect body and HD face readers once the window has loaded,
/// then opens the sensor.
/// </summary>
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    _sensor = KinectSensor.GetDefault();
    if (_sensor == null)
    {
        return; // No Kinect connected — nothing to wire up.
    }

    // Body frames supply the tracking id the face source follows.
    _bodySource = _sensor.BodyFrameSource;
    _bodyReader = _bodySource.OpenReader();
    _bodyReader.FrameArrived += BodyReader_FrameArrived;

    // HD face frames.
    _faceSource = new HighDefinitionFaceFrameSource(_sensor);
    _faceReader = _faceSource.OpenReader();
    _faceReader.FrameArrived += FaceReader_FrameArrived;

    _faceModel = new FaceModel();
    _faceAlignment = new FaceAlignment();

    _sensor.Open();
}
/// <summary>
/// Page constructor: acquires the default sensor, hooks the body and HD face
/// readers, and starts the sensor.
/// </summary>
public MainPage()
{
    InitializeComponent();

    _sensor = KinectSensor.GetDefault();
    if (_sensor == null)
    {
        return; // Nothing to do without a sensor.
    }

    // Skeleton stream — provides the tracking id for face tracking.
    _bodySource = _sensor.BodyFrameSource;
    _bodyReader = _bodySource.OpenReader();
    _bodyReader.FrameArrived += BodyReader_FrameArrived;

    // High-definition face stream.
    _faceSource = new HighDefinitionFaceFrameSource(_sensor);
    _faceReader = _faceSource.OpenReader();
    _faceReader.FrameArrived += FaceReader_FrameArrived;

    _faceModel = new FaceModel();
    _faceAlignment = new FaceAlignment();

    _sensor.Open();
}
/// <summary>
/// Calculates normalized 3D face points: each vertex is translated so the head
/// pivot is the origin, the head rotation is removed, and all coordinates are
/// scaled so the largest absolute coordinate becomes 1.
/// </summary>
/// <param name="vertices">Camera-space face vertices (e.g. from FaceModel.CalculateVerticesForAlignment).</param>
/// <param name="alignment">Face alignment supplying the head pivot point and rotation.</param>
/// <returns>De-rotated, pivot-centered points normalized to the unit cube; empty when <paramref name="vertices"/> is empty.</returns>
public static List<Point3D> CalculateNormalized3DFacePoints(this IReadOnlyList<CameraSpacePoint> vertices, FaceAlignment alignment)
{
    float pitch, yaw, roll;
    alignment.ExtractFaceRotationInRadians(out pitch, out yaw, out roll);

    var result = new List<Point3D>();
    float maxValue = 0;

    foreach (var vertex in vertices)
    {
        // Translate to the head pivot, then undo the head rotation
        // (negative angles, applied pitch → yaw → roll).
        var x = vertex.X - alignment.HeadPivotPoint.X;
        var y = vertex.Y - alignment.HeadPivotPoint.Y;
        var z = vertex.Z - alignment.HeadPivotPoint.Z;
        RotateX3D(pitch * -1, ref y, ref z);
        RotateY3D(yaw * -1, ref x, ref z);
        RotateZ3D(roll * -1, ref x, ref y);
        result.Add(new Point3D { X = x, Y = y, Z = z });

        // Track the largest coordinate magnitude for the normalization pass.
        maxValue = Math.Max(maxValue, Math.Abs(x));
        maxValue = Math.Max(maxValue, Math.Abs(y));
        maxValue = Math.Max(maxValue, Math.Abs(z));
    }

    // BUG FIX: guard against division by zero. With no vertices (or all
    // vertices exactly at the pivot) maxValue stays 0 and the original code
    // scaled every point by 1/0 = Infinity, yielding NaN/Infinity coordinates.
    if (maxValue > 0)
    {
        var ratio = 1 / maxValue;
        foreach (var point in result)
        {
            point.X *= ratio;
            point.Y *= ratio;
            point.Z *= ratio;
        }
    }

    return result;
}
/// <summary>
/// Creates the HD face source/reader and starts collecting data for the
/// personalized face model.
/// </summary>
/// <exception cref="InvalidOperationException">Thrown when the face model builder cannot be opened.</exception>
private void InitializeHDFace()
{
    // NOTE: `new` can never return null in C#, so the original
    // `if (hdFaceFrameSource == null) throw ...` here was dead code and
    // has been removed.
    hdFaceFrameSource = new HighDefinitionFaceFrameSource( kinect );

    hdFaceFrameReader = hdFaceFrameSource.OpenReader();
    hdFaceFrameReader.FrameArrived += hdFaceFrameReader_FrameArrived;

    faceModel = new FaceModel();
    faceAlignment = new FaceAlignment();

    // Collect face data with no extra capture attributes.
    FaceModelBuilderAttributes attributes = FaceModelBuilderAttributes.None;
    faceModelBuilder = hdFaceFrameSource.OpenModelBuilder( attributes );
    if ( faceModelBuilder == null ) {
        // More specific than the bare Exception the original threw; still
        // caught by any existing `catch (Exception)` handlers.
        throw new InvalidOperationException( "Cannot open Face Model Builder" );
    }

    faceModelBuilder.BeginFaceDataCollection();
    faceModelBuilder.CollectionCompleted += faceModelBuilder_CollectionCompleted;
}
/// <summary>
/// Entry point: renders the tracked HD face mesh as a wireframe using SharpDX.
/// Sets up the D3D11 device/resources, wires the Kinect body and HD face
/// providers, and runs the render loop until Escape is pressed.
/// </summary>
static void Main() {
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    RenderForm form = new RenderForm("Kinect Simple hd face sample");
    RenderDevice device = new RenderDevice(SharpDX.Direct3D11.DeviceCreationFlags.BgraSupport | DeviceCreationFlags.Debug);
    RenderContext context = new RenderContext(device);
    DX11SwapChain swapChain = DX11SwapChain.FromHandle(device, form.Handle);

    // One shader pair; the vertex shader fetches vertices from a structured
    // buffer by index (no input layout / topology needed).
    VertexShader vertexShader = ShaderCompiler.CompileFromFile<VertexShader>(device, "HdFaceView.fx", "VS");
    PixelShader pixelShader = ShaderCompiler.CompileFromFile<PixelShader>(device, "HdFaceView.fx", "PS");

    // Index and vertex buffers sized for a single face.
    HdFaceIndexBuffer faceIndexBuffer = new HdFaceIndexBuffer(device, 1);
    DynamicHdFaceStructuredBuffer faceVertexBuffer = new DynamicHdFaceStructuredBuffer(device, 1);

    KinectSensor sensor = KinectSensor.GetDefault();
    sensor.Open();

    // Fixed camera; matrices are transposed before upload — presumably for
    // HLSL's default column-major layout (TODO confirm against the .fx file).
    cbCamera camera = new cbCamera();
    camera.Projection = Matrix.PerspectiveFovLH(1.57f*0.5f, 1.3f, 0.01f, 100.0f);
    camera.View = Matrix.Translation(0.0f, 0.0f, 0.5f);
    camera.Projection.Transpose();
    camera.View.Transpose();

    ConstantBuffer<cbCamera> cameraBuffer = new ConstantBuffer<cbCamera>(device);
    cameraBuffer.Update(context, ref camera);

    bool doQuit = false;
    bool doUpload = false; // set when a new face frame arrived and the GPU buffer is stale
    KinectBody[] bodyFrame = null;
    KinectSensorBodyFrameProvider provider = new KinectSensorBodyFrameProvider(sensor);

    form.KeyDown += (sender, args) => { if (args.KeyCode == Keys.Escape) { doQuit = true; } };

    FaceModel currentFaceModel = new FaceModel();
    FaceAlignment currentFaceAlignment = new FaceAlignment();

    // Latest face model/alignment are captured by the render loop below.
    SingleHdFaceProcessor hdFaceProcessor = new SingleHdFaceProcessor(sensor);
    hdFaceProcessor.HdFrameReceived += (sender, args) => { currentFaceModel = args.FaceModel; currentFaceAlignment = args.FaceAlignment; doUpload = true; };

    // Feed the HD face processor with the closest tracked body, if any.
    provider.FrameReceived += (sender, args) =>
    {
        bodyFrame = args.FrameData;
        var body = bodyFrame.TrackedOnly().ClosestBodies().FirstOrDefault();
        if (body != null)
        {
            hdFaceProcessor.AssignBody(body);
        }
        else
        {
            hdFaceProcessor.Suspend();
        }
    };

    // Whole scene is drawn as wireframe.
    context.Context.Rasterizer.State = device.RasterizerStates.WireFrame;

    RenderLoop.Run(form, () =>
    {
        if (doQuit)
        {
            form.Dispose();
            return;
        }

        // Upload the latest CPU-computed face vertices to the GPU.
        if (doUpload)
        {
            var vertices = currentFaceModel.CalculateVerticesForAlignment(currentFaceAlignment).ToArray();
            faceVertexBuffer.Copy(context, vertices);
            doUpload = false;
        }

        context.Context.ClearRenderTargetView(swapChain.RenderView, SharpDX.Color.Black);

        if (hdFaceProcessor.IsValid)
        {
            context.RenderTargetStack.Push(swapChain);
            context.Context.VertexShader.SetShaderResource(0, faceVertexBuffer.ShaderView);
            context.Context.VertexShader.SetConstantBuffer(0, cameraBuffer.Buffer);

            //Draw lines
            context.Context.PixelShader.Set(pixelShader);
            context.Context.VertexShader.Set(vertexShader);

            //Attach index buffer, null topology since we fetch
            faceIndexBuffer.AttachWithLayout(context);
            faceIndexBuffer.Draw(context, 1);
            context.RenderTargetStack.Pop();
        }

        swapChain.Present(0, SharpDX.DXGI.PresentFlags.None);
    });

    // Teardown.
    // NOTE(review): cameraBuffer/faceIndexBuffer/faceVertexBuffer are disposed
    // AFTER the device — verify the Render* wrapper types tolerate that order.
    swapChain.Dispose();
    context.Dispose();
    device.Dispose();
    cameraBuffer.Dispose();
    faceIndexBuffer.Dispose();
    faceVertexBuffer.Dispose();
    provider.Dispose();
    pixelShader.Dispose();
    vertexShader.Dispose();
    hdFaceProcessor.Dispose();
    sensor.Close();
}
/// <summary>
/// Initializes the Kinect sensor and the variables used for data acquisition.
/// (Comments translated from Japanese.)
/// </summary>
private void Initialize() {
    // Get the default Kinect sensor.
    this.kinect = KinectSensor.GetDefault();
    if (kinect == null)
        return;

    // Create a FrameReader that receives Body (skeleton) data from the sensor.
    // NOTE(review): the original comment also mentioned Color, but only
    // FrameSourceTypes.Body is requested here; color uses its own reader below.
    reader = kinect.OpenMultiSourceFrameReader(FrameSourceTypes.Body);
    reader.MultiSourceFrameArrived += OnMultiSourceFrameArrived;

    // Declare the source and FrameReader that perform detailed (HD) face
    // tracking on the sensor.
    // 1st person
    this.hdFaceFrameSource = new HighDefinitionFaceFrameSource(this.kinect);
    this.hdFaceFrameSource.TrackingIdLost += this.OnTrackingIdLost;
    this.hdFaceFrameReader = this.hdFaceFrameSource.OpenReader();
    this.hdFaceFrameReader.FrameArrived += this.OnFaceFrameArrived;
    this.faceModel = new FaceModel();
    this.faceAlignment = new FaceAlignment();

    this._colorReader = this.kinect.ColorFrameSource.OpenReader();
    this._colorReader.FrameArrived += ColorReader_FrameArrived;

    // Update the various views.
    InitializeMesh();
    UpdateMesh();

    // Start the sensor.
    kinect.Open();
}
/// <summary>
/// Entry point: draws the live RGB camera image full screen and overlays the
/// tracked HD face mesh — projected into color space — as an alpha-blended
/// wireframe.
/// </summary>
static void Main() {
    Application.EnableVisualStyles();
    Application.SetCompatibleTextRenderingDefault(false);

    RenderForm form = new RenderForm("Kinect hd face projected to rgb");
    RenderDevice device = new RenderDevice(SharpDX.Direct3D11.DeviceCreationFlags.BgraSupport | DeviceCreationFlags.Debug);
    RenderContext context = new RenderContext(device);
    DX11SwapChain swapChain = DX11SwapChain.FromHandle(device, form.Handle);

    VertexShader vertexShader = ShaderCompiler.CompileFromFile<VertexShader>(device, "ProjectedHdFaceView.fx", "VS");
    PixelShader pixelShader = ShaderCompiler.CompileFromFile<PixelShader>(device, "ProjectedHdFaceView.fx", "PS");

    // Index buffer for one face; vertex data lives in a structured buffer of
    // color-space (pixel) positions.
    HdFaceIndexBuffer faceIndexBuffer = new HdFaceIndexBuffer(device, 1);
    DynamicRgbSpaceFaceStructuredBuffer faceRgbBuffer = new DynamicRgbSpaceFaceStructuredBuffer(device, 1);

    KinectSensor sensor = KinectSensor.GetDefault();
    sensor.Open();

    bool doQuit = false;
    bool doUpload = false; // new face frame pending upload to the GPU
    KinectBody[] bodyFrame = null;
    KinectSensorBodyFrameProvider provider = new KinectSensorBodyFrameProvider(sensor);
    form.KeyDown += (sender, args) => { if (args.KeyCode == Keys.Escape) { doQuit = true; } };

    FaceModel currentFaceModel = new FaceModel();
    FaceAlignment currentFaceAlignment = new FaceAlignment();

    // Latest face model/alignment are captured by the render loop below.
    SingleHdFaceProcessor hdFaceProcessor = new SingleHdFaceProcessor(sensor);
    hdFaceProcessor.HdFrameReceived += (sender, args) => { currentFaceModel = args.FaceModel; currentFaceAlignment = args.FaceAlignment; doUpload = true; };

    // Color camera feed used as the background.
    bool uploadColor = false;
    ColorRGBAFrameData currentData = null;
    DynamicColorRGBATexture colorTexture = new DynamicColorRGBATexture(device);
    KinectSensorColorRGBAFrameProvider colorProvider = new KinectSensorColorRGBAFrameProvider(sensor);
    colorProvider.FrameReceived += (sender, args) => { currentData = args.FrameData; uploadColor = true; };

    // Track the closest body; suspend face processing when nobody is visible.
    provider.FrameReceived += (sender, args) =>
    {
        bodyFrame = args.FrameData;
        var body = bodyFrame.TrackedOnly().ClosestBodies().FirstOrDefault();
        if (body != null)
        {
            hdFaceProcessor.AssignBody(body);
        }
        else
        {
            hdFaceProcessor.Suspend();
        }
    };

    RenderLoop.Run(form, () =>
    {
        if (doQuit)
        {
            form.Dispose();
            return;
        }

        if (doUpload)
        {
            // Compute camera-space vertices, then map them into color space
            // so the mesh lines up with the RGB image.
            var vertices = currentFaceModel.CalculateVerticesForAlignment(currentFaceAlignment).ToArray();
            var vertRgb = new ColorSpacePoint[vertices.Length];
            sensor.CoordinateMapper.MapCameraPointsToColorSpace(vertices, vertRgb);
            faceRgbBuffer.Copy(context, vertRgb);
            doUpload = false;
        }

        if (uploadColor)
        {
            colorTexture.Copy(context, currentData);
            uploadColor = false;
        }

        context.Context.ClearRenderTargetView(swapChain.RenderView, SharpDX.Color.Black);
        context.RenderTargetStack.Push(swapChain);

        // Background: full-screen triangle textured with the color frame.
        context.Context.Rasterizer.State = device.RasterizerStates.BackCullSolid;
        context.Context.OutputMerger.BlendState = device.BlendStates.Disabled;
        device.Primitives.ApplyFullTri(context, colorTexture.ShaderView);
        device.Primitives.FullScreenTriangle.Draw(context);

        if (hdFaceProcessor.IsValid)
        {
            // Overlay: alpha-blended wireframe of the projected face mesh.
            context.Context.Rasterizer.State = device.RasterizerStates.WireFrame;
            context.Context.OutputMerger.BlendState = device.BlendStates.AlphaBlend;
            context.Context.VertexShader.SetShaderResource(0, faceRgbBuffer.ShaderView);

            //Draw lines
            context.Context.PixelShader.Set(pixelShader);
            context.Context.VertexShader.Set(vertexShader);

            //Attach index buffer, null topology since we fetch
            faceIndexBuffer.AttachWithLayout(context);
            faceIndexBuffer.Draw(context, 1);
        }

        context.RenderTargetStack.Pop();
        swapChain.Present(0, SharpDX.DXGI.PresentFlags.None);
    });

    // Teardown.
    // NOTE(review): several GPU resources are disposed after the device —
    // verify the wrapper types tolerate that order.
    swapChain.Dispose();
    context.Dispose();
    device.Dispose();
    colorProvider.Dispose();
    colorTexture.Dispose();
    faceIndexBuffer.Dispose();
    faceRgbBuffer.Dispose();
    provider.Dispose();
    pixelShader.Dispose();
    vertexShader.Dispose();
    sensor.Close();
}
/// <summary>
/// Sets up the Kinect sensor, the body reader, and the HD face source/reader,
/// then opens the sensor.
/// </summary>
private void InitializeHDFace() // Initializes Kinect object
{
    sensor = KinectSensor.GetDefault();

    // Body frames drive which tracking id the HD face source follows.
    bodySource = sensor.BodyFrameSource;
    bodyReader = bodySource.OpenReader();
    bodyReader.FrameArrived += BodyReader_FrameArrived;

    // HD face pipeline.
    highDefinitionFaceFrameSource = new HighDefinitionFaceFrameSource(sensor);
    highDefinitionFaceFrameSource.TrackingIdLost += HdFaceSource_TrackingIdLost;
    highDefinitionFaceFrameReader = highDefinitionFaceFrameSource.OpenReader();
    highDefinitionFaceFrameReader.FrameArrived += HdFaceReader_FrameArrived;

    currentFaceModel = new FaceModel();
    currentFaceAlignment = new FaceAlignment();

    sensor.Open();
}
/// <summary>
/// Initialize Kinect object: body reader plus HD face source/reader.
/// </summary>
public void InitializeHDFace()
{
    sensor = KinectSensor.GetDefault();

    // Bodies supply the tracking id that the HD face source follows.
    bodySource = sensor.BodyFrameSource;
    bodyReader = bodySource.OpenReader();
    bodyReader.FrameArrived += BodyReader_FrameArrived;

    highDefinitionFaceFrameSource = new HighDefinitionFaceFrameSource(sensor);
    // Two handlers are attached to TrackingIdLost: the regular handler here
    // and, below, a lambda that logs the lost id. Both fire on the event.
    highDefinitionFaceFrameSource.TrackingIdLost += HdFaceSource_TrackingIdLost;

    highDefinitionFaceFrameReader = highDefinitionFaceFrameSource.OpenReader();
    highDefinitionFaceFrameReader.FrameArrived += HdFaceReader_FrameArrived;

    highDefinitionFaceFrameSource.TrackingIdLost += (x, y) => Log.LogMessage("Lost tracking id " + y.TrackingId);

    currentFaceModel = new FaceModel();
    currentFaceAlignment = new FaceAlignment();

    sensor.Open();
}
/// <summary>
/// Verifies that constructing HdFaceFrameResultEventArgs with a null FaceModel
/// is rejected.
/// NOTE(review): there is no assert or try/catch here, so this test only
/// passes if the constructor throws AND the method carries an
/// ExpectedException-style attribute not visible in this chunk — TODO confirm.
/// </summary>
public void TestNullModel()
{
    FaceAlignment align = new FaceAlignment();
    // Constructor is expected to throw ArgumentNullException for the null model.
    HdFaceFrameResultEventArgs args = new HdFaceFrameResultEventArgs(0, null, align);
}
/// <summary>
/// Initialize Kinect object: body reader, HD face pipeline, the initial face
/// mesh view, and the console command log.
/// </summary>
private void InitializeHDFace()
{
    CurrentBuilderStatus = "Ready To Start Capture";

    sensor = KinectSensor.GetDefault();

    // Body frames supply the tracking id for the HD face source.
    bodySource = sensor.BodyFrameSource;
    bodyReader = bodySource.OpenReader();
    bodyReader.FrameArrived += BodyReader_FrameArrived;

    highDefinitionFaceFrameSource = new HighDefinitionFaceFrameSource(sensor);
    highDefinitionFaceFrameSource.TrackingIdLost += HdFaceSource_TrackingIdLost;
    highDefinitionFaceFrameReader = highDefinitionFaceFrameSource.OpenReader();
    highDefinitionFaceFrameReader.FrameArrived += HdFaceReader_FrameArrived;

    currentFaceModel = new FaceModel();
    currentFaceAlignment = new FaceAlignment();

    // Build and display the initial (average) face mesh before frames arrive.
    InitializeMesh();
    UpdateMesh();

    sensor.Open();

    // CSV-style header for the command log written to the console.
    Console.Write("\n\n******************************************************\n"
        + "Command, Issued_TS, Checked_TS, Done_TS, Errors\n");
}
/// <summary>
/// Server start-up: shows the avatar overlay window, launches the companion
/// client process, wires speech synthesis/recognition, and wires Kinect body +
/// HD face tracking with double-exponential smoothing (DESP) filters.
/// </summary>
public Server()
{
    Form = new CustomPerPixelAlphaForm();
    FormSetProperties();
    FormDock();
    Form.Show();

    // Launch the client from its build output directory (path is relative to
    // this executable's working directory).
    var clientBuildDirectory = Environment.CurrentDirectory + "\\..\\..\\..\\..\\..\\Reflecta.Client\\bin";
    var clientStartInfo = new ProcessStartInfo
    {
        FileName = clientBuildDirectory + "\\Client.exe",
        WorkingDirectory = clientBuildDirectory,
        WindowStyle = ProcessWindowStyle.Minimized
    };
    Client = Process.Start(clientStartInfo);
    OpenPipes();

    // Text-to-speech with viseme callbacks.
    SpeechSynthesizer = new SpeechSynthesizer();
    SpeechSynthesizer.SelectVoiceByHints(VoiceGender.Female);
    SpeechSynthesizer.SpeakStarted += SpeechSynthesizer_SpeakStarted;
    SpeechSynthesizer.VisemeReached += SpeechSynthesizer_VisemeReached;
    SpeechSynthesizer.SpeakCompleted += SpeechSynthesizer_SpeakCompleted;

    // Continuous recognition restricted to the known command grammar.
    SpeechRecognitionEngine = new SpeechRecognitionEngine();
    SpeechRecognitionEngine.UnloadAllGrammars();
    SpeechRecognitionEngine.LoadGrammar(new Grammar(new GrammarBuilder(KnownCommands)));
    SpeechRecognitionEngine.SpeechRecognized += SpeechRecognitionEngine_SpeechRecognized;
    SpeechRecognitionEngine.SetInputToDefaultAudioDevice();
    SpeechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

    KinectSensor = KinectSensor.GetDefault();
    KinectSensor.Open();

    // Body stream plus one smoothing filter per skeleton bone.
    BodyFrameSource = KinectSensor.BodyFrameSource;
    BodyFrameReader = BodyFrameSource.OpenReader();
    BodyFrameReader.FrameArrived += BodyFrameReader_FrameArrived;
    Bodies = null;
    BodyDESP = new DESPQuaternion[(int) MoCapKinectBone.Count];
    for (var i = 0; i < (int) MoCapKinectBone.Count; i++)
        BodyDESP[i] = new DESPQuaternion();

    // HD face stream plus smoothing for the head orientation and for each
    // facial expression channel.
    HighDefinitionFaceFrameSource = new HighDefinitionFaceFrameSource(KinectSensor);
    HighDefinitionFaceFrameSource.TrackingQuality = FaceAlignmentQuality.High;
    HighDefinitionFaceFrameReader = HighDefinitionFaceFrameSource.OpenReader();
    HighDefinitionFaceFrameReader.FrameArrived += HighDefinitionFaceFrameReader_FrameArrived;
    FaceAlignment = new FaceAlignment();
    FaceDESP = new DESPQuaternion();
    FaceExpressionDESP = new DESPFloat[(int) MoCapKinectFacialExpression.Count];
    for (var i = 0; i < (int) MoCapKinectFacialExpression.Count; i++)
        FaceExpressionDESP[i] = new DESPFloat();
}
/// <summary>
/// Initializes the Kinect sensor, HD face tracking (for one or two people
/// depending on the selected scenario), standard face tracking, and the
/// combined body+color multi-source reader.
/// </summary>
public void InitializeHDFace()
{
    this.sensor = KinectSensor.GetDefault();
    this.bodySource = this.sensor.BodyFrameSource;

    // BGRA frame description, used only to size the color pixel buffer below.
    FrameDescription colorFrameDescription = this.sensor.ColorFrameSource.CreateFrameDescription(ColorImageFormat.Bgra);

    // HD face pipeline for the first person.
    this.highDefinitionFaceFrameSource = new HighDefinitionFaceFrameSource(sensor);
    this.highDefinitionFaceFrameSource.TrackingIdLost += this.HdFaceSource_TrackingIdLost;
    this.highDefinitionFaceFrameReader = this.highDefinitionFaceFrameSource.OpenReader();
    this.highDefinitionFaceFrameReader.FrameArrived += this.HdFaceReader_FrameArrived; // event for high def face

    // Scenario 2 additionally tracks a second person with its own HD and
    // standard face readers.
    if (scenarioselected == 2)
    {
        this.highDefinitionFaceFrameSource2 = new HighDefinitionFaceFrameSource(sensor);
        this.highDefinitionFaceFrameSource2.TrackingIdLost += this.HdFaceSource_TrackingIdLost2;
        this.highDefinitionFaceFrameReader2 = this.highDefinitionFaceFrameSource2.OpenReader();
        this.highDefinitionFaceFrameReader2.FrameArrived += this.HdFaceReader_FrameArrived2; // event for high def face
        faceSource2 = new FaceFrameSource(sensor, 0, DefaultFaceFrameFeatures);
        faceReader2 = faceSource2.OpenReader();
        faceReader2.FrameArrived += OnFaceFrameArrived2; // event for face data
        faceSource2.TrackingIdLost += OnTrackingIdLost2;
    }

    // Body and color frames arrive together through one multi-source reader.
    this.reader = sensor.OpenMultiSourceFrameReader(FrameSourceTypes.Body | FrameSourceTypes.Color);
    this.reader.MultiSourceFrameArrived += OnMultiSourceFrameArrived; // event for multiple sources (position)

    this.currentFaceAlignment = new FaceAlignment();

    // Standard (non-HD) face tracking for the first person.
    faceSource = new FaceFrameSource(sensor, 0, DefaultFaceFrameFeatures);
    faceReader = faceSource.OpenReader();
    faceReader.FrameArrived += OnFaceFrameArrived; // event for face data
    faceSource.TrackingIdLost += OnTrackingIdLost;

    // Backing store sized for one full BGRA color frame.
    this.pixels = new byte[colorFrameDescription.Width * colorFrameDescription.Height * colorFrameDescription.BytesPerPixel];

    this.sensor.IsAvailableChanged += this.Sensor_IsAvailableChanged;
    this.sensor.Open();
}