// Initialization: builds the OpenNI pipeline and starts recording to file.
private void xnInitialize()
{
    // Initialize the context from the XML configuration file.
    ScriptNode scriptNode;
    context = Context.CreateFromXmlFile(CONFIG_XML_PATH, out scriptNode);

    // Create the image generator.
    image = context.FindExistingNode(NodeType.Image) as ImageGenerator;
    if (image == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Create the depth generator.
    depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depth == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Align the depth coordinates with the image viewpoint.
    depth.AlternativeViewpointCapability.SetViewpoint(image);

    // Create the recorder and add the nodes to record.
    // (Note: "recoder" is the existing field name; kept as-is.)
    recoder = new OpenNI.Recorder(context);
    recoder.SetDestination(RecordMedium.File, RECORD_PATH);
    recoder.AddNodeToRecording(image);
    recoder.AddNodeToRecording(depth);
    recoder.Record();
}
/// <summary>
/// Console sample: opens the OpenNI context from the sample XML file and
/// prints the middle depth pixel of every frame until a key is pressed.
/// </summary>
static void Run()
{
    string configPath = @"../../../../Data/SamplesConfig.xml";
    ScriptNode script;
    Context ctx = Context.CreateFromXmlFile(configPath, out script);

    DepthGenerator depthGen = ctx.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depthGen == null)
    {
        Console.WriteLine("Sample must have a depth generator!");
        return;
    }

    MapOutputMode mode = depthGen.MapOutputMode;
    DepthMetaData meta = new DepthMetaData();
    int midX = (int)mode.XRes / 2;
    int midY = (int)mode.YRes / 2;

    Console.WriteLine("Press any key to stop...");
    while (!Console.KeyAvailable)
    {
        // Block until the depth node delivers a new frame, then snapshot it.
        ctx.WaitOneUpdateAll(depthGen);
        depthGen.GetMetaData(meta);
        Console.WriteLine("Frame {0} Middle point is: {1}.", meta.FrameID, meta[midX, midY]);
    }
}
/// <summary>
/// Sets up the OpenNI pipeline: context, depth node, user/skeleton/pose
/// tracking, and starts the background reader thread.
/// </summary>
void Initialize()
{
    this.context = new Context("SamplesConfig.xml");
    this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (this.depth == null)
    {
        throw new Exception("Viewer must have a depth node!");
    }

    // User tracking plus the capabilities used for calibration and pose detection.
    // (Note: "skeletonCapbility" is the existing field name; kept as-is.)
    this.userGenerator = new UserGenerator(this.context);
    this.skeletonCapbility = new SkeletonCapability(this.userGenerator);
    this.poseDetectionCapability = new PoseDetectionCapability(this.userGenerator);
    this.calibPose = this.skeletonCapbility.GetCalibrationPose();

    // Wire up the tracking life-cycle callbacks.
    this.userGenerator.NewUser += new UserGenerator.NewUserHandler(userGenerator_NewUser);
    this.userGenerator.LostUser += new UserGenerator.LostUserHandler(userGenerator_LostUser);
    this.poseDetectionCapability.PoseDetected += new PoseDetectionCapability.PoseDetectedHandler(poseDetectionCapability_PoseDetected);
    this.skeletonCapbility.CalibrationEnd += new SkeletonCapability.CalibrationEndHandler(skeletonCapbility_CalibrationEnd);

    this.skeletonCapbility.SetSkeletonProfile(SkeletonProfile.All);
    this.userGenerator.StartGenerating();

    App.ViewModel.Status = "Waiting to acquire user";

    // Background loop keeps pumping OpenNI updates until ShouldRun is cleared.
    this.ShouldRun = true;
    this.readerThread = new Thread(ReaderThread);
    this.readerThread.Start();
}
/// <summary>
/// Constructs the stream view: initializes the OpenNI context from the config
/// file, grabs the depth node, and allocates the histogram and display bitmap.
/// Shows an error and closes the view if initialization fails.
/// </summary>
public StreamView()
{
    InitializeComponent();
    pictureBoxOverlay.BackColor = Color.Transparent;
    try
    {
        this.context = new Context(@".\Data\openniconfig.xml");
        this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
        if (this.depth == null)
        {
            throw new Exception(@"Error in Data\openniconfig.xml. No depth node found.");
        }
        // One histogram bucket per possible raw depth value.
        this.histogram = new int[this.depth.GetDeviceMaxDepth()];
        MapOutputMode mapMode = this.depth.GetMapOutputMode();
        this.bitmap = new Bitmap((int)mapMode.nXRes, (int)mapMode.nYRes, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    }
    catch (Exception ex)
    {
        // TODO: proper error logging here
        MessageBox.Show("Error initializing OpenNI.");
        MessageBox.Show(ex.Message);
        this.Close();
    }
}
/// <summary>
/// Main window constructor: creates the OpenNI context, validates that depth
/// and image nodes exist, aligns depth with RGB, allocates the histogram and
/// display bitmap, and starts the background reader thread.
/// </summary>
public MainWindow()
{
    InitializeComponent();

    this.context = new Context(SAMPLE_XML_FILE);
    this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    this.image = context.FindExistingNode(NodeType.Image) as ImageGenerator;
    //this.scene = context.FindExistingNode(NodeType.Scene) as SceneAnalyzer;

    // BUG FIX: validate BEFORE dereferencing. The original called SetViewPoint
    // first, so a missing depth node threw NullReferenceException instead of
    // the intended message.
    if (this.depth == null || this.image == null)
    {
        throw new Exception("Viewer must have depth and image nodes!");
    }

    // align depth with rgb
    this.depth.GetAlternativeViewPointCap().SetViewPoint(image);

    this.userGenerator = new UserGenerator(this.context);
    this.userGenerator.StartGenerating();

    // One histogram bucket per possible raw depth value.
    this.histogram = new int[this.depth.GetDeviceMaxDepth()];

    MapOutputMode mapMode = this.depth.GetMapOutputMode();
    this.bitmap = new Bitmap((int)mapMode.nXRes, (int)mapMode.nYRes, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

    // Background loop keeps pumping OpenNI updates until shouldRun is cleared.
    this.shouldRun = true;
    this.readerThread = new Thread(ReaderThread);
    this.readerThread.Start();
}
// private methods

/// @brief Private constructor.
///
/// Part of the singleton pattern: external code cannot instantiate NIContext
/// directly (inheriting extensions should also replace the singleton).
private NIContext()
{
    // Start from an empty state; all members are created lazily elsewhere.
    m_recordingPlayer = null;
    m_scriptNode = null;
    m_depth = null;
    m_context = null;
}
/// <summary>
/// Unity Start: resolves the depth, hands and gesture nodes from the XML
/// config and wires up hand-tracking and gesture callbacks.
/// </summary>
void Start()
{
    Debug.Log("Start(): Initializing nodes.");
    this.context = new Context(SAMPLE_XML_FILE);

    this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (this.depth == null)
    {
        Debug.LogError("Viewer must have a depth node!");
        return; // FIX: the original only logged and fell through
    }

    this.hands = context.FindExistingNode(NodeType.Hands) as HandsGenerator;
    if (this.hands == null)
    {
        Debug.LogError("Viewer must have a hands node!");
        return; // FIX: continuing would throw NullReferenceException below
    }

    this.gestures = context.FindExistingNode(NodeType.Gesture) as GestureGenerator;
    if (this.gestures == null)
    {
        Debug.LogError("Viewer must have a gestures node!");
        return; // FIX: same as above
    }

    // Hand-tracking life-cycle callbacks.
    this.hands.HandCreate += new HandsGenerator.HandCreateHandler(hands_HandCreate);
    this.hands.HandUpdate += new HandsGenerator.HandUpdateHandler(hands_HandUpdate);
    this.hands.HandDestroy += new HandsGenerator.HandDestroyHandler(hands_HandDestroy);

    // Track both supported focus gestures.
    this.gestures.AddGesture("Wave");
    this.gestures.AddGesture("RaiseHand");
    this.gestures.GestureRecognized += new GestureGenerator.GestureRecognizedHandler(gestures_GestureRecognized);
    this.gestures.StartGenerating();
}
// Initialization: builds the image/depth pipeline and the depth histogram.
private void xnInitialize()
{
    // Initialize the context from the XML configuration file.
    ScriptNode scriptNode;
    context = Context.CreateFromXmlFile(CONFIG_XML_PATH, out scriptNode);

    // Create the image generator.
    image = context.FindExistingNode(NodeType.Image) as ImageGenerator;
    if (image == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Create the depth generator.
    depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depth == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Align the depth coordinates with the image viewpoint.
    AlternativeViewpointCapability Viewpoint = depth.AlternativeViewpointCapability;
    Viewpoint.SetViewpoint(image);

    // Allocate the histogram buffer (one bucket per raw depth value).
    histogram = new int[depth.DeviceMaxDepth];
}
/// <summary>
/// Draws a stick figure into the supplied bitmap for every user the skeleton
/// capability is currently tracking.
/// </summary>
/// <param name="image">Target bitmap the figures are drawn into.</param>
/// <param name="depthGenerator">Depth node used for projective-to-real-world conversion.</param>
/// <param name="data">Depth frame metadata supplying the map resolution.</param>
/// <param name="userGenerator">User node supplying user ids and tracking state.</param>
/// <param name="rays">Currently unused; see the commented-out TODO below.</param>
public void DrawStickFigure(ref WriteableBitmap image, DepthGenerator depthGenerator, DepthMetaData data, UserGenerator userGenerator, Ray3D[] rays)
{
    // Far corner of the depth volume, converted to real-world coordinates;
    // passed to DrawSingleUser as a bound.
    Point3D corner = new Point3D(data.XRes, data.YRes, data.ZRes);
    corner = depthGenerator.ConvertProjectiveToRealWorld(corner);
    this.depthGenerator = depthGenerator;
    int nXRes = data.XRes;
    int nYRes = data.YRes;

    // TODO: Fix these.
    /*foreach (Ray3D ray in rays)
     * {
     *     if (ray != null)
     *     {
     *         int[] p0 = ray.point0();
     *         int[] p1 = ray.point1();
     *         DrawTheLine(ref image, p0, p1);
     *     }
     * }*/

    // Draw only users with an active skeleton track.
    int[] users = userGenerator.GetUsers();
    foreach (int user in users)
    {
        if (userGenerator.SkeletonCapability.IsTracking(user))
        {
            DrawSingleUser(ref image, user, userGenerator, corner);
        }
    }
}
// Initialization: opens a recorded .oni file for playback and resolves its nodes.
private void xnInitialize()
{
    // Initialize the context.
    context = new Context();

    // The API suggests using OpenFileRecordingEx, but doing so causes an
    // access violation here, so the older OpenFileRecording is used instead.
    context.OpenFileRecording(RECORD_PATH);

    // Create the player node.
    player = context.FindExistingNode(NodeType.Player) as OpenNI.Player;
    if (player == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Register a callback fired when playback reaches the end of the file.
    player.EndOfFileReached += new EventHandler(player_EndOfFileReached);

    // Create the image generator.
    image = context.FindExistingNode(NodeType.Image) as ImageGenerator;
    if (image == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Create the depth generator.
    depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depth == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Allocate the histogram buffer (one bucket per raw depth value).
    histogram = new int[depth.DeviceMaxDepth];
}
/// <summary>
/// Initializes the OpenNI context, depth node, and user/skeleton/pose
/// tracking, then starts generating user data.
/// </summary>
void Start()
{
    this.context = new Context(SAMPLE_XML_FILE);
    this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (this.depth == null)
    {
        throw new Exception("Viewer must have a depth node!");
    }

    // User tracking plus the capabilities used for calibration and pose detection.
    // (Note: "skeletonCapbility" is the existing field name; kept as-is.)
    this.userGenerator = new UserGenerator(this.context);
    this.skeletonCapbility = new SkeletonCapability(this.userGenerator);
    this.poseDetectionCapability = new PoseDetectionCapability(this.userGenerator);
    this.calibPose = this.skeletonCapbility.GetCalibrationPose();

    this.userGenerator.NewUser += new UserGenerator.NewUserHandler(userGenerator_NewUser);
    this.userGenerator.LostUser += new UserGenerator.LostUserHandler(userGenerator_LostUser);
    this.poseDetectionCapability.PoseDetected += new PoseDetectionCapability.PoseDetectedHandler(poseDetectionCapability_PoseDetected);
    this.skeletonCapbility.CalibrationEnd += new SkeletonCapability.CalibrationEndHandler(skeletonCapbility_CalibrationEnd);

    this.skeletonCapbility.SetSkeletonProfile(SkeletonProfile.All);
    this.userGenerator.StartGenerating();

    // Removed an unused local that called GetMapOutputMode() and discarded
    // the result.
    this.shouldRun = true;
}
/*
 * private int distanciaAnt=0;
 * private int escalaAnt=0;
 */

/// <summary>
/// Unity Start: creates the OpenNI context from the XML config and sets up
/// user/skeleton tracking with calibration callbacks.
/// </summary>
void Start()
{
    Debug.Log("START APP");
    this.context = Context.CreateFromXmlFile(XML_CONFIG, out scriptNode);
    this.depth = this.context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depth == null)
    {
        throw new Exception("Nodo de Profundidad no encontrado");
    }

    // User tracking.
    this.userGenerator = new UserGenerator(this.context);
    this.skeletonCapability = this.userGenerator.SkeletonCapability;
    this.poseDetectionCapability = this.userGenerator.PoseDetectionCapability;
    this.calibPose = this.skeletonCapability.CalibrationPose;

    // Tracking life-cycle callbacks.
    this.userGenerator.NewUser += userGenerator_NewUser;
    this.userGenerator.LostUser += userGenerator_LostUser;
    this.poseDetectionCapability.PoseDetected += poseDetectionCapability_PoseDetected;
    this.skeletonCapability.CalibrationComplete += skeletonCapability_CalibrationComplete;

    this.skeletonCapability.SetSkeletonProfile(SkeletonProfile.All);
    this.userGenerator.StartGenerating();

    // Per-user map of tracked joint positions.
    this.joints = new Dictionary <int, Dictionary <SkeletonJoint, SkeletonJointPosition> >();
    //
}
// Use this for initialization.
void Start()
{
    // Open the depth and image nodes, then align the depth viewpoint to the
    // RGB camera so the depth map overlays the image.
    DepthGenerator depthNode = OpenNIContext.OpenNode(NodeType.Depth) as DepthGenerator;
    ImageGenerator rgbNode = OpenNIContext.OpenNode(NodeType.Image) as ImageGenerator;
    depthNode.AlternativeViewpointCapability.SetViewpoint(rgbNode);
}
/// <summary>
/// This method gets executed when the window loads. In it, we initialize our
/// connection with Kinect and set up the timer which will update our depth image.
/// </summary>
/// <param name="sender">Standard routed-event sender (unused).</param>
/// <param name="e">Standard routed-event args (unused).</param>
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    try
    {
        // Initialize the context from the configuration file
        this.context = new Context(@"..\..\data\openniconfig.xml");
        // Get the depth generator from the config file.
        this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
        if (this.depth == null)
        {
            throw new Exception(@"Error in Data\openniconfig.xml. No depth node found.");
        }
        // Size the display bitmap to match the depth map resolution.
        MapOutputMode mapMode = this.depth.GetMapOutputMode();
        this.bitmap = new Bitmap((int)mapMode.nXRes, (int)mapMode.nYRes, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    }
    catch (Exception ex)
    {
        MessageBox.Show("Error initializing OpenNI.");
        MessageBox.Show(ex.Message);
        this.Close();
    }

    // Set the timer to update the depth image every 10 ms.
    DispatcherTimer dispatcherTimer = new DispatcherTimer();
    dispatcherTimer.Tick += new EventHandler(dispatcherTimer_Tick);
    dispatcherTimer.Interval = new TimeSpan(0, 0, 0, 0, 10);
    dispatcherTimer.Start();
    Console.WriteLine("Finished loading");
}
// Use this for initialization.
void Start()
{
    this.context = Context.CreateFromXmlFile(XML_CONFIG, out scriptNode);
    // After running this line the node keeps working when leaving play mode;
    // OnApplicationQuit is used to shut it down.
    this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depth == null)
    {
        throw new Exception("Nodo de Profundidad no encontrado");
    }

    this.userGenerator = new UserGenerator(this.context);
    this.skeletonCapability = this.userGenerator.SkeletonCapability;
    this.poseDetectionCapability = this.userGenerator.PoseDetectionCapability;
    this.calibPose = this.skeletonCapability.CalibrationPose;

    // Register the handlers.
    this.userGenerator.NewUser += userGenerator_NewUser;
    this.userGenerator.LostUser += userGenerator_LostUser;
    this.poseDetectionCapability.PoseDetected += poseDetectionCapability_PoseDetected;
    this.skeletonCapability.CalibrationComplete += skeletonCapability_CalibrationComplete;

    // Which joints are enabled depends on the profile:
    // http://openni.org/docs2/Reference/_xn_types_8h_a294999eabe6eeab319a61d3d0093b174.html#a294999eabe6eeab319a61d3d0093b174
    this.skeletonCapability.SetSkeletonProfile(SkeletonProfile.All);

    // Per-user map of tracked joint positions.
    this.joints = new Dictionary <int, Dictionary <SkeletonJoint, SkeletonJointPosition> >();
    this.userGenerator.StartGenerating();
    this.shouldRun = true;
}
/// <summary>
/// Initializes the OpenNI/NITE sensor pipeline from the given XML configuration.
/// </summary>
/// <param name="xmlPath">Path to the OpenNI XML configuration file.</param>
/// <returns>True on success; false if a required node is missing or any exception occurs.</returns>
public bool initializeSensor(String xmlPath)
{
    try
    {
        pbuffer = new Point[6];
        openpalm = new OpenPalm();
        scrHeight = SystemInformation.PrimaryMonitorSize.Height;
        scrWidth = SystemInformation.PrimaryMonitorSize.Width;
        // Scale cursor movement relative to the system mouse speed.
        mouseSpeed = SystemInformation.MouseSpeed * 0.15;
        pointCollections = new PointCollection();

        /*OpenNI objects - Context, DepthGenerator and DepthMetaData are initialized here*/
        cxt = new Context(xmlPath);
        depthGen = cxt.FindExistingNode(NodeType.Depth) as DepthGenerator;
        gsHandsGenerator = cxt.FindExistingNode(NodeType.Hands) as HandsGenerator;

        // FIX: fail fast if either node is missing. The original called
        // SetSmoothing before any null check, so a missing hands node
        // surfaced as a caught NullReferenceException rather than a clean false.
        if (depthGen == null || gsHandsGenerator == null)
        {
            return false;
        }
        gsHandsGenerator.SetSmoothing(0.1f);
        depthMeta = new DepthMetaData();

        // Cache the depth resolution; read the output mode once.
        MapOutputMode mode = depthGen.MapOutputMode;
        xRes = mode.XRes;
        yRes = mode.YRes;

        /*NITE objects - Session manager, PointControl is initialized here*/
        sessionMgr = new SessionManager(cxt, "Wave", "RaiseHand");
        pointCtrl = new PointControl("PointTracker");
        steadydetector = new SteadyDetector();
        flrouter = new FlowRouter();
        brodcaster = new Broadcaster();

        steadydetector.DetectionDuration = 200; // ms the hand must hold still
        steadydetector.Steady += new EventHandler <SteadyEventArgs>(steadydetector_Steady);
        steadydetector.NotSteady += new EventHandler <SteadyEventArgs>(steadydetector_NotSteady);

        /* pointCtrl.PrimaryPointCreate += new EventHandler<HandFocusEventArgs>(pointCtrl_PrimaryPointCreate);
         * pointCtrl.PrimaryPointUpdate += new EventHandler<HandEventArgs>(pointCtrl_PrimaryPointUpdate);
         * pointCtrl.PrimaryPointDestroy += new EventHandler<IdEventArgs>(pointCtrl_PrimaryPointDestroy);*/
        pointCtrl.PointCreate += new EventHandler <HandEventArgs>(pointCtrl_PointCreate);
        pointCtrl.PointUpdate += new EventHandler <HandEventArgs>(pointCtrl_PointUpdate);
        pointCtrl.PointDestroy += new EventHandler <IdEventArgs>(pointCtrl_PointDestroy);

        sessionMgr.AddListener(steadydetector);
        sessionMgr.AddListener(pointCtrl); // make the session manager listen to the point control

        isActive = false; // set lifecycle flag to false
        // fill the handpoint coordinates with invalid values
        // initialize the clipping matrix
        HandPointBuffer = new ArrayList();
    }
    catch (Exception)
    {
        // Best-effort initialization: any failure maps to a plain false.
        return false;
    }
    return true;
}
// Notifies that the viewpoint has changed.
void Viewpoint_ViewpointChanged(object sender, EventArgs e)
{
    DepthGenerator depth = sender as DepthGenerator;
    if (depth != null)
    {
        // Record whether the depth viewpoint is currently aligned to the image generator.
        isViewpoint = depth.AlternativeViewpointCapability.IsViewpointAs(image);
    }
}
/// <summary>
/// Unity Start: initializes the character, the OpenNI context, depth node and
/// user/skeleton tracking, and allocates the textures/buffers used to render
/// the per-pixel user label map.
/// </summary>
void Start()
{
    /*really unity?
     *
     * do you do that for real?
     */
    InitializeCharacter();

    this.context = new Context(SAMPLE_XML_FILE);
    this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (this.depth == null)
    {
        throw new Exception("Viewer must have a depth node!");
    }

    // User tracking plus calibration/pose-detection capabilities.
    // (Note: "skeletonCapbility" is the existing field name; kept as-is.)
    this.userGenerator = new UserGenerator(this.context);
    this.skeletonCapbility = new SkeletonCapability(this.userGenerator);
    this.poseDetectionCapability = new PoseDetectionCapability(this.userGenerator);
    this.calibPose = this.skeletonCapbility.GetCalibrationPose();

    this.userGenerator.NewUser += new UserGenerator.NewUserHandler(userGenerator_NewUser);
    this.userGenerator.LostUser += new UserGenerator.LostUserHandler(userGenerator_LostUser);
    this.poseDetectionCapability.PoseDetected += new PoseDetectionCapability.PoseDetectedHandler(poseDetectionCapability_PoseDetected);
    this.skeletonCapbility.CalibrationEnd += new SkeletonCapability.CalibrationEndHandler(skeletonCapbility_CalibrationEnd);

    this.skeletonCapbility.SetSkeletonProfile(SkeletonProfile.All);
    this.joints = new Dictionary <uint, Dictionary <SkeletonJoint, SkeletonJointPosition> >();
    this.userGenerator.StartGenerating();

    // One histogram bucket per possible raw depth value.
    this.histogram = new int[this.depth.GetDeviceMaxDepth()];

    MapOutputMode mapMode = this.depth.GetMapOutputMode();
    // this.bitmap = new Bitmap((int)mapMode.nXRes, (int)mapMode.nYRes/*, System.Drawing.Imaging.PixelFormat.Format24bppRgb*/);

    // Texture and buffers for drawing the per-pixel user label map.
    usersLblTex = new Texture2D((int)mapMode.nXRes, (int)mapMode.nYRes);
    Debug.Log("usersLblTex = w: " + usersLblTex.width + " h: " + usersLblTex.height);

    usersMapSize = mapMode.nXRes * mapMode.nYRes;
    usersMapColors = new Color[usersMapSize];
    // Draw the map at half size, anchored near the bottom-right of the screen.
    usersMapRect = new Rect(Screen.width - usersLblTex.width / 2, Screen.height - usersLblTex.height / 2, usersLblTex.width / 2, usersLblTex.height / 2);
    usersLabelMap = new short[usersMapSize];
    usersDepthMap = new short[usersMapSize];
    // NOTE(review): histogram hard-coded to 5000 buckets — confirm this covers GetDeviceMaxDepth().
    usersHistogramMap = new float[5000];

    //DepthMetaData depthMD = new DepthMetaData();

    this.shouldRun = true;

    //this.readerThread = new Thread(ReaderThread);
    // this.readerThread.Start();
}
// Initialization: builds the image/depth/gesture/hands pipeline and starts generating.
private void xnInitialize()
{
    // Initialize the context from the XML configuration file.
    ScriptNode scriptNode;
    context = Context.CreateFromXmlFile(CONFIG_XML_PATH, out scriptNode);

    // Disable global mirroring (no horizontal flip).
    context.GlobalMirror = false;

    // Create the image generator.
    image = context.FindExistingNode(NodeType.Image) as ImageGenerator;
    if (image == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Create the depth generator.
    depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depth == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Align the depth coordinates with the image viewpoint.
    depth.AlternativeViewpointCapability.SetViewpoint(image);

    // Create the gesture generator.
    // BUG FIX: the original re-tested "depth == null" here, so a missing
    // gesture node slipped past and crashed later with a NullReferenceException.
    gesture = context.FindExistingNode(NodeType.Gesture) as GestureGenerator;
    if (gesture == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Register the gesture to detect.
    gesture.AddGesture("RaiseHand");

    // Register the gesture callbacks.
    gesture.GestureRecognized += new EventHandler <GestureRecognizedEventArgs>(gesture_GestureRecognized);
    gesture.GestureProgress += new EventHandler <GestureProgressEventArgs>(gesture_GestureProgress);

    // Create the hands generator.
    // BUG FIX: same wrong "depth == null" test as above.
    hands = context.FindExistingNode(NodeType.Hands) as HandsGenerator;
    if (hands == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Register the hand-tracking callbacks.
    hands.HandCreate += new EventHandler <HandCreateEventArgs>(hands_HandCreate);
    hands.HandUpdate += new EventHandler <HandUpdateEventArgs>(hands_HandUpdate);
    hands.HandDestroy += new EventHandler <HandDestroyEventArgs>(hands_HandDestroy);

    // Start gesture detection.
    context.StartGeneratingAll();
}
/// <summary>
/// Background camera loop: blocks for fresh OpenNI data and refreshes the
/// cached depth and image metadata until <c>_isRunning</c> is cleared.
/// </summary>
private unsafe void CameraThread()
{
    while (_isRunning)
    {
        // Wait for all generators to produce a new frame, then snapshot both streams.
        Context.WaitAndUpdateAll();
        DepthGenerator.GetMetaData(_depthMD);
        ImageGenerator.GetMetaData(_imgMD);
    }
}
}//end UpdateDepth()

#endregion

#region Display Depth Map

/// <summary>
/// This method gets executed when the window loads. In it, we initialize our
/// connection with Kinect and set up the timer which will update our depth image.
/// </summary>
/// <param name="sender">Standard routed-event sender (unused).</param>
/// <param name="e">Standard routed-event args (unused).</param>
private void Window_Loaded(object sender, RoutedEventArgs e)
{
    try
    {
        // Initialize the context from the configuration file
        this.context = new Context(@"..\..\data\openniconfig.xml");
        // Get the depth generator from the config file.
        this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
        if (this.depth == null)
        {
            throw new Exception(@"Error in Data\openniconfig.xml. No depth node found.");
        }
        MapOutputMode mapMode = this.depth.GetMapOutputMode();
        this.bitmap = new Bitmap((int)mapMode.nXRes, (int)mapMode.nYRes, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

        // Initialize Matrixes
        // NOTE(review): matrix/image sizes are hard-coded to 640x480 — assumes
        // the depth map runs at VGA; confirm against mapMode above.
        this.depthMatrix = new Matrix <Double>(480, 640);
        this.depthMatrixROI = new Matrix <Double>(480, 640);
        this.depthMatrixROIByte = new Matrix <Byte>(480, 640);
        this.backgroundDepthMatrix = new Matrix <Double>(480, 640);

        // Initialize Images
        this.depthMatrixImage = new Image <Gray, double>(640, 480);
        this.depthMatrixROIImage = new Image <Gray, byte>(640, 480);
        this.backgroundDepthImage = new Image <Gray, double>(640, 480);
        this.trackingImage = new Image <Bgr, byte>(640, 480, new Bgr(0, 0, 0));

        // Initialize Blob Tracking Components
        // Blob Tracking Parameter
        blobParam = new Emgu.CV.VideoSurveillance.BlobTrackerAutoParam <Gray>();
        blobParam.BlobTracker = new Emgu.CV.VideoSurveillance.BlobTracker(Emgu.CV.CvEnum.BLOBTRACKER_TYPE.CC);
        blobParam.BlobDetector = new Emgu.CV.VideoSurveillance.BlobDetector(Emgu.CV.CvEnum.BLOB_DETECTOR_TYPE.CC);
        //blobParam.FGDetector = new Emgu.CV.VideoSurveillance.FGDetector<Gray>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
        //blobParam.FGTrainFrames = 10;

        // Blob Tracking initialization
        blobTracker = new Emgu.CV.VideoSurveillance.BlobTrackerAuto <Gray>(blobParam);

        // Progress bar spans the configured lifespan in timer ticks.
        progressBar1.Minimum = 0;
        progressBar1.Maximum = ((Convert.ToInt16(textBoxLifespan.Text) * 4) / _tickDuration);
    }
    catch (Exception ex)
    {
        MessageBox.Show("Error initializing OpenNI.");
        MessageBox.Show(ex.Message);
        this.Close();
    }

    // Set the timer to update the depth image every _tickDuration ms.
    DispatcherTimer dispatcherTimer = new DispatcherTimer();
    dispatcherTimer.Tick += new EventHandler(dispatcherTimer_Tick);
    dispatcherTimer.Interval = new TimeSpan(0, 0, 0, 0, _tickDuration);
    dispatcherTimer.Start();
    Console.WriteLine("Finished loading");
}
/// <summary>
/// Xtion device wrapper: creates the OpenNI context, the image, depth and user
/// generators, configures skeleton tracking, and starts the background update
/// thread.
/// </summary>
public Xtion()
{
    Trace.WriteLine( Assembly.GetExecutingAssembly().FullName );

    context = new Context();
    // Mirror the streams horizontally (webcam-style view).
    context.GlobalMirror = true;

    Image = new ImageGenerator( context );
    Image.NewDataAvailable += new EventHandler( ImageGenrator_NewDataAvailable );
    Image.MapOutputMode = new MapOutputMode()
    {
        XRes = Width, YRes = Height, FPS = FPS
    };

    Depth = new DepthGenerator( context );
    Depth.NewDataAvailable += new EventHandler( DepthGenerator_NewDataAvailable );
    Depth.MapOutputMode = new MapOutputMode()
    {
        XRes = Width, YRes = Height, FPS = FPS
    };

    // User tracking with enter/exit/re-enter life-cycle callbacks.
    User = new UserGenerator( context );
    User.NewUser += new EventHandler<NewUserEventArgs>( UserGenerator_NewUser );
    User.LostUser += new EventHandler<UserLostEventArgs>( UserGenerator_LostUser );
    User.UserExit += new EventHandler<UserExitEventArgs>( UserGenerator_UserExit );
    User.UserReEnter += new EventHandler<UserReEnterEventArgs>( UserGenerator_UserReEnter );
    User.NewDataAvailable += new EventHandler( UserGenerator_NewDataAvailable );

    // Pose-free calibration is required; the message asks (in Japanese) to
    // install OpenNI 1.4.0.2 or later.
    Skeleton = User.SkeletonCapability;
    if ( Skeleton.DoesNeedPoseForCalibration ) {
        throw new Exception( "OpenNI 1.4.0.2 以降をインストールしてください" );
    }
    Skeleton.CalibrationComplete += new EventHandler<CalibrationProgressEventArgs>( Skeleton_CalibrationComplete );
    Skeleton.SetSkeletonProfile( SkeletonProfile.HeadAndHands );
    Skeleton.SetSmoothing( 0.7f );

    // Create the thread that keeps the image data up to date.
    shouldRun = true;
    readerThread = new Thread( new ThreadStart( () =>
    {
        while ( shouldRun ) {
            context.WaitAndUpdateAll();
        }
    } ) );
    readerThread.Start();

    context.StartGeneratingAll();
}
/// <summary>
/// Main window constructor: spawns the debug console, builds the OpenNI
/// pipeline (image/depth/user nodes), allocates the display bitmaps, and
/// wires up the reader thread plus all tracking callbacks.
/// </summary>
public MainWindow()
{
    InitializeComponent();

    // Secondary console window for diagnostics, pinned to the top-left corner.
    console = new Console();
    console.Show();
    console.Top = 0;
    console.Left = 0;
    Console.Write("TrackingNI by Richard Pianka and Ramsey Abouzahra");

    context = new Context(CONFIG_FILE);
    imageGenerator = new ImageGenerator(context);
    depthGenerator = new DepthGenerator(context);
    userGenerator = new UserGenerator(context);
    poseDetectionCapability = userGenerator.PoseDetectionCapability;
    skeletonCapability = userGenerator.SkeletonCapability;

    MapOutputMode mapMode = depthGenerator.MapOutputMode;
    int width = (int)mapMode.XRes;
    int height = (int)mapMode.YRes;

    // One bitmap for RGB, one for raw depth, one for the corrected depth view.
    imageBitmap = new WriteableBitmap(width, height, DPI_X, DPI_Y, PixelFormats.Rgb24, null);
    depthBitmap = new WriteableBitmap(width, height, DPI_X, DPI_Y, PixelFormats.Rgb24, null);
    depthBitmapCorrected = new WriteableBitmap(width, height, DPI_X, DPI_Y, PixelFormats.Rgb24, null);
    imageData = new ImageMetaData();
    depthData = new DepthMetaData();

    skeletonDraw = new SkeletonDraw();

    // One histogram bucket per possible raw depth value.
    Histogram = new int[depthGenerator.DeviceMaxDepth];

    // Background reader pumps OpenNI; the worker ticks on render.
    reader = new Thread(new ThreadStart(Reader));
    reader.IsBackground = true;
    worker = new BackgroundWorker();
    stop = false;

    CompositionTarget.Rendering += new EventHandler(Worker);
    Closing += new System.ComponentModel.CancelEventHandler(MainWindow_Closing);

    // User life-cycle, calibration, and pose callbacks.
    userGenerator.NewUser += new EventHandler <NewUserEventArgs>(NewUser);
    userGenerator.LostUser += new EventHandler <UserLostEventArgs>(LostUser);
    skeletonCapability.CalibrationStart += new EventHandler <CalibrationStartEventArgs>(CalibrationStart);
    skeletonCapability.CalibrationEnd += new EventHandler <CalibrationEndEventArgs>(CalibrationEnd);
    skeletonCapability.SetSkeletonProfile(SkeletonProfile.All);
    poseDetectionCapability.PoseDetected += new EventHandler <PoseDetectedEventArgs>(PoseDetected);
    poseDetectionCapability.PoseEnded += new EventHandler <PoseEndedEventArgs>(PoseEnded);

    reader.Start();
    worker.DoWork += new DoWorkEventHandler(WorkerTick);
}
// Initialization: builds the image/depth/gesture pipeline, logs gesture
// capabilities, and starts generating.
private void xnInitialize()
{
    // Initialize the context from the XML configuration file.
    ScriptNode scriptNode;
    context = Context.CreateFromXmlFile(CONFIG_XML_PATH, out scriptNode);

    // Create the image generator.
    image = context.FindExistingNode(NodeType.Image) as ImageGenerator;
    if (image == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Create the depth generator.
    depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (depth == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Align the depth coordinates with the image viewpoint.
    depth.AlternativeViewpointCapability.SetViewpoint(image);

    // Create the gesture generator.
    // BUG FIX: the original re-tested "depth == null" here, so a missing
    // gesture node slipped past and crashed on the first use of "gesture".
    gesture = context.FindExistingNode(NodeType.Gesture) as GestureGenerator;
    if (gesture == null)
    {
        throw new Exception(context.GlobalErrorState);
    }

    // Enumerate the available gestures and register the selected one.
    gestures = gesture.EnumerateAllGestures();
    gesture.AddGesture(gestures[gestureIndex]);
    string[] activeGestures = gesture.GetAllActiveGestures(); // NOTE(review): result unused — confirm it can be removed

    // Log each gesture's availability and progress support.
    foreach (string name in gestures)
    {
        Trace.WriteLine(name + ":" + "Available:" + gesture.IsGestureAvailable(name) + " ProgressSupported:" + gesture.IsGestureProgressSupported(name));
    }

    // Register the gesture callbacks.
    gesture.GestureRecognized += new EventHandler <GestureRecognizedEventArgs>(gesture_GestureRecognized);
    gesture.GestureProgress += new EventHandler <GestureProgressEventArgs>(gesture_GestureProgress);
    gesture.GestureChanged += new EventHandler(gesture_GestureChanged);

    // Start gesture detection.
    context.StartGeneratingAll();
}
/// <summary>
/// Builds the full OpenNI pipeline (depth, user/skeleton/pose, hands,
/// gestures), allocates the histogram/bitmap, and starts the low-priority
/// background reader thread. Always returns true; throws if the depth node
/// is missing.
/// </summary>
/// <param name="xmlconfig">Currently unused; see the TO-DO below.</param>
public bool SetConfig(XmlElement xmlconfig)
{
    // TO-DO: add some configuration parameters if of any use
    this.context = new Context("NiteKinectConfig.xml");
    this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
    if (this.depth == null)
    {
        throw new Exception("Viewer must have a depth node!");
    }
    this.userGenerator = new UserGenerator(this.context);
    this.userGenerator.NewUser += userGenerator_NewUser;
    this.userGenerator.LostUser += userGenerator_LostUser;
    //
    // Skeleton tracking. (Note: "skeletonCapbility" is the existing field name.)
    this.skeletonCapbility = this.userGenerator.SkeletonCapability;
    this.skeletonCapbility.SetSkeletonProfile(SkeletonProfile.All);
    this.skeletonCapbility.CalibrationEnd += skeletonCapbility_CalibrationEnd;
    //
    this.poseDetectionCapability = this.userGenerator.PoseDetectionCapability;
    this.calibPose = this.skeletonCapbility.CalibrationPose;
    this.poseDetectionCapability.PoseDetected += poseDetectionCapability_PoseDetected;
    //
    // Hand tracking callbacks.
    this.handsGenerator = new HandsGenerator(this.context);
    this.handsGenerator.HandCreate += handsGenerator_HandCreate;
    this.handsGenerator.HandDestroy += handsGenerator_HandDestroy;
    this.handsGenerator.HandUpdate += handsGenerator_HandUpdate;
    //
    // Gesture recognition for the three supported gestures.
    this.gestureGenerator = new GestureGenerator(this.context);
    this.gestureGenerator.AddGesture("Wave");
    this.gestureGenerator.AddGesture("Click");
    this.gestureGenerator.AddGesture("RaiseHand");
    this.gestureGenerator.GestureRecognized += gestureGenerator_GestureRecognized;
    //
    // Per-user map of tracked joint positions.
    this.joints = new Dictionary <int, Dictionary <SkeletonJoint, SkeletonJointPosition> >();
    //
    this.userGenerator.StartGenerating();
    this.handsGenerator.StartGenerating();
    this.gestureGenerator.StartGenerating();
    //
    // One histogram bucket per possible raw depth value.
    this.histogram = new int[this.depth.DeviceMaxDepth];
    MapOutputMode mapMode = this.depth.MapOutputMode;
    //
    this.bitmap = new Bitmap((int)mapMode.XRes, (int)mapMode.YRes);

    // Low-priority background thread pumps OpenNI updates.
    this.shouldRun = true;
    this.readerThread = new Thread(ReaderThread);
    this.readerThread.Priority = ThreadPriority.Lowest;
    this.readerThread.Start();
    //
    return(true);
}
/// <summary>
/// Creates a cluster data source driven by NITE hand tracking: a session
/// manager listens for the "Wave"/"RaiseHand" focus gestures and forwards
/// primary-point updates to this instance.
/// </summary>
public TrackingClusterDataSource(Context context, DepthGenerator depthGenerator, IDepthPointerDataSource dataSource) : base(dataSource)
{
    this.context = context;
    this.depthGenerator = depthGenerator;
    this.size = dataSource.Size;
    this.CurrentValue = new ClusterCollection();

    // Hand-point pipeline: session manager -> point control -> this source.
    this.pointControl = new PointControl();
    this.pointControl.PrimaryPointUpdate += pointControl_PrimaryPointUpdate;

    this.sessionManager = new SessionManager(context, "Wave", "RaiseHand");
    this.sessionManager.AddListener(this.pointControl);
}
/// <summary>
/// Opens the OpenNI context, creates the central depth generator, and reads
/// the OpenNI / middleware (NITE) / sensor driver version strings for display.
/// Any failure is logged rather than thrown.
/// </summary>
private void OpenContext()
{
    //try to open Kinect Context
    try
    {
        FContext = new Context();
        FContext.ErrorStateChanged += FContext_ErrorStateChanged;

        /* var devices = FContext.EnumerateProductionTrees(OpenNI.NodeType.Depth, null);
         *
         * foreach (var device in devices)
         *     FDevices.Add(device.CreationInfo, device);
         *
         * string[] deviceNames = new string[FDevices.Count];
         * FDevices.Keys.CopyTo(deviceNames, 0);
         * EnumManager.UpdateEnum("OpenNIDevices", deviceNames[0], deviceNames);
         *
         * Query q = new Query();
         * q.SetCreationInfo(deviceNames[0]);
         * var depths = FContext.EnumerateProductionTrees(OpenNI.NodeType.Depth, q);
         *
         * //here is one central depth generator that is used by downstream nodes like depth, user, hand, skeleton,..
         * //since this is one central generator it should not be allowed to disable it downstream
         * foreach (var depth in depths)*/
        FDepthGenerator = (DepthGenerator)FContext.CreateAnyProductionTree(OpenNI.NodeType.Depth, null); // .CreateProductionTree(depth);
        FDepthGenerator.StartGenerating();
        //creation of usergenerators requires generation of depthgenerator
        //depth node needs imagegenerator to adaptview

        //read out driver versions:
        var v = OpenNI.Version.Current;
        FOpenNI = "OpenNI: " + v.Major + "." + v.Minor + "." + v.Maintenance + "." + v.Build;

        //create a usergenerator here just for getting the NITE version
        var user = FContext.CreateAnyProductionTree(OpenNI.NodeType.User, null);
        v = user.Info.Description.Version;
        FMiddleware = user.Info.Description.Vendor + " " + user.Info.Description.Name + ": " + v.Major + "." + v.Minor + "." + v.Maintenance + "." + v.Build;
        user.Dispose();

        v = FDepthGenerator.Info.Description.Version;
        FSensor = FDepthGenerator.Info.Description.Vendor + " " + FDepthGenerator.Info.Description.Name + ": " + v.Major + "." + v.Minor + "." + v.Maintenance + "." + v.Build;
    }
    catch (Exception e)
    {
        FLogger.Log(e);
    }
}
/// <summary>
/// Tears down the OpenNI context: stops generation, unsubscribes the error
/// handler, disposes the depth generator, and releases the context itself.
/// Safe to call when nothing was ever opened.
/// </summary>
private void CloseContext()
{
    // Nothing to tear down if the context was never opened.
    if (FContext == null)
    {
        return;
    }

    FContext.StopGeneratingAll();
    FContext.ErrorStateChanged -= FContext_ErrorStateChanged;

    if (FDepthGenerator != null)
    {
        FDepthGenerator.Dispose();
        FDepthGenerator = null;
    }

    FContext.Release();
    FContext = null;
}
/// <summary>
/// Initializes the OpenNI context and depth generator from the configured XML
/// file, marks the context valid, and kicks off Start().
/// </summary>
private void Init()
{
    // Context(...) either succeeds or throws; the original "null == context"
    // guard could never fire because "new" never yields null in C#.
    this.context = new Context(OpenNIXMLFilename);
    this.Depth = new DepthGenerator(this.context);
    this.mirror = this.Depth.MirrorCapability;
    MonoBehaviour.print("OpenNI inited");
    validContext = true;
    Start();
}
/// <summary>
/// Unity Awake: opens either a recorded .oni file (playback mode) or the live
/// depth node, and applies the initial mirror setting for live input only.
/// </summary>
public void Awake()
{
    if (LoadFromRecording)
    {
        // Playback mode: feed frames from a recorded file instead of a live sensor.
        context.OpenFileRecording(RecordingFilename);
        Player player = openNode(NodeType.Player) as Player;
        // NOTE(review): player is not null-checked — assumes the recording always
        // provides a Player node; confirm.
        // PlaybackSpeed 0 means frames are advanced manually by the coroutine below.
        player.PlaybackSpeed = 0.0;
        StartCoroutine(ReadNextFrameFromRecording(player));
    }
    this.Depth = openNode(NodeType.Depth) as DepthGenerator;
    this.mirrorCap = this.Depth.MirrorCapability;
    // Mirror is only overridden for live input, never for recordings.
    if (!LoadFromRecording)
    {
        this.mirrorCap.SetMirror(initialMirror);
    }
}
//called when data for any output pin is requested
public void Evaluate(int SpreadMax)
{
    // Pick up a new context from the input pin if it changed.
    if (FPinInContext[0] != FContext)
    {
        FContext = FPinInContext[0];
    }
    if (FContext == null)
    {
        FRunning = false;
        return;
    }

    // Lazily start the worker once the upstream context reports running.
    if (FContext.running && !FRunning && FContext.context != null)
    {
        try
        {
            FImageGenerator = FContext.context.FindExistingNode(global::OpenNI.NodeType.Image) as ImageGenerator;
            FDepthGenerator = FContext.context.FindExistingNode(global::OpenNI.NodeType.Depth) as DepthGenerator;
            // Align the depth viewpoint with the RGB camera.
            FDepthGenerator.AlternativeViewpointCapability.SetViewpoint(FImageGenerator);

            FImageRGB = new ImageRGB();
            FImageDepth = new ImageL16();
            FImageWorld = new ImageRGB32F();

            // NOTE(review): buffers hard-coded to 640x480 — assumes the generators run at VGA; confirm.
            Size size = new Size(640, 480);
            FImageRGBBuffer = new Image <Bgr, byte>(size);
            FImageDepthBuffer = new Image <Gray, ushort>(size);
            FImageWorldBuffer = new Image <Rgb, float>(size);

            FPinOutRGB[0] = FImageRGB;
            FPinOutDepth[0] = FImageDepth;
            FPinOutWorld[0] = FImageWorld;

            // Background thread copies frames into the buffers above.
            FThread = new Thread(fnThread);
            FRunning = true;
            FThread.Start();

            FPinOutStatus[0] = "OK";
        }
        catch (StatusException e)
        {
            FRunning = false;
            FPinOutStatus[0] = e.Message;
        }
    }
}
/// <summary>
/// Scales a projective point from the depth map's resolution into the given
/// width/height, preserving the Z (depth) component.
/// </summary>
/// <param name="point">Point in depth-map (projective) coordinates.</param>
/// <param name="depth">Depth node whose output resolution defines the source range.</param>
/// <param name="width">Target width to scale X into.</param>
/// <param name="height">Target height to scale Y into.</param>
public static Point3D ScaleTo( this Point3D point, DepthGenerator depth, int width, int height )
{
    // Read the output mode once instead of twice — each property access goes
    // through the OpenNI wrapper.
    MapOutputMode mode = depth.MapOutputMode;
    return new Point3D( (point.X * width) / mode.XRes,
                        (point.Y * height) / mode.YRes,
                        point.Z );
}