public SpeechRecognition(KinectSensor kinect, KinectAudioStream convertStream)
{
    this.kinectSensor = kinect;
    this.useSystemMic = true;
    this.speechPublisher = new NetworkPublisher();
    this.speechPublisher.Bind("33405");
    this.convertStream = convertStream;
    this.sentenceId = 0;
}
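// Minimal sketch (an assumption, not this class's actual code) of how the converted
// Kinect audio stream is typically fed to a recognizer after construction. It assumes
// Microsoft.Speech.Recognition / Microsoft.Speech.AudioFormat, a grammar built elsewhere,
// and the sample KinectAudioStream with its SpeechActive flag; the method name Start and
// the handler SpeechRecognized are hypothetical.
public void Start(RecognizerInfo recognizerInfo, Grammar grammar)
{
    this.speechEngine = new SpeechRecognitionEngine(recognizerInfo.Id);
    this.speechEngine.LoadGrammar(grammar);
    this.speechEngine.SpeechRecognized += this.SpeechRecognized;

    // The KinectAudioStream converts the sensor's 32-bit IEEE float samples into the
    // 16 kHz, 16-bit mono PCM format expected by the speech engine.
    this.convertStream.SpeechActive = true;
    this.speechEngine.SetInputToAudioStream(
        this.convertStream,
        new SpeechAudioFormatInfo(EncodingFormat.Pcm, 16000, 16, 1, 32000, 2, null));
    this.speechEngine.RecognizeAsync(RecognizeMode.Multiple);
}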
public SkeletonFaceTracking(KinectSensor kinect)
{
    this.kinectSensor = kinect;
    this.skeletonPublisher = new NetworkPublisher();
    this.skeletonPublisher.SetConflate();
    this.skeletonPublisher.Bind("33406");
    this.coordinateMapper = this.kinectSensor.CoordinateMapper;
    this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();
    this.bodyFrameReader.IsPaused = true;
    this.filter = new KinectJointFilter(smoothingParam, smoothingParam, smoothingParam);
    this.filter.Init(smoothingParam, smoothingParam, smoothingParam);
    this.dicoPos = new Dictionary<JointType, object>(25);
    this.jointPoints = new Dictionary<JointType, Point>(25);
    this.dicoBodies = new Dictionary<ulong, Dictionary<JointType, object>>(25);
    this.dicoFaces = new Dictionary<ulong, Dictionary<String, String>>(11);
    this.dicoFeatures = new Dictionary<string, string>(11);
    this.dicoOr = new Dictionary<JointType, Vector4>(25);
    this.qChild = new Quaternion();
    this.qParent = new Quaternion();
    this.maxBodyCount = this.kinectSensor.BodyFrameSource.BodyCount;

    // specify the required face frame results
    FaceFrameFeatures faceFrameFeatures =
        FaceFrameFeatures.BoundingBoxInColorSpace
        | FaceFrameFeatures.PointsInColorSpace
        | FaceFrameFeatures.RotationOrientation
        | FaceFrameFeatures.FaceEngagement
        | FaceFrameFeatures.Glasses
        | FaceFrameFeatures.Happy
        | FaceFrameFeatures.LeftEyeClosed
        | FaceFrameFeatures.RightEyeClosed
        | FaceFrameFeatures.LookingAway
        | FaceFrameFeatures.MouthMoved
        | FaceFrameFeatures.MouthOpen;

    // create a face frame source + reader to track each face in the FOV
    this.faceFrameSources = new FaceFrameSource[this.maxBodyCount];
    this.faceFrameReaders = new FaceFrameReader[this.maxBodyCount];
    for (int i = 0; i < this.maxBodyCount; i++)
    {
        // create the face frame source with the required face frame features and an initial tracking Id of 0
        this.faceFrameSources[i] = new FaceFrameSource(this.kinectSensor, 0, faceFrameFeatures);

        // open the corresponding reader
        this.faceFrameReaders[i] = this.faceFrameSources[i].OpenReader();

        // pause the reader to prevent getting frames before we need them
        this.faceFrameReaders[i].IsPaused = true;
    }

    // allocate storage for the face frame results of each face in the FOV
    this.faceFrameResults = new FaceFrameResult[this.maxBodyCount];
}
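// Hedged sketch of the usual Kinect v2 pattern for pairing each FaceFrameSource created
// above with a tracked body: a face source only produces frames once its TrackingId is
// set to that of a tracked Body. This is an assumed helper (UpdateFaceTrackingIds and the
// bodies argument are hypothetical), not necessarily how this class wires it up.
private void UpdateFaceTrackingIds(Body[] bodies)
{
    for (int i = 0; i < this.maxBodyCount; i++)
    {
        if (bodies[i] != null && bodies[i].IsTracked)
        {
            // bind the i-th face source to the i-th body so its face results follow that person
            this.faceFrameSources[i].TrackingId = bodies[i].TrackingId;
        }
    }
}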
public MultiSourceImage(KinectSensor kinect, NetworkPublisher dPub, NetworkPublisher cPub, NetworkPublisher mPub)
{
    this.kinect = kinect;
    this.coordinateMapper = this.kinect.CoordinateMapper;
    this.depthPublisher = dPub;
    this.colorPublisher = cPub;
    this.mappingPublisher = mPub;
    this.multiSourceFrameReader = this.kinect.OpenMultiSourceFrameReader(FrameSourceTypes.Color | FrameSourceTypes.Depth);
}
public AudioFrame(KinectSensor kinect, NetworkPublisher pub)
{
    this.kinect = kinect;
    this.publisher = pub;
    AudioSource audioSource = this.kinect.AudioSource;
    this.audioBeamFrameReader = audioSource.OpenReader();
    this.audioBeamFrameReader.IsPaused = true;

    // Allocate 1024 bytes to hold a single audio sub frame: a sub frame lasts 16 ms at a
    // 16 kHz sample rate, i.e. 256 samples, and at 4 bytes per sample that is 1024 bytes.
    this.audioBuffer = new byte[audioSource.SubFrameLengthInBytes];

    this.audioBeamFrameReader.FrameArrived += this.Reader_AudioBeamFrameArrived;
}
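// A minimal sketch, assuming the standard Kinect v2 audio API, of what the
// Reader_AudioBeamFrameArrived handler registered above might do: copy each 1024-byte
// sub frame into audioBuffer and hand it to the publisher. The publish call
// (SendByteArray) is hypothetical; the real NetworkPublisher API may differ.
private void Reader_AudioBeamFrameArrived(object sender, AudioBeamFrameArrivedEventArgs e)
{
    AudioBeamFrameList frameList = e.FrameReference.AcquireBeamFrames();
    if (frameList == null)
    {
        return;
    }

    using (frameList)
    {
        foreach (AudioBeamSubFrame subFrame in frameList[0].SubFrames)
        {
            // copy the raw 32-bit float samples of this sub frame into the reusable buffer
            subFrame.CopyFrameDataToArray(this.audioBuffer);
            this.publisher.SendByteArray(this.audioBuffer);
        }
    }
}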
public SkeletonTracking(KinectSensor kinect, NetworkPublisher network)
{
    this.kinectSensor = kinect;
    this.network = network;
    this.coordinateMapper = this.kinectSensor.CoordinateMapper;
    this.bodyFrameReader = this.kinectSensor.BodyFrameSource.OpenReader();
    this.filter = new KinectJointFilter(smoothingParam, smoothingParam, smoothingParam);
    this.filter.Init(smoothingParam, smoothingParam, smoothingParam);
    this.dicoPos = new Dictionary<JointType, object>(25);
    this.jointPoints = new Dictionary<JointType, Point>(25);
    this.dicoBodies = new Dictionary<ulong, Dictionary<JointType, object>>(25);
    this.dicoOr = new Dictionary<JointType, Vector4>(25);
    this.qChild = new Quaternion();
    this.qParent = new Quaternion();
}
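// Hedged sketch of the usual body-frame loop such a class drives (the handler name,
// the bodies field, and the SendBodies publish step are assumptions, not the project's
// actual code): refresh the Body array from the BodyFrameReader opened above, smooth
// joints with the KinectJointFilter, then publish the tracked skeletons.
private Body[] bodies;

private void Reader_BodyFrameArrived(object sender, BodyFrameArrivedEventArgs e)
{
    using (BodyFrame bodyFrame = e.FrameReference.AcquireFrame())
    {
        if (bodyFrame == null)
        {
            return;
        }

        if (this.bodies == null)
        {
            this.bodies = new Body[bodyFrame.BodyCount];
        }

        // GetAndRefreshBodyData reuses the same Body objects between frames
        bodyFrame.GetAndRefreshBodyData(this.bodies);
    }

    foreach (Body body in this.bodies)
    {
        if (body.IsTracked)
        {
            this.filter.UpdateFilter(body);   // smooth the raw joint positions
            this.SendBodies(body);            // hypothetical publish step over this.network
        }
    }
}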
public MainWindow()
{
    setKinectSensor();

    this.publisher = new NetworkPublisher();
    this.publisher.Bind("33405");

    this.subscriber = new NetworkSubscriber();
    this.subscriber.Bind("33406");

    this.colorPublisher = new NetworkPublisher();
    this.colorPublisher.Bind("33407");
    this.colorPublisher.SetConflate();

    this.depthPublisher = new NetworkPublisher();
    this.depthPublisher.Bind("33408");
    this.depthPublisher.SetConflate();

    this.audioPublisher = new NetworkPublisher();
    this.audioPublisher.Bind("33409");
    this.audioPublisher.SetConflate();

    this.mappingPublisher = new NetworkPublisher();
    this.mappingPublisher.Bind("33411");
    this.mappingPublisher.SetConflate();

    this.sr = new SpeechRecognition(this.kinectSensor, this.publisher, this.convertStream);
    this.st = new SkeletonTracking(this.kinectSensor, this.publisher);
    this.tts = new TextToSpeech(this.subscriber);
    this.af = new AudioFrame(this.kinectSensor, this.audioPublisher);
    this.msi = new MultiSourceImage(this.kinectSensor, this.depthPublisher, this.colorPublisher, this.mappingPublisher);

    InitializeComponent();

    this.srv = this.srview;
    this.stv = this.stview;
    this.ttsv = this.ttsview;
    this.rgbdplusmicv = this.rgbdmicview;

    // The responder must be created after the models because it uses the sr, srw, st and tts instances.
    this.responder = new NetworkResponder();
    this.responder.Bind("33410");
}
public MultiSourceImage(KinectSensor kinect)
{
    this.kinect = kinect;
    this.coordinateMapper = this.kinect.CoordinateMapper;

    this.colorPublisher = new NetworkPublisher();
    this.colorPublisher.SetConflate();
    this.colorPublisher.Bind("33408");

    this.mappingPublisher = new NetworkPublisher();
    this.mappingPublisher.SetConflate();
    this.mappingPublisher.Bind("33409");

    this.maskPublisher = new NetworkPublisher();
    this.maskPublisher.SetConflate();
    this.maskPublisher.Bind("33410");

    this.multiSourceFrameReader = this.kinect.OpenMultiSourceFrameReader(FrameSourceTypes.Color | FrameSourceTypes.Depth);
    this.multiSourceFrameReader.IsPaused = true;

    this.reqRep = true;
    this.repColorDelivered = true;
    this.repMappingDelivered = true;
    this.repMaskDelivered = true;
}
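// Minimal sketch, assuming a MultiSourceFrameArrived handler is registered on the reader
// opened above, showing how color and depth are typically pulled from a single
// MultiSourceFrame. Publishing and the depth-to-color mapping step are omitted; the
// commented copy calls indicate where the project's own frame handling would go.
private void Reader_MultiSourceFrameArrived(object sender, MultiSourceFrameArrivedEventArgs e)
{
    MultiSourceFrame multiSourceFrame = e.FrameReference.AcquireFrame();
    if (multiSourceFrame == null)
    {
        return;
    }

    using (ColorFrame colorFrame = multiSourceFrame.ColorFrameReference.AcquireFrame())
    using (DepthFrame depthFrame = multiSourceFrame.DepthFrameReference.AcquireFrame())
    {
        if (colorFrame == null || depthFrame == null)
        {
            return;
        }

        // e.g. colorFrame.CopyConvertedFrameDataToArray(colorPixels, ColorImageFormat.Bgra);
        // e.g. depthFrame.CopyFrameDataToArray(depthValues);
        // then publish on colorPublisher / mappingPublisher / maskPublisher as needed.
    }
}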
public SpeechRecognition(KinectSensor kinect, NetworkPublisher network, KinectAudioStream convertStream)
{
    this.kinectSensor = kinect;
    this.network = network;
    this.convertStream = convertStream;
}