private static void TestFaceRecognition() {
    // Subscribe before triggering recognition so the event cannot be missed.
    // NOTE(review): the original handler body was corrupted by a credential/secret
    // scrubber ("User: "******"..."), which ate the lambda's closing brace and the
    // value being printed. Presumably it printed an identifier off eventArgs —
    // confirm the actual property name against the FaceRecognizedEventArgs type.
    _cam.Face.FaceRecognized += (sender, eventArgs) => {
        WriteLine("User: " + eventArgs);
    };

    // Wait for the operator, then ask the camera to run one recognition pass.
    // (Prompt says "any key" but ReadLine actually waits for Enter — kept as-is
    // to match the rest of the file, which uses ReadLine for pauses.)
    WriteLine("Press any key");
    ReadLine();
    _cam.Face.RecognizeFace();
    WriteLine("Recognize!");
}
// Entry point (image-streaming variant). NOTE(review): the file contains a second
// Main overload with the same signature on the next line — presumably two snapshots
// of the same file were concatenated; only one can compile per assembly.
private static void Main(string[] args) {
    // Disable noise filtering entirely so raw sensor values pass through.
    Item.DefaultNoiseThreshold = 0;

    // Native RealSense assemblies must be loaded before the camera is constructed.
    RealSenseAssembliesLoader.Load();
    _cam = new RealSenseCamera();

    // Exactly one demo is active at a time; uncomment a different line to switch.
    //TestHands();
    //TestFace();
    //TestFaceRecognition();
    //TestFacialExpressions();
    //TestEmotions();
    //TestSpeech();
    //TestGestures();
    TestImageStreaming();

    // Start streaming, block until the operator presses Enter, then release the device.
    _cam.Start();
    ReadLine();
    _cam.Dispose();
}
// Entry point (face-tracking variant). NOTE(review): duplicate of the Main above
// with a different demo enabled — two file versions appear to be concatenated here;
// keep only one when merging.
private static void Main(string[] args) {
    // Disable noise filtering entirely so raw sensor values pass through.
    Item.DefaultNoiseThreshold = 0;

    // Native RealSense assemblies must be loaded before the camera is constructed.
    RealSenseAssembliesLoader.Load();
    _cam = new RealSenseCamera();

    // Exactly one demo is active at a time; uncomment a different line to switch.
    //TestHands();
    //TestFingers();
    TestFace();
    //TestFaceRecognition();
    //TestFacialExpressions();
    //TestSpeech();
    //TestGestures();
    //TestImageStreaming();

    // Start streaming, block until the operator presses Enter, then release the device.
    _cam.Start();
    ReadLine();
    _cam.Dispose();
}
/// <summary>
/// Initializes the RealSense camera (or, when FakePosition is set, a synthetic
/// flat point-cloud) and then pumps frame data in an endless loop — this method
/// never returns.
/// </summary>
static void InitInternal() {
    Console.WriteLine(ConsoleColor.DarkCyan, "Starting RealSense");
    if (!FakePosition) {
        // Resolutions contains all available combinations of resolutions, formats,
        // sensors, etc. you can get from the camera — every possible output
        // (Depth, ARGB, ...) in every supported resolution. Negating Framerate in
        // the OrderBy key sorts highest framerate first.
        IEnumerable<FrameData> Resolutions = RealSenseCamera.QueryResolutions().OrderBy((Data) => - Data.Framerate);

        // Narrow the list down to depth-sensor modes only.
        IEnumerable<FrameData> DepthResolutions = Resolutions.Where((Data) => Data.Type == FrameType.Depth);

        // Requested capture resolution; alternatives the hardware supports are kept
        // commented for quick switching.
        int ReqW = 640;
        int ReqH = 480;
        //int ReqW = 848; //int ReqH = 480;
        //int ReqW = 1280; //int ReqH = 720;

        // From the available modes pick exactly the depth (Z16) and color (RGB8)
        // streams at the requested size. First() throws if the camera lacks them,
        // which fails fast on unsupported hardware.
        FrameData DepthRes = DepthResolutions.Where((Data) => Data.Width == ReqW && Data.Height == ReqH && Data.Format == FrameFormat.Z16).First();
        FrameData ColorRes = Resolutions.Where((Data) => Data.Width == ReqW && Data.Height == ReqH && Data.Format == FrameFormat.Rgb8).First();
        W = ColorRes.Width;
        H = ColorRes.Height;
        Console.WriteLine(ConsoleColor.DarkCyan, "RealSense running at {0}x{1}", W, H);

        // These option values were copied from the Intel RealSense Viewer demo app.
        RealSenseCamera.SetOption(DepthRes, RealSenseOption.VisualPreset, 1); //4
        // EmitterEnabled toggles the IR pattern projector. Currently turned off
        // because it interferes with the ototrack (presumably OptiTrack — confirm)
        // motion-capture cameras.
        RealSenseCamera.SetOption(DepthRes, RealSenseOption.EmitterEnabled, 0);
        RealSenseCamera.SetOption(DepthRes, RealSenseOption.EnableAutoExposure, 1);
        //RealSenseCamera.SetOption(DepthRes, RealSenseOption.LaserPower, 30); // Set different power levels of the IR laser emitter

        // Terminate any data stream left open from a previous run, then enable
        // exactly the two streams selected above and start capturing.
        RealSenseCamera.DisableAllStreams();
        RealSenseCamera.EnableStream(DepthRes, ColorRes);
        RealSenseCamera.Start();
        Console.WriteLine(ConsoleColor.DarkCyan, "RealSense ready");
        if (Program.UseThreading) {
            while (true) {
                Loop(); // pump frames forever on this thread
            }
        }
    } else {
        // Fake point cloud standing in for the camera: a flat PlaneSize x PlaneSize
        // grid of vertices at z = 0.5, centered on the origin, re-published forever.
        Ready = true;
        float Scale = 1.0f / 500.0f; // grid step in world units
        int PlaneSize = 100;
        Vertex3[] Verts = OnPointCloud(PlaneSize * PlaneSize, null, null);
        for (int y = 0; y < PlaneSize; y++) {
            for (int x = 0; x < PlaneSize; x++) {
                Verts[y * PlaneSize + x] = new Vertex3(x * Scale - ((PlaneSize / 2) * Scale), y * Scale - ((PlaneSize / 2) * Scale), 0.5f);
            }
        }
        while (true) {
            OnPointCloud(Verts.Length, Verts, null);
        }
    }
}
/// <summary>
/// Brings up the RealSense camera — or a synthetic stand-in plane when
/// FakePosition is set — then streams data in an infinite loop; never returns.
/// </summary>
static void InitInternal() {
    Console.WriteLine(ConsoleColor.DarkCyan, "Starting RealSense");

    if (FakePosition) {
        // Synthetic source: a flat 100x100 vertex grid at z = 0.5, centered on
        // the origin, published over and over in place of real camera frames.
        Ready = true;
        float Step = 1.0f / 500.0f;
        int Side = 100;
        float Half = (Side / 2) * Step;
        Vertex3[] Cloud = OnPointCloud(Side * Side, null, null);
        for (int Row = 0; Row < Side; Row++) {
            for (int Col = 0; Col < Side; Col++) {
                Cloud[Row * Side + Col] = new Vertex3(Col * Step - Half, Row * Step - Half, 0.5f);
            }
        }
        while (true) {
            OnPointCloud(Cloud.Length, Cloud, null);
        }
    }

    // Real camera: enumerate every mode the device offers, fastest framerate first.
    IEnumerable<FrameData> AllModes = RealSenseCamera.QueryResolutions().OrderByDescending((Mode) => Mode.Framerate);
    IEnumerable<FrameData> DepthModes = AllModes.Where((Mode) => Mode.Type == FrameType.Depth);

    // Requested capture size (alternatives: 848x480, 1280x720).
    int ReqW = 640;
    int ReqH = 480;

    // Pick the Z16 depth mode and the RGB8 color mode at the requested size;
    // First(...) throws if the device cannot provide them.
    FrameData DepthMode = DepthModes.First((Mode) => Mode.Width == ReqW && Mode.Height == ReqH && Mode.Format == FrameFormat.Z16);
    FrameData ColorMode = AllModes.First((Mode) => Mode.Width == ReqW && Mode.Height == ReqH && Mode.Format == FrameFormat.Rgb8);
    W = ColorMode.Width;
    H = ColorMode.Height;
    Console.WriteLine(ConsoleColor.DarkCyan, "RealSense running at {0}x{1}", W, H);

    // Option values mirror the Intel RealSense Viewer defaults; the IR emitter
    // stays off here.
    RealSenseCamera.SetOption(DepthMode, RealSenseOption.VisualPreset, 1);
    RealSenseCamera.SetOption(DepthMode, RealSenseOption.EmitterEnabled, 0);
    RealSenseCamera.SetOption(DepthMode, RealSenseOption.EnableAutoExposure, 1);

    // Close any streams left over from a previous session, enable exactly the
    // two chosen ones, and begin capturing.
    RealSenseCamera.DisableAllStreams();
    RealSenseCamera.EnableStream(DepthMode, ColorMode);
    RealSenseCamera.Start();
    Console.WriteLine(ConsoleColor.DarkCyan, "RealSense ready");

    if (Program.UseThreading) {
        while (true) {
            Loop();
        }
    }
}