public CaptureWindow()
{
    try
    {
        pipeline = new Pipeline();
        colorizer = new Colorizer();

        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480);
        cfg.EnableStream(Stream.Color, Format.Rgb8);
        pipeline.Start(cfg);

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            while (!token.IsCancellationRequested)
            {
                var frames = pipeline.WaitForFrames();
                var depth_frame = frames.DepthFrame;
                var color_frame = frames.ColorFrame;

                var colorized_depth = colorizer.Colorize(depth_frame);

                UploadImage(imgDepth, colorized_depth);
                UploadImage(imgColor, color_frame);

                // It is important to pre-emptively dispose of native resources
                // to avoid creating a bottleneck at the finalization stage after GC
                // (Also see the FramesReleaser helper object in the next tutorial)
                frames.Dispose();
                depth_frame.Dispose();
                colorized_depth.Dispose();
                color_frame.Dispose();
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }

    InitializeComponent();
}
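// A minimal teardown sketch to pair with the capture loop above, assuming the
// window owns the tokenSource and pipeline fields used there. This is not part
// of the original snippet; OnClosed is the standard WPF Window override.
protected override void OnClosed(EventArgs e)
{
    base.OnClosed(e);
    tokenSource.Cancel();  // ends the while-loop in the background task
    pipeline.Stop();       // stops streaming and releases the device
}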
public CaptureWindow()
{
    try
    {
        pipeline = new Pipeline();
        colorizer = new Colorizer();

        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480);
        cfg.EnableStream(Stream.Color, Format.Rgb8);
        pipeline.Start(cfg);

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            while (!token.IsCancellationRequested)
            {
                // NOTE: this variant leaves frame disposal to GC finalization;
                // prefer the explicit Dispose() pattern from the first snippet
                // (or the using-based sketch below) to avoid a native-memory bottleneck.
                var frames = pipeline.WaitForFrames();
                var colorized_depth = colorizer.Colorize(frames.DepthFrame);
                UploadImage(imgDepth, colorized_depth);
                UploadImage(imgColor, frames.ColorFrame);
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }

    InitializeComponent();
}
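// A sketch of the same loop body using using-blocks instead of explicit Dispose()
// calls, which gives the deterministic cleanup the first snippet performs by hand
// (FrameSet and VideoFrame are IDisposable in the .NET wrapper):
while (!token.IsCancellationRequested)
{
    using (var frames = pipeline.WaitForFrames())
    using (var depth_frame = frames.DepthFrame)
    using (var color_frame = frames.ColorFrame)
    using (var colorized_depth = colorizer.Colorize(depth_frame))
    {
        UploadImage(imgDepth, colorized_depth);
        UploadImage(imgColor, color_frame);
    }
}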
/**
 * NOTES
 * Currently it records immediately after linking the program with LabStreamLayer.
 * There might be a better solution, but we don't want to increase the number of
 * button presses for the protocol. It is probably better to record too much than
 * to forget pressing the record button before an experiment.
 **/

// Code taken directly from the LibRealSense 2 examples -- captures and displays
// the depth and RGB camera streams.
private void startRecordingProcess()
{
    try
    {
        pipeline = new Pipeline();
        colorizer = new Colorizer();

        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480, Format.Z16, 30);
        cfg.EnableStream(Stream.Color, 640, 480, Format.Bgr8, 30);
        //cfg.EnableRecordToFile(fileRecording); // This is now taken care of by FFMPEG

        pipeline.Start(cfg);
        applyRecordingConfig();

        processBlock = new CustomProcessingBlock((f, src) =>
        {
            using (var releaser = new FramesReleaser())
            {
                var frames = FrameSet.FromFrame(f, releaser);
                VideoFrame depth = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                VideoFrame color = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                var res = src.AllocateCompositeFrame(releaser, depth, color);
                src.FramesReady(res);
            }
        });

        processBlock.Start(f =>
        {
            using (var releaser = new FramesReleaser())
            {
                var frames = FrameSet.FromFrame(f, releaser);
                var depth_frame = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                var color_frame = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);
                var colorized_depth = colorizer.Colorize(depth_frame);

                UploadImage(imgDepth, colorized_depth);
                UploadImage(imgColor, color_frame);

                // Record via FFMPEG
                Bitmap bmpColor = new Bitmap(color_frame.Width, color_frame.Height, color_frame.Stride,
                    System.Drawing.Imaging.PixelFormat.Format24bppRgb, color_frame.Data);
                vidWriter_Color.WriteVideoFrame(bmpColor);

                Bitmap bmpDepth = new Bitmap(colorized_depth.Width, colorized_depth.Height, colorized_depth.Stride,
                    System.Drawing.Imaging.PixelFormat.Format24bppRgb, colorized_depth.Data);
                vidWriter_Depth.WriteVideoFrame(bmpDepth);

                if (lslOutlet != null)
                {
                    // Do LSL streaming here
                    sample[0] = "" + colorized_depth.Number + "_" + colorized_depth.Timestamp;
                    sample[1] = "" + color_frame.Number + "_" + color_frame.Timestamp;
                    lslOutlet.push_sample(sample, liblsl.local_clock());
                }
            }
        });

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            // Main loop
            while (!token.IsCancellationRequested)
            {
                using (var frames = pipeline.WaitForFrames())
                {
                    processBlock.ProcessFrames(frames);
                }
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }
}
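// The lslOutlet and sample fields used above are assumed to be initialized
// elsewhere in the class; a minimal sketch with the liblsl C# API (the stream
// name, type, and source id are illustrative, not from the original code):
private liblsl.StreamOutlet lslOutlet;
private string[] sample = new string[2]; // [0] = depth "number_timestamp", [1] = color "number_timestamp"

private void setupLsl()
{
    var info = new liblsl.StreamInfo("RealSenseFrameIDs", "Markers", 2,
        liblsl.IRREGULAR_RATE, liblsl.channel_format_t.cf_string, "realsense-recorder");
    lslOutlet = new liblsl.StreamOutlet(info);
}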
public void timer1_cycle(object sender, EventArgs e)
{
    try
    {
        double[] ptx = new double[W_H];    // 640*480
        double[] pty = new double[W_H];    // 640*480
        double[] ptz = new double[W_H];    // 640*480
        ushort[] depth = new ushort[W_H];  // 640*480

        // Get the camera intrinsics
        var stream = (VideoStreamProfile)pProfile.GetStream(Stream.Color);
        var colorIntrinsics = stream.GetIntrinsics();

        // Get the depth image
        var frames = pipeline.WaitForFrames();
        var dframe = frames.DepthFrame;
        dframe.CopyTo(depth);

        //Write_Flag = true;
        // Whether to start recognition
        if (Write_Flag == true)
        {
            Write_Flag = false;

            // Keep depths within the valid range and convert them to a point cloud
            int pc_cnt = 0;
            for (int i = 0; i < W_H; i++)
            {
                if (depth[i] > 400 && depth[i] < 6000)
                {
                    ptx[pc_cnt] = (((i % WIDTH) - colorIntrinsics.ppx) / colorIntrinsics.fx) * depth[i];
                    pty[pc_cnt] = (((i / WIDTH) - colorIntrinsics.ppy) / colorIntrinsics.fy) * depth[i];
                    ptz[pc_cnt] = depth[i];
                    pc_cnt++;
                }
            }
            double[] pcx = new double[pc_cnt];
            double[] pcy = new double[pc_cnt];
            double[] pcz = new double[pc_cnt];
            Array.Copy(ptx, pcx, pc_cnt);
            Array.Copy(pty, pcy, pc_cnt);
            Array.Copy(ptz, pcz, pc_cnt);

            //********************************************************
            // RANSAC ground-plane extraction
            int ITER = 200;
            double bpa = 0;
            double bpb = 0;
            double bpc = 0;
            int best_count = 0;
            Random rd = new Random(Guid.NewGuid().GetHashCode());
            var rd1 = rd.Next(1, pcx.Length);
            var rd2 = rd.Next(1, pcx.Length);
            var rd3 = rd.Next(1, pcx.Length);
            for (int i = 0; i < ITER; i++)
            {
                rd1 = rd.Next(1, pcx.Length);
                rd2 = rd.Next(1, pcx.Length);
                rd3 = rd.Next(1, pcx.Length);

                // Fit the plane equation z = a*x + b*y + c through the three samples
                double[] plane = new double[3];
                double a = ((pcz[rd1] - pcz[rd2]) * (pcy[rd2] - pcy[rd3]) - (pcz[rd2] - pcz[rd3]) * (pcy[rd1] - pcy[rd2])) /
                           ((pcx[rd1] - pcx[rd2]) * (pcy[rd2] - pcy[rd3]) - (pcx[rd2] - pcx[rd3]) * (pcy[rd1] - pcy[rd2]));
                double b = ((pcz[rd1] - pcz[rd2]) - a * (pcx[rd1] - pcx[rd2])) / (pcy[rd1] - pcy[rd2]);
                double c = pcz[rd1] - a * pcx[rd1] - b * pcy[rd1];

                // Count inliers within the distance threshold
                double[] dist = new double[pcx.Length];
                int m = 0;
                for (int k = 0; k < pcx.Length; k++)
                {
                    dist[k] = Math.Abs(a * pcx[k] + b * pcy[k] + c - pcz[k]);
                    if (dist[k] < 1.0)
                    {
                        m++;
                    }
                }
                if (m > best_count)
                {
                    best_count = m;
                    bpa = a;
                    bpb = b;
                    bpc = c;
                }
            }

            // Split the point cloud into ground points and the rest
            double[] o_pcx = new double[pcx.Length];
            double[] o_pcy = new double[pcx.Length];
            double[] o_pcz = new double[pcx.Length];
            int o_pc_cnt = 0;
            for (int i = 0; i < pcx.Length; i++)
            {
                double dist = bpa * pcx[i] + bpb * pcy[i] + bpc - pcz[i];
                if (dist < -20 || dist > 20)
                {
                    o_pcx[o_pc_cnt] = pcx[i];
                    o_pcy[o_pc_cnt] = pcy[i];
                    o_pcz[o_pc_cnt] = pcz[i];
                    o_pc_cnt++;
                }
            }
            double[] on_pcx = new double[o_pc_cnt];
            double[] on_pcy = new double[o_pc_cnt];
            double[] on_pcz = new double[o_pc_cnt];
            Array.Copy(o_pcx, on_pcx, o_pc_cnt);
            Array.Copy(o_pcy, on_pcy, o_pc_cnt);
            Array.Copy(o_pcz, on_pcz, o_pc_cnt);

            // Build a new depth map that excludes the ground
            int[,] new_depth = new int[WIDTH, HEIGHT];
            double[,] depth_label = new double[WIDTH, HEIGHT];
            for (int i = 0; i < on_pcx.Length; i++)
            {
                var x = Math.Round((on_pcx[i] / on_pcz[i]) * colorIntrinsics.fx + colorIntrinsics.ppx);
                var y = Math.Round((on_pcy[i] / on_pcz[i]) * colorIntrinsics.fy + colorIntrinsics.ppy);
                if (x < WIDTH && y < HEIGHT)
                {
                    new_depth[(int)x, (int)y] = 1;
                    depth_label[(int)x, (int)y] = on_pcz[i];
                }
            }

            // Connected-component labeling
            var cc = CalConnections(new_depth);

            // Find the on-ground object closest to the person's center
            double[] dist1 = new double[cc.Count];
            int[] dist_key = new int[cc.Count];
            int dist_cnt = 0;
            foreach (int key in cc.Keys)
            {
                var cc_values = cc[key];
                foreach (var val in cc_values)
                {
                    var d = (val.X - 480) * (val.X - 480) + (val.Y - 320) * (val.Y - 320);
                    //var d = val.Y * val.Y + (val.X - 320) * (val.X - 320);
                    if (dist1[dist_cnt] == 0)
                    {
                        dist1[dist_cnt] = d;
                    }
                    if (d < dist1[dist_cnt])
                    {
                        dist1[dist_cnt] = d;
                    }
                }
                dist_key[dist_cnt] = key;
                dist_cnt++;
            }

            // Pick the nearest region
            int ii = 0;
            int index = 0;
            while (true)
            {
                if (dist1[ii] == dist1.Min())
                {
                    index = ii;
                    break;
                }
                ii++;
            }

            // Convert the obstacle region back to a point cloud
            var obs_depth = cc[dist_key[index]];
            //obsize_cnt.Content = obs_depth.Count.ToString();
            double[] obs_pcx = new double[obs_depth.Count];
            double[] obs_pcy = new double[obs_depth.Count];
            double[] obs_pcz = new double[obs_depth.Count];
            byte[,] todisp = new byte[WIDTH, HEIGHT];
            for (int i = 0; i < obs_depth.Count; i++)
            {
                todisp[obs_depth[i].Y, obs_depth[i].X] = 255;
                obs_pcx[i] = ((obs_depth[i].Y - colorIntrinsics.ppx) / colorIntrinsics.fx) * depth_label[obs_depth[i].Y, obs_depth[i].X];
                obs_pcy[i] = ((obs_depth[i].X - colorIntrinsics.ppy) / colorIntrinsics.fy) * depth_label[obs_depth[i].Y, obs_depth[i].X];
                obs_pcz[i] = depth_label[obs_depth[i].Y, obs_depth[i].X];
            }

            // Compute obstacle size, distance, and related quantities
            double[] size_zs = new double[obs_pcz.Length];
            for (int i = 0; i < size_zs.Length; i++)
            {
                size_zs[i] = Math.Abs((bpa * obs_pcx[i] + bpb * obs_pcy[i] - obs_pcz[i] + bpc) / Math.Sqrt(bpa * bpa + bpb * bpb + 1));
            }
            double size_z = size_zs.Max();
            var theta = Math.Acos(1 / Math.Sqrt(bpa * bpa + bpb * bpb + 1)); // the angle should take the absolute value

            double[] obs_pcx_pj = new double[obs_pcx.Length];
            double[] obs_pcy_pj = new double[obs_pcx.Length];
            double[] obs_pcz_pj = new double[obs_pcx.Length];
            double[] dist_ys = new double[obs_pcx.Length];
            for (int i = 0; i < obs_pcx_pj.Length; i++)
            {
                var T = (bpa * obs_pcx[i] + bpb * obs_pcy[i] - obs_pcz[i] + bpc) / (bpa * bpa + bpb * bpb + 1);
                obs_pcx_pj[i] = obs_pcx[i] - bpa * T;
                obs_pcy_pj[i] = obs_pcy[i] - bpb * T;
                obs_pcz_pj[i] = obs_pcz[i] + T;
                dist_ys[i] = Math.Abs(obs_pcz_pj[i] * Math.Sin(theta) - obs_pcy_pj[i] * Math.Cos(theta));
            }
            var size_x = Math.Abs(obs_pcx_pj.Max() - obs_pcx_pj.Min());
            var size_y = Math.Abs((obs_pcy_pj.Max() - obs_pcy_pj.Min()) * (-1 / Math.Sqrt(bpa * bpa + bpb * bpb + 1)));
            var dist_y = dist_ys.Min();
            //********************************************************

            obsize_x.Content = size_x.ToString("f2");
            obsize_y.Content = size_y.ToString("f2");
            obsize_z.Content = size_z.ToString("f2");
            obdist_y.Content = dist_y.ToString("f2");

            byte[] ethbuf = new byte[11];

            // New: convert obstacle height, length, and distance into the next
            // step length and step height
            int Dmin = 80;            // minimum reserved distance between foot and obstacle
            int Lmax = 800;           // maximum step length the robot allows
            int Hmax = 500;           // maximum step height the robot allows
            int DeltaL = 30;          // maximum measurement error (length)
            int DeltaH = 30;          // maximum measurement error (height)
            int Lnormal = 400;        // normal walking step length
            int Hnormal = 100;        // normal walking step height
            int nStep = 0;            // number of normal-length steps to take next
            int lastStepLength = 0;   // length of the final short step after nStep steps
            int lastStepHeight = 0;   // height of the final short step after nStep steps
            int overStepLength = 0;   // step length for stepping over the obstacle
            int overStepHeight = 0;   // step height for stepping over the obstacle

            nStep = ((int)dist_y - Dmin) / Lnormal;
            if (((int)dist_y - Dmin) % Lnormal <= 80)
            {
                // If the last step would be shorter than 80, clamp it to 80
                lastStepLength = 80;
            }
            else
            {
                lastStepLength = ((int)dist_y - Dmin) % Lnormal;
            }
            lastStepHeight = Hnormal;

            if ((int)size_z + DeltaH < 200)
            {
                // If the step-over height would be below 200, clamp it to 200
                overStepHeight = 200;
            }
            else
            {
                //overStepHeight = (int)size_z + DeltaH;
                overStepHeight = 400;
            }
            overStepLength = 2 * Dmin + (int)size_y + DeltaL;

            // If either the step length or the step height exceeds its limit,
            // zero both, i.e. do not attempt to step over
            if (overStepLength > Lmax || overStepHeight > Hmax)
            {
                overStepLength = 0;
                overStepHeight = 0;
            }

            ComWinTextBox.AppendText("Send to Exoskeleton:" + lastStepLength.ToString() + lastStepHeight.ToString() + overStepLength.ToString() + overStepHeight.ToString() + "\n");

            //jiang
            ethbuf[0] = 0xAA;                                        // start flag
            ethbuf[1] = (byte)nStep;
            ethbuf[2] = (byte)((lastStepLength >> 8) & 0x000000FF);  // high byte of the last step length
            ethbuf[3] = (byte)(lastStepLength & 0x000000FF);         // low byte of the last step length
            ethbuf[4] = (byte)((lastStepHeight >> 8) & 0x000000FF);  // high byte of the last step height
            ethbuf[5] = (byte)(lastStepHeight & 0x000000FF);         // low byte of the last step height
            ethbuf[6] = (byte)((overStepLength >> 8) & 0x000000FF);  // high byte of the step-over length
            ethbuf[7] = (byte)(overStepLength & 0x000000FF);         // low byte of the step-over length
            ethbuf[8] = (byte)((overStepHeight >> 8) & 0x000000FF);  // high byte of the step-over height
            ethbuf[9] = (byte)(overStepHeight & 0x000000FF);         // low byte of the step-over height
            ethbuf[10] = 0xEE;                                       // end flag

            // Send ethbuf over the network
            NetworkStream sendStream = client.GetStream();  // get the stream used for data transfer
            sendStream.Write(ethbuf, 0, ethbuf.Length);     // write the packet to the stream
            //string showmsg = Encoding.Default.GetString(ethbuf, 0, ethbuf.Length);
            //ComWinTextBox.AppendText("Data sent to client:" + showmsg + "\n");

            ////write_point_to_txt(pcx, pcy, pcz, "valid_pc");
            ////write_point_to_txt_1(bpa, bpb, bpc, "best_plane");

            obsize_x.Content = size_x.ToString("f2");
            obsize_y.Content = size_y.ToString("f2");
            obsize_z.Content = size_z.ToString("f2");
            obdist_y.Content = dist_y.ToString("f2");

            // Render the obstacle mask as a grayscale image
            var bytes = new byte[HEIGHT * WIDTH];
            for (int i = 0; i < HEIGHT; i++)
            {
                for (int j = 0; j < WIDTH; j++)
                {
                    bytes[i * WIDTH + j] = todisp[j, i];
                }
            }
            var bs = BitmapSource.Create(WIDTH, HEIGHT, 300, 300, PixelFormats.Gray8, null, bytes, WIDTH);
            var imgSrc = bs as ImageSource;
            imgObs.Source = imgSrc;
            //write_point_to_txt_2(bytes, "obstacle");
        }

        var colorized_depth = colorizer.Colorize(frames.DepthFrame);
        UploadImage(imgDepth, colorized_depth);
        UploadImage(imgColor, frames.ColorFrame);
    }
    catch (Exception ex)
    {
        // Swallowing the exception keeps the timer alive; consider logging ex here.
    }
}
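// For reference, a sketch of how the 11-byte packet built above could be parsed
// on the receiving side. The layout follows the ethbuf assignments: 0xAA start
// flag, nStep, four big-endian 16-bit fields, 0xEE end flag. TryParseStepPacket
// is a hypothetical helper, not part of the original code.
static bool TryParseStepPacket(byte[] buf, out int nStep, out int lastStepLength,
    out int lastStepHeight, out int overStepLength, out int overStepHeight)
{
    nStep = lastStepLength = lastStepHeight = overStepLength = overStepHeight = 0;
    if (buf == null || buf.Length != 11 || buf[0] != 0xAA || buf[10] != 0xEE)
        return false;  // reject packets without the expected framing
    nStep          = buf[1];
    lastStepLength = (buf[2] << 8) | buf[3];
    lastStepHeight = (buf[4] << 8) | buf[5];
    overStepLength = (buf[6] << 8) | buf[7];
    overStepHeight = (buf[8] << 8) | buf[9];
    return true;
}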
public CaptureWindow()
{
    //Log.ToFile(LogSeverity.Debug, "1.log");
    try
    {
        pipeline = new Pipeline();
        colorizer = new Colorizer();

        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480, Format.Z16, 30);
        cfg.EnableStream(Stream.Color, 640, 480, Format.Rgb8, 30);
        var profile = pipeline.Start(cfg);

        var software_dev = new SoftwareDevice();
        var depth_sensor = software_dev.AddSensor("Depth");
        var depth_profile = depth_sensor.AddVideoStream(new VideoStream
        {
            type = Stream.Depth,
            index = 0,
            uid = 100,
            width = 640,
            height = 480,
            fps = 30,
            bpp = 2,
            fmt = Format.Z16,
            intrinsics = (profile.GetStream(Stream.Depth) as VideoStreamProfile).GetIntrinsics()
        });
        var color_sensor = software_dev.AddSensor("Color");
        var color_profile = color_sensor.AddVideoStream(new VideoStream
        {
            type = Stream.Color,
            index = 0,
            uid = 101,
            width = 640,
            height = 480,
            fps = 30,
            bpp = 3,           // 3 bytes per pixel for RGB8 (the original declared
            fmt = Format.Rgb8, // 2/Z16 here, contradicting the Rgb8 stream requested above)
            intrinsics = (profile.GetStream(Stream.Color) as VideoStreamProfile).GetIntrinsics()
        });

        // Note about the Syncer: if the actual FPS is significantly different from the
        // FPS reported in AddVideoStream, this can confuse the syncer and prevent it
        // from producing synchronized pairs
        software_dev.SetMatcher(Matchers.Default);

        var sync = new Syncer();

        depth_sensor.Open(depth_profile);
        color_sensor.Open(color_profile);
        depth_sensor.Start(f =>
        {
            sync.SubmitFrame(f);
            //Debug.WriteLine("D");
        });
        color_sensor.Start(f =>
        {
            sync.SubmitFrame(f);
            //Debug.WriteLine("C");
        });

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            while (!token.IsCancellationRequested)
            {
                var frames = pipeline.WaitForFrames();
                var depth_frame = frames.DepthFrame;
                var color_frame = frames.ColorFrame;

                var bytes = new byte[depth_frame.Stride * depth_frame.Height];
                depth_frame.CopyTo(bytes);
                depth_sensor.AddVideoFrame(bytes, depth_frame.Stride, 2,
                    depth_frame.Timestamp, depth_frame.TimestampDomain,
                    (int)depth_frame.Number, depth_profile);

                bytes = new byte[color_frame.Stride * color_frame.Height];
                color_frame.CopyTo(bytes);
                // Use the color frame's own bpp, timestamp, and number (the original
                // passed bpp 2 and the depth frame's timestamp and number here)
                color_sensor.AddVideoFrame(bytes, color_frame.Stride, 3,
                    color_frame.Timestamp, color_frame.TimestampDomain,
                    (int)color_frame.Number, color_profile);

                depth_frame.Dispose();
                color_frame.Dispose();
                frames.Dispose();

                var new_frames = sync.WaitForFrames();
                if (new_frames.Count == 2)
                {
                    depth_frame = new_frames.DepthFrame;
                    color_frame = new_frames.ColorFrame;

                    var colorized_depth = colorizer.Colorize(depth_frame);
                    UploadImage(imgDepth, colorized_depth);
                    UploadImage(imgColor, color_frame);

                    depth_frame.Dispose();
                    colorized_depth.Dispose();
                    color_frame.Dispose();
                }
                new_frames.Dispose();
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }

    InitializeComponent();
}
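// The FPS caveat above matters when feeding the SoftwareDevice at an uneven rate.
// One way to sidestep it, sketched here and purely illustrative (the snippet above
// forwards the hardware timestamps instead): derive the injected frames' timestamps
// from their frame number at the declared 30 fps, so the rate the syncer observes
// always matches the rate declared in AddVideoStream.
static double TimestampForFrame(ulong frameNumber, double declaredFps = 30.0)
{
    return frameNumber * (1000.0 / declaredFps); // milliseconds since frame 0
}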
public ProcessingWindow()
{
    try
    {
        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480);
        cfg.EnableStream(Stream.Color, Format.Rgb8);
        pipeline.Start(cfg);

        // Create custom processing block
        // For demonstration purposes it will:
        // a. Get a frameset
        // b. Break it down to frames
        // c. Run post-processing on the depth frame
        // d. Combine the result back into a frameset
        // Processing blocks are inherently thread-safe and play well with
        // other API primitives such as frame-queues,
        // and can be used to encapsulate advanced operations.
        // All invocations are, however, synchronous, so the high-level threading
        // model is up to the developer
        block = new CustomProcessingBlock((f, src) =>
        {
            // We create a FramesReleaser object that tracks
            // all newly allocated .NET frames and ensures deterministic finalization
            // at the end of the scope.
            using (var releaser = new FramesReleaser())
            {
                var frames = FrameSet.FromFrame(f, releaser);
                VideoFrame depth = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                VideoFrame color = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                // Apply depth post-processing
                depth = decimate.ApplyFilter(depth, releaser);
                depth = spatial.ApplyFilter(depth, releaser);
                depth = temp.ApplyFilter(depth, releaser);

                // Combine the frames into a single result
                var res = src.AllocateCompositeFrame(releaser, depth, color);
                // Send it to the next processing stage
                src.FramesReady(res);
            }
        });

        // Register to the results of processing via a callback:
        block.Start(f =>
        {
            using (var releaser = new FramesReleaser())
            {
                // Align, colorize and upload frames for rendering
                var frames = FrameSet.FromFrame(f, releaser);

                // Align both frames to the viewport of the color camera
                frames = align.Process(frames, releaser);

                var depth_frame = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                var color_frame = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                UploadImage(imgDepth, colorizer.Colorize(depth_frame, releaser));
                UploadImage(imgColor, color_frame);
            }
        });

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            while (!token.IsCancellationRequested)
            {
                using (var frames = pipeline.WaitForFrames())
                {
                    // Invoke the custom processing block
                    block.ProcessFrames(frames);
                }
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }

    InitializeComponent();
}
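// The helper objects referenced above (pipeline, colorizer, decimate, spatial,
// temp, align, tokenSource, block) are assumed to be fields of ProcessingWindow;
// a sketch of their initialization with default options (the filter types are
// from the Intel.RealSense .NET wrapper):
private readonly Pipeline pipeline = new Pipeline();
private readonly Colorizer colorizer = new Colorizer();
private readonly DecimationFilter decimate = new DecimationFilter();
private readonly SpatialFilter spatial = new SpatialFilter();
private readonly TemporalFilter temp = new TemporalFilter();
private readonly Align align = new Align(Stream.Color); // align depth to the color viewport
private readonly CancellationTokenSource tokenSource = new CancellationTokenSource();
private CustomProcessingBlock block;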
public override Frame Process(Frame frame)
{
    // Colorize the incoming depth frame when enabled; otherwise pass it through.
    return _enabled ? _pb.Colorize(frame as VideoFrame) : frame;
}