public ProcessingWindow()
{
    InitializeComponent();

    try
    {
        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480);
        cfg.EnableStream(Stream.Color, Format.Rgb8);

        var pp = pipeline.Start(cfg);

        // Collect the processing blocks recommended by each sensor
        var blocks = new List<ProcessingBlock>();
        foreach (var sensor in pp.Device.Sensors)
        {
            foreach (var processingBlock in sensor.ProcessingBlocks)
            {
                blocks.Add(processingBlock);
            }
        }

        // Allocate bitmaps for rendering.
        // Since the sample aligns the depth frames to the color frames, both images will have the color resolution
        using (var p = pp.GetStream(Stream.Color).As<VideoStreamProfile>())
        {
            imgColor.Source = new WriteableBitmap(p.Width, p.Height, 96d, 96d, PixelFormats.Rgb24, null);
            imgDepth.Source = new WriteableBitmap(p.Width, p.Height, 96d, 96d, PixelFormats.Rgb24, null);
        }
        var updateColor = UpdateImage(imgColor);
        var updateDepth = UpdateImage(imgDepth);

        // Create custom processing block
        // For demonstration purposes it will:
        // a. Get a frameset
        // b. Run post-processing on the depth frame
        // c. Combine the result back into a frameset
        // Processing blocks are inherently thread-safe and play well with
        // other API primitives such as frame-queues,
        // and can be used to encapsulate advanced operations.
        // All invocations are, however, synchronous, so the high-level threading model
        // is up to the developer
        block = new CustomProcessingBlock((f, src) =>
        {
            // We create a FramesReleaser object that tracks all newly allocated
            // .NET frames and ensures deterministic finalization at the end of scope.
            using (var releaser = new FramesReleaser())
            {
                var frames = FrameSet.FromFrame(f).DisposeWith(releaser);

                foreach (ProcessingBlock p in blocks)
                {
                    frames = frames.ApplyFilter(p).DisposeWith(releaser);
                }

                frames = frames.ApplyFilter(align).DisposeWith(releaser);
                frames = frames.ApplyFilter(colorizer).DisposeWith(releaser);

                var colorFrame = frames[Stream.Color, Format.Rgb8].DisposeWith(releaser);
                var colorizedDepth = frames[Stream.Depth, Format.Rgb8].DisposeWith(releaser);

                // Combine the frames into a single result
                var res = src.AllocateCompositeFrame(colorizedDepth, colorFrame).DisposeWith(releaser);

                // Send it to the next processing stage
                src.FramesReady(res);
            }
        });

        // Register to results of processing via a callback:
        block.Start(f =>
        {
            using (var frames = FrameSet.FromFrame(f))
            {
                var colorFrame = frames.ColorFrame.DisposeWith(frames);
                var colorizedDepth = frames[Stream.Depth, Format.Rgb8].As<VideoFrame>().DisposeWith(frames);

                Dispatcher.Invoke(DispatcherPriority.Render, updateDepth, colorizedDepth);
                Dispatcher.Invoke(DispatcherPriority.Render, updateColor, colorFrame);
            }
        });

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            while (!token.IsCancellationRequested)
            {
                using (var frames = pipeline.WaitForFrames())
                {
                    // Invoke custom processing block
                    block.ProcessFrames(frames);
                }
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }
}
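// The constructor above relies on several members the snippet does not show: the
// pipeline, colorizer, align, block, and tokenSource fields, plus the UpdateImage
// factory. A minimal sketch of what they might look like, following the librealsense
// C# sample conventions; the field initializers and the UpdateImage body below are
// assumptions, not code from the source:

private readonly Pipeline pipeline = new Pipeline();
private readonly Colorizer colorizer = new Colorizer();
private readonly Align align = new Align(Stream.Color); // align depth to the color viewport
private CustomProcessingBlock block;
private readonly CancellationTokenSource tokenSource = new CancellationTokenSource();

// Plausible UpdateImage: returns a delegate that copies a VideoFrame into the
// WriteableBitmap already assigned to the given WPF Image control.
static Action<VideoFrame> UpdateImage(System.Windows.Controls.Image img)
{
    var wbmp = img.Source as WriteableBitmap;
    return frame =>
    {
        var rect = new Int32Rect(0, 0, frame.Width, frame.Height);
        wbmp.WritePixels(rect, frame.Data, frame.Stride * frame.Height, frame.Stride);
    };
}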
public ProcessingWindow()
{
    InitializeComponent();

    try
    {
        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480);
        cfg.EnableStream(Stream.Color, Format.Rgb8);

        pipeline.Start(cfg);

        // Create custom processing block
        // For demonstration purposes it will:
        // a. Get a frameset
        // b. Break it down to frames
        // c. Run post-processing on the depth frame
        // d. Combine the result back into a frameset
        // Processing blocks are inherently thread-safe and play well with
        // other API primitives such as frame-queues,
        // and can be used to encapsulate advanced operations.
        // All invocations are, however, synchronous, so the high-level threading model
        // is up to the developer
        block = new CustomProcessingBlock((f, src) =>
        {
            // We create a FramesReleaser object that tracks all newly allocated
            // .NET frames and ensures deterministic finalization at the end of scope.
            using (var releaser = new FramesReleaser())
            {
                var frames = FrameSet.FromFrame(f, releaser);

                VideoFrame depth = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                VideoFrame color = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                // Apply depth post-processing
                depth = decimate.ApplyFilter(depth, releaser);
                depth = spatial.ApplyFilter(depth, releaser);
                depth = temp.ApplyFilter(depth, releaser);

                // Combine the frames into a single result
                var res = src.AllocateCompositeFrame(releaser, depth, color);

                // Send it to the next processing stage
                src.FramesReady(res);
            }
        });

        // Register to results of processing via a callback:
        block.Start(f =>
        {
            using (var releaser = new FramesReleaser())
            {
                // Align, colorize and upload frames for rendering
                var frames = FrameSet.FromFrame(f, releaser);

                // Align both frames to the viewport of the color camera
                frames = align.Process(frames, releaser);

                var depth_frame = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                var color_frame = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                UploadImage(imgDepth, colorizer.Colorize(depth_frame, releaser));
                UploadImage(imgColor, color_frame);
            }
        });

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            while (!token.IsCancellationRequested)
            {
                using (var frames = pipeline.WaitForFrames())
                {
                    // Invoke custom processing block
                    block.ProcessFrames(frames);
                }
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }
}
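// In addition to the members sketched after the first version, this variant assumes
// the depth post-processing filter objects and an UploadImage helper. A hedged sketch,
// in which the per-frame BitmapSource approach and the DPI values are assumptions:

private readonly DecimationFilter decimate = new DecimationFilter();
private readonly SpatialFilter spatial = new SpatialFilter();
private readonly TemporalFilter temp = new TemporalFilter();

// Plausible UploadImage: marshals the frame onto the UI thread and rebuilds the
// Image source from the raw frame buffer on every call.
private void UploadImage(System.Windows.Controls.Image img, VideoFrame frame)
{
    Dispatcher.Invoke(new Action(() =>
    {
        var bytes = frame.Width * frame.Height * frame.BitsPerPixel / 8;
        img.Source = BitmapSource.Create(frame.Width, frame.Height, 96d, 96d,
            PixelFormats.Rgb24, null, frame.Data, bytes, frame.Stride);
    }));
}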
/**
 * NOTES
 * Currently the program starts recording immediately after linking with LabStreamLayer.
 * There might be a better solution, but we don't want to increase the number of button
 * presses in the protocol. It is probably better to record too much than to forget to
 * press the record button before an experiment.
 **/

// Code taken directly from the LibRealSense 2 examples -- captures and displays the depth and RGB camera streams.
private void startRecordingProcess()
{
    try
    {
        pipeline = new Pipeline();
        colorizer = new Colorizer();

        var cfg = new Config();
        cfg.EnableStream(Stream.Depth, 640, 480, Format.Z16, 30);
        cfg.EnableStream(Stream.Color, 640, 480, Format.Bgr8, 30);
        //cfg.EnableRecordToFile(fileRecording); // This is now taken care of by FFMPEG

        pipeline.Start(cfg);
        applyRecordingConfig();

        processBlock = new CustomProcessingBlock((f, src) =>
        {
            using (var releaser = new FramesReleaser())
            {
                var frames = FrameSet.FromFrame(f, releaser);

                VideoFrame depth = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                VideoFrame color = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                var res = src.AllocateCompositeFrame(releaser, depth, color);
                src.FramesReady(res);
            }
        });

        processBlock.Start(f =>
        {
            using (var releaser = new FramesReleaser())
            {
                var frames = FrameSet.FromFrame(f, releaser);

                var depth_frame = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                var color_frame = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                var colorized_depth = colorizer.Colorize(depth_frame);

                UploadImage(imgDepth, colorized_depth);
                UploadImage(imgColor, color_frame);

                // Record via FFMPEG
                Bitmap bmpColor = new Bitmap(color_frame.Width, color_frame.Height, color_frame.Stride,
                    System.Drawing.Imaging.PixelFormat.Format24bppRgb, color_frame.Data);
                vidWriter_Color.WriteVideoFrame(bmpColor);

                Bitmap bmpDepth = new Bitmap(colorized_depth.Width, colorized_depth.Height, colorized_depth.Stride,
                    System.Drawing.Imaging.PixelFormat.Format24bppRgb, colorized_depth.Data);
                vidWriter_Depth.WriteVideoFrame(bmpDepth);

                if (lslOutlet != null)
                {
                    // Do LSL streaming here
                    sample[0] = "" + colorized_depth.Number + "_" + colorized_depth.Timestamp;
                    sample[1] = "" + color_frame.Number + "_" + color_frame.Timestamp;
                    lslOutlet.push_sample(sample, liblsl.local_clock());
                }
            }
        });

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            // Main loop
            while (!token.IsCancellationRequested)
            {
                using (var frames = pipeline.WaitForFrames())
                {
                    processBlock.ProcessFrames(frames);
                }
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }
}
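// applyRecordingConfig, the two vidWriter_* writers, and the LSL members are declared
// elsewhere in this class. A sketch of how they could be initialized, assuming
// Accord.Video.FFMPEG's VideoFileWriter and the standard liblsl C# binding; the file
// names, stream name, and source id below are placeholders, not values from the source:

private VideoFileWriter vidWriter_Color, vidWriter_Depth; // requires: using Accord.Video.FFMPEG;
private liblsl.StreamOutlet lslOutlet;
private string[] sample = new string[2];

private void applyRecordingConfig()
{
    // Two MPEG-4 writers matching the 640x480 @ 30 fps streams configured above.
    vidWriter_Color = new VideoFileWriter();
    vidWriter_Color.Open("color.avi", 640, 480, 30, VideoCodec.MPEG4);

    vidWriter_Depth = new VideoFileWriter();
    vidWriter_Depth.Open("depth.avi", 640, 480, 30, VideoCodec.MPEG4);

    // Two string channels, one marker per camera, each "<frameNumber>_<timestamp>",
    // pushed once per frameset with the LSL local clock as the timestamp.
    var info = new liblsl.StreamInfo("RealSenseFrames", "Markers", 2, 0,
        liblsl.channel_format_t.cf_string, "realsense-frames-uid");
    lslOutlet = new liblsl.StreamOutlet(info);
}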
public ProcessingWindow()
{
    InitializeComponent();

    try
    {
        var cfg = new Config();

        using (var ctx = new Context())
        {
            var devices = ctx.QueryDevices();
            var dev = devices[0];

            Console.WriteLine("\nUsing device 0, an {0}", dev.Info[CameraInfo.Name]);
            Console.WriteLine(" Serial number: {0}", dev.Info[CameraInfo.SerialNumber]);
            Console.WriteLine(" Firmware version: {0}", dev.Info[CameraInfo.FirmwareVersion]);

            var sensors = dev.QuerySensors();
            var depthSensor = sensors[0];
            var colorSensor = sensors[1];

            var depthProfile = depthSensor.StreamProfiles
                                .Where(p => p.Stream == Stream.Depth)
                                .OrderBy(p => p.Framerate)
                                .Select(p => p.As<VideoStreamProfile>())
                                .First();

            var colorProfile = colorSensor.StreamProfiles
                                .Where(p => p.Stream == Stream.Color)
                                .OrderBy(p => p.Framerate)
                                .Select(p => p.As<VideoStreamProfile>())
                                .First();

            cfg.EnableStream(Stream.Depth, depthProfile.Width, depthProfile.Height, depthProfile.Format, depthProfile.Framerate);
            cfg.EnableStream(Stream.Color, colorProfile.Width, colorProfile.Height, colorProfile.Format, colorProfile.Framerate);
        }

        var pp = pipeline.Start(cfg);

        // Get the recommended processing blocks for the depth sensor
        var sensor = pp.Device.QuerySensors().First(s => s.Is(Extension.DepthSensor));
        var blocks = sensor.ProcessingBlocks.ToList();

        // Allocate bitmaps for rendering.
        // Since the sample aligns the depth frames to the color frames, both images will have the color resolution
        using (var p = pp.GetStream(Stream.Color).As<VideoStreamProfile>())
        {
            imgColor.Source = new WriteableBitmap(p.Width, p.Height, 96d, 96d, PixelFormats.Rgb24, null);
            imgDepth.Source = new WriteableBitmap(p.Width, p.Height, 96d, 96d, PixelFormats.Rgb24, null);
        }
        var updateColor = UpdateImage(imgColor);
        var updateDepth = UpdateImage(imgDepth);

        // Create custom processing block
        // For demonstration purposes it will:
        // a. Get a frameset
        // b. Run post-processing on the depth frame
        // c. Combine the result back into a frameset
        // Processing blocks are inherently thread-safe and play well with
        // other API primitives such as frame-queues,
        // and can be used to encapsulate advanced operations.
        // All invocations are, however, synchronous, so the high-level threading model
        // is up to the developer
        block = new CustomProcessingBlock((f, src) =>
        {
            // We create a FramesReleaser object that tracks all newly allocated
            // .NET frames and ensures deterministic finalization at the end of scope.
            using (var releaser = new FramesReleaser())
            {
                foreach (ProcessingBlock p in blocks)
                {
                    f = p.Process(f).DisposeWith(releaser);
                }

                f = f.ApplyFilter(align).DisposeWith(releaser);
                f = f.ApplyFilter(colorizer).DisposeWith(releaser);

                var frames = f.As<FrameSet>().DisposeWith(releaser);

                var colorFrame = frames[Stream.Color, Format.Rgb8].DisposeWith(releaser);
                var colorizedDepth = frames[Stream.Depth, Format.Rgb8].DisposeWith(releaser);

                // Combine the frames into a single result
                var res = src.AllocateCompositeFrame(colorizedDepth, colorFrame).DisposeWith(releaser);

                // Send it to the next processing stage
                src.FrameReady(res);
            }
        });

        // Register to results of processing via a callback:
        block.Start(f =>
        {
            using (var frames = f.As<FrameSet>())
            {
                var colorFrame = frames.ColorFrame.DisposeWith(frames);
                var colorizedDepth = frames.First<VideoFrame>(Stream.Depth, Format.Rgb8).DisposeWith(frames);

                Dispatcher.Invoke(DispatcherPriority.Render, updateDepth, colorizedDepth);
                Dispatcher.Invoke(DispatcherPriority.Render, updateColor, colorFrame);
            }
        });

        var token = tokenSource.Token;
        var t = Task.Factory.StartNew(() =>
        {
            while (!token.IsCancellationRequested)
            {
                using (var frames = pipeline.WaitForFrames())
                {
                    // Invoke custom processing block
                    block.Process(frames);
                }
            }
        }, token);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
        Application.Current.Shutdown();
    }
}
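// None of the variants shows how the capture loop is stopped. They all share the
// tokenSource field, so a window Closing handler along these lines would end the
// background task; the handler name and its XAML hookup (Closing="control_Closing")
// are assumptions:

private void control_Closing(object sender, System.ComponentModel.CancelEventArgs e)
{
    // Signal the WaitForFrames loop to exit; the using block inside the loop
    // disposes the last frameset before the task completes.
    tokenSource.Cancel();
}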