Example No. 1
        // Load color data (a video frame) into the internal buffer.
        public void LoadColorData(RealSense.VideoFrame frame)
        {
            if (frame == null)
            {
                return;
            }
            if (frame.Data == IntPtr.Zero)
            {
                return;
            }

            var size = frame.Width * frame.Height;

            if (_colorBuffer != null && _colorBuffer.count != size)
            {
                _colorBuffer.Dispose();
                _colorBuffer = null;
            }

            if (_colorBuffer == null)
            {
                // One 32-bit element per pixel (4-byte stride).
                _colorBuffer = new ComputeBuffer(size, 4);
            }

            // Copy the unmanaged frame data straight into the compute buffer.
            UnsafeUtility.SetUnmanagedData(_colorBuffer, frame.Data, size, 4);

            _dimensions = math.int2(frame.Width, frame.Height);
        }
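For context, a hedged sketch of the fields this method writes to and of how the uploaded buffer might be consumed afterwards. The field declarations, the compute-shader reference, and the property names ColorBuffer/Dimensions are assumptions for illustration, not part of the original example.

        // Assumed surrounding fields (not shown in the snippet above):
        // ComputeBuffer _colorBuffer;            // one 32-bit pixel per element
        // Unity.Mathematics.int2 _dimensions;    // frame width/height

        // Hypothetical consumer: bind the uploaded color data to a compute
        // shader. The shader reference and property names are assumed.
        void BindColorData(ComputeShader compute, int kernel)
        {
            if (_colorBuffer == null) return;
            compute.SetBuffer(kernel, "ColorBuffer", _colorBuffer);
            compute.SetInts("Dimensions", _dimensions.x, _dimensions.y);
        }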
Example No. 2
 // Load color data (a video frame) into the internal buffer.
 // This overload additionally receives the camera intrinsics.
 public void LoadColorData(RealSense.VideoFrame frame, in RealSense.Intrinsics intrinsics)
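The listing truncates this overload to its signature. A minimal sketch of what a body could look like, assuming it reuses the single-argument overload from Example No. 1 and caches the intrinsics in a hypothetical _intrinsics field:

 {
     // Assumption: delegate the buffer upload to the overload above and
     // keep the camera intrinsics for later use (hypothetical field).
     LoadColorData(frame);
     _intrinsics = intrinsics;
 }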
Example No. 3
        public ProcessingWindow()
        {
            try
            {
                var cfg = new Config();
                cfg.EnableStream(Stream.Depth, 640, 480);
                cfg.EnableStream(Stream.Color, Format.Rgb8);
                pipeline.Start(cfg);

                // Create custom processing block
                // For demonstration purposes it will:
                // a. Get a frameset
                // b. Break it down to frames
                // c. Run post-processing on the depth frame
                // d. Combine the result back into a frameset
                // Processing blocks are inherently thread-safe and play well with
                // other API primitives such as frame-queues,
                // and can be used to encapsulate advanced operations.
                // All invocations are, however, synchronous, so the high-level
                // threading model is up to the developer.
                block = new CustomProcessingBlock((f, src) =>
                {
                    // We create a FramesReleaser object that tracks all newly
                    // allocated .NET frames and ensures deterministic
                    // finalization at the end of the scope.
                    using (var releaser = new FramesReleaser())
                    {
                        var frames = FrameSet.FromFrame(f, releaser);

                        VideoFrame depth = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                        VideoFrame color = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                        // Apply depth post-processing
                        depth = decimate.ApplyFilter(depth, releaser);
                        depth = spatial.ApplyFilter(depth, releaser);
                        depth = temp.ApplyFilter(depth, releaser);

                        // Combine the frames into a single result
                        var res = src.AllocateCompositeFrame(releaser, depth, color);
                        // Send it to the next processing stage
                        src.FramesReady(res);
                    }
                });

                // Register to results of processing via a callback:
                block.Start(f =>
                {
                    using (var releaser = new FramesReleaser())
                    {
                        // Align, colorize and upload frames for rendering
                        var frames = FrameSet.FromFrame(f, releaser);

                        // Align both frames to the viewport of color camera
                        frames = align.Process(frames, releaser);

                        var depth_frame = FramesReleaser.ScopedReturn(releaser, frames.DepthFrame);
                        var color_frame = FramesReleaser.ScopedReturn(releaser, frames.ColorFrame);

                        UploadImage(imgDepth, colorizer.Colorize(depth_frame, releaser));
                        UploadImage(imgColor, color_frame);
                    }
                });

                var token = tokenSource.Token;

                var t = Task.Factory.StartNew(() =>
                {
                    while (!token.IsCancellationRequested)
                    {
                        using (var frames = pipeline.WaitForFrames())
                        {
                            // Invoke custom processing block
                            block.ProcessFrames(frames);
                        }
                    }
                }, token);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                Application.Current.Shutdown();
            }

            InitializeComponent();
        }
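The constructor above references several fields that the snippet does not declare. A hedged sketch of plausible declarations using the standard Intel.RealSense types, plus a hypothetical teardown path; none of this is shown in the original excerpt.

        // Assumed field declarations (not part of the snippet above).
        readonly Pipeline pipeline = new Pipeline();
        readonly Colorizer colorizer = new Colorizer();
        readonly DecimationFilter decimate = new DecimationFilter();
        readonly SpatialFilter spatial = new SpatialFilter();
        readonly TemporalFilter temp = new TemporalFilter();
        readonly Align align = new Align(Stream.Color);
        readonly CancellationTokenSource tokenSource = new CancellationTokenSource();
        CustomProcessingBlock block;

        // Hypothetical teardown: cancel the worker task and stop streaming
        // when the window closes.
        protected override void OnClosing(System.ComponentModel.CancelEventArgs e)
        {
            tokenSource.Cancel();
            pipeline.Stop();
            base.OnClosing(e);
        }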
Example No. 4
        public CaptureWindow()
        {
            InitializeComponent();

            try
            {
                Action<VideoFrame> updateDepth;
                Action<VideoFrame> updateColor;

                pipeline  = new Pipeline();
                colorizer = new Colorizer();

                var cfg = new Config();
                cfg.EnableStream(Stream.Depth, 640, 480, Format.Z16, 30);
                cfg.EnableStream(Stream.Color, 640, 480, Format.Rgb8, 30);

                var profile = pipeline.Start(cfg);

                SetupWindow(profile, out updateDepth, out updateColor);

                // Setup the SW device and sensors
                var software_dev  = new SoftwareDevice();
                var depth_sensor  = software_dev.AddSensor("Depth");
                var depth_profile = depth_sensor.AddVideoStream(new VideoStream
                {
                    type       = Stream.Depth,
                    index      = 0,
                    uid        = 100,
                    width      = 640,
                    height     = 480,
                    fps        = 30,
                    bpp        = 2,
                    fmt        = Format.Z16,
                    intrinsics = (profile.GetStream(Stream.Depth) as VideoStreamProfile).GetIntrinsics()
                });
                var color_sensor  = software_dev.AddSensor("Color");
                var color_profile = color_sensor.AddVideoStream(new VideoStream
                {
                    type       = Stream.Color,
                    index      = 0,
                    uid        = 101,
                    width      = 640,
                    height     = 480,
                    fps        = 30,
                    bpp        = 3,
                    fmt        = Format.Rgb8,
                    intrinsics = (profile.GetStream(Stream.Color) as VideoStreamProfile).GetIntrinsics()
                });

                // Note about the Syncer: if the actual FPS differs significantly
                // from the FPS reported in AddVideoStream, the syncer may fail
                // to produce synchronized pairs.
                software_dev.SetMatcher(Matchers.Default);

                var sync = new Syncer();

                depth_sensor.Open(depth_profile);
                color_sensor.Open(color_profile);

                // Push the SW device frames to the syncer
                depth_sensor.Start(f => { sync.SubmitFrame(f); });
                color_sensor.Start(f => { sync.SubmitFrame(f); });

                var token = tokenSource.Token;

                var t = Task.Factory.StartNew(() =>
                {
                    while (!token.IsCancellationRequested)
                    {
                        // We use the frames that are captured from live camera as the input data for the SW device
                        using (var frames = pipeline.WaitForFrames())
                        {
                            var depthFrame = frames.DepthFrame.DisposeWith(frames);
                            var colorFrame = frames.ColorFrame.DisposeWith(frames);

                            var depthBytes = new byte[depthFrame.Stride * depthFrame.Height];
                            depthFrame.CopyTo(depthBytes);
                            depth_sensor.AddVideoFrame(depthBytes, depthFrame.Stride, depthFrame.BitsPerPixel / 8, depthFrame.Timestamp,
                                                       depthFrame.TimestampDomain, (int)depthFrame.Number, depth_profile);

                            var colorBytes = new byte[colorFrame.Stride * colorFrame.Height];
                            colorFrame.CopyTo(colorBytes);
                            color_sensor.AddVideoFrame(colorBytes, colorFrame.Stride, colorFrame.BitsPerPixel / 8, colorFrame.Timestamp,
                                                       colorFrame.TimestampDomain, (int)colorFrame.Number, color_profile);
                        }

                        // Display the frames coming from the SW device after synchronization.
                        using (var new_frames = sync.WaitForFrames())
                        {
                            if (new_frames.Count == 2)
                            {
                                var depthFrame = new_frames.DepthFrame.DisposeWith(new_frames);
                                var colorFrame = new_frames.ColorFrame.DisposeWith(new_frames);

                                VideoFrame colorizedDepth = colorizer.Process(depthFrame).DisposeWith(new_frames) as VideoFrame;

                                // Render the frames.
                                Dispatcher.Invoke(DispatcherPriority.Render, updateDepth, colorizedDepth);
                                Dispatcher.Invoke(DispatcherPriority.Render, updateColor, colorFrame);
                            }
                        }
                    }
                }, token);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                Application.Current.Shutdown();
            }
        }
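The SetupWindow helper and the update delegates it returns are not shown in this excerpt. A hedged sketch of what such a delegate might look like, assuming WPF Image controls backed by WriteableBitmap; the helper name UpdateImage and the control wiring are assumptions for illustration.

        // Hypothetical helper producing an update delegate like updateDepth /
        // updateColor: copies a VideoFrame into the WriteableBitmap that backs
        // an Image control.
        static Action<VideoFrame> UpdateImage(System.Windows.Controls.Image img)
        {
            var wbmp = img.Source as System.Windows.Media.Imaging.WriteableBitmap;
            return frame =>
            {
                var rect = new System.Windows.Int32Rect(0, 0, frame.Width, frame.Height);
                wbmp.WritePixels(rect, frame.Data, frame.Stride * frame.Height, frame.Stride);
            };
        }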