private void InitializePointCloud(Vector3[] positions, BoundingBox positionsBounds, Color4[] positionColors)
        {
            if (MainDXViewportView.DXScene == null)
            {
                return; // This method was called too soon (before the DXEngine was initialized) or the scene is rendered with WPF 3D
            }
            // First, set up the material:

            // Create a new PixelMaterial
            _pixelMaterial = new PixelMaterial()
            {
                PixelColor  = Color4.White, // When using PixelColors, PixelColor is used as a mask (multiplied with each color)
                PixelSize   = 2,
                PixelColors = positionColors,
            };

            _pixelMaterial.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _disposables.Add(_pixelMaterial);


            // Now set up the mesh and create SceneNode to show it
            _optimizedPointMesh = new OptimizedPointMesh<Vector3>(positions,
                                                                  positionsBounds,
                                                                  segmentsCount: 100);

            // NOTE that you can also use an OptimizedPointMesh that takes a more complex vertex struct, for example PositionColor or PositionNormal; in that case use the other constructor.

            _optimizedPointMesh.OptimizationIndicesNumberThreshold = 100000; // We are satisfied with reducing the number of shown positions to 100,000 (no need to optimize further; a higher threshold reduces the initialization time)
            _optimizedPointMesh.MaxOptimizationViewsCount          = 10;     // Maximum number of created data sub-sets. The actual number can be lower when we hit the OptimizationIndicesNumberThreshold or when all vertices need to be shown.

            _optimizedPointMesh.Optimize(new SharpDX.Size2(MainDXViewportView.DXScene.Width, MainDXViewportView.DXScene.Height), standardPointSize: 1);

            _optimizedPointMesh.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _disposables.Add(_optimizedPointMesh);


            // To render the OptimizedPointMesh we need to use a CustomRenderableNode that provides a custom rendering callback action.
            var customRenderableNode = new CustomRenderableNode(RenderAction, _optimizedPointMesh.Bounds, _optimizedPointMesh, _pixelMaterial);

            customRenderableNode.Name = "CustomRenderableNode";
            //customRenderableNode.CustomRenderingQueue = MainDXViewportView.DXScene.BackgroundRenderingQueue;

            _disposables.Add(customRenderableNode);

            var sceneNodeVisual3D = new SceneNodeVisual3D(customRenderableNode);

            //sceneNodeVisual3D.Transform = transform;

            MainViewport.Children.Add(sceneNodeVisual3D);


            Camera1.TargetPosition = positionsBounds.Center.ToWpfPoint3D();
            Camera1.Distance       = positionsBounds.ToRect3D().GetDiagonalLength() * 0.5;
        }
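
        // The examples in this listing pass a RenderAction callback to the CustomRenderableNode, but the callback itself
        // is not part of the listing. The sketch below shows the typical pattern from the DXEngine point-cloud samples;
        // the delegate signature and the UpdateVisibleSegments / RenderGeometry method names are assumptions based on
        // that pattern and may differ in your version of the library:
        private void RenderAction(RenderingContext renderingContext, CustomRenderableNode customRenderableNode, object objectToRender)
        {
            // Re-select the pre-optimized positions sub-set for the current camera and viewport,
            // then render only the segments that are visible from the current camera (hypothetical API, see note above):
            _optimizedPointMesh.UpdateVisibleSegments(renderingContext.UsedCamera);
            _optimizedPointMesh.RenderGeometry(renderingContext);
        }
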
        private void ShowPositionsArray(Vector3[] positionsArray, float pixelSize, Color4 pixelColor, Bounds positionBounds)
        {
            BoundingBox positionsBoundingBox;

            // To correctly set the Camera's Near and Far distance, we need to provide the correct bounds of each shown 3D model.
            if (positionBounds != null && !positionBounds.IsEmpty)
            {
                // It is highly recommended to manually set the Bounds.
                positionsBoundingBox = positionBounds.BoundingBox;
            }
            else
            {
                // If the bounds were not provided, we calculate them from the positions:
                positionsBoundingBox = BoundingBox.FromPoints(positionsArray);
            }


            // Create an OptimizedPointMesh that will optimize the rendering of positions.
            // It uses two techniques to do that:

            _optimizedPointMesh = new OptimizedPointMesh<Vector3>(positionsArray,
                                                                  positionsBoundingBox,
                                                                  segmentsCount: 100); // All the positions are divided into 100 segments; when rendering, each segment is checked for visibility in the current camera (if it is not visible, it is not rendered)

            // NOTE that you can also use an OptimizedPointMesh that takes a more complex vertex struct, for example PositionColor or PositionNormal; in that case use the other constructor.

            _optimizedPointMesh.OptimizationIndicesNumberTreshold = 100000; // We are satisfied with reducing the number of shown positions to 100,000 (no need to optimize further; a higher threshold reduces the initialization time)
            _optimizedPointMesh.MaxOptimizationViewsCount         = 10;     // Maximum number of created data sub-sets. The actual number can be lower when we hit the OptimizationIndicesNumberTreshold or when all vertices need to be shown.

            _optimizedPointMesh.Optimize(new SharpDX.Size2(MainDXViewportView.DXScene.Width, MainDXViewportView.DXScene.Height), pixelSize);

            _optimizedPointMesh.InitializeResources(MainDXViewportView.DXScene.DXDevice);



            // We will need to dispose the OptimizedPointMesh
            _modelDisposables.Add(_optimizedPointMesh);


            // Create a new PixelMaterial
            _pixelMaterial = new PixelMaterial()
            {
                PixelColor = pixelColor,
                PixelSize  = pixelSize,

                // By default, the graphics card renders objects that are closer to the camera over objects that are farther away.
                // This means that positions that are closer to the camera will be rendered over the positions that are farther away.
                // This may distort the shown colors.
                // Therefore, when using pixel colors, it is better to disable depth-buffer checking and render all the pixels.
                // This is done by setting ReadZBuffer and WriteZBuffer to false.
                ReadZBuffer  = false,
                WriteZBuffer = false
            };


            // It is also possible to set per-pixel colors (or per-pixel sizes by setting PixelSizes - not demonstrated here; see the sketch after this listing).
            // This comes with a performance drawback (see comment below).
            //
            // To test per-pixel colors, uncomment the following code:

            //var pixelColors = new Color4[positionsArray.Length];
            //for (int i = 0; i < positionsArray.Length; i++)
            //    pixelColors[i] = new Color4((i % 2 == 0) ? 1 : 0, 0, (i % 2 != 0) ? 1 : 0, 1);

            //_pixelMaterial.PixelColors = pixelColors;
            //_pixelMaterial.PixelColor = Color4.White; // When the PixelColors array is used, PixelColor is used as a mask (each color in PixelColors is multiplied with PixelColor). To preserve the colors in PixelColors we need to set PixelColor to White.

            // By default the OptimizedPointMesh "combines" positions that are close together (closer than the size of one pixel on the screen)
            // and renders only some of them. In this case it is possible that only every second point (or every tenth point) is rendered,
            // and this can remove the "color mixing" in our sample.
            // In such cases it is possible to disable this optimization by setting OptimizePositions to false:
            //_optimizedPointMesh.OptimizePositions = false;
            //
            // After this the OptimizedPointMesh will only provide the optimization that groups positions into 100 segments
            // and then checks which segments are visible in the camera (by checking each segment's bounding box).
            // But when the camera is positioned in such a way that all positions are visible,
            // all positions will be sent to the graphics card - in such cases OptimizePositions can provide good results by skipping some pixels.
            //
            // However, if the actual colors in your data do not have such sharp color differences (have more gradients),
            // then this problem should not be visible.


            _pixelMaterial.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _modelDisposables.Add(_pixelMaterial);


            // To render the OptimizedPointMesh we need to use a CustomRenderableNode that provides a custom rendering callback action.
            _customRenderableNode      = new CustomRenderableNode(RenderAction, _optimizedPointMesh.Bounds, _optimizedPointMesh, _pixelMaterial);
            _customRenderableNode.Name = "CustomRenderableNode";

            _modelDisposables.Add(_customRenderableNode);

            var sceneNodeVisual3D = new SceneNodeVisual3D(_customRenderableNode);

            //sceneNodeVisual3D.Transform = transform;

            MainViewport.Children.Add(sceneNodeVisual3D);
        }
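
        // A minimal usage sketch for the ShowPositionsArray method above: it generates a simple 100 x 100 grid of
        // points and shows them with manually provided bounds (the ShowDemoPositions method name is illustrative;
        // the Bounds(BoundingBox) constructor is the one also used in the example below):
        private void ShowDemoPositions()
        {
            var positionsArray = new Vector3[100 * 100];

            int index = 0;
            for (int z = 0; z < 100; z++)
            {
                for (int x = 0; x < 100; x++)
                    positionsArray[index++] = new Vector3(x, 0, z);
            }

            // Because we know how the positions were generated, we can provide the bounds without scanning the array
            var positionBounds = new Bounds(new BoundingBox(new Vector3(0, 0, 0), new Vector3(99, 0, 99)));

            ShowPositionsArray(positionsArray, pixelSize: 2f, pixelColor: new Color4(0, 0, 0, 1), positionBounds);
        }
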
        private void InitializePointCloud(Vector3[] positions, BoundingBox positionsBounds, Color4[] positionColors, bool useOptimizedPointMesh, bool disableDepthRead, bool disableDepthWrite)
        {
            if (MainDXViewportView.DXScene == null)
            {
                return; // This method was called too soon (before the DXEngine was initialized) or the scene is rendered with WPF 3D
            }
            // First, set up the material:

            // Create a new PixelMaterial
            _pixelMaterial = new PixelMaterial()
            {
                PixelColor  = Color4.White, // When using PixelColors, PixelColor is used as a mask (multiplied with each color)
                PixelSize   = 2,
                PixelColors = positionColors,

                // By default, the graphics card renders objects that are closer to the camera over objects that are farther away.
                // This means that positions that are closer to the camera will be rendered over the positions that are farther away.
                // This may distort the shown colors.
                // Therefore, when using pixel colors, it is better to disable depth-buffer checking and render all the pixels.
                // This is done by setting ReadZBuffer and WriteZBuffer to false.
                ReadZBuffer  = !disableDepthRead,
                WriteZBuffer = !disableDepthWrite
            };

            _pixelMaterial.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _disposables.Add(_pixelMaterial);


            // Now set up the mesh and create SceneNode to show it

            if (useOptimizedPointMesh)
            {
                _optimizedPointMesh = new OptimizedPointMesh<Vector3>(positions,
                                                                      positionsBounds,
                                                                      segmentsCount: 100);

                // NOTE that you can also use an OptimizedPointMesh that takes a more complex vertex struct, for example PositionColor or PositionNormal; in that case use the other constructor.

                _optimizedPointMesh.OptimizationIndicesNumberThreshold = 100000; // We are satisfied with reducing the number of shown positions to 100,000 (no need to optimize further; a higher threshold reduces the initialization time)
                _optimizedPointMesh.MaxOptimizationViewsCount          = 10;     // Maximum number of created data sub-sets. The actual number can be lower when we hit the OptimizationIndicesNumberThreshold or when all vertices need to be shown.

                _optimizedPointMesh.Optimize(new SharpDX.Size2(MainDXViewportView.DXScene.Width, MainDXViewportView.DXScene.Height), standardPointSize: 1);

                _optimizedPointMesh.InitializeResources(MainDXViewportView.DXScene.DXDevice);

                _disposables.Add(_optimizedPointMesh);


                // To render the OptimizedPointMesh we need to use a CustomRenderableNode that provides a custom rendering callback action.
                var customRenderableNode = new CustomRenderableNode(RenderAction, _optimizedPointMesh.Bounds, _optimizedPointMesh, _pixelMaterial);
                customRenderableNode.Name = "CustomRenderableNode";
                //customRenderableNode.CustomRenderingQueue = MainDXViewportView.DXScene.BackgroundRenderingQueue;

                _disposables.Add(customRenderableNode);

                var sceneNodeVisual3D = new SceneNodeVisual3D(customRenderableNode);
                //sceneNodeVisual3D.Transform = transform;

                MainViewport.Children.Add(sceneNodeVisual3D);
            }
            else
            {
                // Use SimpleMesh - all positions will be always rendered:

                var simpleMesh = new SimpleMesh<Vector3>(vertexBufferArray: positions,
                                                         indexBufferArray: null,
                                                         inputLayoutType: InputLayoutType.Position);

                simpleMesh.PrimitiveTopology = PrimitiveTopology.PointList; // We need to change the default PrimitiveTopology.TriangleList to PointList

                // To correctly set the Camera's Near and Far distance, we need to provide the correct bounds of each shown 3D model.

                // It is highly recommended to manually set the Bounds.
                simpleMesh.Bounds = new Bounds(positionsBounds);

                // if we do not manually set the Bounds, then we need to call CalculateBounds to calculate the bounds
                //simpleMesh.CalculateBounds();

                // We will need to dispose the SimpleMesh
                _disposables.Add(simpleMesh);


                // Now create a new MeshObjectNode
                _meshObjectNode = new Ab3d.DirectX.MeshObjectNode(simpleMesh, _pixelMaterial);

                _disposables.Add(_meshObjectNode);

                // To be able to add the MeshObjectNode (or any other SceneNode) to WPF's Viewport3D,
                // we need to create a SceneNodeVisual3D
                var sceneNodeVisual3D = new SceneNodeVisual3D(_meshObjectNode);

                MainViewport.Children.Add(sceneNodeVisual3D);
            }


            Camera1.TargetPosition = positionsBounds.Center.ToWpfPoint3D();
            Camera1.Distance       = positionsBounds.ToRect3D().GetDiagonalLength();
        }
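
        // The _disposables DisposeList referenced in these examples is disposed when the control is unloaded so that
        // all native DirectX resources are released. A minimal sketch of that wiring, assuming this code lives in a
        // UserControl with an Unloaded event (the handler name is illustrative):
        private void OnUnloaded(object sender, RoutedEventArgs e)
        {
            // Dispose all registered materials, meshes and SceneNodes first,
            // then dispose the DXViewportView that owns the DXScene and DXDevice:
            _disposables.Dispose();
            MainDXViewportView.Dispose();
        }
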
        private void ShowPositionsArray(Vector3[] positionsArray, float pixelSize, Color4 pixelColor, Bounds positionBounds)
        {
            if (_isUsingPixelsVisual3D)
            {
                // The easiest way to show many pixels is to use PixelsVisual3D.
                var pixelsVisual3D = new PixelsVisual3D()
                {
                    Positions  = positionsArray,
                    PixelColor = pixelColor.ToWpfColor(),
                    PixelSize  = pixelSize
                };

                // It is highly recommended to manually set the PositionsBounds.
                // If this is not done, the bounds are calculated by the DXEngine by checking all the positions.
                pixelsVisual3D.PositionsBounds = positionBounds;

                MainViewport.Children.Add(pixelsVisual3D);

                // !!! IMPORTANT !!!
                // When the PixelsVisual3D is no longer used, it needs to be disposed (we are using a DisposeList to dispose all objects in the Unloaded event handler)
                _disposables.Add(pixelsVisual3D);

                return;
            }


            // The first step in showing the positions from the positionsArray as pixels is to create a SimpleMesh<Vector3>.
            // This will create a DirectX VertexBuffer that will be passed to the shaders.
            var simpleMesh = new SimpleMesh<Vector3>(vertexBufferArray: positionsArray,
                                                     indexBufferArray: null,
                                                     inputLayoutType: InputLayoutType.Position);

            simpleMesh.PrimitiveTopology = PrimitiveTopology.PointList; // We need to change the default PrimitiveTopology.TriangleList to PointList

            // To correctly set the Camera's Near and Far distance, we need to provide the correct bounds of each shown 3D model.

            if (positionBounds != null && !positionBounds.IsEmpty)
            {
                // It is highly recommended to manually set the Bounds.
                simpleMesh.Bounds = positionBounds;
            }
            else
            {
                // If we do not manually set the Bounds, then we need to call CalculateBounds to calculate them
                simpleMesh.CalculateBounds();
            }


            // We will need to dispose the SimpleMesh
            _disposables.Add(simpleMesh);


            // Create a new PixelMaterial
            _pixelMaterial = new PixelMaterial()
            {
                PixelColor = pixelColor,
                PixelSize  = pixelSize
            };

            _pixelMaterial.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _disposables.Add(_pixelMaterial);


            // Now create a new MeshObjectNode
            _meshObjectNode = new Ab3d.DirectX.MeshObjectNode(simpleMesh, _pixelMaterial);

            _disposables.Add(_meshObjectNode);

            // To be able to add the MeshObjectNode (or any other SceneNode) to WPF's Viewport3D,
            // we need to create a SceneNodeVisual3D
            var sceneNodeVisual3D = new SceneNodeVisual3D(_meshObjectNode);

            MainViewport.Children.Add(sceneNodeVisual3D);
        }
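
        // The comment in ShowPositionsArray mentions per-pixel sizes via the PixelSizes property but does not
        // demonstrate them. A minimal sketch, assuming PixelSizes takes one float per position (the property name
        // comes from the comment above; the one-size-per-position semantics is an assumption):
        private void UsePerPixelSizes(Vector3[] positionsArray)
        {
            var pixelSizes = new float[positionsArray.Length];

            for (int i = 0; i < pixelSizes.Length; i++)
                pixelSizes[i] = (i % 2 == 0) ? 1f : 4f; // alternate between small and large pixels

            _pixelMaterial.PixelSizes = pixelSizes;
        }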