Example #1
        public CustomRenderingStep4()
        {
            InitializeComponent();


            // Instead of creating a new RenderingStep as in the previous sample,
            // we will use a CustomRenderableNode.
            // This type of SceneNode object allows specifying a custom rendering action
            // that is called to render the object.

            // CustomRenderableNode also requires bounds so that the camera's near and far plane calculations can take the custom data into account.
            var bounds = CustomRenderingStep1.GetSharpDXBoxBounds();
            var customRenderableNode = new CustomRenderableNode(CustomRenderAction, bounds);

            // To add CustomRenderableNode to the 3D scene, we need to embed it into a SceneNodeVisual3D
            var sceneNodeVisual3D = new SceneNodeVisual3D(customRenderableNode);

            MainViewport.Children.Add(sceneNodeVisual3D);



            MainDXViewportView.DXSceneInitialized += delegate(object sender, EventArgs args)
            {
                if (MainDXViewportView.DXScene == null) // When DXEngine falls back to WPF 3D rendering, the DXScene is null; we could also check for MainDXViewportView.UsedGraphicsProfile.DriverType != GraphicsProfile.DriverTypes.Wpf3D
                {
                    return;
                }

                InitializeSharpDXRendering(MainDXViewportView.DXScene);
            };

            this.Unloaded += delegate { Dispose(); };
        }
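
The constructor above calls InitializeSharpDXRendering, which is not shown in this snippet. The following is a minimal sketch of what such a method could look like, based on the fields used in Example #2 (_layout, _vertexBufferBinding, _constantBuffer, _vertexShader, _pixelShader) and on the standard SharpDX cube setup; the "MiniCube.fx" file name, the position + color vertex layout and the GetCubeVertices helper are assumptions, not part of the original sample.

        // A minimal sketch, assuming a Vector4 position + Vector4 color vertex layout
        // and a hypothetical "MiniCube.fx" HLSL file with VS and PS entry points.
        private void InitializeSharpDXRendering(DXScene dxScene)
        {
            var device = dxScene.DXDevice.Device; // the underlying SharpDX.Direct3D11.Device (assumed accessor)

            // Compile the vertex and pixel shaders
            var vertexShaderByteCode = ShaderBytecode.CompileFromFile("MiniCube.fx", "VS", "vs_4_0");
            _vertexShader = new VertexShader(device, vertexShaderByteCode);

            var pixelShaderByteCode = ShaderBytecode.CompileFromFile("MiniCube.fx", "PS", "ps_4_0");
            _pixelShader = new PixelShader(device, pixelShaderByteCode);

            // Input layout that matches the assumed vertex struct: float4 position + float4 color
            _layout = new InputLayout(device,
                                      ShaderSignature.GetInputSignature(vertexShaderByteCode),
                                      new[]
                                      {
                                          new InputElement("POSITION", 0, Format.R32G32B32A32_Float, 0, 0),
                                          new InputElement("COLOR",    0, Format.R32G32B32A32_Float, 16, 0)
                                      });

            // Vertex buffer with 36 vertices (12 triangles) - this matches context.Draw(36, 0) in Example #2;
            // GetCubeVertices is a hypothetical helper that returns the cube's position/color pairs
            var vertexBuffer = SharpDX.Direct3D11.Buffer.Create(device, BindFlags.VertexBuffer, GetCubeVertices());
            _vertexBufferBinding = new VertexBufferBinding(vertexBuffer, Utilities.SizeOf<Vector4>() * 2, 0);

            // Constant buffer that receives the worldViewProjection matrix on each frame
            _constantBuffer = new SharpDX.Direct3D11.Buffer(device,
                                                            Utilities.SizeOf<Matrix>(),
                                                            ResourceUsage.Default,
                                                            BindFlags.ConstantBuffer,
                                                            CpuAccessFlags.None,
                                                            ResourceOptionFlags.None,
                                                            0);
        }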
Example #2
        // This method is called on each rendering step.
        private void CustomRenderAction(RenderingContext renderingContext, CustomRenderableNode customRenderableNode, object originalObject)
        {
            // Just in case the data were already disposed
            if (_constantBuffer == null)
            {
                return;
            }

            // If we have not yet read the _viewProjectionMatrix in this frame, do this now
            if (renderingContext.FrameNumber != _viewProjectionMatrixFrameNumber)
            {
                UpdateViewProjectionMatrix(renderingContext);
            }


            var context       = renderingContext.DXDevice.ImmediateContext;
            var statesManager = renderingContext.ContextStatesManager;


            // Write the new world view projection matrix to the constant buffer
            context.UpdateSubresource(ref _viewProjectionMatrix, _constantBuffer);

            // Prepare all the stages
            // It is possible to do that directly on the DirectX ImmediateContext:

            //context.InputAssembler.InputLayout = _layout;
            //context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
            //context.InputAssembler.SetVertexBuffers(0, _vertexBufferBinding);
            //context.VertexShader.SetConstantBuffer(0, _constantBuffer);
            //context.VertexShader.Set(_vertexShader);
            //context.PixelShader.Set(_pixelShader);

            // But when custom rendering is mixed with DXEngine rendering,
            // it is highly recommended to use DXEngine's ContextStatesManager.
            // It caches the current states and prevents unneeded state changes.
            // What is more, if we changed a state directly, the ContextStatesManager would
            // not be aware of the change and would still "think" that the previous state is set,
            // so DXEngine might later render with a state that was set by our SharpDX code.
            //
            // If you use the ImmediateContext directly, you need to call the Reset method on the ContextStatesManager:
            //statesManager.Reset(ContextStatesManager.ResetType.All);


            statesManager.InputLayout       = _layout;
            statesManager.PrimitiveTopology = PrimitiveTopology.TriangleList;
            statesManager.RasterizerState   = renderingContext.DXDevice.CommonStates.CullCounterClockwise;

            statesManager.SetVertexBuffer(0, _vertexBufferBinding.Buffer, _vertexBufferBinding.Stride, _vertexBufferBinding.Offset);
            statesManager.SetVertexShaderConstantBuffer(_constantBuffer, 0);

            statesManager.SetVertexShader(_vertexShader);
            statesManager.SetPixelShader(_pixelShader);


            // Draw the cube
            context.Draw(36, 0);
        }
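
The frame-number check at the top of CustomRenderAction relies on an UpdateViewProjectionMatrix method that is not shown here. A minimal sketch, using only members that already appear in these examples (UsedCamera.GetViewProjection is shown in Example #4):

        private void UpdateViewProjectionMatrix(RenderingContext renderingContext)
        {
            // Cache the view-projection matrix so it is read only once per frame
            _viewProjectionMatrix = renderingContext.UsedCamera.GetViewProjection();

            // NOTE: Depending on how the shader declares its matrices (HLSL defaults
            // to column-major), a transpose may be required here:
            //_viewProjectionMatrix = SharpDX.Matrix.Transpose(_viewProjectionMatrix);

            _viewProjectionMatrixFrameNumber = renderingContext.FrameNumber;
        }
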
        private void InitializePointCloud(Vector3[] positions, BoundingBox positionsBounds, Color4[] positionColors)
        {
            if (MainDXViewportView.DXScene == null)
            {
                return; // This method was called too soon (before DXEngine was initialized) or WPF 3D rendering is used
            }

            // First, set up the material:

            // Create a new PixelMaterial
            _pixelMaterial = new PixelMaterial()
            {
                PixelColor  = Color4.White, // When using PixelColors, PixelColor is used as a mask (multiplied with each color)
                PixelSize   = 2,
                PixelColors = positionColors,
            };

            _pixelMaterial.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _disposables.Add(_pixelMaterial);


            // Now set up the mesh and create SceneNode to show it
            _optimizedPointMesh = new OptimizedPointMesh<Vector3>(positions,
                                                                  positionsBounds,
                                                                  segmentsCount: 100);

            // NOTE that you can also use an OptimizedPointMesh that takes a more complex vertex struct, for example PositionColor or PositionNormal; in that case use the other constructor.

            _optimizedPointMesh.OptimizationIndicesNumberThreshold = 100000; // We are satisfied with reducing the number of shown positions to 100,000 (no need to optimize further; a higher threshold reduces the initialization time)
            _optimizedPointMesh.MaxOptimizationViewsCount          = 10;     // Maximum number of created data sub-sets. The actual number can be lower when we hit the OptimizationIndicesNumberThreshold or when all vertices need to be shown.

            _optimizedPointMesh.Optimize(new SharpDX.Size2(MainDXViewportView.DXScene.Width, MainDXViewportView.DXScene.Height), standardPointSize: 1);

            _optimizedPointMesh.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _disposables.Add(_optimizedPointMesh);


            // To render the OptimizedPointMesh we need to use a CustomRenderableNode that provides a custom rendering callback action.
            var customRenderableNode = new CustomRenderableNode(RenderAction, _optimizedPointMesh.Bounds, _optimizedPointMesh, _pixelMaterial);

            customRenderableNode.Name = "CustomRenderableNode";
            //customRenderableNode.CustomRenderingQueue = MainDXViewportView.DXScene.BackgroundRenderingQueue;

            _disposables.Add(customRenderableNode);

            var sceneNodeVisual3D = new SceneNodeVisual3D(customRenderableNode);

            //sceneNodeVisual3D.Transform = transform;

            MainViewport.Children.Add(sceneNodeVisual3D);


            Camera1.TargetPosition = positionsBounds.Center.ToWpfPoint3D();
            Camera1.Distance       = positionsBounds.ToRect3D().GetDiagonalLength() * 0.5;
        }
Example #4
        private void RenderAction(RenderingContext renderingContext, CustomRenderableNode customRenderableNode, object objectToRender)
        {
            SharpDX.Matrix worldViewProjectionMatrix = renderingContext.UsedCamera.GetViewProjection();

            if (customRenderableNode.Transform != null && !customRenderableNode.Transform.IsIdentity)
            {
                worldViewProjectionMatrix = customRenderableNode.Transform.Value * worldViewProjectionMatrix;
            }

            var optimizedPointMesh = (OptimizedPointMesh<Vector3>)objectToRender;

            optimizedPointMesh.UpdateVisibleSegments(worldViewProjectionMatrix);
            optimizedPointMesh.RenderGeometry(renderingContext);
        }
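
        // A variant of the RenderAction above that uses a pre-computed transformation matrix
        // instead of reading the SceneNode's Transform on every frame: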
        private void RenderAction(RenderingContext renderingContext, CustomRenderableNode customRenderableNode, object objectToRender)
        {
            SharpDX.Matrix worldViewProjectionMatrix = renderingContext.UsedCamera.GetViewProjection();

            if (_hasTransformationMatrix)
            {
                worldViewProjectionMatrix = _transformationMatrix * worldViewProjectionMatrix;
            }

            // We could also read the transformation manually (as in the commented code below),
            // but it is faster to prepare the matrix when it changes than to read WPF's DependencyProperties in the rendering loop.

            //if (_sceneNodeVisual3D.Transform != null && !_sceneNodeVisual3D.Transform.Value.IsIdentity)
            //    worldViewProjectionMatrix = _sceneNodeVisual3D.Transform.Value.ToMatrix() * worldViewProjectionMatrix;

            //if (customRenderableNode.Transform != null && !customRenderableNode.Transform.IsIdentity)
            //    worldViewProjectionMatrix = customRenderableNode.Transform.Value * worldViewProjectionMatrix;


            var optimizedPointMesh = (OptimizedPointMesh<Vector3>)objectToRender;

            optimizedPointMesh.UpdateVisibleSegments(worldViewProjectionMatrix);
            optimizedPointMesh.RenderGeometry(renderingContext);
        }
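
The _hasTransformationMatrix and _transformationMatrix fields used above are prepared outside of the rendering loop. A sketch of how they might be refreshed whenever the Visual3D's transform changes; the UpdateTransformationMatrix method and the _sceneNodeVisual3D field are assumptions based on the commented code above (ToMatrix is the WPF Matrix3D to SharpDX.Matrix conversion used there):

        private void UpdateTransformationMatrix()
        {
            var transform = _sceneNodeVisual3D.Transform;

            if (transform == null || transform.Value.IsIdentity)
            {
                _hasTransformationMatrix = false;
                _transformationMatrix    = SharpDX.Matrix.Identity;
            }
            else
            {
                _hasTransformationMatrix = true;
                _transformationMatrix    = transform.Value.ToMatrix();
            }
        }

Call this once after assigning a new Transform to the Visual3D; the rendering callback then only multiplies two matrices instead of reading DependencyProperties on each frame.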
Example #6
        private void ShowPositionsArray(Vector3[] positionsArray, float pixelSize, Color4 pixelColor, Bounds positionBounds)
        {
            BoundingBox positionsBoundingBox;

            // To correctly set the Camera's Near and Far distance, we need to provide the correct bounds of each shown 3D model.
            if (positionBounds != null && !positionBounds.IsEmpty)
            {
                // It is highly recommended to manually set the Bounds.
                positionsBoundingBox = positionBounds.BoundingBox;
            }
            else
            {
                // If the bounds were not provided, we need to calculate them from the positions
                positionsBoundingBox = BoundingBox.FromPoints(positionsArray);
            }


            // Create an OptimizedPointMesh that will optimize the rendering of positions.
            // It uses two techniques to do that:

            _optimizedPointMesh = new OptimizedPointMesh<Vector3>(positionsArray,
                                                                  positionsBoundingBox,
                                                                  segmentsCount: 100); // All the positions are divided into 100 segments; when rendering, each segment is checked for visibility in the current camera (if it is not visible, it is not rendered)

            // NOTE that you can also use an OptimizedPointMesh that takes a more complex vertex struct, for example PositionColor or PositionNormal; in that case use the other constructor.

            _optimizedPointMesh.OptimizationIndicesNumberThreshold = 100000; // We are satisfied with reducing the number of shown positions to 100,000 (no need to optimize further; a higher threshold reduces the initialization time)
            _optimizedPointMesh.MaxOptimizationViewsCount          = 10;     // Maximum number of created data sub-sets. The actual number can be lower when we hit the OptimizationIndicesNumberThreshold or when all vertices need to be shown.

            _optimizedPointMesh.Optimize(new SharpDX.Size2(MainDXViewportView.DXScene.Width, MainDXViewportView.DXScene.Height), pixelSize);

            _optimizedPointMesh.InitializeResources(MainDXViewportView.DXScene.DXDevice);



            // We will need to dispose the OptimizedPointMesh
            _modelDisposables.Add(_optimizedPointMesh);


            // Create a new PixelMaterial
            _pixelMaterial = new PixelMaterial()
            {
                PixelColor = pixelColor,
                PixelSize  = pixelSize,

                // By default the graphics card renders objects that are closer to the camera over the objects that are farther away.
                // This means that positions that are closer to the camera will be rendered over the positions that are farther away.
                // This may distort the shown colors.
                // Therefore, when using pixel colors it is better to disable depth buffer checking and render all the pixels.
                // This is done by setting ReadZBuffer and WriteZBuffer to false.
                ReadZBuffer  = false,
                WriteZBuffer = false
            };


            // It is also possible to set per-pixel colors (or per-pixel sizes with setting PixelSizes - not demonstrated here).
            // This comes with a performance drawback (see comment below).
            //
            // To test per-pixel colors, uncomment the following code:

            //var pixelColors = new Color4[positionsArray.Length];
            //for (int i = 0; i < positionsArray.Length; i++)
            //    pixelColors[i] = new Color4((i % 2 == 0) ? 1 : 0, 0, (i % 2 != 0) ? 1 : 0, 1);

            //_pixelMaterial.PixelColors = pixelColors;
            //_pixelMaterial.PixelColor = Color4.White; // When the PixelColors array is used, PixelColor is used as a mask (each color in PixelColors is multiplied with PixelColor). To preserve the colors in PixelColors we need to set PixelColor to White.

            // By default the OptimizedPointMesh "combines" positions that are closer together than the size of one pixel on the screen
            // and renders only some of them. In this case it is possible that only every second point (or every tenth point) is rendered,
            // and this can remove the "color mixing" in our sample.
            // In such cases it is possible to disable this optimization by setting OptimizePositions to false:
            //_optimizedPointMesh.OptimizePositions = false;
            //
            // After this the OptimizedPointMesh will only provide the optimization that groups positions into 100 segments
            // and then checks which segments are visible in the camera (by checking each segment's bounding box).
            // But when the camera is positioned in such a way that all positions are visible,
            // then all positions are sent to the graphics card - in that case OptimizePositions can provide good results by skipping some pixels.
            //
            // However, if the actual colors in your data do not have such sharp differences (have more gradients),
            // then this problem should not be visible.


            _pixelMaterial.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _modelDisposables.Add(_pixelMaterial);


            // To render the OptimizedPointMesh we need to use a CustomRenderableNode that provides a custom rendering callback action.
            _customRenderableNode      = new CustomRenderableNode(RenderAction, _optimizedPointMesh.Bounds, _optimizedPointMesh, _pixelMaterial);
            _customRenderableNode.Name = "CustomRenderableNode";

            _modelDisposables.Add(_customRenderableNode);

            var sceneNodeVisual3D = new SceneNodeVisual3D(_customRenderableNode);

            //sceneNodeVisual3D.Transform = transform;

            MainViewport.Children.Add(sceneNodeVisual3D);
        }
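
A hypothetical caller for the ShowPositionsArray method above; the generated positions are arbitrary demo data. Passing the bounds in manually (built once with BoundingBox.FromPoints) follows the recommendation in the method's comments:

        private void ShowDemoPositions()
        {
            var rnd       = new Random();
            var positions = new Vector3[100000];

            for (int i = 0; i < positions.Length; i++)
            {
                positions[i] = new Vector3((float)rnd.NextDouble() * 100f - 50f,
                                           (float)rnd.NextDouble() * 20f,
                                           (float)rnd.NextDouble() * 100f - 50f);
            }

            // The Bounds constructor that takes a BoundingBox is also used in Example #7
            var positionBounds = new Bounds(BoundingBox.FromPoints(positions));

            ShowPositionsArray(positions, pixelSize: 2f, pixelColor: Color4.White, positionBounds: positionBounds);
        }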
Example #7
        private void InitializePointCloud(Vector3[] positions, BoundingBox positionsBounds, Color4[] positionColors, bool useOptimizedPointMesh, bool disableDepthRead, bool disableDepthWrite)
        {
            if (MainDXViewportView.DXScene == null)
            {
                return; // This method was called too soon (before DXEngine was initialized) or WPF 3D rendering is used
            }

            // First, set up the material:

            // Create a new PixelMaterial
            _pixelMaterial = new PixelMaterial()
            {
                PixelColor  = Color4.White, // When using PixelColors, PixelColor is used as a mask (multiplied with each color)
                PixelSize   = 2,
                PixelColors = positionColors,

                // By default the graphics card renders objects that are closer to the camera over the objects that are farther away.
                // This means that positions that are closer to the camera will be rendered over the positions that are farther away.
                // This may distort the shown colors.
                // Therefore, when using pixel colors it is better to disable depth buffer checking and render all the pixels.
                // This is done by setting ReadZBuffer and WriteZBuffer to false.
                ReadZBuffer  = !disableDepthRead,
                WriteZBuffer = !disableDepthWrite
            };

            _pixelMaterial.InitializeResources(MainDXViewportView.DXScene.DXDevice);

            _disposables.Add(_pixelMaterial);


            // Now set up the mesh and create SceneNode to show it

            if (useOptimizedPointMesh)
            {
                _optimizedPointMesh = new OptimizedPointMesh<Vector3>(positions,
                                                                      positionsBounds,
                                                                      segmentsCount: 100);

                // NOTE that you can also use an OptimizedPointMesh that takes a more complex vertex struct, for example PositionColor or PositionNormal; in that case use the other constructor.

                _optimizedPointMesh.OptimizationIndicesNumberThreshold = 100000; // We are satisfied with reducing the number of shown positions to 100,000 (no need to optimize further; a higher threshold reduces the initialization time)
                _optimizedPointMesh.MaxOptimizationViewsCount          = 10;     // Maximum number of created data sub-sets. The actual number can be lower when we hit the OptimizationIndicesNumberThreshold or when all vertices need to be shown.

                _optimizedPointMesh.Optimize(new SharpDX.Size2(MainDXViewportView.DXScene.Width, MainDXViewportView.DXScene.Height), standardPointSize: 1);

                _optimizedPointMesh.InitializeResources(MainDXViewportView.DXScene.DXDevice);

                _disposables.Add(_optimizedPointMesh);


                // To render the OptimizedPointMesh we need to use a CustomRenderableNode that provides a custom rendering callback action.
                var customRenderableNode = new CustomRenderableNode(RenderAction, _optimizedPointMesh.Bounds, _optimizedPointMesh, _pixelMaterial);
                customRenderableNode.Name = "CustomRenderableNode";
                //customRenderableNode.CustomRenderingQueue = MainDXViewportView.DXScene.BackgroundRenderingQueue;

                _disposables.Add(customRenderableNode);

                var sceneNodeVisual3D = new SceneNodeVisual3D(customRenderableNode);
                //sceneNodeVisual3D.Transform = transform;

                MainViewport.Children.Add(sceneNodeVisual3D);
            }
            else
            {
                // Use SimpleMesh - all positions will always be rendered:

                var simpleMesh = new SimpleMesh<Vector3>(vertexBufferArray: positions,
                                                         indexBufferArray: null,
                                                         inputLayoutType: InputLayoutType.Position);

                simpleMesh.PrimitiveTopology = PrimitiveTopology.PointList; // We need to change the default PrimitiveTopology.TriangleList to PointList

                // To correctly set the Camera's Near and Far distance, we need to provide the correct bounds of each shown 3D model.

                // It is highly recommended to manually set the Bounds.
                simpleMesh.Bounds = new Bounds(positionsBounds);

                // if we do not manually set the Bounds, then we need to call CalculateBounds to calculate the bounds
                //simpleMesh.CalculateBounds();

                // We will need to dispose the SimpleMesh
                _disposables.Add(simpleMesh);


                // Now create a new MeshObjectNode
                _meshObjectNode = new Ab3d.DirectX.MeshObjectNode(simpleMesh, _pixelMaterial);

                _disposables.Add(_meshObjectNode);

                // To be able to add the MeshObjectNode (or any other SceneNode) to WPF's Viewport3D,
                // we need to create a SceneNodeVisual3D
                var sceneNodeVisual3D = new SceneNodeVisual3D(_meshObjectNode);

                MainViewport.Children.Add(sceneNodeVisual3D);
            }


            Camera1.TargetPosition = positionsBounds.Center.ToWpfPoint3D();
            Camera1.Distance       = positionsBounds.ToRect3D().GetDiagonalLength();
        }
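
A hypothetical call into the InitializePointCloud method above; the gradient colors are arbitrary demo values. Setting the last two parameters to true turns off depth reading and writing, so every pixel is rendered regardless of occlusion, which avoids the color distortion described in the comments:

        private void CreateDemoPointCloud()
        {
            const int count = 1000000;

            var positions      = new Vector3[count];
            var positionColors = new Color4[count];

            for (int i = 0; i < count; i++)
            {
                positions[i]      = new Vector3(i % 1000, (i / 1000) % 1000, 0) * 0.01f;
                positionColors[i] = new Color4(i / (float)count, 0, 1f - i / (float)count, 1); // red-to-blue gradient
            }

            var positionsBounds = BoundingBox.FromPoints(positions);

            InitializePointCloud(positions, positionsBounds, positionColors,
                                 useOptimizedPointMesh: true,
                                 disableDepthRead: true,
                                 disableDepthWrite: true);
        }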
Example #8
        private void AddPointCloudNode(Transform3D transform)
        {
            int positionsCount = _vertexBuffer.Length;
            var positions      = new Vector3[positionsCount];

            for (int i = 0; i < positionsCount; i++)
            {
                positions[i] = _vertexBuffer[i].Position;
            }


            var boundingBox = BoundingBox.FromPoints(positions);


            var optimizedPointMesh = new OptimizedPointMesh<PositionNormal>(_vertexBuffer,
                                                                            positions,
                                                                            InputLayoutType.Position | InputLayoutType.Normal,
                                                                            boundingBox,
                                                                            segmentsCount: SegmentsCount,
                                                                            name: "ShaderOptimizedPointMesh");


            float selectedPointSize = (float)PointSizeComboBox.SelectedItem;

            if (!MainDXViewportView.DXScene.BuffersInitialized)
            {
                throw new Exception("Cannot create OptimizedPointMesh without known DXScene size");
            }

            // Use the size from DXScene, because this also takes DPI settings into account and gives us the most accurate number of available pixels (better than DXViewportView.ActualWidth / Height)
            optimizedPointMesh.Optimize(new SharpDX.Size2(MainDXViewportView.DXScene.Width, MainDXViewportView.DXScene.Height), selectedPointSize);

            optimizedPointMesh.InitializeResources(MainDXViewportView.DXScene.DXDevice);


            _pointCloudDisposables.Add(optimizedPointMesh); // _pointsMesh is not added to the disposables because it is disposed separately (it can be disposed after the number of positions is changed in the DropDown)


            if (_optimizedPointMeshes == null)
            {
                _optimizedPointMeshes = new List<OptimizedPointMesh<PositionNormal>>();
            }

            _optimizedPointMeshes.Add(optimizedPointMesh);


            _shadedPointCloudEffect.DiffuseColor = Colors.Orange.ToColor4();


            var customRenderableNode = new CustomRenderableNode(RenderAction, new Bounds(boundingBox), optimizedPointMesh, _effectMaterial);

            customRenderableNode.Name = "CustomRenderableNode";

            _pointCloudDisposables.Add(customRenderableNode);

            var sceneNodeVisual3D = new SceneNodeVisual3D(customRenderableNode);

            sceneNodeVisual3D.Transform = transform;

            PointCloudRootVisual3D.Children.Add(sceneNodeVisual3D);
        }
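
When the point cloud is regenerated (for example after the number of positions is changed in the DropDown), the objects collected in _pointCloudDisposables need to be disposed first. A minimal sketch, assuming _pointCloudDisposables is a DXEngine DisposeList (its declaration is not shown in this snippet):

        private void DisposePointCloud()
        {
            PointCloudRootVisual3D.Children.Clear();

            _optimizedPointMeshes = null;

            // DisposeList disposes all IDisposable objects that were added to it
            // (here: the OptimizedPointMesh and CustomRenderableNode instances)
            _pointCloudDisposables.Dispose();
            _pointCloudDisposables = new DisposeList();
        }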