public static void Run()
        {
            try
            {
                // ExStart:RenderFisheyeLensEffectof3DScene
                // The path to the documents directory.
                string dataDir = RunExamples.GetDataDir();

                // Load the scene to render.
                Scene scene = new Scene(dataDir + "VirtualCity.glb");
                // Create a camera for capturing the cube map.
                Camera cam = new Camera(ProjectionType.Perspective)
                {
                    NearPlane    = 0.1,
                    FarPlane     = 200,
                    RotationMode = RotationMode.FixedDirection
                };
                scene.RootNode.CreateChildNode(cam).Transform.Translation = new Vector3(5, 6, 0);

                // Create two lights to illuminate the scene.
                scene.RootNode.CreateChildNode(new Light()
                {
                    LightType = LightType.Point
                }).Transform.Translation = new Vector3(-10, 7, -10);
                scene.RootNode.CreateChildNode(new Light()
                {
                    Color = new Vector3(Color.CadetBlue)
                }).Transform.Translation = new Vector3(49, 0, 49);

                // Create a renderer; dispose it to release unmanaged hardware resources.
                using (var renderer = Renderer.CreateRenderer())
                // Cube map render target with depth texture; depth is required when rendering a scene.
                // Dispose the render textures as well (consistent with the other examples in this file).
                using (IRenderTexture rt = renderer.RenderFactory.CreateCubeRenderTexture(new RenderParameters(false), 512, 512))
                // 2D texture render target with no depth texture, used only for image post-processing.
                using (IRenderTexture final = renderer.RenderFactory.CreateRenderTexture(new RenderParameters(false, 32, 0, 0), 1024, 1024))
                {
                    // A viewport is required on the render target.
                    rt.CreateViewport(cam, RelativeRectangle.FromScale(0, 0, 1, 1));
                    renderer.Render(rt);

                    // Execute the fisheye projection post-processing with the previously rendered cube map as input.
                    // The fisheye can have a field of view of more than 180 degrees, so a cube map covering all directions is required.
                    PostProcessing fisheye = renderer.GetPostProcessing("fisheye");
                    // We can change the fov to 360 instead of the default value 180.
                    fisheye.FindProperty("fov").Value = 360.0;
                    // Specify the cube map rendered from the scene as this post-processing's input.
                    fisheye.Input = rt.Targets[0];
                    // Execute the post-processing effect and save the result to render target final.
                    renderer.Execute(fisheye, final);
                    // Save the texture to disk.
                    ((ITexture2D)final.Targets[0]).Save(dataDir + "fisheye.png", ImageFormat.Png);
                }
                // ExEnd:RenderFisheyeLensEffectof3DScene
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
        // Example #2
        private void Render()
        {
            // No cube-based post-processing active: render the scene straight to the window.
            if (CubeBasedPostProcessing == null)
            {
                renderer.Render(window);
                return;
            }

            // Lazily create the cube render target and its full-size viewport on first use.
            if (cubeTarget == null)
            {
                cubeTarget =
                    renderer.RenderFactory.CreateCubeRenderTexture(new RenderParameters(false), 1024, 1024);
                cubeTarget.CreateViewport(camera, RelativeRectangle.FromScale(0, 0, 1, 1));
            }

            // Render the scene into the cube map first.
            renderer.Render(cubeTarget);
            // Feed the rendered cube map into the post-processing effect.
            CubeBasedPostProcessing.Input = cubeTarget.Targets[0];
            // The first target of a cube render texture is the cube map itself
            // (handy for debugging individual faces via ToBitmap).
            ITextureCubemap faces = (ITextureCubemap)cubeTarget.Targets[0];

            // Execute the post-processing and output its result to the window.
            renderer.Execute(CubeBasedPostProcessing, window);
        }
        public static void Run()
        {
            try
            {
                // ExStart:RenderSceneIntoCubemapwithsixfaces
                // Load the scene to capture.
                Scene scene = new Scene(RunExamples.GetDataFilePath("VirtualCity.glb"));
                // Create a camera for capturing the cube map.
                Camera cam = new Camera(ProjectionType.Perspective)
                {
                    NearPlane    = 0.1,
                    FarPlane     = 200,
                    RotationMode = RotationMode.FixedDirection
                };
                scene.RootNode.CreateChildNode(cam).Transform.Translation = new Vector3(5, 6, 0);
                // Create two lights to illuminate the scene.
                scene.RootNode.CreateChildNode(new Light()
                {
                    LightType = LightType.Point
                }).Transform.Translation = new Vector3(-10, 7, -10);
                scene.RootNode.CreateChildNode(new Light()
                {
                    Color = new Vector3(Color.CadetBlue)
                }).Transform.Translation = new Vector3(49, 0, 49);

                // Create a renderer; dispose it to release unmanaged hardware resources.
                using (var renderer = Renderer.CreateRenderer())
                // Cube map render target with depth texture; depth is required when rendering a scene.
                // Dispose the render texture as well (consistent with the other examples in this file).
                using (IRenderTexture rt = renderer.RenderFactory.CreateCubeRenderTexture(new RenderParameters(false), 512, 512))
                {
                    // A viewport is required on the render target.
                    rt.CreateViewport(cam, RelativeRectangle.FromScale(0, 0, 1, 1));
                    renderer.Render(rt);
                    // Now let's get the cubemap texture. A direct cast (matching the cast
                    // used elsewhere in this file) fails loudly instead of deferring a
                    // NullReferenceException to the Save call below.
                    ITextureCubemap cubemap = (ITextureCubemap)rt.Targets[0];
                    // We can directly save each face to disk by specifying the file name.
                    CubeFaceData<string> fileNames = new CubeFaceData<string>()
                    {
                        Right  = RunExamples.GetOutputFilePath("right.png"),
                        Left   = RunExamples.GetOutputFilePath("left.png"),
                        Back   = RunExamples.GetOutputFilePath("back.png"),
                        Front  = RunExamples.GetOutputFilePath("front.png"),
                        Bottom = RunExamples.GetOutputFilePath("bottom.png"),
                        Top    = RunExamples.GetOutputFilePath("top.png")
                    };
                    // And call the Save method.
                    cubemap.Save(fileNames, ImageFormat.Png);
                    // Or, if we just need the render result in memory, we can save it to CubeFaceData<Bitmap>:
                    //CubeFaceData<Bitmap> bitmaps = new CubeFaceData<Bitmap>();
                    //cubemap.Save(bitmaps);
                    //bitmaps.Back.Save("back.bmp", ImageFormat.Bmp);
                }
                // ExEnd:RenderSceneIntoCubemapwithsixfaces
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
        public static void Run()
        {
            try
            {
                // ExStart:RenderPanaromaViewof3DScene

                // Load the scene to render.
                Scene scene = new Scene(RunExamples.GetDataFilePath("VirtualCity.glb"));
                // Create a camera for capturing the cube map.
                Camera cam = new Camera(ProjectionType.Perspective)
                {
                    NearPlane    = 0.1,
                    FarPlane     = 200,
                    RotationMode = RotationMode.FixedDirection
                };
                scene.RootNode.CreateChildNode(cam).Transform.Translation = new Vector3(5, 6, 0);

                // Create two lights to illuminate the scene.
                scene.RootNode.CreateChildNode(new Light()
                {
                    LightType = LightType.Point
                }).Transform.Translation = new Vector3(-10, 7, -10);
                scene.RootNode.CreateChildNode(new Light()
                {
                    Color = new Vector3(Color.CadetBlue)
                }).Transform.Translation = new Vector3(49, 0, 49);
                // Create a renderer; dispose it to release unmanaged hardware resources.
                using (var renderer = Renderer.CreateRenderer())
                // Cube map render target with depth texture; depth is required when rendering a scene.
                // Both render textures are disposed too (consistent with the other examples in this file).
                using (IRenderTexture rt = renderer.RenderFactory.CreateCubeRenderTexture(new RenderParameters(false), 512, 512))
                // 2D texture render target (3:1 aspect, no depth texture) used only for image post-processing.
                using (IRenderTexture final = renderer.RenderFactory.CreateRenderTexture(new RenderParameters(false, 32, 0, 0), 1024 * 3, 1024))
                {
                    // A viewport is required on the render target.
                    rt.CreateViewport(cam, RelativeRectangle.FromScale(0, 0, 1, 1));
                    renderer.Render(rt);

                    // Execute the equirectangular projection post-processing with the previously rendered cube map as input.
                    PostProcessing equirectangular = renderer.GetPostProcessing("equirectangular");
                    // Specify the cube map rendered from the scene as this post-processing's input.
                    equirectangular.Input = rt.Targets[0];
                    // Execute the post-processing effect and save the result to render target final.
                    renderer.Execute(equirectangular, final);
                    // Save the texture to disk.
                    ((ITexture2D)final.Targets[0]).Save(RunExamples.GetOutputFilePath("panaroma.png"), ImageFormat.Png);
                }
                // ExEnd:RenderPanaromaViewof3DScene
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
        // ExStart:RenderSceneWithPanoramaInDepth
        public static void Run()
        {
            // The path to the documents directory.
            string dataDir = RunExamples.GetDataDir();
            // Load the scene to render.
            Scene scene = new Scene(dataDir + "skybox.obj");
            // Create a camera for capturing the cube map.
            Camera cam = new Camera(ProjectionType.Perspective);

            cam.NearPlane = 0.1;
            cam.FarPlane  = 200;
            scene.RootNode.CreateChildNode(cam).Transform.Translation = new Vector3(5, 6, 0);
            cam.RotationMode = RotationMode.FixedDirection;

            // Create two lights to illuminate the scene.
            scene.RootNode.CreateChildNode(new Light()
            {
                LightType = LightType.Point
            }).Transform.Translation = new Vector3(-10, 7, -10);
            scene.RootNode.CreateChildNode(new Light()
            {
                LightType           = LightType.Point,
                ConstantAttenuation = 0.1,
                Color = new Vector3(Color.CadetBlue)
            }).Transform.Translation = new Vector3(49, 0, 49);

            // Create a renderer; dispose it to release unmanaged hardware resources.
            using (var renderer = Renderer.CreateRenderer())
            // Cube map render target with depth texture; depth is required when rendering a scene.
            // Both render textures are disposed too (consistent with the other examples in this file).
            using (IRenderTexture rt = renderer.RenderFactory.CreateCubeRenderTexture(new RenderParameters(false), 512, 512))
            // 2D texture render target (3:1 aspect, no depth texture) used only for image post-processing.
            using (IRenderTexture final = renderer.RenderFactory.CreateRenderTexture(new RenderParameters(false, 32, 0, 0), 1024 * 3, 1024))
            {
                // A viewport is required on the render target.
                rt.CreateViewport(cam, RelativeRectangle.FromScale(0, 0, 1, 1));
                // Swap in a depth shader set so the scene is rendered in depth instead of color.
                renderer.ShaderSet = CreateDepthShader(renderer);
                renderer.Render(rt);

                // Execute the equirectangular projection post-processing with the previously rendered cube map as input.
                PostProcessing equirectangular = renderer.GetPostProcessing("equirectangular");
                equirectangular.Input = rt.Targets[0];
                renderer.Execute(equirectangular, final);
                // Save the texture to disk.
                ((ITexture2D)final.Targets[0]).Save(dataDir + "RenderSceneWithPanoramaInDepth_Out.png", ImageFormat.Png);
            }
        }
        // Example #6
        public static void Run()
        {
            // ExStart:ApplyVisualEffects
            // The path to the documents directory.
            string MyDir = RunExamples.GetDataDir();

            // Load an existing 3D scene.
            Scene scene = new Scene(MyDir + "scene.obj");
            // Create a camera, attach it to the scene and aim it at the target point.
            Camera camera = new Camera();
            scene.RootNode.CreateChildNode("camera", camera).Transform.Translation = new Vector3(2, 44, 66);
            camera.LookAt = new Vector3(50, 12, 0);
            // Add a white point light so the scene is lit.
            Light light = new Light()
            {
                Color     = new Vector3(Color.White),
                LightType = LightType.Point
            };
            scene.RootNode.CreateChildNode("light", light).Transform.Translation = new Vector3(26, 57, 43);

            // CreateRenderer creates a hardware OpenGL-backend renderer (more renderers may be
            // added in the future) and performs some internal initialization. Leaving the using
            // scope disposes the unmanaged hardware resources.
            using (var renderer = Renderer.CreateRenderer())
            {
                renderer.EnableShadows = false;

                // Render the scene into a 1024x1024 texture with default render parameters and a
                // single output target. A render target can have multiple output textures, but one
                // is enough here; the extra textures and depth textures are mainly for deferred
                // shading (the depth texture is accessible via IRenderTexture.DepthTeture).
                using (IRenderTexture rt = renderer.RenderFactory.CreateRenderTexture(new RenderParameters(), 1, 1024, 1024))
                {
                    // One viewport occupying 100% width and 100% height of the render target.
                    rt.CreateViewport(camera, new RelativeRectangle()
                    {
                        ScaleWidth  = 1,
                        ScaleHeight = 1
                    });

                    // Renders the target with the currently active post-processing chain and
                    // writes the first output texture to disk.
                    void RenderTo(string fileName)
                    {
                        renderer.Render(rt);
                        rt.Targets[0].Save(MyDir + fileName, ImageFormat.Png);
                    }

                    // No effect: the plain render.
                    RenderTo("Original_viewport_out.png");

                    // A single post-processing effect: pixelation.
                    PostProcessing pixelation = renderer.GetPostProcessing("pixelation");
                    renderer.PostProcessings.Add(pixelation);
                    RenderTo("VisualEffect_pixelation_out.png");

                    // Clear the previous effect and try grayscale.
                    PostProcessing grayscale = renderer.GetPostProcessing("grayscale");
                    renderer.PostProcessings.Clear();
                    renderer.PostProcessings.Add(grayscale);
                    RenderTo("VisualEffect_grayscale_out.png");

                    // Effects can be combined: grayscale followed by pixelation.
                    renderer.PostProcessings.Clear();
                    renderer.PostProcessings.Add(grayscale);
                    renderer.PostProcessings.Add(pixelation);
                    RenderTo("VisualEffect_grayscale+pixelation_out.png");

                    // Clear the previous effects and try edge detection.
                    PostProcessing edgedetection = renderer.GetPostProcessing("edge-detection");
                    renderer.PostProcessings.Clear();
                    renderer.PostProcessings.Add(edgedetection);
                    RenderTo("VisualEffect_edgedetection_out.png");

                    // Clear the previous effects and try blur.
                    PostProcessing blur = renderer.GetPostProcessing("blur");
                    renderer.PostProcessings.Clear();
                    renderer.PostProcessings.Add(blur);
                    RenderTo("VisualEffect_blur_out.png");
                }
            }
            // ExEnd:ApplyVisualEffects
        }
        public static void Run()
        {
            // ExStart:CaptureViewPort

            // Load an existing 3D scene.
            Scene scene = new Scene(RunExamples.GetDataFilePath("scene.obj"));
            // Create a camera, attach it to the scene and aim it at the target point.
            Camera camera = new Camera();
            scene.RootNode.CreateChildNode("camera", camera).Transform.Translation = new Vector3(2, 44, 66);
            camera.LookAt = new Vector3(50, 12, 0);
            // Add a white point light so the scene is lit.
            Light light = new Light()
            {
                Color     = new Vector3(Color.White),
                LightType = LightType.Point
            };
            scene.RootNode.CreateChildNode("light", light).Transform.Translation = new Vector3(26, 57, 43);

            // CreateRenderer creates a hardware OpenGL-backend renderer and performs some
            // internal initialization. Leaving the using scope disposes the unmanaged
            // hardware resources.
            using (var renderer = Renderer.CreateRenderer())
            {
                renderer.EnableShadows = false;

                // Render the scene into a 1024x1024 texture with default render parameters and a
                // single output target. A render target can have multiple output textures, but one
                // is enough here; the extra textures and depth textures are mainly for deferred
                // shading (the depth texture is accessible via IRenderTexture.DepthTeture).
                // To render into a window instead, use the CreateRenderWindow method, like:
                // Window = renderer.RenderFactory.CreateRenderWindow(new RenderParameters(), Handle);
                using (IRenderTexture rt = renderer.RenderFactory.CreateRenderTexture(new RenderParameters(), 1, 1024, 1024))
                {
                    // One viewport occupying 100% width and 100% height of the render target.
                    Viewport vp = rt.CreateViewport(camera, new RelativeRectangle()
                    {
                        ScaleWidth  = 1,
                        ScaleHeight = 1
                    });
                    // Render the target and save the target texture to an external file.
                    renderer.Render(rt);
                    ((ITexture2D)rt.Targets[0]).Save(RunExamples.GetOutputFilePath("file-1viewports_out.png"), ImageFormat.Png);

                    // Shrink the first viewport to the left half (50% width, 100% height)...
                    vp.Area = new RelativeRectangle()
                    {
                        ScaleWidth  = 0.5f,
                        ScaleHeight = 1
                    };
                    // ...and add a second viewport covering the right half, starting at 50%.
                    // Both viewports share the same camera, so the rendered content is the same.
                    rt.CreateViewport(camera, new RelativeRectangle()
                    {
                        ScaleX      = 0.5f,
                        ScaleWidth  = 0.5f,
                        ScaleHeight = 1
                    });
                    // Widen the camera's field of view to 90 degrees so it sees more of the scene.
                    camera.FieldOfView = 90;
                    renderer.Render(rt);
                    ((ITexture2D)rt.Targets[0]).Save(RunExamples.GetOutputFilePath("file-2viewports_out.png"), ImageFormat.Png);
                }
            }
            // ExEnd:CaptureViewPort
        }
        // Example #8
        static void Main(string[] args)
        {
            // Load the scene and set up a camera aimed at the target point.
            Scene scene = new Scene("scene.obj");
            var camera = new Camera();
            scene.RootNode.CreateChildNode("camera", camera).Transform.Translation = new Vector3(2, 44, 66);
            camera.LookAt = new Vector3(50, 12, 0);
            // Add a white point light so the scene is lit.
            var light = new Light()
            {
                Color     = new Vector3(Color.White),
                LightType = LightType.Point
            };
            scene.RootNode.CreateChildNode("light", light).Transform.Translation = new Vector3(26, 57, 43);

            // CreateRenderer creates a hardware OpenGL-backend renderer (more renderers can be
            // added on demand) and performs some internal initialization. Leaving the using
            // scope disposes the unmanaged hardware resources.
            using (var renderer = Renderer.CreateRenderer())
            {
                renderer.EnableShadows = false;

                // Render the scene into a 1024x1024 texture with default render parameters and a
                // single output target. A render target can have multiple output textures, but one
                // is enough here; the extra textures and depth textures are mainly for deferred
                // shading (the depth texture is accessible via IRenderTexture.DepthTeture).
                using (IRenderTexture rt = renderer.RenderFactory.CreateRenderTexture(new RenderParameters(), 1, 1024, 1024))
                {
                    // Renders the target with the currently active post-processing chain and
                    // writes the first output texture to disk.
                    void RenderTo(string fileName)
                    {
                        renderer.Render(rt);
                        rt.Targets[0].Save(fileName, ImageFormat.Png);
                    }

                    // One viewport occupying 100% width and 100% height of the render target.
                    Viewport vp = rt.CreateViewport(camera, new RelativeRectangle()
                    {
                        ScaleWidth  = 1,
                        ScaleHeight = 1
                    });
                    RenderTo("file-1viewports.png");

                    // Shrink the first viewport to the left half (50% width, 100% height)...
                    vp.Area = new RelativeRectangle()
                    {
                        ScaleWidth  = 0.5f,
                        ScaleHeight = 1
                    };
                    // ...and add a second viewport covering the right half, starting at 50%.
                    // Both viewports share the same camera, so the rendered content is the same.
                    rt.CreateViewport(camera, new RelativeRectangle()
                    {
                        ScaleX      = 0.5f,
                        ScaleWidth  = 0.5f,
                        ScaleHeight = 1
                    });
                    // Widen the camera's field of view to 90 degrees so it sees more of the scene.
                    camera.FieldOfView = 90;
                    RenderTo("file-2viewports.png");

                    // Add a post-processing effect (filter): pixelation.
                    PostProcessing pixelation = renderer.GetPostProcessing("pixelation");
                    renderer.PostProcessings.Add(pixelation);
                    RenderTo("file-pixelation.png");

                    // Clear the previous effect and try grayscale.
                    PostProcessing grayscale = renderer.GetPostProcessing("grayscale");
                    renderer.PostProcessings.Clear();
                    renderer.PostProcessings.Add(grayscale);
                    RenderTo("file-grayscale.png");

                    // Effects can be combined: grayscale followed by pixelation.
                    renderer.PostProcessings.Clear();
                    renderer.PostProcessings.Add(grayscale);
                    renderer.PostProcessings.Add(pixelation);
                    RenderTo("file-grayscale+pixelation.png");
                }
            }
        }