Example #1
        /// <summary>
        /// Creates a new Scene instance.
        /// </summary>
        /// <param name="device">The graphics device to use.</param>
        /// <param name="context">The graphics context to use.</param>
        /// <param name="window">The window to render into.</param>
        /// <param name="resolution">The render resolution.</param>
        public Scene(Device device, DeviceContext context, RenderForm window, Size resolution)
        {
            Device = device;

            SetupTweakBars(window);
            CompileVertexShader();
            LoadSceneAssets();

            RenderDimensions = resolution;
            CreateCamera(resolution);
            CreateInput(window);

            window.MouseDown += MouseDown;
            window.MouseMove += MouseMove;
            window.MouseUp   += MouseUp;

            proxy = new ResourceProxy(device);

            // TODO: create SkyGenerator instance here

            skyEnvMap = new GraphicsResource(device, new Size(2048, 1024), Format.R32G32B32A32_Float, true, true);
        }
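
A minimal usage sketch, assuming SharpDX's standard device and window setup; Scene.Render and the namespaces in the comment are assumptions, not part of this example:

        // using SharpDX.Direct3D; using SharpDX.Direct3D11; using SharpDX.Windows;
        using (var window = new RenderForm("Scene"))
        using (var device = new Device(DriverType.Hardware, DeviceCreationFlags.None))
        {
            var scene = new Scene(device, device.ImmediateContext, window, window.ClientSize);
            RenderLoop.Run(window, () => scene.Render()); // hypothetical per-frame method
        }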
Example #2
 /// <summary>
 /// Initializes graphics resources.
 /// </summary>
 /// <param name="dimensions">The frame dimensions.</param>
 private void InitializeResources(Size dimensions)
 {
     if (temporary != null) temporary.Dispose();
     temporary = new GraphicsResource(Device, dimensions, Format.R32G32B32A32_Float, true, true, true);
 }
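
Since the method disposes and re-creates a size-dependent buffer, a plausible caller is a resize handler; a sketch assuming the RenderForm from Example #1 is in scope (the hookup itself is not part of this example):

 // Hedged sketch: RenderForm.UserResized is a SharpDX.Windows event.
 window.UserResized += (sender, args) => InitializeResources(window.ClientSize);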
Example #3
 /// <summary>
 /// Applies the layer to an aperture texture. The output
 /// is to be understood as the transmittance through each
 /// pixel, and is blended multiplicatively with the other
 /// layers (so layer order does not matter).
 /// </summary>
 /// <param name="context">The device context.</param>
 /// <param name="output">The output aperture.</param>
 /// <param name="profile">An optical profile.</param>
 /// <param name="pass">A SurfacePass instance.</param>
 /// <param name="time">The elapsed time.</param>
 /// <param name="dt">The time since last call.</param>
 public abstract void ApplyLayer(DeviceContext context, GraphicsResource output, OpticalProfile profile, SurfacePass pass, double time, double dt);
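
For illustration, a minimal concrete layer might look as follows; the CircularApertureLayer name, the Resources.CircularAperture shader, and the exact SurfacePass.Pass overload are assumptions modeled on the calls in Example #6:

 // Hypothetical sketch; only the ApplyLayer contract is taken from above.
 public class CircularApertureLayer : ApertureLayer
 {
     public override void ApplyLayer(DeviceContext context, GraphicsResource output, OpticalProfile profile, SurfacePass pass, double time, double dt)
     {
         // Full-screen pass writing per-pixel transmittance; the composer's
         // blend state folds it multiplicatively into the other layers.
         pass.Pass(context, Encoding.ASCII.GetString(Resources.CircularAperture), output.Dimensions, output.RTV, null, null, null);
     }
 }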
Example #4
        /// <summary>
        /// Creates a ConvolutionEngine instance.
        /// </summary>
        /// <param name="device">The graphics device to use.</param>
        /// <param name="context">The graphics context to use.</param>
        /// <param name="resolution">The convolution resolution.</param>
        public ConvolutionEngine(Device device, DeviceContext context, Size resolution)
        {
            fft = FastFourierTransform.Create2DComplex(context, resolution.Width, resolution.Height);
            fft.InverseScale = 1.0f / (float)(resolution.Width * resolution.Height);
            this.resolution = resolution;

            FastFourierTransformBufferRequirements bufferReqs = fft.BufferRequirements;
            precomputed = new FFTBuffer[bufferReqs.PrecomputeBufferCount];
            temporaries = new FFTBuffer[bufferReqs.TemporaryBufferCount];

            for (int t = 0; t < precomputed.Length; ++t)
                precomputed[t] = FFTUtils.AllocateBuffer(device, bufferReqs.PrecomputeBufferSizes[t]);

            for (int t = 0; t < temporaries.Length; ++t)
                temporaries[t] = FFTUtils.AllocateBuffer(device, bufferReqs.TemporaryBufferSizes[t]);

            UnorderedAccessView[] precomputedUAV = new UnorderedAccessView[bufferReqs.PrecomputeBufferCount];
            for (int t = 0; t < precomputed.Length; ++t) precomputedUAV[t] = precomputed[t].view;

            UnorderedAccessView[] temporariesUAV = new UnorderedAccessView[bufferReqs.TemporaryBufferCount];
            for (int t = 0; t < temporaries.Length; ++t) temporariesUAV[t] = temporaries[t].view;

            fft.AttachBuffersAndPrecompute(temporariesUAV, precomputedUAV);

            lBuf = FFTUtils.AllocateBuffer(device, 2 * resolution.Width * resolution.Height);
            rBuf = FFTUtils.AllocateBuffer(device, 2 * resolution.Width * resolution.Height);
            tBuf = FFTUtils.AllocateBuffer(device, 2 * resolution.Width * resolution.Height);

            rConvolved = new GraphicsResource(device, resolution, Format.R32_Float, true, true);
            gConvolved = new GraphicsResource(device, resolution, Format.R32_Float, true, true);
            bConvolved = new GraphicsResource(device, resolution, Format.R32_Float, true, true);
            staging    = new GraphicsResource(device, new Size(resolution.Width / 2, resolution.Height / 2), Format.R32G32B32A32_Float, true, true);

            BlendStateDescription description = new BlendStateDescription()
            {
                AlphaToCoverageEnable = false,
                IndependentBlendEnable = false,
            };

            description.RenderTarget[0] = new RenderTargetBlendDescription()
            {
                IsBlendEnabled = true,

                SourceBlend = BlendOption.One,
                DestinationBlend = BlendOption.One,
                BlendOperation = BlendOperation.Add,

                SourceAlphaBlend = BlendOption.Zero,
                DestinationAlphaBlend = BlendOption.Zero,
                AlphaBlendOperation = BlendOperation.Add,

                RenderTargetWriteMask = ColorWriteMaskFlags.Red
                                      | ColorWriteMaskFlags.Green
                                      | ColorWriteMaskFlags.Blue,
            };

            blendState = new BlendState(device, description);
        }
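
The One/One additive blend above suggests a driver method that accumulates the three channel convolutions into their respective targets; a hedged sketch (Convolve and its signature are assumptions, ConvolveChannel is shown in Example #6):

        // Hypothetical driver; only blendState, the *Convolved targets and
        // ConvolveChannel appear in this section.
        public void Convolve(Device device, DeviceContext context, SurfacePass pass, ShaderResourceView frame, ShaderResourceView flare)
        {
            context.OutputMerger.SetBlendState(blendState);

            ConvolveChannel(device, context, pass, frame, flare, rConvolved, "x");
            ConvolveChannel(device, context, pass, frame, flare, gConvolved, "y");
            ConvolveChannel(device, context, pass, frame, flare, bConvolved, "z");

            context.OutputMerger.SetBlendState(null);
        }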
Example #5
        /// <summary>
        /// Creates a DiffractionEngine instance.
        /// </summary>
        /// <param name="device">The graphics device to use.</param>
        /// <param name="context">The graphics context to use.</param>
        /// <param name="resolution">The diffraction resolution.</param>
        public DiffractionEngine(Device device, DeviceContext context, Size resolution)
        {
            fft = FastFourierTransform.Create2DComplex(context, resolution.Width, resolution.Height);
            fft.ForwardScale = 1.0f / (float)(resolution.Width * resolution.Height);
            this.resolution = resolution;

            FastFourierTransformBufferRequirements bufferReqs = fft.BufferRequirements;
            precomputed = new FFTBuffer[bufferReqs.PrecomputeBufferCount];
            temporaries = new FFTBuffer[bufferReqs.TemporaryBufferCount];

            for (int t = 0; t < precomputed.Length; ++t)
                precomputed[t] = FFTUtils.AllocateBuffer(device, bufferReqs.PrecomputeBufferSizes[t]);

            for (int t = 0; t < temporaries.Length; ++t)
                temporaries[t] = FFTUtils.AllocateBuffer(device, bufferReqs.TemporaryBufferSizes[t]);

            UnorderedAccessView[] precomputedUAV = new UnorderedAccessView[bufferReqs.PrecomputeBufferCount];
            for (int t = 0; t < precomputed.Length; ++t) precomputedUAV[t] = precomputed[t].view;

            UnorderedAccessView[] temporariesUAV = new UnorderedAccessView[bufferReqs.TemporaryBufferCount];
            for (int t = 0; t < temporaries.Length; ++t) temporariesUAV[t] = temporaries[t].view;

            fft.AttachBuffersAndPrecompute(temporariesUAV, precomputedUAV);

            /* We are doing a complex to complex transform, so we need two floats per pixel. */
            buffer = FFTUtils.AllocateBuffer(device, 2 * resolution.Width * resolution.Height);

            transform = new GraphicsResource(device, resolution, Format.R32_Float, true, true);
            spectrum  = new GraphicsResource(device, resolution, Format.R32G32B32A32_Float, true, true, true);
        }
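
The forward scale set here multiplies the forward transform's output, so the spectrum is normalized by the pixel count up front. Instantiation might look like this (the power-of-two resolution is a conventional choice for GPU FFTs, not a requirement documented in this example):

        // Hedged usage sketch; 1024x1024 is an arbitrary power-of-two size.
        var diffraction = new DiffractionEngine(device, device.ImmediateContext, new Size(1024, 1024));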
Example #6
        /// <summary>
        /// Convolves a single color channel of two source textures
        /// into a target, via the product of their Fourier transforms.
        /// </summary>
        /// <param name="device">The graphics device to use.</param>
        /// <param name="context">The graphics context to use.</param>
        /// <param name="pass">A SurfacePass instance.</param>
        /// <param name="sourceA">The first source texture.</param>
        /// <param name="sourceB">The second source texture.</param>
        /// <param name="target">The target receiving the convolved channel.</param>
        /// <param name="channel">The channel to convolve ("x", "y" or "z").</param>
        private void ConvolveChannel(Device device, DeviceContext context, SurfacePass pass, ShaderResourceView sourceA, ShaderResourceView sourceB, GraphicsResource target, String channel)
        {
            if ((channel != "x") && (channel != "y") && (channel != "z")) throw new ArgumentException("Invalid RGB channel specified.");

            ViewportF viewport = new ViewportF(0, 0, resolution.Width, resolution.Height);

            ZeroPad(device, context, pass, sourceA, lBuf.view, channel);
            ZeroPad(device, context, pass, sourceB, rBuf.view, channel);

            fft.ForwardTransform(lBuf.view, tBuf.view);
            fft.ForwardTransform(rBuf.view, lBuf.view);

            DataStream cbuffer = new DataStream(8, true, true);
            cbuffer.Write<uint>((uint)resolution.Width);
            cbuffer.Write<uint>((uint)resolution.Height);
            cbuffer.Position = 0;

            pass.Pass(context, Encoding.ASCII.GetString(Resources.ConvolutionMultiply), viewport, null, null, new[] { tBuf.view, lBuf.view }, cbuffer);

            cbuffer.Dispose();

            cbuffer = new DataStream(8, true, true);
            cbuffer.Write<uint>((uint)resolution.Width);
            cbuffer.Write<uint>((uint)resolution.Height);
            cbuffer.Position = 0;

            UnorderedAccessView fftView = fft.InverseTransform(tBuf.view);

            pass.Pass(context, Encoding.ASCII.GetString(Resources.ConvolutionOutput), target.Dimensions, target.RTV, null, new[] { fftView }, cbuffer);

            fftView.Dispose();
            cbuffer.Dispose();
        }
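
For intuition, the two forward transforms, the multiply pass and the inverse transform implement the convolution theorem, conv(a, b) = IFFT(FFT(a) * FFT(b)), with the product taken pointwise over complex values. A CPU sketch of the pointwise product the ConvolutionMultiply shader presumably computes (the interleaved re/im layout matches the 2 * width * height buffer allocations in Example #4):

        // Hedged CPU analogue of the multiply pass; each buffer holds
        // interleaved (re, im) pairs, two floats per pixel.
        private static void ComplexMultiply(float[] a, float[] b, float[] result)
        {
            for (int i = 0; i < a.Length; i += 2)
            {
                float re = a[i] * b[i] - a[i + 1] * b[i + 1];
                float im = a[i] * b[i + 1] + a[i + 1] * b[i];
                result[i] = re;
                result[i + 1] = im;
            }
        }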
Example #7
        /// <summary>
        /// Composes an aperture.
        /// </summary>
        /// <param name="context">The device context.</param>
        /// <param name="output">The output render target.</param>
        /// <param name="profile">The optical profile to use.</param>
        /// <param name="pass">A SurfacePass instance to use.</param>
        /// <param name="time">The elapsed time.</param>
        /// <param name="dt">The time since last call.</param>
        public void Compose(DeviceContext context, GraphicsResource output, OpticalProfile profile, SurfacePass pass, double time, double dt)
        {
            context.ClearRenderTargetView(output.RTV, Color4.White);
            context.OutputMerger.SetBlendState(blendState);

            foreach (ApertureLayer layer in layers)
                layer.ApplyLayer(context, output, profile, pass, time, dt);

            context.OutputMerger.SetBlendState(null);
        }
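
Clearing the target to white makes the initial aperture fully transmissive (the multiplicative identity), so each layer can only attenuate it. A hedged per-frame usage sketch; every name besides Compose is assumed to exist in the caller:

        composer.Compose(context, aperture, profile, pass, totalTime, deltaTime);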
Example #8
        /// <summary>
        /// (Re)initializes all size-dependent graphics resources,
        /// resizing the swap chain buffers to match.
        /// </summary>
        /// <param name="dimensions">The frame dimensions.</param>
        private void InitializeResources(Size dimensions)
        {
            if (toneMapper != null) toneMapper.Dispose();
            if (hdrBuffer != null) hdrBuffer.Dispose();
            if (ldrBuffer != null) ldrBuffer.Dispose();
            if (resolved != null) resolved.Dispose();

            swapChain.ResizeBuffers(0, 0, 0, Format.Unknown, SwapChainFlags.None);

            toneMapper   = new ToneMapper(device, dimensions, (Double)mainBar["exposure"].Value, (Double)mainBar["gamma"].Value);
            resolved     = new GraphicsResource(device, dimensions, Format.R32G32B32A32_Float, true, true);
            ldrBuffer    = new GraphicsResource(device, swapChain.GetBackBuffer<Texture2D>(0));

            /* hdrBuffer is a bit special since it can be multisampled - create this one manually. */
            hdrBuffer = new GraphicsResource(device, new Texture2D(device, new Texture2DDescription()
            {
                ArraySize = 1,
                MipLevels = 1,
                Width = dimensions.Width,
                Height = dimensions.Height,
                Usage = ResourceUsage.Default,
                Format = Format.R32G32B32A32_Float,
                CpuAccessFlags = CpuAccessFlags.None,
                OptionFlags = ResourceOptionFlags.None,
                SampleDescription = Settings.MultisamplingOptions,
                BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
            }));
        }
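
Because hdrBuffer may be multisampled while resolved is not, a resolve step is presumably needed before tone mapping. A sketch assuming GraphicsResource exposes its underlying texture as Resource (SharpDX's ResolveSubresource takes the destination first, mirroring native D3D11):

            // Hedged sketch; the .Resource accessor is an assumption.
            context.ResolveSubresource(resolved.Resource, 0, hdrBuffer.Resource, 0, Format.R32G32B32A32_Float);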