// Example #1
        /// <summary>
        /// UpdateRenderingContext is called from the BeginVirtualRealityRenderingStep and should update the properties in the RenderingContext according to the current eye.
        /// See remarks for more info about the usual tasks that are performed in this method.
        /// </summary>
        /// <remarks>
        /// <para>
        /// <b>UpdateRenderingContext</b> is called from the BeginVirtualRealityRenderingStep and should update the properties in the RenderingContext according to the current eye.
        /// </para>
        /// <para>
        /// This method is usually called from the <see cref="BeginVirtualRealityRenderingStep"/> (when the virtual reality provider is enabled).
        /// </para>
        /// <para>
        /// Usually this method does the following:<br/>
        /// - Creates the back buffers and views that are needed for rendering
        /// (the back buffers are also recreated if the size of <see cref="RenderingContext.CurrentBackBufferDescription"/> is different from the size of previously created back buffers).<br/>
        /// - Updates the <see cref="VirtualRealityContext.CurrentEye"/> property.<br/>
        /// - Sets the <see cref="RenderingContext.UsedCamera"/> property to a <see cref="StereoscopicCamera"/><br/>
        /// - Calls <see cref="RenderingContext.SetBackBuffer"/> method and sets the new back buffers.<br/>
        /// - Updates <see cref="ResolveMultisampledBackBufferRenderingStep.DestinationBuffer"/> on the <see cref="DXScene.DefaultResolveMultisampledBackBufferRenderingStep"/> and sets it to the eye texture.
        /// </para>
        /// </remarks>
        /// <param name="renderingContext">RenderingContext</param>
        /// <param name="isNewFrame">true if no eye was yet rendered for the current frame; false if the first eye was already rendered for the current frame and we need to render the second eye</param>
        public override void UpdateRenderingContext(RenderingContext renderingContext, bool isNewFrame)
        {
            // This code is executed inside BeginVirtualRealityRenderingStep before all the objects are rendered.

            // Base method does:
            // - sets the virtualRealityContext.CurrentEye based on the isNewFrame parameter: isNewFrame == true => LeftEye else RightEye
            // - ensures that stereoscopicCamera is created and sets its properties for the current eye and based on the current EyeSeparation, Parallax and InvertLeftRightView
            // - sets renderingContext.UsedCamera = stereoscopicCamera
            base.UpdateRenderingContext(renderingContext, isNewFrame);


            var virtualRealityContext = renderingContext.VirtualRealityContext;

            // Map the engine's Eye enum to the OVR SDK's EyeType and to the 0/1 index
            // used for the per-eye arrays (_hmdDesc.DefaultEyeFov, _eyeTextureSwapChains, _eyePoses).
            Eye     currentEye = virtualRealityContext.CurrentEye;
            EyeType ovrEye     = currentEye == Eye.Left ? EyeType.Left : EyeType.Right;

            int eyeIndex = currentEye == Eye.Left ? 0 : 1;


            // Ask the OVR SDK for the ideal eye-texture size for the current FOV and pixel density.
            FovPort defaultEyeFov = _hmdDesc.DefaultEyeFov[eyeIndex];
            var     idealSize     = _ovr.GetFovTextureSize(_sessionPtr, ovrEye, defaultEyeFov, _pixelsPerDisplayPixel);

            // When we render this frame for the first time
            // we also check that all the required resources are created
            // Check if we need to create or recreate the RenderTargetViews and DepthStencilViews
            if (isNewFrame &&
                (_eyeTextureSwapChains[eyeIndex] == null ||
                 _eyeTextureSwapChains[eyeIndex].Size.Width != idealSize.Width ||
                 _eyeTextureSwapChains[eyeIndex].Size.Height != idealSize.Height))
            {
                CreateResources(renderingContext.DXScene);
            }

            // Sample the head/eye poses only once per frame (on the first eye);
            // both eyes then use the same _eyePoses snapshot so they stay consistent.
            // NOTE(review): _sensorSampleTime is presumably passed to the SDK when the
            // frame is submitted (for latency timing) — confirm in the submit code.
            if (isNewFrame)
            {
                _ovr.GetEyePoses(_sessionPtr, 0L, true, _hmdToEyeOffset, ref _eyePoses, out _sensorSampleTime);
            }

            var camera = renderingContext.DXScene.Camera;


            // From OculusRoomTiny main.cpp #221

            //Get the pose information
            var eyeQuat = SharpDXHelpers.ToQuaternion(_eyePoses[eyeIndex].Orientation);
            var eyePos  = SharpDXHelpers.ToVector3(_eyePoses[eyeIndex].Position);

            // Get view and projection matrices for the Rift camera
            Vector3 cameraPosition       = camera.GetCameraPosition();
            Matrix  cameraRotationMatrix = camera.View;

            cameraRotationMatrix.M41 = 0; // Remove translation
            cameraRotationMatrix.M42 = 0;
            cameraRotationMatrix.M43 = 0;

            cameraRotationMatrix.Invert(); // Invert to get rotation matrix

            // Rotate the HMD-reported eye position offset into the scene's coordinate space
            // so that head movement is applied relative to the scene camera's orientation.
            Vector4 rotatedEyePos4 = Vector3.Transform(eyePos, cameraRotationMatrix);
            var     rotatedEyePos  = new Vector3(rotatedEyePos4.X, rotatedEyePos4.Y, rotatedEyePos4.Z);

            var finalCameraPosition = cameraPosition + rotatedEyePos;

            // Combine the HMD's orientation (from the tracked eye pose) with the scene
            // camera's rotation to get the final view orientation for this eye.
            var eyeQuaternionMatrix = Matrix.RotationQuaternion(eyeQuat);
            var finalRotationMatrix = eyeQuaternionMatrix * cameraRotationMatrix;

            // Derive look and up directions by rotating the canonical RH forward (0,0,-1)
            // and up (0,1,0) vectors, then build a right-handed look-at view matrix.
            Vector4 lookDirection4 = Vector3.Transform(new Vector3(0, 0, -1), finalRotationMatrix);
            var     lookDirection  = new Vector3(lookDirection4.X, lookDirection4.Y, lookDirection4.Z);

            Vector4 upDirection4 = Vector3.Transform(Vector3.UnitY, finalRotationMatrix);
            var     upDirection  = new Vector3(upDirection4.X, upDirection4.Y, upDirection4.Z);

            var viewMatrix = Matrix.LookAtRH(finalCameraPosition, finalCameraPosition + lookDirection, upDirection);



            // Clamp the near plane to a sane minimum so the OVR projection stays valid.
            float zNear = camera.NearPlaneDistance;
            float zFar  = camera.FarPlaneDistance;

            if (zNear < 0.05f)
            {
                zNear = 0.05f;
            }

            zFar *= 1.2f; // increase the zFar - the FarPlaneDistance is not exactly correct because the camera can be higher because the eye's Position can be over the Camera's position

            //zNear = 0.1f;
            //zFar = 100;

            // Let the OVR SDK build the per-eye (asymmetric FOV) projection matrix.
            var eyeRenderDesc = _ovr.GetRenderDesc(_sessionPtr, ovrEye, _hmdDesc.DefaultEyeFov[eyeIndex]);

            var projectionMatrix = _ovr.Matrix4f_Projection(eyeRenderDesc.Fov, zNear, zFar, ProjectionModifier.None).ToMatrix();

            // NOTE(review): transpose looks like a row-major vs. column-major conversion
            // between the OVR SDK matrix layout and SharpDX — confirm against the wrapper's ToMatrix.
            projectionMatrix.Transpose();

            // Override the stereoscopic camera set by the base class with a matrix camera
            // that uses the HMD-derived view and projection for this eye.
            _matrixCamera.Projection = projectionMatrix;
            _matrixCamera.View       = viewMatrix;
            _matrixCamera.SetCameraPosition(finalCameraPosition);

            renderingContext.UsedCamera = _matrixCamera;


            // Change the current viewport
            renderingContext.CurrentViewport = _eyeTextureSwapChains[eyeIndex].Viewport;
            renderingContext.DeviceContext.Rasterizer.SetViewport(renderingContext.CurrentViewport);

            // Without MSAA render directly into the current texture of this eye's swap chain;
            // with MSAA render into the shared multisampled buffer and let the resolve step
            // copy (resolve) it into the eye texture.
            if (_msaaBackBuffer == null)
            {
                renderingContext.SetBackBuffer(backBuffer: _eyeTextureSwapChains[eyeIndex].CurrentTexture,
                                               backBufferDescription: _eyeTextureSwapChains[eyeIndex].CurrentTextureDescription,
                                               renderTargetView: _eyeTextureSwapChains[eyeIndex].CurrentRTView,
                                               depthStencilView: _eyeTextureSwapChains[eyeIndex].CurrentDepthStencilView,
                                               bindNewRenderTargetsToDeviceContext: false); // Do not bind new buffers because this is done in the next rendering step - PrepareRenderTargetsRenderingStep
            }
            else
            {
                // MSAA
                renderingContext.SetBackBuffer(backBuffer: _msaaBackBuffer,
                                               backBufferDescription: _msaaBackBufferDescription,
                                               renderTargetView: _msaaBackBufferRenderTargetView,
                                               depthStencilView: _msaaDepthStencilView,
                                               bindNewRenderTargetsToDeviceContext: false); // Do not bind new buffers because this is done in the next rendering step - PrepareRenderTargetsRenderingStep

                renderingContext.DXScene.DefaultResolveMultisampledBackBufferRenderingStep.DestinationBuffer = _eyeTextureSwapChains[eyeIndex].CurrentTexture;
            }


            // When we render this frame for the first time set the NewViewport on the ChangeBackBufferRenderingStep to reset the Viewport from split screen viewport to the final full screen viewport
            if (isNewFrame && _resetViewportRenderingStep != null)
            {
                int backBufferWidth  = renderingContext.FinalBackBufferDescription.Width;
                int backBufferHeight = renderingContext.FinalBackBufferDescription.Height;

                _resetViewportRenderingStep.NewViewport = new ViewportF(0, 0, backBufferWidth, backBufferHeight);
            }
        }