/// <summary>
/// Allocates the backing texture storage for a cache of <paramref name="numCubeMaps"/> cubemaps.
/// On platforms without cubemap-array support, falls back to a 2D panorama array (4w x 2w per slice)
/// plus per-mip staging render textures used for the cube-to-pano blit.
/// </summary>
/// <param name="numCubeMaps">Number of cubemap slots in the cache.</param>
/// <param name="width">Edge size of each (square) cubemap face.</param>
/// <param name="format">Graphics format for the cache texture.</param>
/// <param name="isMipMapped">Whether the cache should carry a full mip chain.</param>
/// <param name="cubeBlitMaterial">Material used to blit cubemaps into the panorama fallback.</param>
/// <returns>The result of the base <c>AllocTextureArray(numCubeMaps)</c> bookkeeping call.</returns>
public bool AllocTextureArray(int numCubeMaps, int width, GraphicsFormat format, bool isMipMapped, Material cubeBlitMaterial)
{
    var res = AllocTextureArray(numCubeMaps);
    m_NumMipLevels = GetNumMips(width, width); // will calculate same way whether we have cube array or not
    if (!TextureCache.supportsCubemapArrayTextures)
    {
        // Fallback path: emulate a cubemap array with a latlong (panorama) Texture2DArray.
        m_CubeBlitMaterial = cubeBlitMaterial;
        int panoWidthTop = 4 * width;  // 4:2 aspect for an equirect panorama of a cube face of size 'width'
        int panoHeightTop = 2 * width;
        // create panorama 2D array. Hardcoding the render target for now. No convenient way atm to
        m_CacheNoCubeArray = new Texture2DArray(panoWidthTop, panoHeightTop, numCubeMaps, format, isMipMapped ? TextureCreationFlags.MipChain : TextureCreationFlags.None)
        {
            hideFlags = HideFlags.HideAndDontSave,
            // Repeat horizontally (panorama wraps around), clamp vertically (poles).
            wrapMode = TextureWrapMode.Repeat,
            wrapModeV = TextureWrapMode.Clamp,
            filterMode = FilterMode.Trilinear,
            anisoLevel = 0,
            name = CoreUtils.GetTextureAutoName(panoWidthTop, panoHeightTop, format, TextureDimension.Tex2DArray, depth: numCubeMaps, name: m_CacheName)
        };
        m_NumPanoMipLevels = isMipMapped ? GetNumMips(panoWidthTop, panoHeightTop) : 1;
        // One staging RT per mip level; each is the pano size right-shifted by the mip index.
        m_StagingRTs = new RenderTexture[m_NumPanoMipLevels];
        for (int m = 0; m < m_NumPanoMipLevels; m++)
        {
            m_StagingRTs[m] = new RenderTexture(Mathf.Max(1, panoWidthTop >> m), Mathf.Max(1, panoHeightTop >> m), 0, format) { hideFlags = HideFlags.HideAndDontSave };
            m_StagingRTs[m].name = CoreUtils.GetRenderTargetAutoName(Mathf.Max(1, panoWidthTop >> m), Mathf.Max(1, panoHeightTop >> m), 1, format, String.Format("PanaCache{0}", m));
        }
        if (m_CubeBlitMaterial)
        {
            // Cache shader property IDs used when blitting cube faces into the panorama.
            m_CubeMipLevelPropName = Shader.PropertyToID("_cubeMipLvl");
            m_cubeSrcTexPropName = Shader.PropertyToID("_srcCubeTexture");
        }
    }
    else
    {
        // Native cubemap-array path.
        var desc = new RenderTextureDescriptor(width, width, format, 0)
        {
            dimension = TextureDimension.CubeArray,
            volumeDepth = numCubeMaps * 6, // We need to multiply by the face count of a cubemap here
            autoGenerateMips = false,
            useMipMap = isMipMapped,
            msaaSamples = 1,
        };
        m_Cache = new RenderTexture(desc)
        {
            hideFlags = HideFlags.HideAndDontSave,
            wrapMode = TextureWrapMode.Clamp,
            filterMode = FilterMode.Trilinear,
            anisoLevel = 0, // It is important to set 0 here, else unity force anisotropy filtering
            name = CoreUtils.GetTextureAutoName(width, width, format, desc.dimension, depth: numCubeMaps, name: m_CacheName, mips: isMipMapped)
        };
        // We need to clear the content in case it is read on first frame, since on console we have no guarantee that
        // the content won't be NaN
        ClearCache();
        m_Cache.Create();
    }
    return(res);
}
/// <summary>
/// Decides whether the camera must render into an intermediate color texture instead
/// of the backbuffer. Offscreen cameras never need one; otherwise any feature that
/// requires a resolvable/resampled color target forces the intermediate.
/// </summary>
public static bool RequiresIntermediateColorTexture(ref CameraData cameraData, RenderTextureDescriptor baseDescriptor)
{
    // Offscreen renders already target their own texture.
    if (cameraData.isOffscreenRender)
        return false;

    bool msaaNeedsManualResolve = cameraData.msaaSamples > 1 && !SystemInfo.supportsMultisampleAutoResolve;
    bool renderScaleDiffers = !Mathf.Approximately(cameraData.renderScale, 1.0f);
    bool targetIsTex2DArray = baseDescriptor.dimension == TextureDimension.Tex2DArray;

    return msaaNeedsManualResolve
        || cameraData.isSceneViewCamera
        || renderScaleDiffers
        || cameraData.isHdrEnabled
        || cameraData.postProcessEnabled
        || cameraData.requiresOpaqueTexture
        || targetIsTex2DArray
        || !cameraData.isDefaultViewport;
}
// Native interop entry point: fills 'value' with the engine's current eye texture descriptor.
// NOTE(review): implemented on the native side; the attribute binding it (e.g. an ICall/DllImport
// attribute) is presumably declared adjacent to this line — confirm in the full file.
private static extern void INTERNAL_get_eyeTextureDesc(out RenderTextureDescriptor value);
/// <summary>
/// Per-frame decal pass: uploads culled/sorted decal data to the GPU, builds a 4-level
/// min/max depth-bound pyramid, then runs the DecalCull compute kernel to fill a per-tile
/// decal index volume (_DecalTile) consumed later in the frame.
/// </summary>
/// <param name="cam">Pipeline camera wrapper providing the Unity Camera.</param>
/// <param name="data">Per-frame pipeline data (command buffer, shader resources).</param>
public void FrameUpdate(PipelineCamera cam, ref PipelineCommandData data)
{
    CommandBuffer buffer = data.buffer;
    // No decal data this frame: tell shaders decals are off and bail out early.
    if (!Decal.decalDatas.isCreated)
    {
        buffer.SetGlobalInt(_EnableDecal, 0);
        return;
    }
    buffer.SetGlobalInt(_EnableDecal, 1);
    // Wait for the cull/sort jobs before reading their results below.
    handle.Complete();
    // Grow the GPU buffer geometrically (1.5x) when the culled decal count outgrows it.
    if (cullJob.count > decalBuffer.count)
    {
        int oldCount = decalBuffer.count;
        decalBuffer.Dispose();
        decalBuffer = new ComputeBuffer((int)max(oldCount * 1.5f, cullJob.count), sizeof(DecalStrct));
    }
    // Lazily create the min/max depth downsampling material.
    if (!minMaxBoundMat)
    {
        minMaxBoundMat = new Material(data.resources.shaders.minMaxDepthBounding);
    }
    int pixelWidth = cam.cam.pixelWidth;
    int pixelHeight = cam.cam.pixelHeight;
    // Upload the sorted decal structs straight from the job's native memory.
    decalBuffer.SetDataPtr(sortJob.sortedDecalDatas.unsafePtr, 0, cullJob.count);
    // Allocate the 4-level depth-bound pyramid (1/2, 1/4, 1/8, 1/16 resolution, RGHalf = min/max pair).
    buffer.GetTemporaryRT(_DownSampledDepth0, pixelWidth / 2, pixelHeight / 2, 0, FilterMode.Point, RenderTextureFormat.RGHalf, RenderTextureReadWrite.Linear, 1, false, RenderTextureMemoryless.None, false);
    buffer.GetTemporaryRT(_DownSampledDepth1, pixelWidth / 4, pixelHeight / 4, 0, FilterMode.Point, RenderTextureFormat.RGHalf, RenderTextureReadWrite.Linear, 1, false, RenderTextureMemoryless.None, false);
    buffer.GetTemporaryRT(_DownSampledDepth2, pixelWidth / 8, pixelHeight / 8, 0, FilterMode.Point, RenderTextureFormat.RGHalf, RenderTextureReadWrite.Linear, 1, false, RenderTextureMemoryless.None, false);
    buffer.GetTemporaryRT(_DownSampledDepth3, pixelWidth / 16, pixelHeight / 16, 0, FilterMode.Point, RenderTextureFormat.RGHalf, RenderTextureReadWrite.Linear, 1, false, RenderTextureMemoryless.None, false);
    // Downsample chain: pass 0 seeds level 0 from scene depth; pass 1 halves each level,
    // reading the previous level via the _TargetDepthTexture global set between blits.
    buffer.BlitSRT(_DownSampledDepth0, minMaxBoundMat, 0);
    buffer.SetGlobalTexture(ShaderIDs._TargetDepthTexture, _DownSampledDepth0);
    buffer.BlitSRT(_DownSampledDepth1, minMaxBoundMat, 1);
    buffer.SetGlobalTexture(ShaderIDs._TargetDepthTexture, _DownSampledDepth1);
    buffer.BlitSRT(_DownSampledDepth2, minMaxBoundMat, 1);
    buffer.SetGlobalTexture(ShaderIDs._TargetDepthTexture, _DownSampledDepth2);
    buffer.BlitSRT(_DownSampledDepth3, minMaxBoundMat, 1);
    // Tile grid matches the coarsest pyramid level (1/16 resolution).
    int2 tileSize = int2(pixelWidth / 16, pixelHeight / 16);
    // 3D RInt volume: one slice per potential decal index per tile.
    RenderTextureDescriptor lightTileDisc = new RenderTextureDescriptor
    {
        autoGenerateMips = false,
        bindMS = false,
        colorFormat = RenderTextureFormat.RInt,
        depthBufferBits = 0,
        dimension = TextureDimension.Tex3D,
        enableRandomWrite = true,
        width = tileSize.x,
        height = tileSize.y,
        msaaSamples = 1,
        volumeDepth = CBDRSharedData.MAXLIGHTPERTILE
    };
    buffer.GetTemporaryRT(ShaderIDs._DecalTile, lightTileDisc);
    tileSizeArray[0] = tileSize.x;
    tileSizeArray[1] = tileSize.y;
    // Compute the four far-plane frustum corners for tile ray reconstruction in the kernel.
    float3 * corners = stackalloc float3[4];
    PerspCam perspCam = new PerspCam
    {
        fov = cam.cam.fieldOfView,
        up = cam.cam.transform.up,
        right = cam.cam.transform.right,
        forward = cam.cam.transform.forward,
        position = cam.cam.transform.position,
        aspect = cam.cam.aspect,
    };
    PipelineFunctions.GetFrustumCorner(ref perspCam, 1, corners);
    for (int i = 0; i < 4; ++i)
    {
        frustumCorners[i] = float4(corners[i], 1);
    }
    // Bind kernel inputs and dispatch one 8x8 thread group block per tile region.
    buffer.SetComputeVectorArrayParam(cbdrShader, ShaderIDs._FrustumCorners, frustumCorners);
    buffer.SetComputeVectorParam(cbdrShader, ShaderIDs._CameraPos, cam.cam.transform.position);
    buffer.SetComputeIntParams(cbdrShader, ShaderIDs._TileSize, tileSizeArray);
    buffer.SetGlobalVector(ShaderIDs._TileSize, new Vector4(tileSize.x, tileSize.y));
    buffer.SetComputeVectorParam(cbdrShader, ShaderIDs._CameraForward, cam.cam.transform.forward);
    buffer.SetComputeTextureParam(cbdrShader, CBDRSharedData.DecalCull, ShaderIDs._DepthBoundTexture, new RenderTargetIdentifier(_DownSampledDepth3));
    buffer.SetComputeTextureParam(cbdrShader, CBDRSharedData.DecalCull, ShaderIDs._DecalTile, new RenderTargetIdentifier(ShaderIDs._DecalTile));
    buffer.SetComputeBufferParam(cbdrShader, CBDRSharedData.DecalCull, ShaderIDs._AllDecals, decalBuffer);
    buffer.SetComputeIntParam(cbdrShader, ShaderIDs._DecalCount, cullJob.count);
    buffer.SetGlobalBuffer(ShaderIDs._AllDecals, decalBuffer);
    buffer.DispatchCompute(cbdrShader, CBDRSharedData.DecalCull, Mathf.CeilToInt(tileSize.x / 8f), Mathf.CeilToInt(tileSize.y / 8f), 1);
    // Pyramid levels are only needed by the cull kernel; release them now.
    // _DecalTile is read later in the frame, so it is released at end of frame instead.
    buffer.ReleaseTemporaryRT(_DownSampledDepth0);
    buffer.ReleaseTemporaryRT(_DownSampledDepth1);
    buffer.ReleaseTemporaryRT(_DownSampledDepth2);
    buffer.ReleaseTemporaryRT(_DownSampledDepth3);
    RenderPipeline.ReleaseRTAfterFrame(ShaderIDs._DecalTile);
    decalCullResults.Dispose();
    decalCompareResults.Dispose();
}
// Intentionally empty: after-transparent callback hook. Kept so the callback signature is
// registered; no work is required at this point in the frame.
private void MyAfterTransparent(Camera camera, ScriptableRenderContext context, RenderTargetIdentifier RTid, RenderTextureDescriptor Desc) { }
/// <inheritdoc/>
// Initializes the semantic segmentation labeler: sizes the label target to the camera,
// wires the segmentation render pass (HDRP or URP variant), registers the annotation
// definition, and starts the async texture readback.
protected override void Setup()
{
    var myCamera = perceptionCamera.GetComponent<Camera>();
    camWidth = myCamera.pixelWidth;
    camHeight = myCamera.pixelHeight;
    if (labelConfig == null)
    {
        throw new InvalidOperationException(
            "SemanticSegmentationLabeler's LabelConfig must be assigned");
    }
    m_AsyncAnnotations = new Dictionary<int, AsyncAnnotation>();
    if (targetTexture != null)
    {
        // Labels are raw IDs encoded in color channels; sRGB conversion would corrupt them.
        if (targetTexture.sRGB)
        {
            Debug.LogError("targetTexture supplied to SemanticSegmentationLabeler must be in Linear mode. Disabling labeler.");
            this.enabled = false;
            // NOTE(review): execution continues after disabling — confirm that's intended
            // rather than an early return.
        }
        // NOTE(review): depth buffer bits = 8 here (and in the RenderTexture below); Unity
        // conventionally accepts 0/16/24/32 — confirm 8 is valid on the target platforms.
        var renderTextureDescriptor = new RenderTextureDescriptor(camWidth, camHeight, GraphicsFormat.R8G8B8A8_UNorm, 8);
        targetTexture.descriptor = renderTextureDescriptor;
    }
    else
    {
        m_TargetTextureOverride = new RenderTexture(camWidth, camHeight, 8, RenderTextureFormat.ARGB32, RenderTextureReadWrite.Linear);
    }
    // NOTE(review): presumably 'targetTexture' is a property that falls back to
    // m_TargetTextureOverride; if it is a plain field this dereference NREs when the
    // else-branch above ran — verify against the declaration.
    targetTexture.Create();
    targetTexture.name = "Labeling";
#if HDRP_PRESENT
    // HDRP: inject a custom pass before rendering that draws label IDs into targetTexture.
    var gameObject = perceptionCamera.gameObject;
    var customPassVolume = gameObject.GetComponent<CustomPassVolume>() ?? gameObject.AddComponent<CustomPassVolume>();
    customPassVolume.injectionPoint = CustomPassInjectionPoint.BeforeRendering;
    customPassVolume.isGlobal = true;
    m_SemanticSegmentationPass = new SemanticSegmentationPass(myCamera, targetTexture, labelConfig)
    {
        name = "Labeling Pass"
    };
    customPassVolume.customPasses.Add(m_SemanticSegmentationPass);
#endif
#if URP_PRESENT
    // URP: equivalent scriptable render pass.
    perceptionCamera.AddScriptableRenderPass(new SemanticSegmentationUrpPass(myCamera, targetTexture, labelConfig));
#endif
    // Map each configured label to its name/color spec for the dataset annotation definition.
    var specs = labelConfig.labelEntries.Select((l) => new SemanticSegmentationSpec()
    {
        label_name = l.label,
        pixel_value = l.color
    }).ToArray();
    m_SemanticSegmentationAnnotationDefinition = DatasetCapture.RegisterAnnotationDefinition(
        "semantic segmentation",
        specs,
        "pixel-wise semantic segmentation label",
        "PNG",
        id: Guid.Parse(annotationId));
    // Asynchronously read label pixels back each frame and forward them for capture.
    m_SemanticSegmentationTextureReader = new RenderTextureReader<Color32>(targetTexture, myCamera,
        (frameCount, data, tex) => OnSemanticSegmentationImageRead(frameCount, data));
    visualizationEnabled = supportsVisualization;
}
/// <inheritdoc/>
// Copies the camera depth attachment into the pass target using the copy-depth material.
// Selects the matching MSAA resolve keyword from the sample count, then draws either a
// procedural quad (XR path) or a fullscreen mesh, with the appropriate Y-flip scale/bias.
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    if (m_CopyDepthMaterial == null)
    {
        Debug.LogErrorFormat("Missing {0}. {1} render pass will not execute. Check for missing reference in the renderer resources.", m_CopyDepthMaterial, GetType().Name);
        return;
    }
    CommandBuffer cmd = CommandBufferPool.Get();
    using (new ProfilingScope(cmd, ProfilingSampler.Get(URPProfileId.CopyDepth)))
    {
        // -1 means "use the camera target's sample count"; otherwise an explicit override.
        int cameraSamples = 0;
        if (MssaSamples == -1)
        {
            RenderTextureDescriptor descriptor = renderingData.cameraData.cameraTargetDescriptor;
            cameraSamples = descriptor.msaaSamples;
        }
        else
        {
            cameraSamples = MssaSamples;
        }
        CameraData cameraData = renderingData.cameraData;
        // Exactly one (or none) of the depth-MSAA keywords may be active at a time.
        switch (cameraSamples)
        {
            case 8:
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.EnableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
            case 4:
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.EnableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
            case 2:
                cmd.EnableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
            // MSAA disabled
            default:
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa2);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa4);
                cmd.DisableShaderKeyword(ShaderKeywordStrings.DepthMsaa8);
                break;
        }
        cmd.SetGlobalTexture("_CameraDepthAttachment", source.Identifier());
#if ENABLE_VR && ENABLE_XR_MODULE
        // XR uses procedural draw instead of cmd.blit or cmd.DrawFullScreenMesh
        if (renderingData.cameraData.xr.enabled)
        {
            // XR flip logic is not the same as non-XR case because XR uses draw procedure
            // and draw procedure does not need to take projection matrix yflip into account
            // We y-flip if
            // 1) we are bliting from render texture to back buffer and
            // 2) renderTexture starts UV at top
            // XRTODO: handle scalebias and scalebiasRt for src and dst separately
            bool isRenderToBackBufferTarget = destination.Identifier() == cameraData.xr.renderTarget && !cameraData.xr.renderTargetIsRenderTexture;
            bool yflip = isRenderToBackBufferTarget && SystemInfo.graphicsUVStartsAtTop;
            float flipSign = (yflip) ? -1.0f : 1.0f;
            Vector4 scaleBiasRt = (flipSign < 0.0f) ? new Vector4(flipSign, 1.0f, -1.0f, 1.0f) : new Vector4(flipSign, 0.0f, 1.0f, 1.0f);
            cmd.SetGlobalVector(ShaderPropertyId.scaleBiasRt, scaleBiasRt);
            cmd.DrawProcedural(Matrix4x4.identity, m_CopyDepthMaterial, 0, MeshTopology.Quads, 4);
        }
        else
#endif
        {
            // Blit has logic to flip projection matrix when rendering to render texture.
            // Currently the y-flip is handled in CopyDepthPass.hlsl by checking _ProjectionParams.x
            // If you replace this Blit with a Draw* that sets projection matrix double check
            // to also update shader.
            // scaleBias.x = flipSign
            // scaleBias.y = scale
            // scaleBias.z = bias
            // scaleBias.w = unused
            // In game view final target acts as back buffer were target is not flipped
            bool isGameViewFinalTarget = (cameraData.cameraType == CameraType.Game && destination == RenderTargetHandle.CameraTarget);
            bool yflip = (cameraData.IsCameraProjectionMatrixFlipped()) && !isGameViewFinalTarget;
            float flipSign = yflip ? -1.0f : 1.0f;
            Vector4 scaleBiasRt = (flipSign < 0.0f) ? new Vector4(flipSign, 1.0f, -1.0f, 1.0f) : new Vector4(flipSign, 0.0f, 1.0f, 1.0f);
            cmd.SetGlobalVector(ShaderPropertyId.scaleBiasRt, scaleBiasRt);
            cmd.DrawMesh(RenderingUtils.fullscreenMesh, Matrix4x4.identity, m_CopyDepthMaterial);
        }
    }
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
/// <summary>
/// Configure the pass with the source color target to operate on.
/// </summary>
/// <param name="baseDescriptor">Camera target descriptor; currently unused by this pass.</param>
/// <param name="colorHandle">Handle of the color target this pass will read from.</param>
public void Setup(RenderTextureDescriptor baseDescriptor, RenderTargetHandle colorHandle)
{
    m_Source = colorHandle;
}
// Copies this overlay's source textures into the current swap-chain stage of the
// compositor layer, per eye and per mip level. Non-cubemap shapes copy (with a
// premultiply-alpha blit on PC); cubemap shapes copy all six faces. Returns true if
// at least one mip of one eye was populated.
//
// Params: mipLevels  - number of mips to populate;
//         isHdr      - selects ARGBHalf vs ARGB32 staging format;
//         size       - mip-0 dimensions of the layer texture;
//         sampleCount- MSAA sample count for the temporary staging RTs.
private bool PopulateLayer(int mipLevels, bool isHdr, OVRPlugin.Sizei size, int sampleCount)
{
    bool ret = false;
    RenderTextureFormat rtFormat = (isHdr) ? RenderTextureFormat.ARGBHalf : RenderTextureFormat.ARGB32;
    for (int eyeId = 0; eyeId < texturesPerStage; ++eyeId)
    {
        // Array layout packs both eyes into one texture; other layouts use element 0.
        int dstElement = (layout == OVRPlugin.LayerLayout.Array) ? eyeId : 0;
        int stage = frameIndex % stageCount;
        Texture et = layerTextures[eyeId].swapChain[stage];
        if (et == null)
        {
            continue;
        }
        for (int mip = 0; mip < mipLevels; ++mip)
        {
#if UNITY_2017_1_1 || UNITY_2017_2_OR_NEWER
            // Clamp shifted mip dimensions to at least 1 texel.
            int width = size.w >> mip;
            if (width < 1)
            {
                width = 1;
            }
            int height = size.h >> mip;
            if (height < 1)
            {
                height = 1;
            }
            RenderTextureDescriptor descriptor = new RenderTextureDescriptor(width, height, rtFormat, 0);
            descriptor.msaaSamples = sampleCount;
            descriptor.useMipMap = true;
            descriptor.autoGenerateMips = false;
            var tempRTDst = RenderTexture.GetTemporary(descriptor);
#else
            var tempRTDst = RenderTexture.GetTemporary(size.w >> mip, size.h >> mip, 0, rtFormat, RenderTextureReadWrite.Default, sampleCount);
#endif
            if (!tempRTDst.IsCreated())
            {
                tempRTDst.Create();
            }
            tempRTDst.DiscardContents();
            if (currentOverlayShape != OverlayShape.Cubemap && currentOverlayShape != OverlayShape.OffcenterCubemap)
            {
#if UNITY_ANDROID && !UNITY_EDITOR
                // Android: direct GPU copy when source and destination cubemap formats match.
                if (((textures[eyeId] as Cubemap) != null) && ((et as Cubemap) != null) && ((textures[eyeId] as Cubemap).format == (et as Cubemap).format))
                {
                    Graphics.CopyTexture(textures[eyeId], 0, mip, et, 0, mip);
                }
                else
                {
                    Graphics.Blit(textures[eyeId], tempRTDst); //Resolve, decompress, swizzle, etc not handled by simple CopyTexture.
                    Graphics.CopyTexture(tempRTDst, 0, 0, et, dstElement, mip);
                }
#else
                // The PC compositor uses premultiplied alpha, so multiply it here.
                Graphics.Blit(textures[eyeId], tempRTDst, premultiplyMaterial);
                Graphics.CopyTexture(tempRTDst, 0, 0, et, dstElement, mip);
#endif
            }
#if UNITY_2017_1_OR_NEWER
            else // Cubemap
            {
                var tempRTSrc = RenderTexture.GetTemporary(size.w >> mip, size.h >> mip, 0, rtFormat, RenderTextureReadWrite.Default, sampleCount);
                if (!tempRTSrc.IsCreated())
                {
                    tempRTSrc.Create();
                }
                tempRTSrc.DiscardContents();
                for (int face = 0; face < 6; ++face)
                {
#if UNITY_ANDROID && !UNITY_EDITOR
                    if ((textures[eyeId] as Cubemap).format == (et as Cubemap).format)
                    {
                        Graphics.CopyTexture(textures[eyeId], face, mip, et, 0, mip);
                    }
                    else
                    {
                        //HACK: It would be much more efficient to blit directly from textures[eyeId] to et, but Unity's API doesn't support that.
                        //Suggest using a native plugin to render directly to a cubemap layer for 360 video, etc.
                        Graphics.CopyTexture(textures[eyeId], face, mip, tempRTSrc, 0, 0);
                        Graphics.Blit(tempRTSrc, tempRTDst);
                        Graphics.CopyTexture(tempRTDst, 0, 0, et, face, mip);
                    }
#else
                    //HACK: It would be much more efficient to blit directly from textures[eyeId] to et, but Unity's API doesn't support that.
                    //Suggest using a native plugin to render directly to a cubemap layer for 360 video, etc.
                    Graphics.CopyTexture(textures[eyeId], face, mip, tempRTSrc, 0, 0);
                    // The PC compositor uses premultiplied alpha, so multiply it here.
                    Graphics.Blit(tempRTSrc, tempRTDst, premultiplyMaterial);
                    Graphics.CopyTexture(tempRTDst, 0, 0, et, face, mip);
#endif
                }
                RenderTexture.ReleaseTemporary(tempRTSrc);
            }
#endif
            RenderTexture.ReleaseTemporary(tempRTDst);
            ret = true;
        }
    }
    return(ret);
}
// "ScanLine" post effect: 1) Sobel edge detection on scene depth, 2) a world-space
// distance field from a target point (reconstructed via the inverse VP matrix),
// 3) edge detection on that distance field, 4) a final pass mixing both edge images
// over the scene color. Intermediates live in three temporary float RTs.
public override void Render(PostProcessRenderContext context)
{
    var cmd = context.command;
    cmd.BeginSample("ScanLine");
    RenderTextureDescriptor desc = new RenderTextureDescriptor(context.width, context.height);
    desc.enableRandomWrite = true; // compute kernels write these via UAV
    //cmd.GetTemporaryRT(tempCanvas, desc);
    desc.graphicsFormat = UnityEngine.Experimental.Rendering.GraphicsFormat.R32G32B32A32_SFloat;
    cmd.GetTemporaryRT(tempRTID1, desc);
    cmd.GetTemporaryRT(tempRTID2, desc);
    cmd.GetTemporaryRT(tempRTID3, desc);
    // NOTE(review): uses Camera.main rather than the camera driving this context —
    // confirm this effect is only ever used on the main camera.
    Matrix4x4 V = Camera.main.worldToCameraMatrix;
    Matrix4x4 P = GL.GetGPUProjectionMatrix(Camera.main.projectionMatrix, true);
    Matrix4x4 VP = P * V;
    Matrix4x4 InvMVP = VP.inverse;
    // depth edge
    cmd.SetComputeIntParam(depthEdgeShader, "Width", context.width - 1);
    cmd.SetComputeIntParam(depthEdgeShader, "Height", context.height - 1);
    cmd.SetComputeFloatParam(depthEdgeShader, "EdgeStrength", settings.DepthEdgeStrength.value);
    cmd.SetComputeTextureParam(depthEdgeShader, sobelForDepthKernelID, "Source", BuiltinRenderTextureType.Depth);// or ResolvedDepth
    cmd.SetComputeTextureParam(depthEdgeShader, sobelForDepthKernelID, "Result", tempRTID1);
    // NOTE(review): dispatches width x height thread GROUPS — assumes the kernels'
    // [numthreads] is (1,1,1); confirm against the compute shader.
    cmd.DispatchCompute(depthEdgeShader, sobelForDepthKernelID, context.width, context.height, 1);
    // distance field
    cmd.SetComputeIntParam(distanceFieldShader, "Width", context.width - 1);
    cmd.SetComputeIntParam(distanceFieldShader, "Height", context.height - 1);
    cmd.SetComputeFloatParam(distanceFieldShader, "DistanceScale", settings.DistanceScale.value);
    cmd.SetComputeMatrixParam(distanceFieldShader, "InvVPMatrix", InvMVP);
    var tar = settings.Target.value;
    cmd.SetComputeFloatParams(distanceFieldShader, "Target", new float[3] { tar.x, tar.y, tar.z });
    cmd.SetComputeTextureParam(distanceFieldShader, distanceFromTargetKernelID, "Source", BuiltinRenderTextureType.Depth);// or ResolvedDepth
    cmd.SetComputeTextureParam(distanceFieldShader, distanceFromTargetKernelID, "Result", tempRTID2);
    cmd.DispatchCompute(distanceFieldShader, distanceFromTargetKernelID, context.width, context.height, 1);
    // distance field edge
    cmd.SetComputeFloatParam(depthEdgeShader, "EdgeStrength", settings.DistanceEdgeStrength.value);
    cmd.SetComputeFloatParam(depthEdgeShader, "DistanceThresold", settings.DistanceThresold.value);
    cmd.SetComputeTextureParam(depthEdgeShader, sobelForDisatnceKernelID, "Source", tempRTID2);// or ResolvedDepth
    cmd.SetComputeTextureParam(depthEdgeShader, sobelForDisatnceKernelID, "Result", tempRTID3);
    cmd.DispatchCompute(depthEdgeShader, sobelForDisatnceKernelID, context.width, context.height, 1);
    // mix color
    var depthColor = settings.DepthEdgeColor.value;
    cmd.SetComputeFloatParams(colorMixerShader, "DepthEdgeColor", new float[3] { depthColor.r, depthColor.g, depthColor.b });
    var distanceColor = settings.DistanceEdgeColor.value;
    cmd.SetComputeFloatParams(colorMixerShader, "DistanceEdgeColor", new float[3] { distanceColor.r, distanceColor.g, distanceColor.b });
    cmd.SetComputeTextureParam(colorMixerShader, colroMixerKernelID, "ColorSource", context.source);
    cmd.SetComputeTextureParam(colorMixerShader, colroMixerKernelID, "DepthSource", tempRTID1);
    cmd.SetComputeTextureParam(colorMixerShader, colroMixerKernelID, "DistanceSource", tempRTID3);
    // tempRTID2 is reused as the mixer output; its distance-field contents were already
    // consumed by the edge pass above.
    cmd.SetComputeTextureParam(colorMixerShader, colroMixerKernelID, "Result", tempRTID2);
    cmd.DispatchCompute(colorMixerShader, colroMixerKernelID, context.width, context.height, 1);
    cmd.Blit(tempRTID2, context.destination);
    cmd.ReleaseTemporaryRT(tempRTID1);
    cmd.ReleaseTemporaryRT(tempRTID2);
    cmd.ReleaseTemporaryRT(tempRTID3);
    cmd.EndSample("ScanLine");
}
// Scatters the edited alphamap region back from the paint context's destination RT into
// each touched terrain tile's alphamap textures. For every tile and every alphamap texture,
// a masked composite (old alphamap + edited layer channel) is drawn with immediate-mode GL
// into a temp RT, then written back either via GPU CopyTexture (+mip regeneration) or via
// CPU ReadPixels, depending on platform support.
public void ScatterAlphamap(string editorUndoName)
{
    // One mask per alphamap channel (RGBA) used to select which channel receives the paint.
    Vector4[] layerMasks = { new Vector4(1, 0, 0, 0), new Vector4(0, 1, 0, 0), new Vector4(0, 0, 1, 0), new Vector4(0, 0, 0, 1) };
    Material copyTerrainLayerMaterial = TerrainPaintUtility.GetCopyTerrainLayerMaterial();
    for (int i = 0; i < m_TerrainTiles.Count; i++)
    {
        TerrainTile terrainTile = m_TerrainTiles[i];
        if (terrainTile.clippedLocal.width == 0 || terrainTile.clippedLocal.height == 0)
        {
            continue;
        }
        // Give listeners (e.g. undo recording) a chance to run before the tile is modified.
        if (onTerrainTileBeforePaint != null)
        {
            onTerrainTileBeforePaint(terrainTile, ToolAction.PaintTexture, editorUndoName);
        }
        var rtdesc = new RenderTextureDescriptor(destinationRenderTexture.width, destinationRenderTexture.height, RenderTextureFormat.ARGB32);
        rtdesc.sRGB = false;
        rtdesc.useMipMap = false;
        rtdesc.autoGenerateMips = false;
        RenderTexture destTarget = RenderTexture.GetTemporary(rtdesc);
        RenderTexture.active = destTarget;
        // Pixel rect of this tile's edited region within the paint context.
        var writeRect = new RectInt(
            terrainTile.clippedLocal.x + terrainTile.rect.x - pixelRect.x + terrainTile.writeOffset.x,
            terrainTile.clippedLocal.y + terrainTile.rect.y - pixelRect.y + terrainTile.writeOffset.y,
            terrainTile.clippedLocal.width,
            terrainTile.clippedLocal.height);
        // Same region in normalized UVs of the paint context.
        var readRect = new Rect(
            writeRect.x / (float)pixelRect.width,
            writeRect.y / (float)pixelRect.height,
            writeRect.width / (float)pixelRect.width,
            writeRect.height / (float)pixelRect.height);
        destinationRenderTexture.filterMode = FilterMode.Point;
        for (int j = 0; j < terrainTile.terrain.terrainData.alphamapTextureCount; j++)
        {
            Texture2D sourceTex = terrainTile.terrain.terrainData.alphamapTextures[j];
            if ((sourceTex.width != targetTextureWidth) || (sourceTex.height != targetTextureHeight))
            {
                Debug.LogWarning("PaintContext alphamap operations must use the same resolution for all Terrains - mismatched Terrains are ignored.", terrainTile.terrain);
                continue;
            }
            int mapIndex = terrainTile.mapIndex;
            int channelIndex = terrainTile.channelIndex;
            // Tile-local region in UVs of this alphamap texture.
            Rect combineRect = new Rect(
                terrainTile.clippedLocal.x / (float)sourceTex.width,
                terrainTile.clippedLocal.y / (float)sourceTex.height,
                terrainTile.clippedLocal.width / (float)sourceTex.width,
                terrainTile.clippedLocal.height / (float)sourceTex.height);
            copyTerrainLayerMaterial.SetTexture("_MainTex", destinationRenderTexture);
            copyTerrainLayerMaterial.SetTexture("_OldAlphaMapTexture", sourceRenderTexture);
            copyTerrainLayerMaterial.SetTexture("_AlphaMapTexture", sourceTex);
            // Only the alphamap texture/channel being painted gets the mask; others pass zero.
            copyTerrainLayerMaterial.SetVector("_LayerMask", j == mapIndex ? layerMasks[channelIndex] : Vector4.zero);
            copyTerrainLayerMaterial.SetPass(1);
            // Draw the composite quad in pixel space with two UV channels:
            // channel 0 samples the paint context, channel 1 samples the tile's alphamap.
            GL.PushMatrix();
            GL.LoadOrtho();
            GL.LoadPixelMatrix(0, destTarget.width, 0, destTarget.height);
            GL.Begin(GL.QUADS);
            GL.Color(new Color(1.0f, 1.0f, 1.0f, 1.0f));
            GL.MultiTexCoord2(0, readRect.x, readRect.y);
            GL.MultiTexCoord2(1, combineRect.x, combineRect.y);
            GL.Vertex3(writeRect.x, writeRect.y, 0.0f);
            GL.MultiTexCoord2(0, readRect.x, readRect.yMax);
            GL.MultiTexCoord2(1, combineRect.x, combineRect.yMax);
            GL.Vertex3(writeRect.x, writeRect.yMax, 0.0f);
            GL.MultiTexCoord2(0, readRect.xMax, readRect.yMax);
            GL.MultiTexCoord2(1, combineRect.xMax, combineRect.yMax);
            GL.Vertex3(writeRect.xMax, writeRect.yMax, 0.0f);
            GL.MultiTexCoord2(0, readRect.xMax, readRect.y);
            GL.MultiTexCoord2(1, combineRect.xMax, combineRect.y);
            GL.Vertex3(writeRect.xMax, writeRect.y, 0.0f);
            GL.End();
            GL.PopMatrix();
            if (TerrainPaintUtility.paintTextureUsesCopyTexture)
            {
                // GPU path: compose into a mip-chained RT, regenerate mips, then copy back.
                var rtdesc2 = new RenderTextureDescriptor(sourceTex.width, sourceTex.height, RenderTextureFormat.ARGB32);
                rtdesc2.sRGB = false;
                rtdesc2.useMipMap = true;
                rtdesc2.autoGenerateMips = false;
                var mips = RenderTexture.GetTemporary(rtdesc2);
                if (!mips.IsCreated())
                {
                    mips.Create();
                }
                // Composes mip0 in a RT with full mipchain.
                Graphics.CopyTexture(sourceTex, 0, 0, mips, 0, 0);
                Graphics.CopyTexture(destTarget, 0, 0, writeRect.x, writeRect.y, writeRect.width, writeRect.height, mips, 0, 0, terrainTile.clippedLocal.x, terrainTile.clippedLocal.y);
                mips.GenerateMips();
                // Copy them into sourceTex.
                Graphics.CopyTexture(mips, sourceTex);
                RenderTexture.ReleaseTemporary(mips);
            }
            else
            {
                // CPU path: ReadPixels from destTarget into the alphamap texture.
                // Metal/GL have a bottom-left origin; other APIs need the Y flip below.
                GraphicsDeviceType deviceType = SystemInfo.graphicsDeviceType;
                if (deviceType == GraphicsDeviceType.Metal || deviceType == GraphicsDeviceType.OpenGLCore)
                {
                    sourceTex.ReadPixels(new Rect(writeRect.x, writeRect.y, writeRect.width, writeRect.height), terrainTile.clippedLocal.x, terrainTile.clippedLocal.y);
                }
                else
                {
                    sourceTex.ReadPixels(new Rect(writeRect.x, destTarget.height - writeRect.y - writeRect.height, writeRect.width, writeRect.height), terrainTile.clippedLocal.x, terrainTile.clippedLocal.y);
                }
                sourceTex.Apply();
            }
        }
        RenderTexture.active = null;
        RenderTexture.ReleaseTemporary(destTarget);
        OnTerrainPainted(terrainTile, ToolAction.PaintTexture);
    }
}
// Forwards configuration to the base pass unchanged; this override exists only to keep
// the hook point explicit (no pass-specific targets or clears are needed here).
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
    base.Configure(cmd, cameraTextureDescriptor);
}
/// <summary>Creates a new <see cref="RenderTexture"/> from the given descriptor.</summary>
/// <param name="rtDsc">Descriptor specifying size, format, and other texture properties.</param>
/// <returns>The newly constructed (not yet created-on-GPU) render texture.</returns>
public static RenderTexture CreateRenderTexture(RenderTextureDescriptor rtDsc) => new RenderTexture(rtDsc);
/// <inheritdoc/>
// Renders volumetric sun shafts: extracts a sun mask from the sky (pass 2), applies an
// iterative radial blur toward the sun position (pass 1, optionally finished by passes
// 5/6), then composites the blurred shafts over the camera color (pass 0 screen-blend
// or pass 4 additive).
//
// FIX: the CommandBuffer was previously acquired from CommandBufferPool BEFORE the
// RenderSettings.sun / positiveZ early-outs, so it leaked (never released) whenever
// those paths returned. All early-out checks now run before the buffer is acquired.
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
    if (!renderingData.cameraData.postProcessEnabled) { return; }
    var stack = VolumeManager.instance.stack;
    m_SunShafts = stack.GetComponent<SunShafts>();
    if (m_SunShafts == null) { return; }
    if (!m_SunShafts.IsActive()) { return; }
    if (sunShaftsMaterial == null) { return; }
    // No sun light configured: nothing to render.
    if (RenderSettings.sun == null) { return; }
    Vector3 vSun;
    bool positiveZ = UpdateSun(renderingData.cameraData.camera, out vSun);
    // Sun behind the camera: shafts would be invalid, skip the effect.
    if (!positiveZ) { return; }

    // Half or quarter resolution for the blur chain, depending on quality setting.
    int divider = 4;
    if (m_SunShafts.resolution == SunShaftsResolution.Normal) { divider = 2; }
    float sradius = m_SunShafts.sunShaftBlurRadius.value;
    RenderTextureDescriptor desc = renderingData.cameraData.cameraTargetDescriptor;
    var material = sunShaftsMaterial;
    var destination = currentTarget;

    var cmd = CommandBufferPool.Get(k_RenderTag);
    cmd.SetGlobalTexture(MainTexId, destination);
    // Full-resolution copy of the camera color used as blend source for the final pass.
    cmd.GetTemporaryRT(TempTargetId, desc.width, desc.height, 0, FilterMode.Bilinear, desc.colorFormat);
    cmd.Blit(destination, TempTargetId);
    int rtW = desc.width / divider;
    int rtH = desc.height / divider;
    RenderTexture lrColorB;
    RenderTexture lrDepthBuffer = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
    // mask out everything except the skybox
    // we have 2 methods, one of which requires depth buffer support, the other one is just comparing images
    material.SetVector("_SunPosition", new Vector4(vSun.x, vSun.y, vSun.z, m_SunShafts.maxRadius.value));
    cmd.Blit(TempTargetId, lrDepthBuffer, material, 2);
    // paint a small black small border to get rid of clamping problems
    //DrawBorder(lrDepthBuffer, Color.clear);
    // radial blur:
    m_SunShafts.radialBlurIterations.value = Mathf.Clamp(m_SunShafts.radialBlurIterations.value, 1, 4);
    int iter = m_SunShafts.radialBlurIterations.value;
    float ofs = sradius * (1.0f / 768.0f);
    material.SetVector("_BlurRadius4", new Vector4(ofs, ofs, 0.0f, 0.0f));
    material.SetVector("_SunPosition", new Vector4(vSun.x, vSun.y, vSun.z, m_SunShafts.maxRadius.value));
    for (int it2 = 0; it2 < iter; it2++)
    {
        // each iteration takes 2 * 6 samples
        // we update _BlurRadius each time to cheaply get a very smooth look
        lrColorB = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrDepthBuffer, lrColorB, material, 1);
        RenderTexture.ReleaseTemporary(lrDepthBuffer);
        ofs = sradius * (((it2 * 2.0f + 1.0f) * 6.0f)) / 768.0f;
        material.SetVector("_BlurRadius4", new Vector4(ofs, ofs, 0.0f, 0.0f));
        lrDepthBuffer = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrColorB, lrDepthBuffer, material, 1);
        RenderTexture.ReleaseTemporary(lrColorB);
        ofs = sradius * (((it2 * 2.0f + 2.0f) * 6.0f)) / 768.0f;
        material.SetVector("_BlurRadius4", new Vector4(ofs, ofs, 0.0f, 0.0f));
    }
    // Optional extra blur pair for a softer final result.
    if (m_SunShafts.lastBlur.value)
    {
        lrColorB = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrDepthBuffer, lrColorB, material, 5);
        RenderTexture.ReleaseTemporary(lrDepthBuffer);
        lrDepthBuffer = RenderTexture.GetTemporary(rtW, rtH, 0, desc.colorFormat);
        cmd.Blit(lrColorB, lrDepthBuffer, material, 6);
        RenderTexture.ReleaseTemporary(lrColorB);
    }
    // put together:
    material.SetTexture("_ColorBuffer", lrDepthBuffer);
    cmd.Blit(TempTargetId, destination, material, (m_SunShafts.screenBlendMode == ShaftsScreenBlendMode.Screen) ? 0 : 4);
    RenderTexture.ReleaseTemporary(lrDepthBuffer);
    cmd.ReleaseTemporaryRT(TempTargetId);
    context.ExecuteCommandBuffer(cmd);
    CommandBufferPool.Release(cmd);
}
/// <summary>Renders a vector sprite to Texture2D.</summary>
/// <param name="sprite">The sprite to render</param>
/// <param name="width">The desired width of the resulting texture</param>
/// <param name="height">The desired height of the resulting texture</param>
/// <param name="mat">The material used to render the sprite</param>
/// <param name="antiAliasing">The number of samples per pixel for anti-aliasing</param>
/// <param name="expandEdges">When true, expand the edges to avoid a dark banding effect caused by filtering. This is slower to render and uses more graphics memory.</param>
/// <returns>A Texture2D object containing the rendered vector sprite, or null when width or height is non-positive</returns>
public static Texture2D RenderSpriteToTexture2D(Sprite sprite, int width, int height, Material mat, int antiAliasing = 1, bool expandEdges = false)
{
    if (width <= 0 || height <= 0)
    {
        return(null);
    }
    RenderTexture tex = null;
    // Preserve and later restore the caller's active render target.
    var oldActive = RenderTexture.active;
    var desc = new RenderTextureDescriptor(width, height, RenderTextureFormat.ARGB32, 0)
    {
        msaaSamples = 1, sRGB = true
    };
    if (expandEdges)
    {
        // Draw the sprite normally to be used as a background, no-antialiasing
        var normalTex = RenderTexture.GetTemporary(desc);
        RenderTexture.active = normalTex;
        RenderSprite(sprite, mat);
        // Expand the edges and make completely transparent
        if (s_ExpandEdgesMat == null)
        {
            s_ExpandEdgesMat = new Material(Shader.Find("Hidden/VectorExpandEdges"));
        }
        var expandTex = RenderTexture.GetTemporary(desc);
        RenderTexture.active = expandTex;
        GL.Clear(false, true, Color.clear);
        Graphics.Blit(normalTex, expandTex, s_ExpandEdgesMat, 0);
        RenderTexture.ReleaseTemporary(normalTex);
        // Draw the sprite again, but clear with the texture rendered in the previous step,
        // this will make the bilinear filter to interpolate the colors with values different
        // than "transparent black", which causes black-ish outlines around the shape.
        desc.msaaSamples = antiAliasing;
        tex = RenderTexture.GetTemporary(desc);
        RenderTexture.active = tex;
        Graphics.Blit(expandTex, tex);
        RenderTexture.ReleaseTemporary(expandTex);
        // Use the expanded texture to clear the buffer
        RenderTexture.active = tex;
        RenderSprite(sprite, mat, false);
    }
    else
    {
        // Simple path: render directly at the requested anti-aliasing level.
        desc.msaaSamples = antiAliasing;
        tex = RenderTexture.GetTemporary(desc);
        RenderTexture.active = tex;
        RenderSprite(sprite, mat);
    }
    // Read the RT back into a CPU-accessible Texture2D.
    Texture2D copy = new Texture2D(width, height, TextureFormat.RGBA32, false);
    copy.hideFlags = HideFlags.HideAndDontSave;
    copy.ReadPixels(new Rect(0, 0, width, height), 0, 0);
    copy.Apply();
    RenderTexture.active = oldActive;
    RenderTexture.ReleaseTemporary(tex);
    return(copy);
}
/// <summary>
/// Builds the frame's render-pass queue for this (LWRP-style) forward renderer:
/// shadows, optional depth prepass, opaques, skybox, copies, transparents,
/// post-processing and the final blit. Pass enqueue order is significant.
/// </summary>
/// <param name="renderer">Renderer that receives the enqueued passes.</param>
/// <param name="renderingData">Per-frame culling/light/shadow/camera data.</param>
public void Setup(ScriptableRenderer renderer, ref RenderingData renderingData)
{
    Init();
    Camera camera = renderingData.cameraData.camera;
    renderer.SetupPerObjectLightIndices(ref renderingData.cullResults, ref renderingData.lightData);
    RenderTextureDescriptor baseDescriptor = ScriptableRenderer.CreateRenderTextureDescriptor(ref renderingData.cameraData);
    RenderTextureDescriptor shadowDescriptor = baseDescriptor;
    shadowDescriptor.dimension = TextureDimension.Tex2D;

    // Shadow casters are enqueued first; Setup() returns whether the pass is actually needed.
    bool mainLightShadows = false;
    if (renderingData.shadowData.supportsMainLightShadows)
    {
        mainLightShadows = m_MainLightShadowCasterPass.Setup(m_MainLightShadowmap, ref renderingData);
        if (mainLightShadows)
        {
            renderer.EnqueuePass(m_MainLightShadowCasterPass);
        }
    }

    if (renderingData.shadowData.supportsAdditionalLightShadows)
    {
        bool additionalLightShadows = m_AdditionalLightsShadowCasterPass.Setup(m_AdditionalLightsShadowmap, ref renderingData, renderer.maxVisibleAdditionalLights);
        if (additionalLightShadows)
        {
            renderer.EnqueuePass(m_AdditionalLightsShadowCasterPass);
        }
    }

    bool resolveShadowsInScreenSpace = mainLightShadows && renderingData.shadowData.requiresScreenSpaceShadowResolve;
    // Depth prepass is needed for screen-space shadow resolve, scene view, or when the
    // camera wants a depth texture but depth can't be copied after opaques.
    bool requiresDepthPrepass = resolveShadowsInScreenSpace || renderingData.cameraData.isSceneViewCamera ||
        (renderingData.cameraData.requiresDepthTexture && (!CanCopyDepth(ref renderingData.cameraData)));
    // For now VR requires a depth prepass until we figure out how to properly resolve texture2DMS in stereo
    requiresDepthPrepass |= renderingData.cameraData.isStereoEnabled;

    renderer.EnqueuePass(m_SetupForwardRenderingPass);

    // Collect user-injected passes (IAfterX components) attached to the camera.
    camera.GetComponents(m_AfterDepthpasses);
    camera.GetComponents(m_AfterOpaquePasses);
    camera.GetComponents(m_AfterOpaquePostProcessPasses);
    camera.GetComponents(m_AfterSkyboxPasses);
    camera.GetComponents(m_AfterTransparentPasses);
    camera.GetComponents(m_AfterRenderPasses);

    if (requiresDepthPrepass)
    {
        m_DepthOnlyPass.Setup(baseDescriptor, m_DepthTexture, SampleCount.One);
        renderer.EnqueuePass(m_DepthOnlyPass);

        foreach (var pass in m_AfterDepthpasses)
        {
            renderer.EnqueuePass(pass.GetPassToEnqueue(m_DepthOnlyPass.descriptor, m_DepthTexture));
        }
    }

    if (resolveShadowsInScreenSpace)
    {
        m_ScreenSpaceShadowResolvePass.Setup(baseDescriptor, m_ScreenSpaceShadowmap);
        renderer.EnqueuePass(m_ScreenSpaceShadowResolvePass);
    }

    // Any injected pass forces rendering into an intermediate texture, as does a
    // required backbuffer blit or alpha kill.
    bool requiresRenderToTexture = RequiresIntermediateColorTexture(ref renderingData.cameraData, baseDescriptor) ||
        m_AfterDepthpasses.Count != 0 ||
        m_AfterOpaquePasses.Count != 0 ||
        m_AfterOpaquePostProcessPasses.Count != 0 ||
        m_AfterSkyboxPasses.Count != 0 ||
        m_AfterTransparentPasses.Count != 0 ||
        m_AfterRenderPasses.Count != 0 ||
        Display.main.requiresBlitToBackbuffer ||
        renderingData.killAlphaInFinalBlit;

    RenderTargetHandle colorHandle = RenderTargetHandle.CameraTarget;
    RenderTargetHandle depthHandle = RenderTargetHandle.CameraTarget;
    var sampleCount = (SampleCount)renderingData.cameraData.msaaSamples;

    if (requiresRenderToTexture)
    {
        colorHandle = m_ColorAttachment;
        depthHandle = m_DepthAttachment;
        m_CreateLightweightRenderTexturesPass.Setup(baseDescriptor, colorHandle, depthHandle, sampleCount);
        renderer.EnqueuePass(m_CreateLightweightRenderTexturesPass);
    }

    if (renderingData.cameraData.isStereoEnabled)
    {
        renderer.EnqueuePass(m_BeginXrRenderingPass);
    }

    var perObjectFlags = ScriptableRenderer.GetPerObjectLightFlags(renderingData.lightData.mainLightIndex, renderingData.lightData.additionalLightsCount);
    m_SetupLightweightConstants.Setup(renderer.maxVisibleAdditionalLights, renderer.perObjectLightIndices);
    renderer.EnqueuePass(m_SetupLightweightConstants);

    m_RenderOpaqueForwardPass.Setup(baseDescriptor, colorHandle, depthHandle, ScriptableRenderer.GetCameraClearFlag(camera), camera.backgroundColor, perObjectFlags);
    renderer.EnqueuePass(m_RenderOpaqueForwardPass);

    foreach (var pass in m_AfterOpaquePasses)
    {
        renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
    }

    // Opaque-only post-processing renders into a separate attachment which then
    // becomes the active color handle for the rest of the frame.
    if (renderingData.cameraData.postProcessEnabled &&
        renderingData.cameraData.postProcessLayer.HasOpaqueOnlyEffects(renderer.postProcessingContext))
    {
        m_CreatePostOpaqueColorPass.Setup(baseDescriptor, m_ColorAttachmentAfterOpaquePost, sampleCount);
        renderer.EnqueuePass(m_CreatePostOpaqueColorPass);
        m_OpaquePostProcessPass.Setup(baseDescriptor, colorHandle, m_ColorAttachmentAfterOpaquePost, true, false);
        renderer.EnqueuePass(m_OpaquePostProcessPass);
        colorHandle = m_ColorAttachmentAfterOpaquePost;

        foreach (var pass in m_AfterOpaquePostProcessPasses)
        {
            renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
        }
    }

    if (camera.clearFlags == CameraClearFlags.Skybox)
    {
        m_DrawSkyboxPass.Setup(colorHandle, depthHandle);
        renderer.EnqueuePass(m_DrawSkyboxPass);
    }

    foreach (var pass in m_AfterSkyboxPasses)
    {
        renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
    }

    // Depth is copied after opaques only when no prepass already produced it.
    if (renderingData.cameraData.requiresDepthTexture && !requiresDepthPrepass)
    {
        m_CopyDepthPass.Setup(depthHandle, m_DepthTexture);
        renderer.EnqueuePass(m_CopyDepthPass);
    }

    if (renderingData.cameraData.requiresOpaqueTexture)
    {
        m_CopyColorPass.Setup(colorHandle, m_OpaqueColor);
        renderer.EnqueuePass(m_CopyColorPass);
    }

    m_RenderTransparentForwardPass.Setup(baseDescriptor, colorHandle, depthHandle, perObjectFlags);
    renderer.EnqueuePass(m_RenderTransparentForwardPass);

    foreach (var pass in m_AfterTransparentPasses)
    {
        renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
    }

#if UNITY_EDITOR
    m_LitGizmoRenderingPass.Setup(true);
    renderer.EnqueuePass(m_LitGizmoRenderingPass);
#endif

    bool afterRenderExists = m_AfterRenderPasses.Count != 0;

    // if we have additional filters
    // we need to stay in a RT
    if (afterRenderExists)
    {
        // perform post with src / dest the same
        if (renderingData.cameraData.postProcessEnabled)
        {
            m_CreatePostTransparentColorPass.Setup(baseDescriptor, m_ColorAttachmentAfterTransparentPost, sampleCount);
            renderer.EnqueuePass(m_CreatePostTransparentColorPass);

            m_PostProcessPass.Setup(baseDescriptor, colorHandle, m_ColorAttachmentAfterTransparentPost, false, false);
            renderer.EnqueuePass(m_PostProcessPass);
            colorHandle = m_ColorAttachmentAfterTransparentPost;
        }

        //execute after passes
        foreach (var pass in m_AfterRenderPasses)
        {
            renderer.EnqueuePass(pass.GetPassToEnqueue(baseDescriptor, colorHandle, depthHandle));
        }

        //now blit into the final target
        if (colorHandle != RenderTargetHandle.CameraTarget)
        {
            m_FinalBlitPass.Setup(baseDescriptor, colorHandle, Display.main.requiresSrgbBlitToBackbuffer, renderingData.killAlphaInFinalBlit);
            renderer.EnqueuePass(m_FinalBlitPass);
        }
    }
    else
    {
        if (renderingData.cameraData.postProcessEnabled)
        {
            // Post-processing can write straight to the camera target.
            m_PostProcessPass.Setup(baseDescriptor, colorHandle, RenderTargetHandle.CameraTarget, false, renderingData.cameraData.camera.targetTexture == null);
            renderer.EnqueuePass(m_PostProcessPass);
        }
        else if (colorHandle != RenderTargetHandle.CameraTarget)
        {
            m_FinalBlitPass.Setup(baseDescriptor, colorHandle, Display.main.requiresSrgbBlitToBackbuffer, renderingData.killAlphaInFinalBlit);
            renderer.EnqueuePass(m_FinalBlitPass);
        }
    }

    if (renderingData.cameraData.isStereoEnabled)
    {
        renderer.EnqueuePass(m_EndXrRenderingPass);
    }

#if UNITY_EDITOR
    m_UnlitGizmoRenderingPass.Setup(false);
    renderer.EnqueuePass(m_UnlitGizmoRenderingPass);

    if (renderingData.cameraData.isSceneViewCamera)
    {
        m_SceneViewDepthCopyPass.Setup(m_DepthTexture);
        renderer.EnqueuePass(m_SceneViewDepthCopyPass);
    }
#endif
}
/// <summary>
/// Copies the source overlay textures into the compositor swap-chain textures for
/// the given swap-chain stage, one mip level at a time, blitting through a temporary
/// render texture to handle resolve/decompress/swizzle and color-space conversion.
/// </summary>
/// <param name="mipLevels">Number of mip levels to populate per eye texture.</param>
/// <param name="isHdr">When true, uses ARGBHalf intermediates and skips sRGB conversion.</param>
/// <param name="size">Base (mip 0) texture size; each mip halves it, clamped to 1.</param>
/// <param name="sampleCount">MSAA sample count for the intermediate render texture.</param>
/// <param name="stage">Index into each eye's swap chain.</param>
/// <returns>True when at least one mip of one eye texture was populated.</returns>
private bool PopulateLayer(int mipLevels, bool isHdr, OVRPlugin.Sizei size, int sampleCount, int stage)
{
    // Externally-supplied surfaces are filled by the platform, nothing to do here.
    if (isExternalSurface)
    {
        return(true);
    }

    bool ret = false;

    RenderTextureFormat rtFormat = (isHdr) ? RenderTextureFormat.ARGBHalf : RenderTextureFormat.ARGB32;

    for (int eyeId = 0; eyeId < texturesPerStage; ++eyeId)
    {
        Texture et = layerTextures[eyeId].swapChain[stage];
        if (et == null)
        {
            continue;
        }

        for (int mip = 0; mip < mipLevels; ++mip)
        {
            // Mip dimensions, clamped so deep mips never reach zero.
            int width = size.w >> mip;
            if (width < 1)
            {
                width = 1;
            }
            int height = size.h >> mip;
            if (height < 1)
            {
                height = 1;
            }
// NOTE(review): "UNITY_2017_1_1" looks like a nonstandard define (vs UNITY_2017_1_OR_NEWER) — confirm it is set by the build scripts.
#if UNITY_2017_1_1 || UNITY_2017_2_OR_NEWER
            RenderTextureDescriptor descriptor = new RenderTextureDescriptor(width, height, rtFormat, 0);
            descriptor.msaaSamples = sampleCount;
            descriptor.useMipMap = true;
            descriptor.autoGenerateMips = false;
            descriptor.sRGB = false;

            var tempRTDst = RenderTexture.GetTemporary(descriptor);
#else
            var tempRTDst = RenderTexture.GetTemporary(width, height, 0, rtFormat, RenderTextureReadWrite.Linear, sampleCount);
#endif

            if (!tempRTDst.IsCreated())
            {
                tempRTDst.Create();
            }

            // Contents are fully overwritten by the blit below; discard avoids a restore.
            tempRTDst.DiscardContents();

            bool dataIsLinear = isHdr || (QualitySettings.activeColorSpace == ColorSpace.Linear);

#if !UNITY_2017_1_OR_NEWER
            var rt = textures[eyeId] as RenderTexture;
            dataIsLinear |= rt != null && rt.sRGB; //HACK: Unity 5.6 and earlier convert to linear on read from sRGB RenderTexture.
#endif

#if UNITY_ANDROID && !UNITY_EDITOR
            dataIsLinear = true; //HACK: Graphics.CopyTexture causes linear->srgb conversion on target write with D3D but not GLES.
#endif

            if (currentOverlayShape != OverlayShape.Cubemap && currentOverlayShape != OverlayShape.OffcenterCubemap)
            {
                tex2DMaterial.SetInt("_linearToSrgb", (!isHdr && dataIsLinear) ? 1 : 0);

#if !UNITY_ANDROID || UNITY_EDITOR
                // The PC compositor uses premultiplied alpha, so multiply it here.
                tex2DMaterial.SetInt("_premultiply", 1);
#endif
                //Resolve, decompress, swizzle, etc not handled by simple CopyTexture.
                Graphics.Blit(textures[eyeId], tempRTDst, tex2DMaterial);
                Graphics.CopyTexture(tempRTDst, 0, 0, et, 0, mip);
            }
#if UNITY_2017_1_OR_NEWER
            else // Cubemap
            {
                // Blit/copy one cube face at a time; the shader selects the face via _face.
                for (int face = 0; face < 6; ++face)
                {
                    cubeMaterial.SetInt("_linearToSrgb", (!isHdr && dataIsLinear) ? 1 : 0);

#if !UNITY_ANDROID || UNITY_EDITOR
                    // The PC compositor uses premultiplied alpha, so multiply it here.
                    cubeMaterial.SetInt("_premultiply", 1);
#endif
                    cubeMaterial.SetInt("_face", face);
                    //Resolve, decompress, swizzle, etc not handled by simple CopyTexture.
                    Graphics.Blit(textures[eyeId], tempRTDst, cubeMaterial);
                    Graphics.CopyTexture(tempRTDst, 0, 0, et, face, mip);
                }
            }
#endif
            RenderTexture.ReleaseTemporary(tempRTDst);

            ret = true;
        }
    }

    return(ret);
}
/// <summary>
/// Builds this frame's render-pass queue for the forward renderer (URP-style),
/// including a custom weighted-blended OIT accumulate/revealage pair.
/// Enqueue order is significant.
/// </summary>
/// <param name="context">Render context for this frame.</param>
/// <param name="renderingData">Per-frame culling/light/shadow/camera data.</param>
public override void Setup(ScriptableRenderContext context, ref RenderingData renderingData)
{
    Camera camera = renderingData.cameraData.camera;
    RenderTextureDescriptor cameraTargetDescriptor = renderingData.cameraData.cameraTargetDescriptor;
    bool mainLightShadows = m_MainLightShadowCasterPass.Setup(ref renderingData);
    bool additionalLightShadows = m_AdditionalLightsShadowCasterPass.Setup(ref renderingData);
    bool resolveShadowsInScreenSpace = mainLightShadows && renderingData.shadowData.requiresScreenSpaceShadowResolve;

    // Depth prepass is generated in the following cases:
    // - We resolve shadows in screen space
    // - Scene view camera always requires a depth texture. We do a depth pre-pass to simplify it and it shouldn't matter much for editor.
    // - If game or offscreen camera requires it we check if we can copy the depth from the rendering opaques pass and use that instead.
    bool requiresDepthPrepass = renderingData.cameraData.isSceneViewCamera ||
        (renderingData.cameraData.requiresDepthTexture && (!CanCopyDepth(ref renderingData.cameraData)));
    requiresDepthPrepass |= resolveShadowsInScreenSpace;
    bool createColorTexture = RequiresIntermediateColorTexture(ref renderingData, cameraTargetDescriptor) ||
        rendererFeatures.Count != 0;

    // If camera requires depth and there's no depth pre-pass we create a depth texture that can be read
    // later by effect requiring it.
    bool createDepthTexture = renderingData.cameraData.requiresDepthTexture && !requiresDepthPrepass;
    bool postProcessEnabled = renderingData.cameraData.postProcessEnabled;
    bool hasOpaquePostProcess = postProcessEnabled &&
        renderingData.cameraData.postProcessLayer.HasOpaqueOnlyEffects(RenderingUtils.postProcessRenderContext);

    m_ActiveCameraColorAttachment = (createColorTexture) ? m_CameraColorAttachment : RenderTargetHandle.CameraTarget;
    m_ActiveCameraDepthAttachment = (createDepthTexture) ? m_CameraDepthAttachment : RenderTargetHandle.CameraTarget;
    if (createColorTexture || createDepthTexture)
    {
        CreateCameraRenderTarget(context, ref renderingData.cameraData);
    }
    ConfigureCameraTarget(m_ActiveCameraColorAttachment.Identifier(), m_ActiveCameraDepthAttachment.Identifier());

    // Let renderer features inject their passes before the queue is scanned.
    for (int i = 0; i < rendererFeatures.Count; ++i)
    {
        rendererFeatures[i].AddRenderPasses(this, ref renderingData);
    }

    // Remove null passes a feature may have left behind (iterate backwards while removing).
    int count = activeRenderPassQueue.Count;
    for (int i = count - 1; i >= 0; i--)
    {
        if (activeRenderPassQueue[i] == null)
        {
            activeRenderPassQueue.RemoveAt(i);
        }
    }
    bool hasAfterRendering = activeRenderPassQueue.Find(x => x.renderPassEvent == RenderPassEvent.AfterRendering) != null;

    if (mainLightShadows)
    {
        EnqueuePass(m_MainLightShadowCasterPass);
    }

    if (additionalLightShadows)
    {
        EnqueuePass(m_AdditionalLightsShadowCasterPass);
    }

    if (requiresDepthPrepass)
    {
        m_DepthPrepass.Setup(cameraTargetDescriptor, m_DepthTexture);
        EnqueuePass(m_DepthPrepass);
    }

    if (resolveShadowsInScreenSpace)
    {
        m_ScreenSpaceShadowResolvePass.Setup(cameraTargetDescriptor);
        EnqueuePass(m_ScreenSpaceShadowResolvePass);
        if (renderingData.shadowData.ssShadowDownSampleSize > 1)
        {
            m_SSSDownsamplePass.Setup(cameraTargetDescriptor, renderingData);
            EnqueuePass(m_SSSDownsamplePass);
        }
    }

    EnqueuePass(m_RenderOpaqueForwardPass);

    if (hasOpaquePostProcess)
    {
        // NOTE(review): this pass is configured but never enqueued — confirm whether
        // EnqueuePass(m_OpaquePostProcessPass) was intentionally omitted here.
        m_OpaquePostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, m_ActiveCameraColorAttachment);
    }

    //EnqueuePass(m_RenderOpaqueDiscardAndBlendPass);

    if (camera.clearFlags == CameraClearFlags.Skybox && RenderSettings.skybox != null)
    {
        EnqueuePass(m_DrawSkyboxPass);
    }

    // If a depth texture was created we necessarily need to copy it, otherwise we could have render it to a renderbuffer
    if (createDepthTexture)
    {
        m_CopyDepthPass.Setup(m_ActiveCameraDepthAttachment, m_DepthTexture);
        EnqueuePass(m_CopyDepthPass);
    }

    if (renderingData.cameraData.requiresOpaqueTexture)
    {
        m_CopyColorPass.Setup(m_ActiveCameraColorAttachment.Identifier(), m_OpaqueColor);
        EnqueuePass(m_CopyColorPass);
    }

    //OIT Pass: weighted-blended order-independent transparency (accumulate + revealage).
    m_BlendWeightedAccumulatePass.Setup(cameraTargetDescriptor, accumulateHandle, m_DepthTexture);
    //m_BlendWeightedAccumulatePass.Setup(new RenderTextureDescriptor(cameraTargetDescriptor.width, cameraTargetDescriptor.height), accumulateHandle, m_DepthTexture);
    EnqueuePass(m_BlendWeightedAccumulatePass);
    m_BlendWeightedRevealagePass.Setup(cameraTargetDescriptor, revealageHandle, m_DepthTexture);
    //m_BlendWeightedRevealagePass.Setup(new RenderTextureDescriptor(cameraTargetDescriptor.width, cameraTargetDescriptor.height), revealageHandle, m_DepthTexture);
    EnqueuePass(m_BlendWeightedRevealagePass);
    //cameraTargetDescriptor.msaaSamples = msaaSamples;
    //cameraTargetDescriptor.bindMS = bindMS;
    //Blit Feature AddPass(EnqueuePass

    bool afterRenderExists = renderingData.cameraData.captureActions != null || hasAfterRendering;

    // if we have additional filters
    // we need to stay in a RT
    if (afterRenderExists)
    {
        // perform post with src / dest the same
        if (postProcessEnabled)
        {
            m_PostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, m_ActiveCameraColorAttachment);
            EnqueuePass(m_PostProcessPass);
        }

        //now blit into the final target
        if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
        {
            if (renderingData.cameraData.captureActions != null)
            {
                m_CapturePass.Setup(m_ActiveCameraColorAttachment);
                EnqueuePass(m_CapturePass);
            }

            m_FinalBlitPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment);
            EnqueuePass(m_FinalBlitPass);
        }
    }
    else
    {
        if (postProcessEnabled)
        {
            m_PostProcessPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment, RenderTargetHandle.CameraTarget);
            EnqueuePass(m_PostProcessPass);
        }
        else if (m_ActiveCameraColorAttachment != RenderTargetHandle.CameraTarget)
        {
            m_FinalBlitPass.Setup(cameraTargetDescriptor, m_ActiveCameraColorAttachment);
            EnqueuePass(m_FinalBlitPass);
        }
    }

#if UNITY_EDITOR
    if (renderingData.cameraData.isSceneViewCamera)
    {
        m_SceneViewDepthCopyPass.Setup(m_DepthTexture);
        EnqueuePass(m_SceneViewDepthCopyPass);
    }
#endif
}
/// <summary>
/// Sets up the render buffers (intermediate color/depth attachments) for a single camera,
/// deciding whether a depth prepass and/or a separate depth texture is needed, and
/// configuring the active camera targets.
/// </summary>
/// <param name="context">Render context for this frame.</param>
/// <param name="renderingData">Per-frame rendering data.</param>
/// <param name="cameraData">Camera data (also reachable via renderingData; both are used below).</param>
/// <param name="requiresDepthPrepass">Out: true when depth must be produced by a prepass.</param>
/// <param name="createDepthTexture">Out: true when a readable depth texture must be created.</param>
private void RefreshRenderBufferForSingleCamera(ScriptableRenderContext context, ref RenderingData renderingData,
                                                ref CameraData cameraData, out bool requiresDepthPrepass, out bool createDepthTexture)
{
    Camera camera = renderingData.cameraData.camera;
    RenderTextureDescriptor cameraTargetDescriptor = renderingData.cameraData.cameraTargetDescriptor;
    bool applyPostProcessing = cameraData.postProcessEnabled;

    bool isSceneViewCamera = cameraData.isSceneViewCamera;
    bool isPreviewCamera = cameraData.isPreviewCamera;
    bool requiresDepthTexture = cameraData.requiresDepthTexture;
    bool isStereoEnabled = cameraData.isStereoEnabled;

    // Depth prepass is generated in the following cases:
    // - If game or offscreen camera requires it we check if we can copy the depth from the rendering opaques pass and use that instead.
    // - Scene or preview cameras always require a depth texture. We do a depth pre-pass to simplify it and it shouldn't matter much for editor.
    requiresDepthPrepass = requiresDepthTexture && !CanCopyDepth(ref renderingData.cameraData);
    requiresDepthPrepass |= isSceneViewCamera;
    requiresDepthPrepass |= isPreviewCamera;

    // The copying of depth should normally happen after rendering opaques.
    // But if we only require it for post processing or the scene camera then we do it after rendering transparent objects
    m_CopyDepthPass.renderPassEvent = (!requiresDepthTexture && (applyPostProcessing || isSceneViewCamera)) ?
        RenderPassEvent.AfterRenderingTransparents : RenderPassEvent.AfterRenderingOpaques;

    // TODO: CopyDepth pass is disabled in XR due to required work to handle camera matrices in URP.
    // IF this condition is removed make sure the CopyDepthPass.cs is working properly on all XR modes. This requires PureXR SDK integration.
    if (isStereoEnabled && requiresDepthTexture)
    {
        requiresDepthPrepass = true;
    }

    bool isRunningHololens = false;
#if ENABLE_VR && ENABLE_VR_MODULE
    isRunningHololens = UniversalRenderPipeline.IsRunningHololens(camera);
#endif
    bool createColorTexture = RequiresIntermediateColorTexture(ref cameraData);
    // Renderer features force an intermediate color texture, except on HoloLens.
    createColorTexture |= (rendererFeatures.Count != 0 && !isRunningHololens);
    createColorTexture &= !isPreviewCamera;

    // If camera requires depth and there's no depth pre-pass we create a depth texture that can be read later by effect requiring it.
    createDepthTexture = cameraData.requiresDepthTexture && !requiresDepthPrepass;
    // A base camera that is not the last in its stack must keep depth alive for overlays.
    createDepthTexture |= (cameraData.renderType == CameraRenderType.Base && !cameraData.resolveFinalTarget);

#if UNITY_ANDROID || UNITY_WEBGL
    if (SystemInfo.graphicsDeviceType != GraphicsDeviceType.Vulkan)
    {
        // GLES can not use render texture's depth buffer with the color buffer of the backbuffer
        // in such case we create a color texture for it too.
        createColorTexture |= createDepthTexture;
    }
#endif

    // Configure all settings require to start a new camera stack (base camera only)
    if (cameraData.renderType == CameraRenderType.Base)
    {
        m_ActiveCameraColorAttachment = (createColorTexture) ? m_CameraColorAttachment : RenderTargetHandle.CameraTarget;
        m_ActiveCameraDepthAttachment = (createDepthTexture) ? m_CameraDepthAttachment : RenderTargetHandle.CameraTarget;

        bool intermediateRenderTexture = createColorTexture || createDepthTexture;

        // Doesn't create texture for Overlay cameras as they are already overlaying on top of created textures.
        bool createTextures = intermediateRenderTexture;
        if (createTextures)
        {
            CreateCameraRenderTarget(context, ref renderingData.cameraData);
        }

        // if rendering to intermediate render texture we don't have to create msaa backbuffer
        int backbufferMsaaSamples = (intermediateRenderTexture) ? 1 : cameraTargetDescriptor.msaaSamples;

        if (Camera.main == camera && camera.cameraType == CameraType.Game && cameraData.targetTexture == null)
        {
            SetupBackbufferFormat(backbufferMsaaSamples, isStereoEnabled);
        }
    }
    else
    {
        if (m_SplitUICameraAndSceneCameraRenderer)
        {
            RefreshCameraColorAttachment(context, ref renderingData.cameraData);
        }
        else
        {
            // Overlay cameras reuse the attachments created by the base camera.
            m_ActiveCameraColorAttachment = m_CameraColorAttachment;
            m_ActiveCameraDepthAttachment = m_CameraDepthAttachment;
        }
    }

    ConfigureCameraTarget(m_ActiveCameraColorAttachment.Identifier(), m_ActiveCameraDepthAttachment.Identifier());
}
/// <summary>
/// Coroutine that bakes irradiance-probe SH coefficients for every probe in the volume:
/// renders a cubemap per probe, reduces it to coefficients on the GPU, then writes the
/// coefficient buffer to disk and registers the volume in the save target.
/// Fix: removed an unused local (`target`) that was computed but never read.
/// </summary>
/// <returns>Iterator yielded once per baked probe so work is spread across frames.</returns>
public IEnumerator BakeLightmap()
{
    // Cubemap-as-Tex2DArray target (6 slices) that each probe renders into.
    RenderTextureDescriptor texArrayDescriptor = new RenderTextureDescriptor
    {
        autoGenerateMips = false,
        bindMS = false,
        colorFormat = RenderTextureFormat.ARGB32,
        depthBufferBits = 16,
        dimension = TextureDimension.Tex2DArray,
        enableRandomWrite = false,
        height = RESOLUTION,
        width = RESOLUTION,
        memoryless = RenderTextureMemoryless.None,
        msaaSamples = 1,
        shadowSamplingMode = ShadowSamplingMode.None,
        sRGB = false,
        useMipMap = true,
        volumeDepth = 6,
        vrUsage = VRTextureUsage.None
    };
    RenderTexture rt = RenderTexture.GetTemporary(texArrayDescriptor);
    rt.filterMode = FilterMode.Trilinear;
    // NOTE(review): Create() on a GetTemporary texture is unusual (temporaries are
    // normally created already) — confirm it is required here.
    rt.Create();

    // Single-slice 2D scratch target reusing the same descriptor.
    texArrayDescriptor.volumeDepth = 1;
    texArrayDescriptor.dimension = TextureDimension.Tex2D;
    RenderTexture tempRT = RenderTexture.GetTemporary(texArrayDescriptor);
    tempRT.Create();

    ComputeShader shader = resources.shaders.probeCoeffShader;

    // Bind buffers/textures and volume constants at frame end, before dispatches run.
    Action<CommandBuffer> func = (cb) =>
    {
        cb.SetComputeBufferParam(shader, 0, "_CoeffTemp", coeffTemp);
        cb.SetComputeBufferParam(shader, 1, "_CoeffTemp", coeffTemp);
        cb.SetComputeBufferParam(shader, 1, "_Coeff", coeff);
        cb.SetComputeTextureParam(shader, 0, "_SourceCubemap", rt);
        cb.SetGlobalVector("_Tex3DSize", new Vector4(probeCount.x + 0.01f, probeCount.y + 0.01f, probeCount.z + 0.01f));
        cb.SetGlobalVector("_SHSize", transform.localScale);
        cb.SetGlobalVector("_LeftDownBack", transform.position - transform.localScale * 0.5f);
    };
    RenderPipeline.ExecuteBufferAtFrameEnding(func);
    // Wait two frames so the frame-ending setup above has executed.
    yield return null;
    yield return null;

    // Bake one probe per frame: render its cubemap, then reduce to SH coefficients.
    for (int x = 0; x < probeCount.x; ++x)
    {
        for (int y = 0; y < probeCount.y; ++y)
        {
            for (int z = 0; z < probeCount.z; ++z)
            {
                BakeMap(int3(x, y, z), rt, tempRT);
                cbuffer.GenerateMips(rt);
                cbuffer.SetComputeIntParam(shader, "_OffsetIndex", PipelineFunctions.DownDimension(int3(x, y, z), probeCount.xy));
                // Kernel 0: per-face reduction (6 slices); kernel 1: final accumulate.
                cbuffer.DispatchCompute(shader, 0, RESOLUTION / 32, RESOLUTION / 32, 6);
                cbuffer.DispatchCompute(shader, 1, 1, 1, 1);
                yield return null;
            }
        }
    }
    isRendering = false;
    yield return null;
    isRendered = true;

    // Read the coefficient buffer back and persist it as a binary asset.
    byte[] byteArray = new byte[coeff.count * coeff.stride];
    coeff.GetData(byteArray);
    string path = "Assets/BinaryData/Irradiance/" + volumeName + ".mpipe";
    File.WriteAllBytes(path, byteArray);

    // Register this volume (position/orientation/resolution/path) in the save target.
    float4x4 localToWorld = transform.localToWorldMatrix;
    IrradianceResources.Volume volume = new IrradianceResources.Volume
    {
        position = transform.position,
        localToWorld = float3x3(localToWorld.c0.xyz, localToWorld.c1.xyz, localToWorld.c2.xyz),
        resolution = (uint3)probeCount,
        volumeName = volumeName,
        path = path
    };
    Debug.Log(volume.volumeName);
    saveTarget.allVolume[indexInList] = volume;
    EditorUtility.SetDirty(saveTarget);

    RenderTexture.ReleaseTemporary(rt);
    RenderTexture.ReleaseTemporary(tempRT);
    yield return null;
    Dispose();
}
/// <summary>
/// Renders the given renderers with a replacement shader through a throwaway copy of
/// <paramref name="camera"/> and returns the result as a Texture2D (used for selection
/// / picking style renders).
/// </summary>
/// <param name="camera">Camera whose settings are copied for the off-screen render.</param>
/// <param name="shader">Replacement shader used as the material override for all renderers.</param>
/// <param name="renderers">Renderers to draw.</param>
/// <param name="width">Output width; negative means "use the camera's pixel width".</param>
/// <param name="height">Output height; negative means "use the camera's pixel height".</param>
/// <returns>The rendered image; caller owns the returned Texture2D.</returns>
private static Texture2D Render(
    Camera camera,
    Shader shader,
    Renderer[] renderers,
    int width = -1,
    int height = -1)
{
    // Either dimension being negative switches to the camera's own pixel size.
    bool autoSize = width < 0 || height < 0;

    int _width = autoSize ? (int)camera.pixelRect.width : width;
    int _height = autoSize ? (int)camera.pixelRect.height : height;

    // Clone the camera onto a temporary GameObject; it renders manually (enabled = false)
    // with a solid white clear and nothing culled in (cullingMask = 0) — geometry is
    // supplied via the RTE camera's renderers cache instead.
    GameObject go = new GameObject();
    Camera renderCam = go.AddComponent<Camera>();
    renderCam.CopyFrom(camera);
    renderCam.renderingPath = RenderingPath.Forward;
    renderCam.enabled = false;
    renderCam.clearFlags = CameraClearFlags.SolidColor;
    renderCam.backgroundColor = Color.white;
    renderCam.cullingMask = 0;

    IRenderPipelineCameraUtility cameraUtility = IOC.Resolve<IRenderPipelineCameraUtility>();
    if (cameraUtility != null)
    {
        cameraUtility.EnablePostProcessing(renderCam, false);
        cameraUtility.SetBackgroundColor(renderCam, Color.white);
    }

    renderCam.allowHDR = false;
    renderCam.allowMSAA = false;
    renderCam.forceIntoRenderTexture = true;

    // Reset the viewport rect while keeping the source camera's aspect ratio.
    float aspect = renderCam.aspect;
    renderCam.rect = new Rect(Vector2.zero, Vector2.one);
    renderCam.aspect = aspect;

    // Note: colorFormat/TextureFormat below refer to members of the enclosing type,
    // not the UnityEngine enums of the same name.
    RenderTextureDescriptor descriptor = new RenderTextureDescriptor()
    {
        width = _width,
        height = _height,
        colorFormat = RenderTextureFormat,
        autoGenerateMips = false,
        depthBufferBits = 16,
        dimension = TextureDimension.Tex2D,
        enableRandomWrite = false,
        memoryless = RenderTextureMemoryless.None,
        sRGB = true,
        useMipMap = false,
        volumeDepth = 1,
        msaaSamples = 1
    };

    RenderTexture rt = RenderTexture.GetTemporary(descriptor);
    RenderTexture prev = RenderTexture.active;
    renderCam.targetTexture = rt;
    RenderTexture.active = rt;

    Material replacementMaterial = new Material(shader);

    IRTEGraphics graphics = IOC.Resolve<IRTEGraphics>();
    IRTECamera rteCamera = graphics.CreateCamera(renderCam, CameraEvent.AfterForwardAlpha, false, true);
    rteCamera.RenderersCache.MaterialOverride = replacementMaterial;
    rteCamera.Camera.name = "BoxSelectionCamera";

    // Register each renderer once per non-null shared material.
    // NOTE(review): this can add the same renderer multiple times — presumably
    // intended for multi-submesh renderers; confirm against RenderersCache semantics.
    foreach (Renderer renderer in renderers)
    {
        Material[] materials = renderer.sharedMaterials;
        for (int i = 0; i < materials.Length; ++i)
        {
            if (materials[i] != null)
            {
                rteCamera.RenderersCache.Add(renderer);
            }
        }
    }
    rteCamera.RefreshCommandBuffer();

    if (RenderPipelineInfo.Type != RPType.Standard)
    {
        // SRP paths render vertically flipped here; compensate by flipping the
        // projection and inverting culling for the duration of the render.
        bool invertCulling = GL.invertCulling;
        GL.invertCulling = true;
        renderCam.projectionMatrix *= Matrix4x4.Scale(new Vector3(1, -1, 1));
        renderCam.Render();
        GL.invertCulling = invertCulling;
    }
    else
    {
        renderCam.Render();
    }

    // Read back from the active render target into a CPU texture.
    Texture2D img = new Texture2D(_width, _height, TextureFormat, false, false);
    img.ReadPixels(new Rect(0, 0, _width, _height), 0, 0);
    img.Apply();

    // Restore state and clean up all temporaries.
    RenderTexture.active = prev;
    RenderTexture.ReleaseTemporary(rt);
    UnityObject.DestroyImmediate(go);
    UnityObject.Destroy(replacementMaterial);

    rteCamera.Destroy();
    //System.IO.File.WriteAllBytes("Assets/box_selection.png", img.EncodeToPNG());
    return(img);
}
/// <summary>
/// Configure the render pass by binding the stored color/depth targets and setting
/// the clear state. Only the depth buffer is cleared; color contents are preserved.
/// </summary>
/// <param name="commandBuffer">The command buffer for configuration.</param>
/// <param name="renderTextureDescriptor">The descriptor of the target render texture (not used by this override).</param>
public override void Configure(CommandBuffer commandBuffer, RenderTextureDescriptor renderTextureDescriptor)
{
    ConfigureTarget(m_ColorTargetIdentifier, m_DepthTargetIdentifier);
    ConfigureClear(ClearFlag.Depth, Color.clear);
}
/// <summary>
/// Allocates the shared clustered-deferred (CBDR) resources: the XY/Z cluster plane
/// textures, point/spot shadow map arrays, and the per-cluster light index and light
/// data compute buffers.
/// </summary>
/// <param name="res">Pipeline resources providing the CBDR compute shader.</param>
public CBDRSharedData(PipelineResources res)
{
    dirLightShadowmap = null;
    availiableDistance = 0;
    spotShadowCount = 0;
    pointshadowCount = 0;
    lightFlag = 0;
    cbdrShader = res.shaders.cbdrShader;

    // 3D (XRES x YRES x 4) UAV-writable descriptor reused for the cluster plane textures.
    RenderTextureDescriptor desc = new RenderTextureDescriptor
    {
        autoGenerateMips = false,
        bindMS = false,
        colorFormat = RenderTextureFormat.ARGBFloat,
        depthBufferBits = 0,
        enableRandomWrite = true,
        dimension = TextureDimension.Tex3D,
        width = XRES,
        height = YRES,
        volumeDepth = 4,
        memoryless = RenderTextureMemoryless.None,
        msaaSamples = 1,
        shadowSamplingMode = ShadowSamplingMode.None,
        sRGB = false,
        useMipMap = false,
        vrUsage = VRTextureUsage.None
    };

    // Cube array for point-light shadows: 6 faces per light, RHalf distance values.
    cubeArrayMap = new RenderTexture(new RenderTextureDescriptor
    {
        autoGenerateMips = false,
        bindMS = false,
        colorFormat = RenderTextureFormat.RHalf,
        depthBufferBits = 16,
        dimension = TextureDimension.CubeArray,
        volumeDepth = 6 * MAXIMUMPOINTLIGHTCOUNT,
        enableRandomWrite = false,
        height = MLight.cubemapShadowResolution,
        width = MLight.cubemapShadowResolution,
        memoryless = RenderTextureMemoryless.None,
        msaaSamples = 1,
        shadowSamplingMode = ShadowSamplingMode.None,
        sRGB = false,
        useMipMap = false,
        vrUsage = VRTextureUsage.None
    });
    cubeArrayMap.filterMode = FilterMode.Bilinear;
    cubeArrayMap.Create();

    // 2D array of raw-depth shadow maps for spot lights, one slice per light.
    spotArrayMap = new RenderTexture(new RenderTextureDescriptor
    {
        autoGenerateMips = false,
        bindMS = false,
        colorFormat = RenderTextureFormat.Shadowmap,
        depthBufferBits = 16,
        dimension = TextureDimension.Tex2DArray,
        enableRandomWrite = false,
        height = MLight.perspShadowResolution,
        memoryless = RenderTextureMemoryless.None,
        msaaSamples = 1,
        shadowSamplingMode = ShadowSamplingMode.RawDepth,
        sRGB = false,
        useMipMap = false,
        volumeDepth = MAXIMUMSPOTLIGHTCOUNT,
        vrUsage = VRTextureUsage.None,
        width = MLight.perspShadowResolution
    });
    spotArrayMap.filterMode = FilterMode.Bilinear;
    spotArrayMap.Create();

    // XY cluster planes (3D texture allocated from the shared descriptor).
    xyPlaneTexture = new RenderTexture(desc);
    xyPlaneTexture.filterMode = FilterMode.Point;
    xyPlaneTexture.Create();

    // Z cluster planes: reuse the descriptor as a ZRES x 2 2D texture.
    desc.dimension = TextureDimension.Tex2D;
    desc.volumeDepth = 1;
    desc.width = ZRES;
    desc.height = 2;
    zPlaneTexture = new RenderTexture(desc);
    zPlaneTexture.Create();

    // Per-cluster light index lists: MAXLIGHTPERCLUSTER indices plus one count slot.
    pointlightIndexBuffer = new ComputeBuffer(XRES * YRES * ZRES * (MAXLIGHTPERCLUSTER + 1), sizeof(int));
    spotlightIndexBuffer = new ComputeBuffer(XRES * YRES * ZRES * (MAXLIGHTPERCLUSTER + 1), sizeof(int));
    allPointLightBuffer = new ComputeBuffer(pointLightInitCapacity, sizeof(PointLightStruct));
    // NOTE(review): spot-light buffer is sized with pointLightInitCapacity — confirm
    // this is intentional and not a copy-paste of the point-light capacity.
    allSpotLightBuffer = new ComputeBuffer(pointLightInitCapacity, sizeof(SpotLight));
    allFogVolumeBuffer = new ComputeBuffer(30, sizeof(FogVolume));
}
/// <summary>Creates the blit pass that runs after rendering, targeting the given color and depth handles.</summary>
/// <param name="baseDescriptor">Descriptor of the camera target (not used by the blit pass).</param>
/// <param name="colorHandle">Source color target for the blit.</param>
/// <param name="depthHandle">Depth target associated with the blit.</param>
/// <returns>A new <c>BlitPass</c> ready to be enqueued.</returns>
ScriptableRenderPass IAfterRender.GetPassToEnqueue(RenderTextureDescriptor baseDescriptor, RenderTargetHandle colorHandle, RenderTargetHandle depthHandle)
    => new BlitPass(colorHandle, depthHandle);
/// <summary>
/// Debug helper: blits the given render target into <c>debugRT</c> so it can be inspected.
/// Fix: the two branches duplicated the identical blit/execute/release sequence; the
/// common tail is now shared and only buffer acquisition/naming differs.
/// </summary>
/// <param name="camera">Camera whose name tags the temporary command buffer.</param>
/// <param name="context">Context used to execute the command buffer.</param>
/// <param name="RTid">Source render target to capture.</param>
/// <param name="Desc">Descriptor used to allocate the debug render texture.</param>
/// <param name="cmd">Optional caller-supplied command buffer. Behavior note (preserved
/// from the original): when supplied, it is executed and Released here, so the caller
/// must not reuse it. debugRT is likewise Released immediately after execution.</param>
private void MyDebug(Camera camera, ScriptableRenderContext context, RenderTargetIdentifier RTid, RenderTextureDescriptor Desc, CommandBuffer cmd = null)
{
    if (cmd != null)
    {
        // Tag the caller-supplied buffer so it is recognizable in captures.
        cmd.name = cmd.name + "HaveCMD!!!!!!!!!!!!!!";
    }
    else
    {
        // No buffer supplied: create a temporary one for this blit.
        cmd = new CommandBuffer();
        cmd.name = "(" + camera.name + ")" + "Setup TempRT";
    }

    debugRT = new RenderTexture(Desc);
    cmd.Blit(RTid, debugRT);
    context.ExecuteCommandBuffer(cmd);
    cmd.Release();
    debugRT.Release();
}
/// <summary>Creates the clear-color pass that runs before rendering, targeting the given color handle.</summary>
/// <param name="baseDescriptor">Descriptor of the camera target (not used by the clear pass).</param>
/// <param name="colorHandle">Color target to clear.</param>
/// <param name="depthAttachmentHandle">Depth attachment handle (not used by the clear pass).</param>
/// <param name="clearFlag">Which buffers to clear.</param>
/// <returns>A new <c>ClearColorPass</c> ready to be enqueued.</returns>
ScriptableRenderPass IBeforeRender.GetPassToEnqueue(RenderTextureDescriptor baseDescriptor, RenderTargetHandle colorHandle, RenderTargetHandle depthAttachmentHandle, ClearFlag clearFlag)
    => new ClearColorPass(colorHandle, clearFlag);
/// <summary>
/// Allocates the temporary depth render target described by <c>descriptor</c>,
/// binds it as the pass target, and clears everything to black.
/// </summary>
/// <param name="cmd">Command buffer used to allocate the temporary render target.</param>
/// <param name="cameraTextureDescriptor">Camera target descriptor (not used by this override).</param>
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
    var depthHandle = depthAttachmentHandle;
    cmd.GetTemporaryRT(depthHandle.id, descriptor, FilterMode.Point);
    ConfigureTarget(depthHandle.Identifier());
    ConfigureClear(ClearFlag.All, Color.black);
}
/// <summary>
/// Acquires the temporary shadowmap texture for additional lights, binds it as the
/// pass target, and clears everything to black.
/// </summary>
/// <param name="cmd">Command buffer for this pass (not used by this override).</param>
/// <param name="cameraTextureDescriptor">Camera target descriptor (not used by this override).</param>
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
    var shadowmap = ShadowUtils.GetTemporaryShadowTexture(m_ShadowmapWidth, m_ShadowmapHeight, k_ShadowmapBufferBits);
    m_AdditionalLightsShadowmapTexture = shadowmap;
    ConfigureTarget(new RenderTargetIdentifier(shadowmap));
    ConfigureClear(ClearFlag.All, Color.black);
}
/// <summary>
/// Shallow descriptor comparison: two descriptors are considered equal when their
/// width, height and MSAA sample count match.
/// </summary>
// TODO compare all fields?
protected virtual bool Equals(RenderTextureDescriptor x, RenderTextureDescriptor y)
{
    if (x.width != y.width)
    {
        return false;
    }
    if (x.height != y.height)
    {
        return false;
    }
    return x.msaaSamples == y.msaaSamples;
}
/// <summary>
/// Configure the pass: records the blit source, the target texture dimension,
/// and whether the current platform is mobile or Switch.
/// </summary>
/// <param name="baseDescriptor">Descriptor whose dimension the pass will target.</param>
/// <param name="colorHandle">Handle of the color texture used as the blit source.</param>
public void Setup(RenderTextureDescriptor baseDescriptor, RenderTargetHandle colorHandle)
{
    m_TargetDimension = baseDescriptor.dimension;
    m_Source = colorHandle;
    bool runningOnSwitch = Application.platform == RuntimePlatform.Switch;
    m_IsMobileOrSwitch = runningOnSwitch || Application.isMobilePlatform;
}