I’m trying to write a simple shader that blits the depth texture for use in a compute shader to implement Hi-Z occlusion culling.
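For context, the Hi-Z compute pass I eventually want to feed would be a simple depth downsample along these lines (just a rough sketch, not code I’ve written yet; the kernel and texture names are placeholders):

    #pragma kernel DownsampleDepth

    Texture2D<float> _DepthMipIn;    // previous mip (placeholder name)
    RWTexture2D<float> _DepthMipOut; // next, half-resolution mip (placeholder name)

    [numthreads(8, 8, 1)]
    void DownsampleDepth(uint3 id : SV_DispatchThreadID)
    {
        // Each output texel keeps the farthest depth of a 2x2 block from the
        // previous mip (max with a standard 0 = near, 1 = far depth buffer),
        // which is the conservative value needed for occlusion tests.
        uint2 src = id.xy * 2;
        float d0 = _DepthMipIn[src + uint2(0, 0)];
        float d1 = _DepthMipIn[src + uint2(1, 0)];
        float d2 = _DepthMipIn[src + uint2(0, 1)];
        float d3 = _DepthMipIn[src + uint2(1, 1)];
        _DepthMipOut[id.xy] = max(max(d0, d1), max(d2, d3));
    }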
It seems like the depth is being read incorrectly in my shader code, though.
Here’s a simple comparison (1st image):
- The left cube has a material with an unlit, transparent Shader Graph.
- The right cube has a material using my shader code.
The left image is the result I’d expect. You can even see the depth very faintly in the right image.
The shader graph (2nd image):
My shader code:
Shader "Unlit/NewUnlitShader"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
//Tags { "RenderType"="Opaque" }
Tags
{
"RenderPipeline"="HDRenderPipeline"
"RenderType"="HDUnlitShader"
"Queue"="Geometry+0"
}
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float2 depth : TEXCOORD1;
float4 scrPos : TEXCOORD2;
};
sampler2D _MainTex;
float4 _MainTex_ST;
sampler2D _CameraDepthTexture;
v2f vert (appdata v)
{
v2f output;
output.vertex = UnityObjectToClipPos(v.vertex);
output.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_DEPTH(output.depth);
output.scrPos = ComputeScreenPos(output.vertex);
return output;
}
float4 frag (v2f i) : SV_Target
{
float camDepth = 1.0 - Linear01Depth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, i.uv));
float4 col = float4(camDepth, camDepth, camDepth, 1.0);
return col;
}
ENDCG
}
}
}
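For reference, this is the fragment variant I was considering, which samples with screen-space UVs derived from scrPos instead of the mesh UVs (just a sketch; I haven’t confirmed this is the correct way to look up _CameraDepthTexture under HDRP):

            float4 frag (v2f i) : SV_Target
            {
                // Sketch: derive screen-space UVs from the interpolated screen
                // position and sample the depth texture there instead of at the
                // mesh UVs (assumption, not verified under HDRP).
                float2 screenUV = i.scrPos.xy / i.scrPos.w;
                float camDepth = 1.0 - Linear01Depth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, screenUV));
                return float4(camDepth, camDepth, camDepth, 1.0);
            }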