Hi,
I’ve been working on shaders for my game on my laptop, which is great, because I can easily switch between testing on Intel integrated graphics (both for low-end performance and compatibility testing) and a more powerful Nvidia card.
While using Intel, I thought I had hardware-filtered shadows working nicely, using UNITY_SAMPLE_SHADOW. But when I tested it on Nvidia, it didn’t work.
Here’s my shader:
Shader "Custom/KustomShader"
{
SubShader
{
Tags { "RenderType"="Opaque" "IgnoreProjector"="True" "Queue"="Geometry" }
LOD 200
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma target 3.0
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD0;
float4 tangent : TANGENT;
};
struct v2f
{
float4 vertex : SV_POSITION;
float3 normal : TEXCOORD0;
float4 shadowPos : TEXCOORD1;
};
UNITY_DECLARE_SHADOWMAP(_ShadowDepth);
//sampler2D _ShadowDepth;
float4x4 _ShadowVP;
float4x4 _ShadowV;
float4 _SunVec;
float4 _SunColour;
float4 _SkyAmbientColour;
v2f vert (appdata v)
{
v2f output;
float4x4 modelMatrix = _Object2World;
float4 worldPos = mul(modelMatrix, v.vertex);
output.vertex = mul(UNITY_MATRIX_MVP, worldPos);
output.shadowPos = mul(_ShadowVP, worldPos);
output.shadowPos.z = -mul(_ShadowV, worldPos).z;
output.normal = v.normal * 0.5 + 0.5;
return output;
}
fixed4 frag (v2f input) : SV_Target
{
float3 baseColour = input.normal;
float3 shadowCoords = input.shadowPos.xyz;
shadowCoords.xy /= input.shadowPos.w;
shadowCoords.xy = shadowCoords.xy * 0.5 + 0.5;
shadowCoords.z -= 0.01; // bias...
//float shadow = step(shadowCoords.z, tex2D(_ShadowDepth, shadowCoords.xy).r);
float shadow = UNITY_SAMPLE_SHADOW(_ShadowDepth, shadowCoords.xyz);
return float4(baseColour * shadow, 1.0);
}
ENDCG
}
}
FallBack "Diffuse"
}
If I comment out the lines UNITY_DECLARE_SHADOWMAP… and UNITY_SAMPLE_SHADOW…, and uncomment the lines next to them (sampler2D… and float shadow = step…), I get hard shadows, as one would expect, on both Intel and Nvidia.
I thought UNITY_SAMPLE_SHADOW, apart from being the filtered result of the 4 nearest texels, would be mostly equivalent to step(shadowCoords.z, tex2D(_ShadowDepth, shadowCoords.xy).r), and it works on Intel, but for some reason it’s all zero on Nvidia.
I couldn’t find an example online of the correct usage of UNITY_SAMPLE_SHADOW. Can anyone provide me such an example, or point out what’s wrong with my code?
Thanks so much,
Jibb