I have implemented a custom shadow mapping shader for my current project. It works in the editor and in desktop standalone builds, but not on iOS - there the entire geometry renders in shadow, as if the depth values are all 1.0 (or 0.0, I forget which).
Editor: [screenshot]
iPhone: [screenshot]
Here are the relevant parts of the shaders. As you can see, I render depth and d^2 into an ARGB32 render target in a first pass, then sample it at the other end to calculate the shadow value (a VSM implementation):
Depth pass:
inline float4 EncodeDepthAndDepthSquared(float depth, float depth_sq) {
    // Pack each 0-1 value into two 8-bit channels via Unity's EncodeFloatRG,
    // since the target is ARGB32 (8 bits per channel).
    float4 enc_value;
    enc_value.xy = EncodeFloatRG(depth_sq);
    enc_value.zw = EncodeFloatRG(depth);
    return enc_value;
}
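For reference, the Unity encode/decode helpers I'm relying on are defined in UnityCG.cginc roughly as follows (copied here from memory, so double-check against your Unity version):

inline float2 EncodeFloatRG(float v) {
    // Splits a 0-1 float across two 8-bit channels (note: 1.0 does not encode cleanly).
    float2 kEncodeMul = float2(1.0, 255.0);
    float kEncodeBit = 1.0 / 255.0;
    float2 enc = frac(kEncodeMul * v);
    enc.x -= enc.y * kEncodeBit;
    return enc;
}

inline float DecodeFloatRG(float2 enc) {
    // Inverse of the above.
    float2 kDecodeDot = float2(1.0, 1.0 / 255.0);
    return dot(enc, kDecodeDot);
}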
struct v2f {
    float4 pos : SV_POSITION;
    float depth : TEXCOORD0;
};

v2f vert(appdata_base v) {
    v2f o;
    o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
    o.depth = COMPUTE_DEPTH_01; // linear 0-1 depth from the light's camera
    return o;
}
half4 frag(v2f i) : COLOR {
    //float moment1 = Linear01Depth(i.depth);
    float moment1 = i.depth;
    float moment2 = moment1 * moment1;

    //Adjust the second moment using partial derivatives:
    float dx = ddx(i.depth);
    float dy = ddy(i.depth);
    moment2 += 0.25 * (dx * dx + dy * dy);

    return EncodeDepthAndDepthSquared(moment1, moment2);
}
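In case it matters: as far as I can tell, COMPUTE_DEPTH_01 in UnityCG.cginc boils down to the line below, i.e. it already produces linear 0-1 view-space depth (eye depth scaled by 1/far). Please correct me if my reading of the macro is wrong:

// My understanding of the UnityCG.cginc macro:
#define COMPUTE_DEPTH_01 -(mul(UNITY_MATRIX_MV, v.vertex).z * _ProjectionParams.w)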
Shadow pass:
half4 frag(v2f i) : COLOR
{
    //Shadow mapping:
    //===============
    //Get the distance (and d^2) to the camera from the depth buffer for this point.
    //To do this, we sample the shadow map at our calculated texel position.
    //This gives us the depth and d^2 moments of the closest occluder as seen from the light.
    float4 shadow_depth4 = tex2Dproj(_RenderPaintTexture, i.sh.xyw);

    //Calculate the depth of the visible pixel from the light position.
    float view_depth = Linear01Depth(i.sh.z / i.sh.w);

    float shadow_result = chebyshevUpperBound(view_depth, shadow_depth4);

    half4 shadow_col = half4(0.0, 0.0, 0.5, 1.0);
    half4 light_col = half4(1.0, 0.5, 0.0, 1.0);
    half4 shadow_blended_col;
    if (shadow_result < 1.0) {
        //Pixel is (at least partly) in shadow
        shadow_blended_col = shadow_col;
    }
    else {
        //Pixel is fully in sun
        shadow_blended_col = light_col;
    }

    //rest of lighting calculations...
chebyshevUpperBound function:
float chebyshevUpperBound(float dist, float4 encoded_moments) {
    //Retrieve the encoded moments (m1 = depth, m2 = depth squared)
    float moment1 = Linear01Depth(DecodeFloatRG(encoded_moments.zw));
    float moment2 = Linear01Depth(DecodeFloatRG(encoded_moments.xy));

    //If the current fragment is in front of the occluder, it is fully lit
    if (dist <= moment1) {
        return 1.0;
    }

    //Otherwise we are in shadow or penumbra; use Chebyshev's inequality to get
    //an upper bound on the probability that the pixel is lit (p_lit)
    float variance = moment2 - (moment1 * moment1);
    variance = max(variance, 0.00002);

    float d = dist - moment1;
    float p_lit = variance / (variance + d * d);
    return p_lit;
}
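To sanity-check the maths with concrete numbers: if the map stores moment1 = 0.4 and moment2 = 0.1601, then variance = 0.1601 - 0.16 = 0.0001. A fragment at dist = 0.5 gives d = 0.1, so p_lit = 0.0001 / (0.0001 + 0.01) ≈ 0.01, i.e. almost fully shadowed - which is the behaviour I'd expect on all platforms.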
I’ve been racking my brains for the best part of a week trying to get this to work. If I display the render target on a plane in the scene on the iOS device, I see the correct values being rendered (it looks like psychedelic stripes of green, the same as in the editor).
This makes me think that the problem lies in the way the values are read out in the shadow pass, or how they are compared.
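If it helps anyone diagnose this, here is a stripped-down version of the shadow-pass fragment that just visualises the two values being compared (same names and sampler as above - purely a debugging sketch):

half4 frag(v2f i) : COLOR {
    float4 shadow_depth4 = tex2Dproj(_RenderPaintTexture, i.sh.xyw);
    float moment1 = Linear01Depth(DecodeFloatRG(shadow_depth4.zw));
    float view_depth = Linear01Depth(i.sh.z / i.sh.w);
    // Red = decoded depth from the shadow map, green = this fragment's depth from the light.
    // If these match in the editor but diverge on iOS, the problem is in the
    // decode/comparison rather than in the depth pass.
    return half4(moment1, view_depth, 0.0, 1.0);
}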
I'd really appreciate it if any shader gurus could help me get to the bottom of this.
Many thanks!