Color Intersect Shader

I’m trying to write a shader that colors the intersection(s) of a mesh with the surrounding scene geometry. I have it working for solid colors and alphas, but the effect is really just an optical illusion, and it falls apart whenever I try to use any sort of computed pattern.

Here’s my collider/mesh inspector:

Here’s the shader working with a base color:

Here’s the same shader with an alpha:

And here’s the problem with a computed world space pattern :eyes::

Is it possible to “flatten” the pattern somehow within the shader so it maintains the optical illusion? Or is there some other way to achieve the desired effect?

Here’s the shader I have so far:

Shader "Unlit/Intersect" {
    Properties {
        _Color("Color", Color) = (1,1,1,1)
        _Position("Position", Vector) = (0,0,0,0)
    }
    SubShader {
        Tags {
            "Queue" = "Transparent" "RenderType" = "Transparent"
        }

        // First pass: render the mesh's back faces, but only where they fail the
        // normal depth test (i.e. where scene geometry sits in front of them),
        // and mark those pixels in the stencil buffer without drawing any color.
        Pass {
            Cull Front ZTest GEqual ZWrite Off
            ColorMask 0
            Stencil {
                Ref 100
                Comp Always
                Pass Replace
            }
        }

        // Second pass: draw the front faces only where the stencil was marked,
        // i.e. the region where the mesh intersects other geometry. Pass Zero
        // clears the stencil again so it doesn't leak into other draws.
        Pass {
            Blend SrcAlpha OneMinusSrcAlpha
            Stencil {
                Ref 100
                Comp Equal
                Pass Zero
            }

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            float4 _Color;
            float4 _Position;

            struct appdata {
                float4 vertex : POSITION;
            };

            struct v2f {
                float4 vertex : SV_POSITION;
                float4 screenPos : TEXCOORD0;
                float4 worldPos : TEXCOORD1;
            };

            v2f vert(appdata v) {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.screenPos = ComputeScreenPos(o.vertex);
                o.worldPos = mul(unity_ObjectToWorld, v.vertex);
                return o;
            }

            fixed4 frag(v2f i) : SV_Target {
                // ring pattern in world space, but evaluated on *this* mesh's
                // surface rather than on the surface it intersects.
                // note: compare only xyz; worldPos.w is 1 while _Position.w
                // defaults to 0, which skews a float4 distance.
                float dist = distance(i.worldPos.xyz, _Position.xyz);
                float offset = 1 - saturate(round(frac(dist * 2)));
                return _Color * offset;
            }
            ENDCG
        }
    }
}

Any help would be appreciated, thanks!

You need to compute the world position of the surface you’re intersecting with, which you can do using the _CameraDepthTexture and some math. (Just make sure the camera is actually generating a depth texture, e.g. by rendering in deferred, or by setting Camera.depthTextureMode to include DepthTextureMode.Depth.)

Like this:

Shader "Unlit/WorldPosFromDepth"
{
    Properties
    {
    }
    SubShader
    {
        Tags { "Queue"="Transparent" "RenderType"="Transparent" "IgnoreProjector"="True" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
                float4 projPos : TEXCOORD0; 
                float3 camRelativeWorldPos : TEXCOORD1;
            };

            UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthTexture);

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.projPos = ComputeScreenPos(o.pos);
                o.camRelativeWorldPos = mul(unity_ObjectToWorld, float4(v.vertex.xyz, 1.0)).xyz - _WorldSpaceCameraPos;
                return o;
            }

            bool depthIsNotSky(float depth)
            {
                #if defined(UNITY_REVERSED_Z)
                return (depth > 0.0);
                #else
                return (depth < 1.0);
                #endif
            }

            half4 frag (v2f i) : SV_Target
            {
                float2 screenUV = i.projPos.xy / i.projPos.w;

                // sample depth texture
                float depth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, screenUV);

                // get linear depth from the depth
                float sceneZ = LinearEyeDepth(depth);

                // calculate the view plane vector
                // note: Something like normalize(i.camRelativeWorldPos.xyz) is what you'll see other
                // examples do, but that is wrong! You need a vector with a view depth of 1 unit, not
                // a magnitude of 1 unit. Dividing by the dot product with the camera's forward axis
                // (the third row of unity_WorldToCamera) rescales the ray so that multiplying it by
                // the linear eye depth lands exactly on the surface.
                float3 viewPlane = i.camRelativeWorldPos.xyz / dot(i.camRelativeWorldPos.xyz, unity_WorldToCamera._m20_m21_m22);

                // calculate the world position
                // multiply the view plane by the linear depth to get the camera relative world space position
                // add the world space camera position to get the world space position from the depth texture
                float3 worldPos = viewPlane * sceneZ + _WorldSpaceCameraPos;

                half4 col = 0;

                // draw grid where it's not the sky
                if (depthIsNotSky(depth))
                    col.rgb = saturate(2.0 - abs(frac(worldPos) * 2.0 - 1.0) * 100.0);

                return col;
            }
            ENDCG
        }
    }
}
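
For completeness, here’s a rough, untested sketch of how that could slot into your stencil setup: same two passes as your original shader, but the second pass reconstructs the world position from the depth texture and feeds it into your ring pattern. (The shader name is arbitrary, and it assumes the camera is generating a depth texture as noted above.)

Shader "Unlit/IntersectWorldPos"
{
    Properties
    {
        _Color("Color", Color) = (1,1,1,1)
        _Position("Position", Vector) = (0,0,0,0)
    }
    SubShader
    {
        Tags { "Queue"="Transparent" "RenderType"="Transparent" }

        // same stencil-marking pass as the original shader
        Pass
        {
            Cull Front ZTest GEqual ZWrite Off
            ColorMask 0
            Stencil
            {
                Ref 100
                Comp Always
                Pass Replace
            }
        }

        Pass
        {
            Blend SrcAlpha OneMinusSrcAlpha
            Stencil
            {
                Ref 100
                Comp Equal
                Pass Zero
            }

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            float4 _Color;
            float4 _Position;
            UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthTexture);

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
                float4 projPos : TEXCOORD0;
                float3 camRelativeWorldPos : TEXCOORD1;
            };

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.projPos = ComputeScreenPos(o.pos);
                o.camRelativeWorldPos = mul(unity_ObjectToWorld, float4(v.vertex.xyz, 1.0)).xyz - _WorldSpaceCameraPos;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                // reconstruct the world position of the scene surface behind this fragment
                float2 screenUV = i.projPos.xy / i.projPos.w;
                float sceneZ = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, screenUV));
                float3 viewPlane = i.camRelativeWorldPos.xyz / dot(i.camRelativeWorldPos.xyz, unity_WorldToCamera._m20_m21_m22);
                float3 worldPos = viewPlane * sceneZ + _WorldSpaceCameraPos;

                // evaluate the original ring pattern at the reconstructed position
                float dist = distance(worldPos, _Position.xyz);
                float offset = 1 - saturate(round(frac(dist * 2)));
                return _Color * offset;
            }
            ENDCG
        }
    }
}

Any other world-space pattern can be dropped in place of the last three lines of the fragment shader.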

Wow, that was exactly the missing piece I needed. Thank you so much for the help, and the comments explaining the math!