How to get world coordinates of pixel in fragment shader.

So a big part of this game I'm making is going to be the oceans and ocean fog. I made a shader with fog and everything already, but I can't find any answers online on finding the exact world-space coordinates of each pixel in the fragment shader. It's a very simple concept: I'm going to get the pixel's world coordinate and see if it's below or above the water surface transform. If it's below, I execute my ocean fog; if it's above the water surface, I don't. I have already gotten an effect with the shader below that's very close to what I need, but it works awkwardly. I'm going for that seamless transition of the camera above and below the water surface (like in Subnautica or Sea of Thieves). I am using the Universal Render Pipeline, if that matters at all. The shader is being applied through a blit custom render feature, which works great.

Any help is really appreciated.

Here is my image effect script:

Shader "Custom/ScreenFog"
{
    Properties
    {
        _MainTex("Base (RGB)", 2D) = "white" {}
        _DepthLevel("Depth Level", Range(1, 3)) = 2
        _FogColor("FogColor", Color) = (1, 0, 0, 1)
        _Cutoff("Cutoff", Float) = 0.5
        _Falloff("Falloff", Float) = 0.5
        //fake bool, 1 true, 0 false;
        _isUnderwater("is Underwater", Float) = 1
    }
    SubShader
    {
        Pass
        {

            CGPROGRAM

            #pragma target 3.0
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            uniform sampler2D_float _CameraDepthTexture;
            uniform fixed _DepthLevel;
            uniform half4 _MainTex_TexelSize;

            // Vertex input for the full-screen blit quad.
            struct uinput
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 pos  : SV_POSITION;
                float2 uv   : TEXCOORD0;
                // World-space view ray for this corner of the quad (a direction,
                // not a position — w is 0 when it is transformed).
                float4 wpos : TEXCOORD1;
            };

            sampler2D _MainTex;
            float _Cutoff;
            float _Falloff;
            fixed4 _FogColor;
            float _isUnderwater;
            float4 _MainTex_ST;

            v2f vert(uinput v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);

                // UVs are in [0,1]; NDC is in [-1,1], so the remap is * 2 - 1.
                // (The original * 5 - 1 produced a skewed, mostly off-screen ray.)
                // .xyz makes the float4 -> float3 truncation explicit.
                float3 viewVector = mul(unity_CameraInvProjection, float4(v.uv.xy * 2 - 1, 0, 1)).xyz;
                // w = 0: rotate the ray into world space without applying the
                // camera's translation.
                o.wpos = mul(unity_CameraToWorld, float4(viewVector, 0));
                return o;
            }

            fixed4 frag(v2f o) : SV_TARGET
            {
                // Fog factor from the scene depth: linearize, shape with a
                // tunable power curve, then window it with cutoff/falloff.
                float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, o.uv));
                depth = pow(Linear01Depth(depth), _DepthLevel * (_DepthLevel * _DepthLevel / 2));
                depth = smoothstep(_Cutoff - _Falloff, _Cutoff, depth);

                // Both sides of the water line get the same fog blend; only the
                // presentation differs below.
                fixed4 col = lerp(tex2D(_MainTex, o.uv), _FogColor, depth);

                // Use a local flag instead of writing to the _isUnderwater
                // uniform — per-fragment writes to a uniform are at best a no-op
                // and at worst compiler-dependent. The unreachable final else of
                // the original (flag could only ever be 0 or 1) is dropped.
                bool aboveLine = o.wpos.y > 0;

                // Inverted color marks the y > 0 half as a visual debug aid,
                // matching the original's behavior.
                return aboveLine ? 1 - col : col;
            }
            ENDCG
        }
    }
}

The depth in the camera depth texture is the depth from the near plane in view space. The depth you’re presumably trying to compare against is your water’s depth in world space.

Here’s an example shader for getting the world position from the depth buffer, assuming you’re using a perspective camera.

Shader "Unlit/WorldPosFromDepth"
{
    Properties
    {
    }
    SubShader
    {
        Tags { "Queue"="Transparent" "RenderType"="Transparent" "IgnoreProjector"="True" }
        LOD 100

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
                // Screen-space position for sampling the depth texture.
                float4 projPos : TEXCOORD0;
                // Vertex world position relative to the camera; interpolated,
                // this is a per-pixel ray from the camera through the scene.
                float3 camRelativeWorldPos : TEXCOORD1;
            };

            UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthTexture);

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.projPos = ComputeScreenPos(o.pos);
                o.camRelativeWorldPos = mul(unity_ObjectToWorld, float4(v.vertex.xyz, 1.0)).xyz - _WorldSpaceCameraPos;
                return o;
            }

            // True when the depth sample hit scene geometry rather than the
            // far plane (the "sky"). Platforms with reversed-Z store the far
            // plane as 0, others as 1.
            bool depthIsNotSky(float depth)
            {
                #if defined(UNITY_REVERSED_Z)
                return (depth > 0.0);
                #else
                return (depth < 1.0);
                #endif
            }

            half4 frag (v2f i) : SV_Target
            {
                float2 screenUV = i.projPos.xy / i.projPos.w;

                // sample depth texture
                float depth = SAMPLE_DEPTH_TEXTURE(_CameraDepthTexture, screenUV);

                // get linear depth from the depth
                float sceneZ = LinearEyeDepth(depth);

                // calculate the view plane vector
                // note: Something like normalize(i.camRelativeWorldPos.xyz) is what you'll see other
                // examples do, but that is wrong! You need a vector that at a 1 unit view depth, not
                // a 1 unit magnitude.
                float3 viewPlane = i.camRelativeWorldPos.xyz / dot(i.camRelativeWorldPos.xyz, unity_WorldToCamera._m20_m21_m22);

                // calculate the world position
                // multiply the view plane by the linear depth to get the camera relative world space position
                // add the world space camera position to get the world space position from the depth texture
                float3 worldPos = viewPlane * sceneZ + _WorldSpaceCameraPos;
                // BUGFIX: worldPos is already in world space at this point. The
                // original's extra mul(unity_CameraToWorld, float4(worldPos, 1.0))
                // applied the camera transform a second time and corrupted the
                // result, so it has been removed.

                half4 col = 0;

                // draw grid where it's not the sky
                if (depthIsNotSky(depth))
                    col.rgb = saturate(2.0 - abs(frac(worldPos) * 2.0 - 1.0) * 100.0);

                return col;
            }
            ENDCG
        }
    }
}

If you need to support orthographic cameras, you need to do it a little differently. You might want to look at Unity’s own Internal-ScreenSpaceShadows.shader to see how they do it:

So this is definitely on the right track. I have access to the camera’s world space position and im able to check if the camera is above or below water to draw water fog or not draw my water fog. But what i wonder is if there is any way that i can have the effect of showing both on screen at the same time.

Like in this image, where the water fog is shown underneath the water line and there is no fog above it — just air. I assume the way to do this is exactly what the script does now, except instead of checking the y value of the camera in world space, I'd check each pixel's y value. I know that this could be incredibly intensive and impractical, but I've always wondered whether it can be done.

But either way, thank you so much for your reply — I can still get an incredible effect with this. What I think I'm going to do is create a script that shoots a vertical ray from the camera, and when it hits the water that frame, it checks the y value of the waves at that spot. Then I can pass that y value into my shader and compare it against the y value of the camera's world position: if the camera is below it I apply the water fog, and if it's above I do some kind of atmospheric fog instead. Thanks!

New Script:

Shader "Custom/ScreenFog"
{
    Properties
    {
        _MainTex("Base (RGB)", 2D) = "white" {}
        _DepthLevel("Depth Level", Range(1, 3)) = 2
        _FogColor("FogColor", Color) = (1, 0, 0, 1)
        _Cutoff("Cutoff", Float) = 0.5
        _Falloff("Falloff", Float) = 0.5
        //fake bool, 1 true, 0 false;
        _isUnderwater("is Underwater", Float) = 1
    }
    SubShader
    {
        Pass
        {

            CGPROGRAM

            #pragma target 3.0
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            uniform sampler2D_float _CameraDepthTexture;
            uniform fixed _DepthLevel;
            uniform half4 _MainTex_TexelSize;

            struct uinput
            {
                float4 vertex : POSITION;
                // Blit meshes supply their UVs on TEXCOORD0; the original's
                // TEXCOORD3 would read an unbound (garbage) attribute.
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 pos  : SV_POSITION;
                float2 uv   : TEXCOORD0;
                // Screen-space position for sampling the depth texture.
                float4 projPos : TEXCOORD1;
                // Camera-relative vertex world position; interpolated, it is a
                // per-pixel view ray from the camera through the scene.
                float3 camRelativeWorldPos : TEXCOORD2;
            };

            sampler2D _MainTex;
            float _Cutoff;
            float _Falloff;
            fixed4 _FogColor;
            float _isUnderwater;
            float4 _MainTex_ST;

            v2f vert(uinput v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                o.projPos = ComputeScreenPos(o.pos);
                o.camRelativeWorldPos = mul(unity_ObjectToWorld, float4(v.vertex.xyz, 1.0)).xyz - _WorldSpaceCameraPos;
                return o;
            }

            fixed4 frag(v2f o) : SV_TARGET
            {
                float2 screenUV = o.projPos.xy / o.projPos.w;

                // Sample the raw depth once; it feeds two separate computations.
                float rawDepth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, screenUV));

                // 1) Fog factor: 0..1 linear depth shaped by a power curve and a
                //    cutoff/falloff smoothstep window.
                float fog = pow(Linear01Depth(rawDepth), _DepthLevel * (_DepthLevel * _DepthLevel / 2));
                fog = smoothstep(_Cutoff - _Falloff, _Cutoff, fog);

                // 2) World-position reconstruction needs the *linear eye* depth.
                //    The original fed the fogged/smoothstepped value in here,
                //    which is not a distance at all.
                float sceneZ = LinearEyeDepth(rawDepth);

                // Scale the ray to unit length along the camera forward axis,
                // extend it by the scene depth, and re-add the camera position.
                float3 viewPlane = o.camRelativeWorldPos.xyz / dot(o.camRelativeWorldPos.xyz, unity_WorldToCamera._m20_m21_m22);
                float3 worldPos = viewPlane * sceneZ + _WorldSpaceCameraPos;
                // NOTE: worldPos is already in world space here; the original's
                // extra mul(unity_CameraToWorld, ...) double-transformed it and
                // has been removed.

                // Single fog blend; the duplicated per-branch copies are merged.
                fixed4 col = lerp(tex2D(_MainTex, o.uv), _FogColor, fog);

                // Water plane assumed at world y == 0. Pixels below the line are
                // color-inverted as a visual marker, matching the original intent.
                return (worldPos.y > 0) ? col : 1 - col;
            }
            ENDCG
        }
    }
}

Oh yeah! Not sure if its worth mentioning at all but the crest ocean asset in the unity store pulls it off flawlessly. So it is possible, and honestly they might even do it a completely different way that im imagining they do.

This is just a screenshot from their asset page

Ah, you’re trying to handle the case of the water line crossing the camera.

Yeah, that’s not done “in shader”, not exactly. There are a couple of ways to accomplish this, but every system I’ve seen either renders a “curtain” of geometry at the camera’s near plane that matches or hides the water geometry surface intersecting with the camera plane, or renders a full screen mask of some kind. Then uses that to render the underwater area differently than above water. But basically it requires some additional c# side support to accomplish.

For Crest you can actually look at how they’re doing it since the non-URP/HDRP version is on github.

1 Like

Thanks for letting me know, i had no idea that this was done outside of the shader.

So I took a bit of time to look through the Crest documentation and scripts to see how they do it. For BIRP and URP they use an object called the curtain and a meniscus object, both parented to the main camera. I think the curtain is an object that draws fog behind it using its material, and the script builds the curtain geometry from the bottom of the screen upwards. Where I still get lost is how the shader knows where the water line is. I have some shader and script knowledge, but this is far beyond my understanding of C#. It's really interesting to learn about, and I'm willing to spend a couple of months looking into how they do it. I have a separate Unity project with their working ocean, so I'll definitely delve into that some more.

I've been working at it some more, and using a geometry curtain that renders underwater fog is just not working out for me — there are also zero references online for doing it. The Crest documentation is for BIRP, not URP, so it doesn't translate well. I was wondering: do you think it's possible to do the water effect with renderer features? Like maybe masking the water and everything above it somehow, and then using that mask in an image-effect render feature that renders the fog?

It should absolutely be possible as a render feature … if you can render a mask which is a whole extra can of worms that usually involves something like creating a curtain mesh still. * shrug *

And yeah, URP stuff is a huge pain. Unity has put out official tutorials on how to do stuff for the URP that they then made obsolete in updates to the URP weeks, or sometimes only days later. There’s a reason why even “free” assets are now paid assets on the store when they’ve made URP and HDRP compatible versions. It’s a full time job just keeping them working, assuming Unity doesn’t break something else so bad they stop working at all.

Hm, yeah — pretty interesting stuff. I at least managed to get a mask that renders black-and-white depth onto just my water plane, with black as the background. I did all this with render features and custom blits. Now the big issue is how I can get back my normally rendered scene — with colors and GameObjects — while keeping this mask info, and then use the mask to render (or not render) fog on the black or white pixels as a sort of image effect.

Note: all my dreams could come true with a second camera rendering the mask to a render texture — except I've been told to avoid this because of its heavy performance impact. If there were a way I could just send the rendered data to a mask and then clear all the effects on screen... It feels so simple from here, but I'm not sure what to do.