How do I use DecodeDepthNormal()?

I’m trying to access the normals stored in _CameraDepthNormalsTexture using DecodeDepthNormal(). However, for some reason the normals always come back as solid black, and I’m not sure what I’m missing.

            ...
            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
                float4 scrPos : TEXCOORD0;
            };

            // _CameraDepthNormalsTexture packs the view-space normal (xy) and depth (zw) into one RGBA texture
            UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthNormalsTexture);
            float4 _Color;

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.scrPos = ComputeScreenPos(o.pos);
                o.scrPos.y = 1 - o.scrPos.y;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                float depth;    // decoded as linear 0-1 depth
                float3 normals; // decoded as a view-space normal
                DecodeDepthNormal(tex2D(_CameraDepthNormalsTexture, i.scrPos.xy), depth, normals);

                return float4(normals.xyz, 1);
            }

I’ve also made sure that the camera’s DepthTextureMode is set to DepthNormals.
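
For reference, the camera is set up from a script along these lines (a minimal sketch; the component name is just an example):

    using UnityEngine;

    // Example-only helper: requests the depth+normals texture for the attached camera.
    [RequireComponent(typeof(Camera))]
    public class EnableDepthNormals : MonoBehaviour
    {
        void OnEnable()
        {
            // Makes Unity generate _CameraDepthNormalsTexture for this camera.
            GetComponent<Camera>().depthTextureMode |= DepthTextureMode.DepthNormals;
        }
    }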

I found the issue. The custom shader I was using for most things on screen did not include the “RenderType” = “Opaque” tag before the first shader pass. Unity renders _CameraDepthNormalsTexture with a built-in replacement shader that matches objects by their RenderType tag, so anything drawn with a shader that doesn’t declare it never gets written into the texture and decodes as black.
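
In case it helps anyone else, the tag goes in the SubShader’s Tags block, above the first Pass. A minimal sketch (the shader name and pass contents are placeholders):

    Shader "Custom/ExampleOpaque"
    {
        SubShader
        {
            // The depth-normals replacement shader matches on RenderType, so this tag
            // is what gets the object written into _CameraDepthNormalsTexture.
            Tags { "RenderType" = "Opaque" }

            Pass
            {
                // ... existing pass(es) go here ...
            }
        }
    }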