Render scene depth to a texture

Learning shaders here. I want to produce a greyscale image that represents depth in my scene from my perspective camera. Black being closer, white being farther away from the camera.

I’ve looked at tens of threads about the subject, I just got more and more confused…

Like for ex there’s the concept of writing a shader that outputs depth and setting it as a replacement shader for the camera, there’s the concept of outputting depth into a render texture, and there’s the concept of setting the camera’s depthTextureMode to Depth and then accessing _CameraDepthTexture from the shader code, too many ways I couldn’t get any of them to work :expressionless:

Like for ex, this shader similar to the one in the depth texture doc:

Shader "Hidden/RenderDepth" {
    SubShader {
        Tags { "RenderType"="Opaque" }
        Pass {
            Fog { Mode Off }
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            // Replacement shader that draws every opaque object as greyscale depth:
            // black = at the camera, white = at (or beyond) the far clip plane.
            //
            // BUGFIX: the original used UNITY_TRANSFER_DEPTH / UNITY_OUTPUT_DEPTH.
            // Those macros are legacy no-ops on modern platforms (UNITY_OUTPUT_DEPTH
            // expands to "return 0"), which is exactly why the output was pure black.
            // Compute and output the depth explicitly instead.
            struct v2f {
                float4 pos : SV_POSITION;
                float depth : TEXCOORD0; // positive eye-space depth of the vertex
            };

            v2f vert (appdata_base v) {
                v2f o;
                o.pos = mul (UNITY_MATRIX_MVP, v.vertex);
                // COMPUTE_EYEDEPTH writes -viewSpacePos.z of v.vertex (positive in front of the camera).
                COMPUTE_EYEDEPTH(o.depth);
                return o;
            }

            half4 frag(v2f i) : COLOR {
                // Normalize eye depth against the far plane; _ProjectionParams.w = 1 / farPlane.
                float d = saturate(i.depth * _ProjectionParams.w);
                return half4(d, d, d, 1);
            }
            ENDCG
        }
    }
}

A lot of people claimed to have this work for them. I just don’t know wtf to do with it lol - I tried setting it as a replacement shader for the camera, the output was purely black…

I also tried the replacement shader example project, there was a RenderDepth shader (similar to the one above) and a js RenderDepth script to attach to the camera and use the shader as replacement, I attached the script to my camera but again, black output…

Any help would be extremely appreciated!

Thanks!

K, so I’m getting somewhere. I got help from this book. I was actually able to render the scene depth, but now I’m stuck trying to save that depth into a png image. Here’s what I have so far:

1- A RenderDepth.shader: the actual shader that will use the depth texture of the camera (given by _CameraDepthTexture)
2- A RenderDepth.cs: attach this to your main camera

Now just setup a scene, with a plane, spheres and stuff. Attach the script to the camera, and you should see depth - adjust the “depthLevel” value to adjust the depth power/brightness or whatever the term for it is.

using UnityEngine;

/// <summary>
/// Image effect that visualizes the camera's depth buffer as greyscale
/// (black = near, white = far). Attach to a camera; requires the
/// "Custom/RenderDepth" shader to exist somewhere in the project.
/// </summary>
[ExecuteInEditMode]
public class RenderDepth : MonoBehaviour
{
	// Exponent applied to the linear 0-1 depth in the shader; raising it
	// darkens the image / boosts contrast near the camera.
	[Range(0f, 3f)]
	public float depthLevel = 0.5f;

	private Shader _shader;
	// Lazily locate the post-effect shader by name.
	private Shader shader
	{
		get { return _shader != null ? _shader : (_shader = Shader.Find("Custom/RenderDepth")); }
	}

	private Material _material;
	// Lazily build the blit material; hidden so it is never saved into a scene or asset.
	private Material material
	{
		get
		{
			if (_material == null)
			{
				_material = new Material(shader);
				_material.hideFlags = HideFlags.HideAndDontSave;
			}
			return _material;
		}
	}

	private void Start ()
	{
		if (!SystemInfo.supportsImageEffects)
		{
			print("System doesn't support image effects");
			enabled = false;
			return;
		}
		// BUGFIX: the original checked (shader == null || !shader.isSupported) and then
		// printed shader.name, which throws a NullReferenceException whenever
		// Shader.Find returned null. Report the two failure modes separately.
		if (shader == null)
		{
			enabled = false;
			print("Shader Custom/RenderDepth was not found");
			return;
		}
		if (!shader.isSupported)
		{
			enabled = false;
			print("Shader " + shader.name + " is not supported");
			return;
		}

		// turn on depth rendering for the camera so that the shader can access it via _CameraDepthTexture
		camera.depthTextureMode = DepthTextureMode.Depth;
	}

	private void OnDisable()
	{
		// Destroy the runtime-created material so it doesn't leak in the editor.
		if (_material != null)
			DestroyImmediate(_material);
	}

	private void OnRenderImage(RenderTexture src, RenderTexture dest)
	{
		if (shader != null)
		{
			material.SetFloat("_DepthLevel", depthLevel);
			Graphics.Blit(src, dest, material);
		}
		else
		{
			// Shader missing: pass the image through untouched.
			Graphics.Blit(src, dest);
		}
	}
}

Here’s the shader code (you don’t need to do anything with it - just let rest in your project):

Shader "Custom/RenderDepth"
{
	Properties
	{
		_MainTex ("Base (RGB)", 2D) = "white" {}
		_DepthLevel ("Depth Level", Range(1, 3)) = 1
	}
	SubShader
	{
		Pass
		{
			CGPROGRAM

			#pragma vertex vert
			#pragma fragment frag
			#include "UnityCG.cginc"

			// Post-effect pass: samples the camera depth texture and outputs
			// linear 0-1 depth raised to _DepthLevel as a greyscale value.
			uniform sampler2D _MainTex;
			uniform sampler2D _CameraDepthTexture; // provided by Unity when depthTextureMode = Depth
			uniform fixed _DepthLevel;
			uniform half4 _MainTex_TexelSize;

			struct appdata
			{
				float4 vertex : POSITION;
				half2 texcoord : TEXCOORD0;
			};

			struct v2f
			{
				float4 clipPos : SV_POSITION;
				half2 screenUV : TEXCOORD0;
			};


			v2f vert(appdata v)
			{
				v2f o;
				o.clipPos = mul(UNITY_MATRIX_MVP, v.vertex);
				o.screenUV = MultiplyUV(UNITY_MATRIX_TEXTURE0, v.texcoord);

				// On Direct3D-style platforms the UV origin is at the top; when Unity
				// renders into a flipped render texture the texel size goes negative,
				// so mirror V to keep the image upright.
				// See: http://docs.unity3d.com/Manual/SL-PlatformDifferences.html
				#if UNITY_UV_STARTS_AT_TOP
				if (_MainTex_TexelSize.y < 0)
						o.screenUV.y = 1 - o.screenUV.y;
				#endif

				return o;
			}

			fixed4 frag(v2f i) : COLOR
			{
				// Raw device depth -> linear 0-1 depth, then shape the response curve.
				float rawDepth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, i.screenUV));
				float shaped = pow(Linear01Depth(rawDepth), _DepthLevel);
				return shaped; // scalar broadcast into all four channels
			}

			ENDCG
		}
	} 
}

Now, to save the depth to a PNG, I read that I should use Texture2D.ReadPixels and EncodeToPNG - here's what I have so far. It's not working; it gives me a pure white image.

// Renders the camera (with the depth effect active) into an offscreen
// RenderTexture and saves the result to Assets/depth_img.png.
void RenderToTexture()
{
	var w = 512;
	var h = w;

	// BUGFIX: the render texture needs a depth buffer (24 bits). With 0 depth
	// bits the camera renders without z-testing, which is why the captured
	// image came out blank/white.
	var tempRt = new RenderTexture(w, h, 24);

	// Save current state so we can restore it afterwards.
	var prevActive = RenderTexture.active;
	var prevTarget = camera.targetTexture;

	camera.targetTexture = tempRt;
	camera.Render();

	// ReadPixels copies from whatever RenderTexture is currently active.
	RenderTexture.active = tempRt;

	var tex2d = new Texture2D(w, h, TextureFormat.ARGB32, false);
	tex2d.ReadPixels(new Rect(0, 0, w, h), 0, 0);
	tex2d.Apply();

	var imageData = tex2d.EncodeToPNG();
	File.WriteAllBytes(Path.Combine("Assets", "depth_img.png"), imageData);

	// Restore state and release the temporaries.
	RenderTexture.active = prevActive;
	camera.targetTexture = prevTarget;
	Destroy(tempRt);
	Destroy(tex2d);
}

Not sure what’s up, still struggling with it.


EDIT: Actually, all I needed to do to save a frame from the camera in my case was to use Application.CaptureScreenshot! It's that simple. In my case I had a bunch of static angles, so I just set my camera to each of those angles, turn on depth rendering and take a screenshot. Bam! No need for Blender to generate depth maps.

@vexe, how does the “depth level” translate to Unity distance units? For example, does depth level = 0.3f mean that the depth map will show distances up to 40 Unity distance units?

Btw… Thanks for the explanation and code! This was really helpful.

Did you ever figure out how to solve the "pure white image" problem with Texture2D.ReadPixels? In my project I need to use Camera.RenderToCubemap to first render the depth view into a cubemap RenderTexture, and then I need to use Texture2D.ReadPixels to dump the image to disk, so I cannot use Application.CaptureScreenshot as the workaround. Any help is highly appreciated!

@unity_xD5lPQa0K1eRiw Maybe the answer to the "pure black image" problem is that UNITY_TRANSFER_DEPTH has been deprecated:

// Legacy; used to do something on platforms that had to emulate depth textures manually. Now all platforms have native depth textures.
#define UNITY_TRANSFER_DEPTH(oo)
// Legacy; used to do something on platforms that had to emulate depth textures manually. Now all platforms have native depth textures.
#define UNITY_OUTPUT_DEPTH(i) return 0