Yes, answer by @bgolus from here
Your shader needs a shadowcaster pass. The easiest way to do that, as long as you’re not modifying the vertex positions or adding alpha testing, is to add a Fallback shader. For most things you want this just before the last }
in your shader:
FallBack "Legacy Shaders/VertexLit"
So you need to add it to the shader where you are trying to use _CameraDepthTexture
.
Also, make sure your camera is setup to use this mode:
_myCamera.depthTextureMode = DepthTextureMode.Depth;
Nowadays, unity will always render depth into an internal buffer, which you can read either via _CameraDepthTexture
or _LastCameraDepthTexture
.
To re-prepare them manually, use this shader before rendering any stuff that would need depth:
shader ‘Render-depth’
// Use this shader to populate depth map of your camera.
//
// _camera.enabled=false; //keep always disabled, will be RenderWithShader() manually.
// NOTICE: if all cameras are always disabled, unity editor-scene-camera will affect your depthmaps!!
// To prevent it, ensure you have at least one main camera that's active. Anyway, continue:
// _camera.depthTextureMode = DepthTextureMode.Depth;
// _camera.targetTexture = _myRenderTex_with32depthBits; // 'new RenderTexture(512,512,32);'
// _camera.RenderWithShader(thisShader,"");
Shader "Unlit/Depth_SimpleShadowcaster"
{
SubShader
{
// A minimal ShadowCaster pass: writes only depth (ColorMask 0), no color output.
// With "LightMode" = "ShadowCaster", Unity's depth-texture replacement rendering
// picks this pass up, so objects rendered with this shader populate the depth map.
Pass {
Name "ShadowCaster"
Tags { "RenderType"="Opaque" "LightMode" = "ShadowCaster" }
Cull Off
ZWrite On
ZTest LEqual
ColorMask 0
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct v2f {
float4 pos : SV_POSITION;
// Interpolated depth; filled by UNITY_TRANSFER_DEPTH. On platforms where the
// native Z-buffer is used directly, the macro compiles to nothing and this
// stays unused — keep it so the macros are well-formed on every target.
float2 depth : TEXCOORD0;
};
v2f vert (appdata_base v) {
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
UNITY_TRANSFER_DEPTH(o.depth);
return o;
}
float frag(v2f i) : SV_Target {
//This macro doesn't return anything. Depth will be computed automatically by the native.
// (On most targets UNITY_OUTPUT_DEPTH expands to "return 0" and the hardware
// Z-write does the real work; on GLSL-style targets it returns z/w explicitly.)
// https://discussions.unity.com/t/743717/2
UNITY_OUTPUT_DEPTH(i.depth);
}
ENDCG
}
}//end SubShader
}
after running this replacement shader, you will have _LastCameraDepthTexture
accessible from shaders (that are used later within this same frame), given nothing else is overwriting it. …use FrameDebugger to check the order. And check script compilation order.
You can keep using it (in Graphics.Blit etc), until you overwrite it with _myOtherCameraWithDepthMode.Render()
_CameraDepthTexture
will be empty during Graphics.Blit(myTexA, myTexB, myMaterial);
. Because that texture is only available while rendering through a camera. For using it during Blit(), your blitting shader instead needs _LastCameraDepthTexture
Although not as performant, you could always dump _LastCameraDepthTexture
into a custom black-and-white texture. Just make sure it’s capacious enough, with R32_SFloat
format:
shader ‘dump depth to preview-texture’
// Dumps _LastCameraDepthTexture into a single-channel texture, remapped so that
// near = white (1) and far = black (0) across [_MinRange, _MaxRange] eye-space units.
// Intended for use with Graphics.Blit into an R32_SFloat render target.
Shader "Custom/ZDepth_to_R_Texture" {
    Properties {
        _MinRange("Near Plane", Float) = 0
        _MaxRange("Far Plane", Float) = 1000
    }
    SubShader {
        Tags { "RenderType"="Opaque" }
        LOD 100
        Pass {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"
            // Global set by Unity after the most recent depth-mode camera render.
            sampler2D _LastCameraDepthTexture;
            float _MinRange;
            float _MaxRange;
            struct appdata {
                float4 vertex : POSITION;
            };
            struct v2f {
                float4 screenPos : TEXCOORD0; // homogeneous screen coords; divide by w in frag
                float4 pos : SV_POSITION;
            };
            v2f vert (appdata v) {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.screenPos = ComputeScreenPos(o.pos);
                return o;
            }
            // Maps t from [a,b] to [0,1] (unclamped).
            float inverseLerp(float a, float b, float t){
                return (t-a)/(b-a);
            }
            float frag(v2f i) : SV_Target {
                // FIX: tex2D expects float2 UVs — do the perspective divide explicitly
                // on .xy instead of passing the whole float4 (which relied on implicit
                // truncation and needlessly divided z and w too).
                float depth = tex2D( _LastCameraDepthTexture, i.screenPos.xy/i.screenPos.w ).r;
                depth = LinearEyeDepth(depth);           // raw buffer value -> eye-space distance
                depth = saturate(inverseLerp(_MinRange, _MaxRange, depth));
                depth = 1-depth;                          // closer = whiter
                return depth;
            }
            ENDCG
        }
    }
}
This is useful if you want to visualize depth at any moment in your frame, to ensure it’s as you expect:
debug depth to Inspector-panel
//any script can invoke DepthDebugMGR.instance.showLastDepth_DEBUG()
//to render the most recently observed depth.
//You can then look at this texture from unity inspector panel.
#region debug the depth
#if UNITY_EDITOR
// Material built from the "Custom/ZDepth_to_R_Texture" shader (assign in editor / on init).
Material _showLastDepthMat;
// Inspector-visible copy of the most recent depth map. Recreated when the source size changes.
public RenderTexture _lastDepth;
float _latestDepthTime = -999;
/// <summary>
/// Copies the current _LastCameraDepthTexture global into _lastDepth so it can be
/// inspected in the editor. Call at most once per frame, after the depth has been rendered.
/// </summary>
public void showLastDepth_DEBUG(){
    Debug.Assert(_latestDepthTime < Time.unscaledTime,
                 $"you should only invoke {nameof(showLastDepth_DEBUG)} once per frame");
    _latestDepthTime = Time.unscaledTime;
    Texture tex = Shader.GetGlobalTexture("_LastCameraDepthTexture");
    // Fall back to 512x512 when no depth texture has been published yet.
    int width = tex?tex.width:512;
    int height = tex?tex.height:512;
    bool create = _lastDepth == null;
    // FIX: also compare height, so a height-only resize of the depth texture
    // triggers a recreate instead of blitting into a stale-sized target.
    create |= _lastDepth!=null && tex!=null
              && (_lastDepth.width != tex.width || _lastDepth.height != tex.height);
    if (create){
        if(_lastDepth!=null){ DestroyImmediate(_lastDepth); }
        // Depth buffer bits = 0: this RT only stores the grayscale result in R32.
        _lastDepth = new RenderTexture(width, height, 0, GraphicsFormat.R32_SFloat);
    }
    //use the material to copy the 'LastCameraDepthTexture' into this _lastDepth RT.
    Graphics.Blit(null, _lastDepth, _showLastDepthMat);
}
#endif
#endregion
If you prepared o.screenPos
(see toy example below), remember that in fragment function you’ll need to divide its xy
by w
when you try to sample the depth map. Or use tex2Dproj()
which will do it for you.
More on tex2Dproj
here
float depth = tex2D(_CameraDepthTexture, i.screenPos.xy/i.screenPos.w).r;// Sample the depth texture via xy/w. Or use tex2Dproj(_CameraDepthTexture, i.screenPos.xyww).r; which does the divide for you.
Depending on your depth texture format and what values it contains, you might need to further process the obtained float depth
, by stuff like LinearEyeDepth
, Linear01Depth
…Or not use anything at all, if it contains values beyond [0,1] range. See docs here
Here is a toy example where I read the depth and squash it into visible [0 to 1] range:
toy example
// Vertex-to-fragment payload: clip-space position plus homogeneous screen
// coordinates for sampling the camera depth texture.
struct v2f{
    float4 pos: SV_POSITION;
    float4 screenPos : TEXCOORD1;
};
v2f vert (appdata v) {
    v2f o;
    o.pos = UnityObjectToClipPos(v.vertex);
    o.screenPos = ComputeScreenPos(o.pos); // perspective divide deferred to frag
    return o;
}
fixed4 frag(v2f i) : SV_Target{
    // Camera clip planes, provided by Unity in _ProjectionParams (y = near, z = far).
    const float NEARPLANE = _ProjectionParams.y;
    const float FARPLANE = _ProjectionParams.z; // only used to squash depth into a heightmap look
    // Divide .xy by w before sampling (or use tex2Dproj(_CameraDepthTexture,
    // i.screenPos.xyww).r which performs the divide for you), then linearize.
    float2 uv = i.screenPos.xy / i.screenPos.w;
    float depth = LinearEyeDepth(tex2D(_CameraDepthTexture, uv).r);
    // Remap eye depth to [0,1] across the clip range, then invert: closer = whiter.
    float heightmap = (depth - NEARPLANE)/(FARPLANE - NEARPLANE);
    heightmap = 1-heightmap;
    return fixed4(heightmap.rrr,1); // scalar swizzle broadcasts to all three channels
}
Another important thing is:
If you intend to calculate the depth of current fragment (without any depthmap),
you need to divide its z coord by w:
float thisFragDepth = LinearEyeDepth(i.screenPos.z/i.screenPos.w);
Note that in this particular case, i.screenPos.z is distance from camera’s position, not from its near plane.
Remember that DirectX has differences to OpenGL in how it handles Projection matrix, and what will look “white vs dark” in a depth texture (nearer vs further, or other way around).
So if your shader seems to ignore ZTest LEqual
or seems to have weird triangle sort order (or maybe screen is flipped upside down), chances are you need to check those platform differences: Unity - Manual: Writing shaders for different graphics APIs
And if you are doing something with your camera projection matrices (instead of relying on unity’s shader macros / functions), then check GL.GetGPUProjectionMatrix as well.