Apologies for digging up an old topic. I’ve run into a similar issue: my depth texture looks fine in the Editor, but once built it appears offset along the y axis on some devices. After some debugging I found that when I remove the depth texture from the effect, everything renders properly on all devices.
My camera captures depth with a replacement shader; the script below creates the camera's target render texture and assigns it to a global texture property:
using UnityEngine;

[ExecuteInEditMode]
public class DepthPostProcessing : MonoBehaviour
{
    public Shader cameraDepthPass;
    [SerializeField] private Material postProcessMaterial;
    [SerializeField] private int downResFactor = 1;

    private const string _globalTextureName = "_DepthRenderTex";
    private Camera cam;

    private void OnEnable()
    {
        // Render the whole scene with the depth pass shader.
        if (cameraDepthPass != null)
        {
            GetComponent<Camera>().SetReplacementShader(cameraDepthPass, "");
        }
        GenerateRT();
    }

    private void OnDisable()
    {
        GetComponent<Camera>().ResetReplacementShader();
    }

    private void GenerateRT()
    {
        cam = GetComponent<Camera>();

        // Dispose of any previous target before creating a new one.
        if (cam.targetTexture != null)
        {
            RenderTexture temp = cam.targetTexture;
            cam.targetTexture = null;
            DestroyImmediate(temp, true);
        }

        // Optionally downscale the target by a power of two.
        cam.targetTexture = new RenderTexture(cam.pixelWidth >> downResFactor, cam.pixelHeight >> downResFactor, 16);
        cam.targetTexture.filterMode = FilterMode.Bilinear;

        // Expose the camera's output to every shader in the scene.
        Shader.SetGlobalTexture(_globalTextureName, cam.targetTexture);
    }
}
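Side note: I realise the render texture is never released when the component is disabled. I doubt it's related to the offset, but something like this in OnDisable would presumably cover it, mirroring the teardown at the top of GenerateRT (just a sketch of what I'd change):

private void OnDisable()
{
    GetComponent<Camera>().ResetReplacementShader();

    // Presumably the target texture should be torn down here as well,
    // the same way GenerateRT() disposes of a previous target.
    if (cam != null && cam.targetTexture != null)
    {
        RenderTexture temp = cam.targetTexture;
        cam.targetTexture = null;
        DestroyImmediate(temp, true);
    }
}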
This is the replacement shader:
Shader "CameraDepthPass"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags
{
"RenderType"="Opaque"
}
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
};
struct v2f
{
float4 vertex : SV_POSITION;
float depth : DEPTH;
};
sampler2D _MainTex;
float4 _MainTex_ST;
v2f vert (appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.depth = -mul(UNITY_MATRIX_MV, v.vertex).z * _ProjectionParams.w;
return o;
}
fixed4 frag (v2f i) : SV_Target
{
float invert = 1 - i.depth;
return fixed4(invert.rrr,1);
}
ENDCG
}
}
}
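For reference, my understanding is that the depth line in the vertex shader is equivalent to using Unity's built-in helper from UnityCG.cginc (a sketch of the same math; COMPUTE_EYEDEPTH expands to -UnityObjectToViewPos(v.vertex).z and _ProjectionParams.w is 1/farClipPlane):

// Equivalent depth computation using the UnityCG helper macro.
// Produces the same 0..1 linear depth as the manual MV multiply above.
COMPUTE_EYEDEPTH(o.depth);
o.depth *= _ProjectionParams.w;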
Then I access it in another shader, which samples the texture to apply a distortion to the outer edges of the meshes captured in the render texture:
Shader "Depth Edge Distortion Shader"
{
Properties
{
[NoScaleOffset] _MainTex ("Mask Texture", 2D) = "white" {}
_Sensitivity("Sensitivity", Vector) = (1,1,1,1)
_EdgeTex("Edge Texture", 2D) = "white" {}
_EdgeTilingOffset("Edge Tiling Offset", Vector) = (1,1,0,0)
_FoldAmount("Fold Amount", Range(0,1)) = 1
_DynamicDistanceAmount("Dynamic Distance Amount", Range(0,1)) = 0
}
SubShader
{
Tags
{
"RenderType"="Transparent"
"Queue"="Transparent"
}
Cull Off
Lighting Off
ZWrite Off
ZTest Always
Blend SrcAlpha OneMinusSrcAlpha
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma shader_feature DEPTH
#include "UnityCG.cginc"
#include "HLSLSupport.cginc"
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv[5] : TEXCOORD0;
float2 screenPos : TEXCOORD5;
float2 edgeScreenUV : TEXCOORD6;
float3 viewVector : TEXCORRD7;
float4 vertex : SV_POSITION;
};
uniform sampler2D _DepthRenderTex;
float4 _DepthRenderTex_TexelSize;
uniform sampler2D _ColorRenderTex;
sampler2D _EdgeTex;
half4 _Sensitivity;
float4 _EdgeTilingOffset;
float _FoldAmount;
float _DynamicDistanceAmount;
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
half2 uv = v.uv;
o.uv[0] = uv;
#if UNITY_UV_STARTS_AT_TOP
if (_DepthRenderTex_TexelSize.y < 0)
uv.y = 1 - uv.y;
#endif
// used for detecting the amount of the edge we want to distort
float dynamicDistance = lerp(0.5, 1.5, _DynamicDistanceAmount);
o.uv[1] = uv + _DepthRenderTex_TexelSize.xy * half2(1, 1) * dynamicDistance;
o.uv[2] = uv + _DepthRenderTex_TexelSize.xy * half2(-1, -1) * dynamicDistance;
o.uv[3] = uv + _DepthRenderTex_TexelSize.xy * half2(-1, 1) * dynamicDistance;
o.uv[4] = uv + _DepthRenderTex_TexelSize.xy * half2(1, -1) * dynamicDistance;
o.screenPos = ((o.vertex.xy / o.vertex.w) + 1) * 0.5;
// tiling for the edge ditortion texture
o.edgeScreenUV = o.screenPos * _EdgeTilingOffset.xy + float2(_EdgeTilingOffset.zw * 1.3);
return o;
}
half CheckSame(half4 center, half4 sample, float2 screenUVs, sampler2D edgeTex) {
half edgeTexture = tex2D(edgeTex, screenUVs).r;
half2 centerNormal = center.xy;
float centerDepth = DecodeFloatRG(center.zw);
half2 sampleNormal = sample.xy;
float sampleDepth = DecodeFloatRG(sample.zw);
// difference in normals
// do not bother decoding normals - there's no need here
half2 diffNormal = abs(centerNormal - sampleNormal) * _Sensitivity.x;
int isSameNormal = (diffNormal.x + diffNormal.y) < 0.1;
// difference in depth
float diffDepth = abs(centerDepth - sampleDepth) * _Sensitivity.y;
// scale the required threshold by the distance
int isSameDepth = diffDepth < 0.1 * centerDepth;
// return:
// 1 - if normals and depth are similar enough
// 0 - otherwise
float result = isSameNormal * isSameDepth ? 1.0 : 0.0;
return smoothstep(result, 1, edgeTexture);
}
half4 frag(v2f i) : SV_Target
{
half4 colorRT = tex2D(_ColorRenderTex, i.screenPos);
half4 sample1 = tex2D(_DepthRenderTex, i.uv[1]);
sample1 = lerp(sample1, ceil(sample1), _FoldAmount);
half4 sample2 = tex2D(_DepthRenderTex, i.uv[2]);
sample2 = lerp(sample2, ceil(sample2), _FoldAmount);
half4 sample3 = tex2D(_DepthRenderTex, i.uv[3]);
sample3 = lerp(sample3, ceil(sample3), _FoldAmount);
half4 sample4 = tex2D(_DepthRenderTex, i.uv[4]);
sample4 = lerp(sample4, ceil(sample4), _FoldAmount);
//edge detection
half depthEdge = 1.0;
depthEdge *= CheckSame(sample1, sample2, i.edgeScreenUV, _EdgeTex);
depthEdge *= CheckSame(sample3, sample4, i.edgeScreenUV, _EdgeTex);
colorRT.a -= depthEdge;
return float4(colorRT);
}
ENDCG
}
}
}
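One thing I keep wondering about, given the offset only shows up on some devices: the vertex shader flips the depth UVs per platform, but screenPos never gets the same treatment. I genuinely don't know whether this is the cause, but a flip there would look something like this (sketch only, unverified):

// Mirror the per-platform flip the depth UVs already receive.
#if UNITY_UV_STARTS_AT_TOP
if (_DepthRenderTex_TexelSize.y < 0)
    o.screenPos.y = 1 - o.screenPos.y;
#endif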
After reading your suggestion, I figure my problem has something to do with not blitting to the screen and instead capturing the depth of objects in the scene incorrectly. Admittedly, I don’t fully understand how I would use ObjSpaceViewDir in this setup, or whether that is even the appropriate solution here. Could someone help me understand where I went wrong? Any advice would be greatly appreciated.