I have been developing an edge detection (image effect) shader for the built-in render pipeline. It compares the scene normal and depth values from a camera texture at 5 points (the current UV coordinate, plus 1 pixel above, below, left and right) to determine where the edges of the 3D models in view are, and sets those "edge outline" pixels to black.
However, to solve a common issue with comparing depth values at shallow viewing angles (see attached image), I need to check whether any of the sampled pixels being compared lie on different 3D planes, so that only edges between two distinct flat surfaces of a model are picked up by the depth comparison.
The part I need help with:
I need help calculating the position of each pixel in world space using only the UV.xy and linear depth values, and also converting the scene normals from camera-relative (view-space) normals to world-space normals. That said, if you have a solution for the entire "is this pixel on a different plane" check, that would be super helpful.
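For reference, this is roughly the plane check I have in mind, assuming I can already get world-space positions and normals for the centre and neighbour samples (which is the part I am asking about below). The function name and the tolerance value are just placeholders:

//rough sketch of the plane check (untested) - returns true when a neighbour sample
//lies on (roughly) the same geometric plane as the centre sample
bool isOnSamePlane(float3 centerWorldPos, float3 centerWorldNormal, float3 neighbourWorldPos)
{
    //distance of the neighbour point from the plane defined by the centre point and its normal
    float distanceToPlane = abs(dot(normalize(centerWorldNormal), neighbourWorldPos - centerWorldPos));
    //tolerance depends on scene scale and would need tuning
    return distanceToPlane < 0.01;
}

The idea is that a neighbour whose depth differs from the centre but which still lies on the centre pixel's plane (the shallow-angle case) would be ignored by the depth comparison, while a neighbour on a different plane would still count as an edge.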
Notes:
Ideally, I need the solution to be something like this:
//calculates the position of the pixel in world-space
float3 getPixelWorldSpacePosition(float2 UVCoordinatesOfPixelOnScreen, float linearDepthValueOfPixel)
{
    float3 worldSpacePosition =
    . . .
    return worldSpacePosition;
}
and
//calculates world space normal from camera relative normal
float3 calculateWorldSpaceNormal(float3 cameraRelativeNormalValues)
{
    float3 worldSpaceNormal =
    . . .
    return worldSpaceNormal;
}
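Based on other depth-reconstruction threads I have read, below is my rough, untested attempt at filling those two functions in. It assumes a perspective camera, that the depth passed in is the plain linear 0-1 depth decoded by DecodeDepthNormal (before any remapping), and that unity_CameraInvProjection and unity_CameraToWorld still hold the rendering camera's values during the Graphics.Blit. I am not sure about the Z sign conventions or a possible platform Y flip, which is partly why I am asking:

//rough attempt (untested)
float3 getPixelWorldSpacePosition(float2 UVCoordinatesOfPixelOnScreen, float linearDepthValueOfPixel)
{
    //view-space position of this pixel's ray at the far clip plane:
    //uv mapped from [0,1] to [-1,1], w = 1, everything scaled by the far plane distance
    float3 farPlaneViewPos = mul(unity_CameraInvProjection,
        float4(UVCoordinatesOfPixelOnScreen * 2.0 - 1.0, 1.0, 1.0) * _ProjectionParams.z).xyz;
    //linear 0-1 depth scales the far-plane ray back onto the surface
    float3 viewSpacePosition = farPlaneViewPos * linearDepthValueOfPixel;
    //unity_CameraToWorld seems to follow the camera transform's +Z-forward convention,
    //while the view space above is -Z forward, hence the Z flip (the part I am least sure about)
    float3 worldSpacePosition = mul(unity_CameraToWorld, float4(viewSpacePosition.xy, -viewSpacePosition.z, 1.0)).xyz;
    return worldSpacePosition;
}

//rough attempt (untested) - rotates the view-space normal from DecodeDepthNormal into world space,
//with the same Z flip as above
float3 calculateWorldSpaceNormal(float3 cameraRelativeNormalValues)
{
    float3 worldSpaceNormal = normalize(mul((float3x3)unity_CameraToWorld,
        float3(cameraRelativeNormalValues.xy, -cameraRelativeNormalValues.z)));
    return worldSpaceNormal;
}

If someone can confirm whether this is the right approach (or point out the correct matrices and sign handling), that would already answer most of my question.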
My Shader Code:
Here is the code for my image effect shader (make sure to attach it to a material and use Graphics.Blit to apply it to a camera render target):
Shader "Custom/customOutlineShader"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}//this will be auto assigned to the camera render image.
_noiseTex ("noiseTexture", 2D) = "white" {}//use perlin texture
_NormalThreshold("_NormalThreshold", Range(0.0,1.0)) = 0.9
_NormalMultiplier("_NormalMultiplier", Range(0.0,100.0)) = 2
_DepthThreshold("_DepthThreshold", Range(0.0,1.0)) = 0.619
_DepthMultiplier("_DepthMultiplier", Range(0.0,1000.0)) = 184
_LineWidth("outline thickness", Range(0.0,3.0)) = 2
_intensity("outline opacity", Range(0.0,1.0)) = 1
}
SubShader
{
// No culling or depth
Cull Off ZWrite Off ZTest Always
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#define COLORS 32.0
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
};
v2f vert(appdata v)
{
v2f o;
o.vertex = UnityObjectToClipPos(v.vertex);
o.uv = v.uv;
return o;
}
sampler2D _MainTex, _noiseTex;
sampler2D _CameraDepthNormalsTexture;
float _intensity;
half _NormalThreshold, _DepthThreshold,_NormalMultiplier, _DepthMultiplier, _LineWidth;
float4 sampleTexture(sampler2D Tex, fixed2 uv){
float3 normalValues;
float depthValue;
float4 output = float4(0,0,0,0);//the RGB channels hold the normal.xyz and the alpha channel holds the depth
DecodeDepthNormal(tex2D(Tex, uv), depthValue, normalValues);//retrieve scene depth and scene normals from the depth-normals texture
float processedDepth = Linear01Depth(depthValue) * 4;//note: DecodeDepthNormal already returns linear 0-1 depth, so Linear01Depth remaps it again; the * 4 just pushes it into a nicer range for the thresholds below
output.a = processedDepth;//output.a contains the processed depth value from the camera texture
output.rgb = normalValues.rgb;//rgb holds the view-space (camera-relative) normal; a surface facing the camera has a normal of roughly (0,0,1)
return output;
}
float2 detectDepthNormalEdges(sampler2D renderTexture, fixed2 uv){
//calculate the size of a pixel and use it as a unit of length to space out the pixel samples.
float3 offset = float3((1.0 / _ScreenParams.x), (1.0 / _ScreenParams.y), 0.0);
//_LineWidth makes the spacing bigger to create wider outlines.
offset *= floor(_LineWidth * _ScreenParams.y*0.002-0.5)*0.5;
//sample neighbor pixels. we sample the depth normal texture 5 times, each with a different pixel offset
float4 pixelCenter = sampleTexture(renderTexture, uv);
float4 pixelLeft = sampleTexture(renderTexture, uv - offset.xz);
float4 pixelRight = sampleTexture(renderTexture, uv + offset.xz);
float4 pixelUp = sampleTexture(renderTexture, uv + offset.zy);
float4 pixelDown = sampleTexture(renderTexture, uv - offset.zy);
//compare changes in the sampled normal
float normalOutlineValue =
abs(1-dot(pixelLeft.rgb , pixelCenter.rgb))+
abs(1-dot(pixelRight.rgb, pixelCenter.rgb))+
abs(1-dot(pixelUp.rgb , pixelCenter.rgb))+
abs(1-dot(pixelDown.rgb , pixelCenter.rgb));
//threshold the value
normalOutlineValue = clamp(floor(normalOutlineValue * _NormalMultiplier + _NormalThreshold),0,1);
//compare changes in depth
float depthOutlineValue =
abs(pixelLeft.a - pixelCenter.a) +
abs(pixelRight.a - pixelCenter.a) +
abs(pixelUp.a - pixelCenter.a) +
abs(pixelDown.a - pixelCenter.a) ;
//threshold the value
depthOutlineValue = clamp(floor(depthOutlineValue * _DepthMultiplier + _DepthThreshold ),0,1);
// the depth result and the normal result are combined later on
float2 finalOutlineValue = float2 (depthOutlineValue , normalOutlineValue);
return finalOutlineValue;
}
float drawSceneOutline(v2f i, int randSeed)
{
float2 noiseUV = i.uv*0.5*randSeed;//change how the noise texture is sampled when a different seed is used (it only really works for seed values 1 and 2)
i.uv.xy += (tex2D(_noiseTex, noiseUV).rg - 0.5) * 0.01;//sample the Perlin noise texture and use it to distort the UVs for a cool effect
float2 depthNormalOutline = detectDepthNormalEdges(_CameraDepthNormalsTexture, i.uv);
float finalOutline = depthNormalOutline.x + depthNormalOutline.y;//combine both the depthoutline and the normaloutline
finalOutline = min(finalOutline,1)*_intensity;//apply the effect intensity
return finalOutline;
}
fixed4 frag(v2f i) : SV_Target
{
float4 outlineColor = (1-drawSceneOutline(i, 1));//draw one wobbly outline
outlineColor *= (1-drawSceneOutline(i, 2));//draw a second wobbly outline with a different seed for artistic effect.
float4 combined = outlineColor * tex2D(_MainTex, i.uv);// combine the outlines with the scene color to complete the effect
return combined;
}
ENDCG
}
}
}
Here is a screenshot showing the shader and the artifacts caused by the shallow angle depth issue I am trying to solve.
I would be very grateful if anyone could show or write me a solution, and I will answer any questions as soon as possible. Thanks in advance for your help.