The beauty of Unity is that almost anything is possible - the only limitations are those of modern-day hardware. In essence, reconstructing world position from a post effect revolves around a few key principles.
First - we need to find the world-space view direction of the current pixel. You’d think this would be easy; it’s a single line in a regular shader. Unfortunately, because post-effects are drawn to an orthogonal plane starting at the camera’s near plane, we can’t quite do that. Instead, we have to pass some things from a script to the effect.
The following is a script you can attach to the camera (all code samples in this answer will have comments explaining how they work):
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
//Run the effect even when we aren't playing
[ExecuteInEditMode]
public class WorldSpaceEffect : MonoBehaviour
{
    private Material mat; //Lazily created; runs our shader (see OnDisable for cleanup)
    private Camera cam;   //Cached camera component, so we don't call GetComponent every frame

    public Transform target;                        //This is the object we are going to be comparing distance to
    public float fadeStartDistance = 1f;            //The distance at which we will still be the near colour
    public float fadeEndDistance = 5f;              //The distance over which the fade will take place
    public Color nearColour = Color.white;          //The colour shown when we are close to the target
    public Color farColour = Color.black;           //The colour shown when we are far away from the target

    //OnRenderImage handles post effects (you can look at the documentation for more).
    //It receives the rendered frame in 'src' and must write the result into 'dest'.
    private void OnRenderImage (RenderTexture src, RenderTexture dest)
    {
        //Cache the attached camera the first time we need it
        if (!cam)
            cam = GetComponent<Camera>();

        //If we don't have a target or a camera, skip the effect.
        //We still have to blit - leaving 'dest' unwritten would break the screen.
        if (!target || !cam)
        {
            Graphics.Blit (src, dest);
            return;
        }

        //If we don't already have a material, create a new one and assign our shader to it
        if (!mat)
        {
            Shader shader = Shader.Find ("Hidden/WorldSpaceEffect");
            //If the shader can't be found (e.g. it was stripped from a build),
            //fall back to an unmodified blit rather than throwing every frame.
            if (!shader)
            {
                Graphics.Blit (src, dest);
                return;
            }
            mat = new Material (shader);
            //Keep this runtime-created material out of the scene and off disk;
            //we destroy it ourselves in OnDisable.
            mat.hideFlags = HideFlags.HideAndDontSave;
        }

        //Make sure we are rendering depth textures (we'll need them)
        cam.depthTextureMode |= DepthTextureMode.Depth;

        //This is an array containing the world-space positions of each of the far corners of our camera's frustum
        //Basically a vector pointing towards each of the corners of the screen
        Vector3[] frustumCorners = new Vector3[4];
        cam.CalculateFrustumCorners (new Rect(0, 0, 1, 1), cam.farClipPlane, cam.stereoActiveEye, frustumCorners);
        //CalculateFrustumCorners returns camera-space vectors; rotate/scale them into world space
        var bottomLeft = transform.TransformVector (frustumCorners[0]);
        var topLeft = transform.TransformVector (frustumCorners[1]);
        var topRight = transform.TransformVector (frustumCorners[2]);
        var bottomRight = transform.TransformVector (frustumCorners[3]);

        //In order to pass these to the shader, we'll need to store them in a matrix
        //In this case, you can just think of the matrix as an array of vectors
        Matrix4x4 frustumCornersArray = Matrix4x4.identity;
        frustumCornersArray.SetRow (0, bottomLeft);
        frustumCornersArray.SetRow (1, bottomRight);
        frustumCornersArray.SetRow (2, topLeft);
        frustumCornersArray.SetRow (3, topRight);

        //Give the shader the frustum corners array
        mat.SetMatrix ("_FrustumCorners", frustumCornersArray);
        //While we're here, also give the shader the position of our target object
        mat.SetVector ("_TargetPos", target.position);
        //As well as our effect parameters
        mat.SetFloat ("_FadeStart", fadeStartDistance);
        mat.SetFloat ("_FadeEnd", fadeEndDistance);
        mat.SetColor ("_NearColour", nearColour);
        mat.SetColor ("_FarColour", farColour);

        //Finally, draw the effect to the screen using our material/shader
        Graphics.Blit (src, dest, mat);
    }

    //When we are finished with the effect;
    private void OnDisable ()
    {
        //Clean up the material
        //This is actually necessary, otherwise every time we hit 'Play',
        //We will create a new material (these will eventually build up and clog the memory)
        if (mat)
            DestroyImmediate (mat);
    }
}
Next up is the shader. Using the four corner vectors we just passed to the shader, we can interpolate between them by abusing the fact that the effect is rendered on a quad with each vertex in the corners of the screen (therefore approximating world-space view direction). We can then use that vector, alongside the camera’s position (which is automatically passed to the shader; we don’t need to worry about that) and scene depth information (distance from the camera to the scene) to reconstruct the world-space position of the scene itself. From there, we can do the effect you described:
Shader "Hidden/WorldSpaceEffect"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "white" {}
    }
    CGINCLUDE
    #include "UnityCG.cginc"

    struct appdata
    {
        float4 vertex : POSITION;
        float2 uv : TEXCOORD0;
    };

    struct v2f
    {
        float4 vertex : SV_POSITION;
        float2 uv : TEXCOORD0;
        float4 viewDir : TEXCOORD1;
    };

    //Here is our corners array (one world-space far-plane corner per row,
    //set from the script via SetMatrix).
    float4x4 _FrustumCorners;

    v2f vert (appdata v)
    {
        v2f o;
        o.vertex = UnityObjectToClipPos (v.vertex);
        //This is the position of the pixel we are rendering from [0,1].
        o.uv = v.uv;
        //Recreate an index so we can access the matrix like an array.
        //0 = Bottom Left,
        //1 = Bottom Right,
        //2 = Top Left,
        //3 = Top Right.
        //(Use v.uv for both terms and cast explicitly - the fullscreen quad's
        //uvs are exactly 0 or 1 at the vertices, so this is safe.)
        int frustumIndex = (int)(v.uv.x + 2.0 * v.uv.y);
        //Access the matrix like an array to retrieve the current corner direction.
        //Because this is a vertex shader, the view direction will be interpolated
        //across the face of the fullscreen quad, giving us the correct, unique
        //view direction for every pixel.
        o.viewDir = _FrustumCorners[frustumIndex];
        //Store the index in the w component of the view direction (not necessary).
        o.viewDir.w = frustumIndex;
        return o;
    }

    sampler2D _MainTex;                  //The source image.
    sampler2D_float _CameraDepthTexture; //The camera's depth texture.

    //Here are our variables that we passed in the script.
    float3 _TargetPos;
    half _FadeStart;
    half _FadeEnd;
    fixed4 _NearColour;
    fixed4 _FarColour;

    fixed4 frag (v2f i) : SV_Target
    {
        //Sample the depth texture to get the scene depth at the current pixel.
        //SAMPLE_DEPTH_TEXTURE is the platform-safe way to read a depth texture.
        float depth = LinearEyeDepth (SAMPLE_DEPTH_TEXTURE (_CameraDepthTexture, i.uv));
        //Normalize the view direction so that it has a length of 1.
        //Don't actually normalize though, otherwise it messes with the projection;
        //Just divide by the distance of the camera's far plane.
        i.viewDir.xyz = i.viewDir.xyz / _ProjectionParams.z;
        //Reconstruct the pixel's world position based on the camera's world position.
        //Note the explicit .xyz - w holds the corner index, and we must not let it
        //leak into the float3 maths through implicit truncation.
        //*Note* - _WorldSpaceCameraPos is automatically passed to all shaders, so we don't need to declare it.
        float3 worldPos = _WorldSpaceCameraPos.xyz + i.viewDir.xyz * depth;
        //Calculate the distance between our world position and the target's position.
        float dist = distance (worldPos, _TargetPos);
        //Calculate the start and end points of the fade;
        //Making sure that the start is never further away from the end.
        float start = min (_FadeStart, _FadeEnd);
        float end = max (_FadeStart, _FadeEnd);
        //Get a fade factor from [0,1] based on the distance factor.
        float fade = smoothstep (start, end, dist);
        //Simply output a blend between our chosen near and far colours based on our fade variable.
        return lerp (_NearColour, _FarColour, fade);
    }
    ENDCG

    SubShader
    {
        // No culling or depth
        Cull Off ZWrite Off ZTest Always

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            ENDCG
        }
    }
}
World position reconstruction is a difficult topic - I actually spent about 2 years trying to figure out how to get view direction in a post-effect; but it’s incredibly useful once you figure it out.
Hope that helps you,
-Namey5