I am trying to implement an FOV cone that will scale to tens of enemies on the screen at a time. The code I’m seeing to do FOV cones uses raycasts, but to make it look smooth you need to do a very large amount of raycasts for each cone, I estimate each cone would need 128(!) raycasts per frame. Obviously this won’t scale to tens of enemies.
So, I thought I’d do something clever: use a camera to do 128 raycasts at once. I’ve set up a camera to render to a 128x1 render texture and a fragment shader that stores the distance to the camera of each fragment in the red channel. I was also trying to use the depth of the fragment, but it wasn’t quite working either.
This is what it’s done, it’s almost correct but there seems to be some perspective distortion going on.
And what it’s like looking at a straight wall.
This is the shader I’m using.
Shader "FOV Depth"
{
SubShader
{
Tags { "RenderType"="Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
};
struct v2f
{
float4 pos : SV_POSITION;
float4 worldPos : TEXCOORD0;
};
v2f vert (appdata v)
{
v2f o;
o.pos = UnityObjectToClipPos(v.vertex);
o.worldPos = mul(unity_ObjectToWorld, v.vertex);
return o;
}
float frag (v2f i) : SV_Target
{
return distance(i.worldPos, _WorldSpaceCameraPos);
}
ENDCG
}
}
}
So there’s not much going on there. Assuming the fragment world space is accurate, this should be doing more or less exactly what a raycast is doing albeit with visible geometry and not collision geometry.
And the script I’m using for testing. Sorry, it’s a big messy, but the relevant methods are RenderEye and DebugDrawEye at the bottom.
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
namespace FovShader1
{
public class FieldOfView : MonoBehaviour
{
[SerializeField] private int numRays;
[SerializeField] private Shader depthShader;
[SerializeField] private Camera eye;
[SerializeField] private float fov = 90f;
[SerializeField] private float viewDistance = 5f;
private Texture2D pixelReader;
private RenderTexture eyeRenderTexture;
private void Awake()
{
CreateTextures();
SetupEye(eye);
}
private void CreateTextures()
{
eyeRenderTexture = new RenderTexture(numRays, 1, 0, RenderTextureFormat.ARGBFloat, RenderTextureReadWrite.Default);
eye.targetTexture = eyeRenderTexture;
pixelReader = new Texture2D(numRays, 1, TextureFormat.RGBAFloat, false);
}
private void SetupEye(Camera eye)
{
eye.farClipPlane = viewDistance;
eye.nearClipPlane = 0.01f;
eye.depthTextureMode = DepthTextureMode.Depth;
eye.targetTexture = eyeRenderTexture;
eye.aspect = numRays;
eye.fieldOfView = Mathf.Rad2Deg*2*Mathf.Atan(Mathf.Tan((fov*Mathf.Deg2Rad)/2f)/eye.aspect);
}
private void Update()
{
RenderEye(eye);
DebugDrawEye(eye);
}
private void RenderEye(Camera eye)
{
var shadowDistance = QualitySettings.shadowDistance;
QualitySettings.shadowDistance = 0;
eye.RenderWithShader(depthShader, null);
QualitySettings.shadowDistance = shadowDistance;
}
private void DebugDrawEye(Camera eye)
{
RenderTexture.active = eyeRenderTexture;
pixelReader.ReadPixels(new Rect(0, 0, numRays, 1), 0, 0);
pixelReader.Apply();
var rot = Quaternion.Euler(0, -(fov / 2) + (fov / eyeRenderTexture.width / 2), 0);
for(int i = 0; i < eyeRenderTexture.width; i++)
{
var depth = pixelReader.GetPixel(i, 0).r;
if(depth == 0) depth = eye.farClipPlane;
var start = eye.transform.position;
var end = rot * Vector3.forward * depth;
end = eye.transform.TransformPoint(end);
Debug.DrawLine(start, end, Color.red, 0f);
rot *= Quaternion.Euler(0, fov / eyeRenderTexture.width, 0);
}
}
}
}
The project is also attached to this post.
3355517–262616–FOV Test.zip (775 KB)