So, I am fairly new to writing shaders in Unity, and I’ve recently been involved in making a system for visualizing volumetric data in the software. I have a simple raymarching shader that is capable of rendering volumes (stored as 3D textures) to a sufficient quality within a box geometry, but am having considerable issues in getting everything to render correctly depth wise - I am stuck at either having the volume render on top of everything, or on top of only things that are directly behind the physical box geometry.
I am looking to end up with a shader that will render the volume in such a way that you can move mesh objects through it and have them behave as you’d expect.
My current shader code is:
Shader "Custom/VolumeShaderV2WithSplitting"
{
SubShader
{
Tags
{
"Queue" = "Transparent" "RenderType" = "Transparent"
}
CGINCLUDE
#include "UnityCG.cginc"
//sets how far as a proportion of max distance a ray will advance at each step
int _SamplingQuality;
//the 3D texture to be rendered
sampler3D _MainTex;
//the density of the colour values within the volume
float _Density;
//camera depth texture
uniform sampler2D _CameraDepthTexture;
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 pos : SV_POSITION;
float2 uv : TEXCOORD0;
float3 localPos : TEXCOORD1;
};
//vertex shader
v2f vert(appdata v)
{
v2f OUT;
OUT.pos = UnityObjectToClipPos(v.vertex);
OUT.localPos = v.vertex.xyz;
OUT.uv = v.uv.xy;
return OUT;
}
// ray/cube intersection algorithm
struct Ray
{
float3 origin;
float3 direction;
};
bool IntersectBox(Ray ray, out float entryPoint, out float exitPoint)
{
float3 invR = 1.0 / ray.direction;
float3 tbot = invR * (float3(-0.5, -0.5, -0.5) - ray.origin);
float3 ttop = invR * (float3(0.5, 0.5, 0.5) - ray.origin);
float3 tmin = min(ttop, tbot);
float3 tmax = max(ttop, tbot);
float2 t = max(tmin.xx, tmin.yz);
entryPoint = max(t.x, t.y);
t = min(tmax.xx, tmax.yz);
exitPoint = min(t.x, t.y);
return entryPoint <= exitPoint;
}
//fragment shader (calculates the colour of a specific pixel)
float4 frag(v2f IN) : COLOR
{
float4 color = float4(0,0,0,0);
float3 localCameraPosition = UNITY_MATRIX_IT_MV[3].xyz;
Ray ray;
ray.origin = localCameraPosition;
ray.direction = normalize(IN.localPos - localCameraPosition);
float entryPoint, exitPoint;
IntersectBox(ray, entryPoint, exitPoint);
if (entryPoint < 0.0) entryPoint = 0.0;
float3 rayStart = ray.origin + ray.direction * entryPoint;
float3 rayStop = ray.origin + ray.direction * exitPoint;
float dist = distance(rayStop, rayStart);
float stepSize = dist / float(_SamplingQuality);
float3 ds = normalize(rayStop - rayStart) * stepSize;
float3 pos = rayStop.xyz + 0.5f;
float2 uv = IN.uv;
#if UNITY_UV_STARTS_AT_TOP
uv.y = 1 - uv.y;
#endif
float depth = LinearEyeDepth(tex2D(_CameraDepthTexture, uv).r);
for (int i = _SamplingQuality; i >= 0; --i)
{
float4 mask = float4(0, 0, 0, 0);
float travelled = i * ds;
if (travelled < depth) { //if not occluded by something in the depth buffer
//accumulate the colour of this point
mask = tex3D(_MainTex, pos);
color.xyz += mask.xyz * mask.w;
}
pos -= ds;
}
color *= _Density / (uint)_SamplingQuality;
return color;
}
ENDCG
Pass
{
Blend One One
Cull front
ZWrite off
ZTest Always
CGPROGRAM
#pragma target 3.0
#pragma vertex vert
#pragma fragment frag
ENDCG
}
}
}
This represents my best attempt at combining my understanding of volumetric rendering using raymarching (in its most basic form) and the advice on implementing depth buffer testing from this.
Unfortunately I’ve had no luck and I’m pretty confident that I’m just making my already poorly written shader even worse at this point.
Currently the rendered scene looks like this:
As you can see the volume is currently rendering “on top” of everything in the scene. The aim is to have the cubes in the scene appear as you’d expect: one in front of the volume, one within it (and should be partially visible), and one behind it.
Hopefully this question isn’t too general, but I’m a bit lost.
Thanks in advance if you’ve any advice or pointers at all.
[Updated to remove bits from shader that were concerned with implementing a slicing plane and toggling colour channels → this is jus the parts that are relevant… hopefully]
[Update again because I included some junk that I was trying to do to fix it, whoops]