I would like to create a 3D outline for my VR application using URP with Unity 2019.3.4f1, but I have no idea how to make it.
What I have already tried:
- This tutorial, but it didn't work for me - Custom Render Passes with URP - Unity Learn
- And this one - Redirecting…, which I actually think could work, but for some strange reason it gives me strange results, maybe because I didn't set it up correctly.
So far this is what I have from that tutorial:
Shader Graph made for the outline, so that only one object gets outlined:
[157604-shader-graph-of-outline.png|157604]
Picture of how I created the renderer asset:
[157603-how-i-put-everything.png|157603]
Code of Outline.hlsl:
TEXTURE2D(_CameraDepthTexture);
SAMPLER(sampler_CameraDepthTexture);
float4 _CameraDepthTexture_TexelSize;
TEXTURE2D(_CameraDepthNormalsTexture);
SAMPLER(sampler_CameraDepthNormalsTexture);
// Decodes the view-space normal from Unity's DepthNormals encoding
// (stereographic projection, same scheme as DecodeViewNormalStereo in UnityCG.cginc).
float3 DecodeNormal(float4 enc)
{
float kScale = 1.7777;
float3 nn = enc.xyz*float3(2*kScale,2*kScale,0) + float3(-kScale,-kScale,1);
float g = 2.0 / dot(nn.xyz,nn.xyz);
float3 n;
n.xy = g*nn.xy;
n.z = g-1;
return n;
}
void Outline_float(float2 UV, float OutlineThickness, float DepthSensitivity, float NormalsSensitivity, out float Out)
{
float halfScaleFloor = floor(OutlineThickness * 0.5);
float halfScaleCeil = ceil(OutlineThickness * 0.5);
float2 uvSamples[4];
float depthSamples[4];
float3 normalSamples[4];
uvSamples[0] = UV - float2(_CameraDepthTexture_TexelSize.x, _CameraDepthTexture_TexelSize.y) * halfScaleFloor;
uvSamples[1] = UV + float2(_CameraDepthTexture_TexelSize.x, _CameraDepthTexture_TexelSize.y) * halfScaleCeil;
uvSamples[2] = UV + float2(_CameraDepthTexture_TexelSize.x * halfScaleCeil, -_CameraDepthTexture_TexelSize.y * halfScaleFloor);
uvSamples[3] = UV + float2(-_CameraDepthTexture_TexelSize.x * halfScaleFloor, _CameraDepthTexture_TexelSize.y * halfScaleCeil);
for(int i = 0; i < 4 ; i++)
{
depthSamples[i] = SAMPLE_TEXTURE2D(_CameraDepthTexture, sampler_CameraDepthTexture, uvSamples[i]).r;
normalSamples[i] = DecodeNormal(SAMPLE_TEXTURE2D(_CameraDepthNormalsTexture, sampler_CameraDepthNormalsTexture, uvSamples[i]));
}
// Depth
float depthFiniteDifference0 = depthSamples[1] - depthSamples[0];
float depthFiniteDifference1 = depthSamples[3] - depthSamples[2];
float edgeDepth = sqrt(pow(depthFiniteDifference0, 2) + pow(depthFiniteDifference1, 2)) * 100;
float depthThreshold = (1/DepthSensitivity) * depthSamples[0];
edgeDepth = edgeDepth > depthThreshold ? 1 : 0;
// Normals
float3 normalFiniteDifference0 = normalSamples[1] - normalSamples[0];
float3 normalFiniteDifference1 = normalSamples[3] - normalSamples[2];
float edgeNormal = sqrt(dot(normalFiniteDifference0, normalFiniteDifference0) + dot(normalFiniteDifference1, normalFiniteDifference1));
edgeNormal = edgeNormal > (1/NormalsSensitivity) ? 1 : 0;
float edge = max(edgeDepth, edgeNormal);
Out = edge;
}
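Side note: this function samples _CameraDepthTexture, which (as far as I understand) only exists when the Depth Texture option is enabled on the URP pipeline asset. A minimal sketch of how that can be checked at runtime, assuming the active pipeline asset is a UniversalRenderPipelineAsset (the DepthTextureCheck name is just made up here):

using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;

// Hypothetical helper: warns if URP is not generating the depth texture
// that the Outline_float function samples (_CameraDepthTexture).
public class DepthTextureCheck : MonoBehaviour
{
    void Start()
    {
        var urpAsset = GraphicsSettings.renderPipelineAsset as UniversalRenderPipelineAsset;
        if (urpAsset == null)
        {
            Debug.LogWarning("Active render pipeline is not URP.");
        }
        else if (!urpAsset.supportsCameraDepthTexture)
        {
            Debug.LogWarning("Depth Texture is disabled on the URP asset, so _CameraDepthTexture will be empty.");
        }
    }
}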
Code of the script that generates the depth-normals texture, DepthNormalsFeature.cs:
public class DepthNormalsFeature : ScriptableRendererFeature
{
class DepthNormalsPass : ScriptableRenderPass
{
private RenderTargetHandle destination { get; set; }
private Material depthNormalsMaterial = null;
private FilteringSettings m_FilteringSettings;
ShaderTagId m_ShaderTagId = new ShaderTagId("DepthOnly");
public DepthNormalsPass(RenderQueueRange renderQueueRange, LayerMask layerMask, Material material)
{
m_FilteringSettings = new FilteringSettings(renderQueueRange, layerMask);
this.depthNormalsMaterial = material;
}
public void Setup(RenderTargetHandle destination)
{
this.destination = destination;
}
// This method is called before executing the render pass.
// It can be used to configure render targets and their clear state. Also to create temporary render target textures.
// When empty this render pass will render to the active camera render target.
// You should never call CommandBuffer.SetRenderTarget. Instead call ConfigureTarget and ConfigureClear.
// The render pipeline will ensure target setup and clearing happens in a performant manner.
public override void Configure(CommandBuffer cmd, RenderTextureDescriptor cameraTextureDescriptor)
{
RenderTextureDescriptor descriptor = cameraTextureDescriptor;
descriptor.depthBufferBits = 32;
descriptor.colorFormat = RenderTextureFormat.ARGB32;
cmd.GetTemporaryRT(destination.id, descriptor, FilterMode.Point);
ConfigureTarget(destination.Identifier());
ConfigureClear(ClearFlag.All, Color.black);
}
// Here you can implement the rendering logic.
// Use ScriptableRenderContext to issue drawing commands or execute command buffers
// https://docs.unity3d.com/ScriptReference/Rendering.ScriptableRenderContext.html
// You don’t have to call ScriptableRenderContext.submit, the render pipeline will call it at specific points in the pipeline.
public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
{
CommandBuffer cmd = CommandBufferPool.Get("DepthNormals Prepass");
using (new ProfilingSample(cmd, "DepthNormals Prepass"))
{
context.ExecuteCommandBuffer(cmd);
cmd.Clear();
var sortFlags = renderingData.cameraData.defaultOpaqueSortFlags;
var drawSettings = CreateDrawingSettings(m_ShaderTagId, ref renderingData, sortFlags);
drawSettings.perObjectData = PerObjectData.None;
ref CameraData cameraData = ref renderingData.cameraData;
Camera camera = cameraData.camera;
if (cameraData.isStereoEnabled)
context.StartMultiEye(camera);
drawSettings.overrideMaterial = depthNormalsMaterial;
context.DrawRenderers(renderingData.cullResults, ref drawSettings,
ref m_FilteringSettings);
cmd.SetGlobalTexture("_CameraDepthNormalsTexture", destination.id);
}
context.ExecuteCommandBuffer(cmd);
CommandBufferPool.Release(cmd);
}
/// Cleanup any allocated resources that were created during the execution of this render pass.
public override void FrameCleanup(CommandBuffer cmd)
{
if (destination != RenderTargetHandle.CameraTarget)
{
cmd.ReleaseTemporaryRT(destination.id);
destination = RenderTargetHandle.CameraTarget;
}
}
}
DepthNormalsPass depthNormalsPass;
RenderTargetHandle depthNormalsTexture;
Material depthNormalsMaterial;
public override void Create()
{
depthNormalsMaterial = CoreUtils.CreateEngineMaterial("Hidden/Internal-DepthNormalsTexture");
depthNormalsPass = new DepthNormalsPass(RenderQueueRange.opaque, -1, depthNormalsMaterial);
depthNormalsPass.renderPassEvent = RenderPassEvent.AfterRenderingPrePasses;
depthNormalsTexture.Init("_CameraDepthNormalsTexture");
}
// Here you can inject one or multiple render passes in the renderer.
// This method is called when setting up the renderer once per-camera.
public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
{
depthNormalsPass.Setup(depthNormalsTexture);
renderer.EnqueuePass(depthNormalsPass);
}
}
What I do at the end is add the Outline.mat material to the desired object, but it only shows something (strange lines) if the object doesn't have any other material.
----------
What I would like to achieve is a material that I can attach to an object that already has one material, so that I can show that the object is selected.
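In other words, something like this hypothetical SelectableOutline component, which just adds Outline.mat on top of the object's existing materials while it is selected (only a sketch of the selection side, with made-up names, assuming the outline pass itself works):

using UnityEngine;

// Hypothetical component: appends the outline material to the renderer's
// existing materials while the object is selected, and restores the
// original materials when it is deselected.
public class SelectableOutline : MonoBehaviour
{
    public Material outlineMaterial; // Outline.mat
    private Renderer rend;
    private Material[] originalMaterials;

    void Awake()
    {
        rend = GetComponent<Renderer>();
        originalMaterials = rend.sharedMaterials;
    }

    public void SetSelected(bool selected)
    {
        if (selected)
        {
            var withOutline = new Material[originalMaterials.Length + 1];
            originalMaterials.CopyTo(withOutline, 0);
            withOutline[withOutline.Length - 1] = outlineMaterial;
            rend.sharedMaterials = withOutline;
        }
        else
        {
            rend.sharedMaterials = originalMaterials;
        }
    }
}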