How to use Multiple Render Targets (MRT) in a Renderer Feature

I am trying to draw something onto multiple Render Textures at once.
If I use ConfigureTarget(rthandles[0]), it renders as expected, with just that one Render Texture getting painted.
But when I try ConfigureTarget(RTHandle[]) to draw onto two Render Textures, it fails.
I am wondering what the proper way to use ConfigureTarget(RTHandle[]) is.
Hope someone can help.

Er… one minute before clicking "Create Thread" I found the cause!
The render textures must all be the same size; then everything works.
I'm keeping the thread up in case someone needs it.
I'd still be glad if someone could tell me more about the MRT implementation.
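
A minimal sketch of that requirement, assuming you create the textures from script (the helper name, sizes, and formats here are my own, not from the project below):

using UnityEngine;

// Hypothetical helper, not part of the original project: both color targets
// must share the same dimensions for ConfigureTarget(RTHandle[]) to work.
public static class MrtTextureFactory
{
    public static void CreatePair(int width, int height,
        out RenderTexture colorRT, out RenderTexture normalRT)
    {
        // Identical width/height for every color attachment; no depth bits
        // on the color targets themselves.
        colorRT  = new RenderTexture(width, height, 0, RenderTextureFormat.ARGB32);
        normalRT = new RenderTexture(width, height, 0, RenderTextureFormat.ARGB32);
        colorRT.Create();
        normalRT.Create();
    }
}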

Here is my shader: a simple unlit shader that writes to SV_Target0 and SV_Target1.

Shader "Bin/Unlit"{
    Properties{
        _MainTex ("Texture", 2D) = "white" {}
        _BumpMap("Normal Map", 2D) = "bump" {}
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 100
        Blend SrcAlpha OneMinusSrcAlpha

        Pass
        {
            HLSLPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
            struct Attributes
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };
            struct Varyings
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
            };
            TEXTURE2D(_MainTex);            SAMPLER(sampler_MainTex);
            TEXTURE2D(_BumpMap);            SAMPLER(sampler_BumpMap);
            float4 _MainTex_ST;
            Varyings vert (Attributes v)
            {
                Varyings o;
                o.vertex = TransformObjectToHClip(v.vertex.xyz);  
                o.uv = TRANSFORM_TEX(v.uv, _MainTex);
                return o;
            }

            // One struct field per color attachment: SV_Target0 maps to the
            // first bound RTHandle, SV_Target1 to the second.
            struct FragOut{
                half4 color     : SV_Target0;
                half4 normal    : SV_Target1;
            };
            // Note: no SV_Target semantic on the function itself; the
            // semantics live on the struct fields.
            FragOut frag (Varyings i)
            {
                half4 col = SAMPLE_TEXTURE2D(_MainTex, sampler_MainTex, i.uv);
                half4 nor = SAMPLE_TEXTURE2D(_BumpMap, sampler_BumpMap, i.uv);
               
                FragOut output;
                output.color = col;
                output.normal = nor;
                return output;
            }
            ENDHLSL
        }
    }
}

Here is my renderer feature script:

using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Rendering.Universal;

public class CustomRenderPassFeature : ScriptableRendererFeature
{
    public Material mat;
    public RenderTexture colorRT;
    public RenderTexture normalRT;
    public Renderer objToPaint;
   
    public class CustomRenderPass : ScriptableRenderPass
    {
        public bool ifPaint;
        public Renderer m_objToPaint;
        public Material m_mat;

        RTHandle[] rthandles = new RTHandle[2];
        RTHandle rthColorRT;
        RTHandle rthBumpRT;

        ProfilingSampler m_ProfilingSampler;
        public CustomRenderPass(RenderTexture colorRT, RenderTexture normalRT, string name){
            m_ProfilingSampler = new ProfilingSampler(name);

            // Wrap the RenderTextures in RTHandles once; the array order here
            // matches the SV_Target indices in the shader.
            rthColorRT = RTHandles.Alloc(colorRT);
            rthandles[0] = rthColorRT;
            rthBumpRT = RTHandles.Alloc(normalRT);
            rthandles[1] = rthBumpRT;
        }
      
        public override void OnCameraSetup(CommandBuffer cmd, ref RenderingData renderingData)
        {
            // Bind both color targets for MRT; both RTHandles must be the same size.
            ConfigureTarget(rthandles);
            // ConfigureTarget(rthandles[0]);
            // ConfigureTarget(rthandles[1]);
        }

        public override void Execute(ScriptableRenderContext context, ref RenderingData renderingData)
        {
            CommandBuffer cmd = CommandBufferPool.Get();
            using (new ProfilingScope(cmd, m_ProfilingSampler)) {
                // Flush the buffer first so the profiling scope shows up correctly.
                context.ExecuteCommandBuffer(cmd);
                cmd.Clear();

                if(ifPaint){
                    // Draw the renderer with the MRT material; writes to both targets.
                    cmd.DrawRenderer(m_objToPaint, m_mat, 0, 0);
                }
            }
            context.ExecuteCommandBuffer(cmd);
            cmd.Clear();
            CommandBufferPool.Release(cmd);
        }

        public override void OnCameraCleanup(CommandBuffer cmd)
        {
        }
    }

    public CustomRenderPass m_ScriptablePass;

    public override void Create()
    {
        m_ScriptablePass = new CustomRenderPass(colorRT, normalRT, name){
            m_mat = mat,
            m_objToPaint = objToPaint
        };

        m_ScriptablePass.renderPassEvent = RenderPassEvent.AfterRenderingOpaques;
    }

    public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
    {
        renderer.EnqueuePass(m_ScriptablePass);
    }
}
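
One thing the script above never does is release the RTHandle wrappers. A possible cleanup sketch, assuming you add a small release method to the pass (ReleaseHandles is my name, not part of URP):

// In CustomRenderPass (hypothetical addition):
public void ReleaseHandles()
{
    rthColorRT?.Release();
    rthBumpRT?.Release();
}

// In CustomRenderPassFeature:
protected override void Dispose(bool disposing)
{
    if (disposing)
        m_ScriptablePass?.ReleaseHandles();
    base.Dispose(disposing);
}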

And to pass the Renderer objToPaint into the renderer feature, here is my C# script:

using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using System.Linq;
using UnityEngine.Rendering.Universal;

public class Paint : MonoBehaviour
{
    public Renderer objToPaint;
    public CustomRenderPassFeature myFeature;

    void Update()
    {
        // Paint only on the frame the mouse button goes down.
        if(Input.GetMouseButtonDown(0)){
            myFeature.m_ScriptablePass.ifPaint = true;
            myFeature.m_ScriptablePass.m_objToPaint = objToPaint;
        }
        else{
            myFeature.m_ScriptablePass.ifPaint = false;
        }
    }
}

Very interesting, thanks! :)

New:
If you run into errors reporting "Dimensions of color surface does not match dimensions of depth surface" in this setup, I found it can be caused by editor cameras. I fixed it with the following conditional statements:

public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
{
    if (renderingData.cameraData.isPreviewCamera) return;
    // Ignore feature for editor/inspector previews & asset thumbnails
    if (renderingData.cameraData.isSceneViewCamera) return;
    // Ignore feature for scene view
    if (renderingData.cameraData.camera != Camera.main) return;
    // Ignore all cameras except the camera tagged as MainCamera
    // (though it may be better to use multiple Renderer Assets, see below)

    renderer.EnqueuePass(m_ScriptablePass);
}

(this is actually from Cyanilux’s tutorial: Custom Renderer Features | Cyanilux)
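
An equivalent single check, if you prefer it (my variant, not from the tutorial): CameraType covers previews, the scene view, and reflection cameras at once.

public override void AddRenderPasses(ScriptableRenderer renderer, ref RenderingData renderingData)
{
    // Only run for in-game cameras; skips scene view, previews, and reflections.
    if (renderingData.cameraData.cameraType != CameraType.Game) return;

    renderer.EnqueuePass(m_ScriptablePass);
}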


New:
If you find the render textures’ size cannot be larger than the Game window resolution, please check this link:

The depth buffer matters with Multiple Render Targets, even if you have no need for depth.
Make sure the depth buffer is the same size as the color buffers.
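
If you do need an explicit depth attachment, ScriptableRenderPass has a ConfigureTarget overload that takes the color array plus a depth handle. A sketch, assuming rthDepth wraps a depth-only RenderTexture of the same size as the color targets (the variable name is mine):

// rthDepth is assumed to wrap a depth-only RenderTexture matching the color
// targets' dimensions, e.g. new RenderTexture(w, h, 24, RenderTextureFormat.Depth).
public override void OnCameraSetup(CommandBuffer cmd, ref RenderingData renderingData)
{
    ConfigureTarget(rthandles, rthDepth); // MRT colors + matching depth
}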

I fixed it by changing the camera viewport rect from a W and H of 1/1 to 3/3.

Not sure if it's super hacky, but I was using DLSS, which has a ×3 scale reduction, and on the lowest setting the error would show up; after I changed the rect it resolved. Could be my own bug, but this is for anyone looking for a band-aid fix or in a hurry.
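
For reference, the workaround above expressed in code (my reconstruction of the reply, not tested):

// Enlarging the viewport rect from (1, 1) to (3, 3) sidestepped the
// DLSS-scaled size mismatch for this poster.
Camera.main.rect = new Rect(0, 0, 3, 3);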


Hello, thanks for your code.

But when I follow it, the RT shows up black in the Frame Debugger.

Can you upload a repo? Thanks.