Lower resolution for raymarching object, HoloLens

Hi all!

I am writing a raymarching shader for Unity which works well but is slow. I am therefore trying to lower the rendering resolution ONLY for those objects (which live on a dedicated layer) to increase speed without losing too much quality (the default screen resolution is high compared to my needs).

I then created a camera parented to Camera.main and attached this script to it:

using UnityEngine;
using UnityEngine.XR;
public class RenderTextureCamera : MonoBehaviour
{
    /// <summary>
    /// The render texture used for volume rendering
    /// </summary>
    private RenderTexture m_volumeRenderingTexture = null;
    public Material CopyTextureMaterial;
    void Start()
    {
        //Create a render texture for the volume rendering camera. No depth texture, alpha component
        m_volumeRenderingTexture = new RenderTexture((int)(Camera.main.pixelWidth*0.75), (int)(Camera.main.pixelHeight*0.75), 0, RenderTextureFormat.ARGB32);
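        //Mark the texture as a two-eye stereo target so XR can render both eyes into it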
        m_volumeRenderingTexture.vrUsage = VRTextureUsage.TwoEyes;
        Camera camera = GetComponent<Camera>();
        int cullingMask = camera.cullingMask;
        float depth     = camera.depth;
        Color color     = camera.backgroundColor;
        camera.CopyFrom(Camera.main); //Copy stereoscopy parameters
        camera.cullingMask     = cullingMask; //Only my layer (in the editor)
        camera.backgroundColor = color; //(0,0,0,0) in the editor
        camera.depth = depth; //I do not know if this was important
        camera.clearFlags = CameraClearFlags.SolidColor;
        camera.targetTexture = m_volumeRenderingTexture;
        camera.rect = new Rect(0, 0, 1, 1);
    }
   
    private void OnPreRender()
    {
        Camera camera = GetComponent<Camera>();
        camera.targetTexture = m_volumeRenderingTexture;
    }
    private void OnPostRender()
    {
        Camera camera = GetComponent<Camera>();
        camera.targetTexture = null;
        Graphics.Blit(m_volumeRenderingTexture, (RenderTexture)null); //Should go on screen, right?
    }

}

However, for an unknown reason, this code does not work. I tried multiple things, and either my camera's rendering is not displayed at all, or the alpha component of my camera is not taken into account (I played with the RenderTexture.active and camera.targetTexture parameters).

Is there a way to blend two cameras on screen, one after the other?
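To clarify what I mean by blending: the usual camera stacking, sketched below, does overlay one camera on top of another (the "Raymarching" layer name is just a placeholder), but it does not by itself lower the resolution, hence the render texture:

using UnityEngine;

[RequireComponent(typeof(Camera))]
public class OverlayCamera : MonoBehaviour
{
    void Start()
    {
        Camera overlay = GetComponent<Camera>();
        overlay.clearFlags = CameraClearFlags.Depth;            //Keep the colors already drawn by the main camera
        overlay.depth = Camera.main.depth + 1;                  //Render after the main camera
        overlay.cullingMask = LayerMask.GetMask("Raymarching"); //Hypothetical layer holding the raymarched objects
    }
}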

Best

For people asking how to overlay cameras: the issue was in OnRenderImage, which by default does not blit with a material that preserves transparency.

Here is my code:

using UnityEngine;
using UnityEngine.XR;
public class RenderTextureCamera : MonoBehaviour
{
    /// <summary>
    /// The render texture used for volume rendering
    /// </summary>
    private RenderTexture m_volumeRenderingTexture = null;

    public Material CopyTextureMaterial;

    private Material m_material;

    void Start()
    {
        //Create a render texture for the volume rendering camera: 16-bit depth buffer, with an alpha component
        m_volumeRenderingTexture = new RenderTexture(512, (int)(Camera.main.pixelHeight * 512.0f / Camera.main.pixelWidth), 16, RenderTextureFormat.ARGB32);
        m_volumeRenderingTexture.vrUsage = VRTextureUsage.TwoEyes;

        m_material = new Material(CopyTextureMaterial);

        Camera camera = GetComponent<Camera>();
        int cullingMask = camera.cullingMask;
        float depth = camera.depth;
        camera.CopyFrom(Camera.main);
        camera.cullingMask = cullingMask;
        camera.depth = depth;
        camera.backgroundColor = new Color(0, 0, 0, 0);
        camera.clearFlags = CameraClearFlags.SolidColor;
        camera.targetTexture = m_volumeRenderingTexture;
        camera.rect = new Rect(0, 0, 1, 1);
    }

    private void OnPreRender()
    {
        Camera camera = GetComponent<Camera>();
        camera.targetTexture = m_volumeRenderingTexture;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        Graphics.Blit(source, destination, m_material); //The culprit! m_material is a stupid shader that uses alpha blending
    }

    private void OnPostRender()
    {
        Camera camera = GetComponent<Camera>();
        camera.targetTexture = null; //I guess this forces the final rendering onto the screen; without it, the camera never overlays
    }
}

Well, that was what I thought, until I tested with a stereoscopic display… My lower-resolution camera does not appear full screen, for an unknown reason.

My shader:

Shader "Sereno/CopyTexture"
{
    Properties
    {
        _MainTex("Main Texture", any) = "" {}
    }

    SubShader
    {
        Tags{ "Queue" = "Transparent" "RenderType" = "Transparent" }

        Lighting Off
        Cull Off
        ZWrite Off
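        //The key line: blend the copied pixels over what is already on screen, using their alpha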
        Blend SrcAlpha OneMinusSrcAlpha
        Fog {Mode Off}

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                fixed4 color  : COLOR;
                float2 uv     : TEXCOORD0;
                UNITY_VERTEX_INPUT_INSTANCE_ID
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                fixed4 color  : COLOR;
                float2 uv     : TEXCOORD0;
                UNITY_VERTEX_INPUT_INSTANCE_ID
                UNITY_VERTEX_OUTPUT_STEREO
            };

            /** The texture data*/
            UNITY_DECLARE_SCREENSPACE_TEXTURE(_MainTex);
            //sampler2D _MainTex;
            uniform float4 _MainTex_ST; //Scale and translation of this texture
            uniform float2 _Scale = float2(1.0, 1.0);

            v2f vert(appdata v)
            {
                v2f o;
                UNITY_SETUP_INSTANCE_ID(v);
                UNITY_TRANSFER_INSTANCE_ID(v, o);
                UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);

                o.vertex = UnityObjectToClipPos(v.vertex);
                o.color  = v.color;
                o.uv     = TRANSFORM_TEX(v.uv, _MainTex);
               
                return o;
            }

            fixed4  frag(v2f input) : COLOR
            {
                UNITY_SETUP_INSTANCE_ID(input);
                UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(input); //Select the correct eye for the screen-space sample
                fixed4 color = UNITY_SAMPLE_SCREENSPACE_TEXTURE(_MainTex, input.uv);
                //fixed4 color = tex2D(_MainTex, input.uv);

                return color;
            }
            ENDCG
        }
    }
}

and my C# camera code:

using UnityEngine;
using UnityEngine.XR;
public class RenderTextureCamera : MonoBehaviour
{
    /// <summary>
    /// The render texture used for volume rendering
    /// </summary>
    private RenderTexture m_volumeRenderingTexture = null;

    public Material CopyTextureMaterial;

    private Material m_material;

    void Start()
    {
        //Create a render texture for the volume rendering camera: 16-bit depth buffer, with an alpha component
        m_volumeRenderingTexture = new RenderTexture(512, (int)(Screen.height * 512.0f / ((XRSettings.isDeviceActive ? 2 : 1)*Screen.width)), 16, RenderTextureFormat.ARGB32);
        m_volumeRenderingTexture.vrUsage = VRTextureUsage.TwoEyes;

        m_material = new Material(CopyTextureMaterial);

        Camera camera = GetComponent<Camera>();
        int cullingMask = camera.cullingMask;
        float depth = camera.depth;
        camera.CopyFrom(Camera.main);
        camera.cullingMask = cullingMask;
        camera.depth = depth;
        camera.backgroundColor = new Color(0, 0, 0, 0);
        camera.clearFlags = CameraClearFlags.Depth;
        camera.targetTexture = m_volumeRenderingTexture;
        camera.depthTextureMode = DepthTextureMode.None;
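        //Always render through an intermediate texture, even when the final target is the screen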
        camera.forceIntoRenderTexture = true;
    }

    private void OnPreRender()
    {
        Camera camera = GetComponent<Camera>();
        camera.targetTexture = m_volumeRenderingTexture;
    }

    private void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        Graphics.Blit(source, destination, m_material);

        //Graphics.SetRenderTarget(destination);
        //m_material.SetPass(0);
        //m_material.SetTexture("_MainTex", source);

        /*GL.PushMatrix();
        GL.LoadOrtho();
       
        GL.Begin(GL.QUADS);
            GL.TexCoord2(0, 0);
            GL.Vertex3(0, 0, 0);
            GL.TexCoord2(0, 1);
            GL.Vertex3(0, 1, 0);
            GL.TexCoord2(1, 1);
            GL.Vertex3(1, 1, 0);
            GL.TexCoord2(1, 0);
            GL.Vertex3(1, 0, 0);
        GL.End();

        GL.PopMatrix();*/
    }

    private void OnPostRender()
    {
        Camera camera = GetComponent<Camera>();
        camera.targetTexture = null;
        Graphics.Blit(m_volumeRenderingTexture, null as RenderTexture, m_material);
    }
}

Well, it seems this issue cannot be overcome, according to Is Single Pass Stereo rendering supported for RenderTextures?
Using targetTexture in Unity breaks the stereoscopy, for an unknown reason…
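As a coarser but stereo-safe fallback, one can lower the resolution of the whole XR eye texture instead of a single layer. A minimal sketch, assuming Unity's built-in XR settings (note that this affects every layer, so it is not a real substitute for the per-layer approach above):

using UnityEngine;
using UnityEngine.XR;

public class EyeTextureScaler : MonoBehaviour
{
    [Range(0.1f, 1.0f)]
    public float Scale = 0.75f; //Fraction of the default eye texture resolution

    void Start()
    {
        //Scale the resolution of both eye textures at the XR pipeline level,
        //without touching camera.targetTexture
        XRSettings.eyeTextureResolutionScale = Scale;
    }
}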