Here’s a visual explanation of my problem.
Goal: Capture the depth buffer as a screenshot using a secondary camera. There are some objects that I don’t want the main camera to display.
Problem: I’m getting the stream from the main camera, not the secondary one.
The entire project is on GitHub: SuperShinyEyes/UnityDepthBufferCaptureTest, on the depthCaptureYoutubeVideo branch.
The dev environment is Unity 5.5.2f1 Personal (64-bit) on Windows 10.
I assume the problem lies in the shader.
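For context, the camera split I’m after looks roughly like this (a simplified sketch, not the exact code from the repo; the "DepthOnly" layer name and the field names are just placeholders):

using UnityEngine;

// Sketch of the intended setup: the main camera skips a "DepthOnly" layer,
// while the secondary camera renders everything into an off-screen RenderTexture.
public class CameraSetupSketch : MonoBehaviour
{
    public Camera mainCamera;        // renders the scene to the screen
    public Camera depthCamera;       // secondary camera used only for the depth capture
    public RenderTexture depthTarget;

    void Start()
    {
        int hiddenLayer = LayerMask.NameToLayer("DepthOnly");  // placeholder layer name
        mainCamera.cullingMask &= ~(1 << hiddenLayer);         // hide those objects from the main camera
        depthCamera.cullingMask = ~0;                          // the secondary camera sees every layer
        depthCamera.targetTexture = depthTarget;               // render off-screen
        depthCamera.depthTextureMode = DepthTextureMode.Depth; // expose _CameraDepthTexture
    }
}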
// Upgrade NOTE: replaced 'mul(UNITY_MATRIX_MVP,*)' with 'UnityObjectToClipPos(*)'
Shader "Custom/RenderDepth"
{
Properties
{
MainTex("Base (RGB)", 2D) = "white" {}
_DepthLevel("Depth Level", Range(1, 3)) = 1
}
SubShader
{
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform sampler2D _MainTex;
uniform sampler2D _CameraDepthTexture;
uniform sampler2D _LastCameraDepthTexture;
uniform fixed _DepthLevel;
uniform half4 _MainTex_TexelSize;
struct input
{
float4 pos : POSITION;
half2 uv : TEXCOORD0;
};
struct output
{
float4 pos : SV_POSITION;
half2 uv : TEXCOORD0;
};
output vert(input i)
{
output o;
o.pos = UnityObjectToClipPos(i.pos);
o.uv = MultiplyUV(UNITY_MATRIX_TEXTURE0, i.uv);
// why do we need this? cause sometimes the image I get is flipped. see: http://docs.unity3d.com/Manual/SL-PlatformDifferences.html
#if UNITY_UV_STARTS_AT_TOP
if (_MainTex_TexelSize.y < 0)
o.uv.y = 1 - o.uv.y;
#endif
return o;
}
fixed4 frag(output o) : COLOR
{
//float depth = UNITY_SAMPLE_DEPTH(tex2D(_CameraDepthTexture, o.uv));
float depth = UNITY_SAMPLE_DEPTH(tex2D(_LastCameraDepthTexture, o.uv));
depth = Linear01Depth(depth);
return depth;
}
ENDCG
}
}
}
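From what I’ve read, this kind of depth shader is usually driven from OnRenderImage on the camera whose depth you want, so the blit runs while that camera’s own depth texture is the one bound. A minimal sketch of that pattern (not what I currently do in the project):

using UnityEngine;

// Sketch (assumption, not from the repo): run the depth material from OnRenderImage
// of the secondary camera, so _CameraDepthTexture belongs to that camera at blit time.
[RequireComponent(typeof(Camera))]
public class DepthBlitSketch : MonoBehaviour
{
    public Material depthMaterial; // a material using Custom/RenderDepth

    void Start()
    {
        GetComponent<Camera>().depthTextureMode = DepthTextureMode.Depth;
    }

    void OnRenderImage(RenderTexture source, RenderTexture destination)
    {
        // Runs the fragment shader over this camera's full-screen image.
        Graphics.Blit(source, destination, depthMaterial);
    }
}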
And here’s the C# script:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;

[ExecuteInEditMode]
public class ApplyStream : MonoBehaviour {

    private Camera cam;
    public RenderTexture tex;
    public GameObject display;
    public Shader shader;

    [Range(0f, 3f)]
    public float depthLevel = 0.5f;

    private Material _material;
    private Material material
    {
        get
        {
            if (_material == null)
            {
                _material = new Material(shader);
                _material.hideFlags = HideFlags.HideAndDontSave;
            }
            return _material;
        }
    }

    // Use this for initialization
    void Start () {
        cam = GetComponent<Camera>();
        cam.targetTexture = tex;

        if (!SystemInfo.supportsImageEffects)
        {
            print("System doesn't support image effects");
            enabled = false;
            return;
        }
        if (shader == null || !shader.isSupported)
        {
            enabled = false;
            print("Shader " + shader.name + " is not supported");
            return;
        }

        // Turn on depth rendering for the camera so that the shader can access it via _CameraDepthTexture.
        cam.depthTextureMode = DepthTextureMode.Depth;
    }

    // Update is called once per frame
    void Update () {
        material.SetFloat("_DepthLevel", depthLevel);

        RenderTexture.active = tex;
        //cam.Render();
        Graphics.Blit(tex, tex, material);

        // Read pixels from the active render texture into a Texture2D.
        Texture2D t = new Texture2D(tex.width, tex.height, TextureFormat.ARGB32, false);
        t.ReadPixels(new Rect(0, 0, tex.width, tex.height), 0, 0);
        // Apply() is required here; it uploads the ReadPixels data to the texture.
        t.Apply();

        display.GetComponent<MeshRenderer>().sharedMaterial.mainTexture = t;
        RenderTexture.active = null;
    }
}
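For completeness, once the Texture2D is captured correctly I plan to write it out as a screenshot roughly like this (the file path is only an example):

using System.IO;
using UnityEngine;

// Sketch of saving the captured Texture2D to disk as a PNG
// (the path is an example, not taken from the project).
public static class DepthScreenshotSketch
{
    public static void Save(Texture2D captured, string path = "depth_capture.png")
    {
        byte[] png = captured.EncodeToPNG(); // encode the ARGB32 texture as PNG
        File.WriteAllBytes(path, png);       // write it relative to the working directory
    }
}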
I’d appreciate any help. Thanks.