Hi all, I’m tantalizingly close to getting human occlusion working with the user-facing camera, but I’ve hit a wall with what I suspect is a relatively simple shader issue.
Like the author of this excellent thread, I’m blitting the human stencil texture to a material and setting the camera image as a second texture named “_CameraTex” on the same material. The key line is the final assignment in the frag function of the shader below (lightly adapted from the ar-foundation-samples HumanStencil.shader): it returns RGB from the camera texture, but uses the stencil’s red channel as the alpha channel.
My intuition is that this should give alpha 0 wherever the human stencil (second image) is black and alpha 1 wherever it’s red. Instead I’m getting the third image: alpha is correctly 0 for all the black areas of the stencil texture, but the darker areas of _CameraTex seem to be going transparent as well.
If someone could point me in the right direction, I’d be very grateful!
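For context, the C# side of the setup is roughly the sketch below (simplified, with illustrative field names; the camera-image RenderTexture is filled elsewhere in my project), followed by the shader itself.

using UnityEngine;
using UnityEngine.XR.ARFoundation;

// Rough sketch of the per-frame setup: blit the human stencil into a
// RenderTexture and hand both textures to the material used by the shader below.
public class HumanStencilSetup : MonoBehaviour
{
    [SerializeField] AROcclusionManager occlusionManager;
    [SerializeField] Material stencilMaterial;    // uses the Unlit/HumanStencil shader below
    [SerializeField] RenderTexture stencilTarget; // sampled as _MainTex
    [SerializeField] RenderTexture cameraTarget;  // sampled as _CameraTex, filled elsewhere

    void Update()
    {
        var stencil = occlusionManager.humanStencilTexture;
        if (stencil == null)
            return;

        Graphics.Blit(stencil, stencilTarget);
        stencilMaterial.SetTexture("_MainTex", stencilTarget);
        stencilMaterial.SetTexture("_CameraTex", cameraTarget);
    }
}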
Shader "Unlit/HumanStencil"
{
Properties
{
_MainTex ("Main Texture", 2D) = "black" {} //human stencil, blitted
_CameraTex ("Camera Texture", 2D) = "black" {}
}
SubShader
{
Tags
{
"Queue" = "Transparent"
"RenderType" = "Transparent"
"ForceNoShadowCasting" = "True"
}
Pass
{
Blend SrcAlpha OneMinusSrcAlpha
Cull Off
ZTest Always
ZWrite Off
Lighting Off
LOD 100
Tags
{
"LightMode" = "Always"
}
HLSLPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
#define real half
#define real3 half3
#define real4 half4
#define TransformObjectToHClip UnityObjectToClipPos
#define DECLARE_TEXTURE2D_FLOAT(texture) UNITY_DECLARE_TEX2D_FLOAT(texture)
#define DECLARE_SAMPLER_FLOAT(sampler)
#define SAMPLE_TEXTURE2D(texture,sampler,texcoord) UNITY_SAMPLE_TEX2D(texture,texcoord)
struct appdata
{
float3 position : POSITION;
float2 texcoord : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f
{
float4 position : SV_POSITION;
float2 texcoord : TEXCOORD0;
UNITY_VERTEX_OUTPUT_STEREO
};
struct fragment_output
{
real4 color : SV_Target;
};
CBUFFER_START(DisplayRotationPerFrame)
float4x4 _DisplayRotationPerFrame;
CBUFFER_END
v2f vert (appdata v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_INITIALIZE_OUTPUT(v2f, o);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
o.position = TransformObjectToHClip(v.position);
o.texcoord = mul(float3(v.texcoord, 1.0f), _DisplayRotationPerFrame).xy;
return o;
}
DECLARE_TEXTURE2D_FLOAT(_MainTex);
DECLARE_SAMPLER_FLOAT(sampler_MainTex);
DECLARE_TEXTURE2D_FLOAT(_CameraTex);
fragment_output frag (v2f i)
{
UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i);
half4 mask = UNITY_SAMPLE_TEX2D(_MainTex, i.texcoord);
half4 cam = UNITY_SAMPLE_TEX2D(_CameraTex, i.texcoord);
fragment_output o;
o.color = half4(cam.r, cam.g, cam.b, mask.r);
return o;
}
ENDHLSL
}
}
}