Hello everyone!
My assumption was that ComputeScreenPos(o.vertex).x would be a value between 0 and 1, with 0 being the leftmost pixel of the screen and 1 the rightmost.
That appears to be incorrect: the shader below only puts a gradient on roughly the left tenth of the screen when the camera is in perspective mode. In orthographic mode it works as expected, with a gradient running from left to right.
What are the minimum and maximum XYZW coords returned by ComputeScreenPos, and what can we use to normalize at least the XY coords?
Thanks for reading!
Shader "Unlit/ScreenPosGradient"
{
    SubShader
    {
        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float2 uv : TEXCOORD0;
                float4 vertex : SV_POSITION;
                float4 screenPos : TEXCOORD1;
            };

            v2f vert (appdata v)
            {
                v2f o;
                o.vertex = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;
                // Screen position computed in the vertex shader and interpolated
                o.screenPos = ComputeScreenPos(o.vertex);
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                // Expected: 0 at the left edge of the screen, 1 at the right edge
                fixed4 col = float4(i.screenPos.x, i.screenPos.x, i.screenPos.x, 1);
                return col;
            }
            ENDCG
        }
    }
}