Hello everyone
I need to draw a frame on a flat 2D object. I used to solve this problem with a sprite, but the number of variations in the frame's length-to-width ratio grew large, so I started considering a procedural way of drawing the frame on the panel. Writing a shader seemed like the best approach, but there is very little information on shaders and I can't find a ready-made solution that suits me.
I ask you to help with a link to an article or a lesson, or just write here what I need to do to get a frame inside an object with a configurable rounding radius.
below is an example of the effect I want to achieve

I’ve made some progress in solving my problem. I found a super tutorial where they show how to make a button with rounded edges (but it doesn’t really suit me):

And created the following script:

Shader "Custom/FrameAttempt"
{
    Properties
    {
        _MainTex ("Texture", 2D) = "red" {}
    }
    SubShader
    {
        Tags { "RenderType"="Opaque" }
        LOD 200

        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            sampler2D _MainTex;
            float4 _MainTex_ST;

            struct v2f
            {
                float4 pos : SV_POSITION;
                // Raw mesh UV, used for the texture sample.
                float2 srcUV : TEXCOORD0;
                // UV recentered so (0,0) is the middle of the quad
                // (range -0.5..0.5); handy for symmetric frame/distance
                // tests later on. Was assigned but never declared in the
                // original listing, which made the shader fail to compile.
                float2 adaptUV : TEXCOORD1;
            };

            v2f vert (appdata_base v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.srcUV = v.texcoord;
                o.adaptUV = v.texcoord - float2(0.5, 0.5);
                return o;
            }

            fixed4 frag (v2f i) : COLOR
            {
                // Both branches of the original (broken) if/else sampled the
                // texture identically, so the conditional is dropped; the
                // frame logic is what the rest of the thread builds up to.
                fixed4 col = tex2D(_MainTex, i.srcUV);
                return col;
            }
            ENDCG
        }
    }
}

But I am faced with the following problem - how do I find which side is longer vertical or horizontal?

I need this so the fillets come out round, not oval. Look at the drawing.

It depends.

If you have a single scaled quad, you can extract the scale of that quad from its transform.

// scale of quad mesh's x and y axis
float2 scale = float2(
length(unity_ObjectToWorld._m00_m10_m20),
length(unity_ObjectToWorld._m01_m11_m21)
);

You can then use that to get the aspect ratio, or otherwise adjust things in your shader to prevent things from going oval. Like this:

Shader "Custom/RoundedFrameQuadScale"
{
    Properties
    {
        _Color ("Color", Color) = (1,1,1,1)
        // Corner radius in object-space units of the (unit) quad.
        // Was referenced in the fragment shader but never declared,
        // which made the original listing fail to compile.
        _Radius ("Corner Radius", Range(0.001,1)) = 0.25
        _Thickness ("Frame Thickness", Range(0.001,1)) = 0.1
    }
    SubShader
    {
        Tags { "Queue"="Transparent" }
        LOD 100

        Pass
        {
            Blend SrcAlpha OneMinusSrcAlpha
            // Transparent surfaces should not write depth, or objects
            // drawn behind them afterwards get clipped.
            ZWrite Off

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
                float2 uv : TEXCOORD0;
            };

            fixed4 _Color;
            float _Radius;
            float _Thickness;

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 col = _Color;

                // World-space scale of the quad's x and y axes, extracted
                // from the object-to-world matrix. Assumes a single scaled
                // quad mesh (won't survive batching, which bakes the scale
                // into the mesh).
                float2 scale = float2(
                    length(unity_ObjectToWorld._m00_m10_m20),
                    length(unity_ObjectToWorld._m01_m11_m21)
                );

                // Remap UV so the corner arcs have equal world-space radius
                // on both axes: fold the quad into one quadrant, rescale to
                // world units, subtract the straight-edge extent, and
                // normalize by the radius. Distance 1 == outer rounded edge.
                float2 frameUV = max(0.0, abs(i.uv * 2.0 - 1.0) * scale - (scale - _Radius)) / _Radius;
                float roundedDist = length(frameUV);
                // Screen-space width of one pixel in "roundedDist" units,
                // clamped to avoid divide-by-zero.
                float roundedDistF = max(0.0001, fwidth(roundedDist));

                // Anti-aliased outer edge: fade out past distance 1.
                col.a *= saturate((1.0 - roundedDist) / roundedDistF);

                // Anti-aliased inner edge: fade out inside the frame band.
                col.a *= saturate((roundedDist - (1.0 - _Thickness)) / roundedDistF);

                return col;
            }
            ENDCG
        }
    }
}

However, if you have more than one quad with this same material, it'll likely get batched — i.e., merged into a single mesh. And if these are sprite or UI objects, it won't work either, for the same reason: the scale is baked into the final mesh.

So instead you can use screen-space partial derivatives to figure it out. This example assumes a screen-facing quad with no rotation, either in world space or relative to the camera orientation.

Shader "Custom/RoundedFrameScreenDeriv"
{
    Properties
    {
        _Color ("Color", Color) = (1,1,1,1)
        // Corner radius in world units.
        // Was referenced in the fragment shader but never declared,
        // which made the original listing fail to compile.
        _Radius ("Corner Radius", Range(0.001,1)) = 0.25
        _Thickness ("Frame Thickness", Range(0.001,1)) = 0.1
    }
    SubShader
    {
        Tags { "Queue"="Transparent" }
        LOD 100

        Pass
        {
            Blend SrcAlpha OneMinusSrcAlpha
            // Transparent surfaces should not write depth, or objects
            // drawn behind them afterwards get clipped.
            ZWrite Off

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
                float2 uv : TEXCOORD0;
                // World-space xy position, interpolated so the fragment
                // shader can take screen-space derivatives of it.
                float2 worldPos : TEXCOORD1;
            };

            fixed4 _Color;
            float _Radius;
            float _Thickness;

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                o.uv = v.uv;
                o.worldPos = mul(unity_ObjectToWorld, float4(v.vertex.xyz, 1.0)).xy;
                return o;
            }

            fixed4 frag (v2f i) : SV_Target
            {
                fixed4 col = _Color;

                // Recover the quad's world-space scale from screen-space
                // partial derivatives: (world units per pixel) divided by
                // (UV units per pixel). Works even when batching has baked
                // the scale into the mesh. Assumes a 0..1 UV range and a
                // screen-facing, unrotated quad.
                float2 scale = float2(
                    ddx(i.worldPos.x) / ddx(i.uv.x),
                    ddy(i.worldPos.y) / ddy(i.uv.y)
                );

                // Remap UV so the corner arcs have equal world-space radius
                // on both axes: fold the quad into one quadrant, rescale to
                // world units, subtract the straight-edge extent, and
                // normalize by the radius. Distance 1 == outer rounded edge.
                float2 frameUV = max(0.0, abs(i.uv * 2.0 - 1.0) * scale - (scale - _Radius)) / _Radius;
                float roundedDist = length(frameUV);
                // Screen-space width of one pixel in "roundedDist" units,
                // clamped to avoid divide-by-zero.
                float roundedDistF = max(0.0001, fwidth(roundedDist));

                // Anti-aliased outer edge: fade out past distance 1.
                col.a *= saturate((1.0 - roundedDist) / roundedDistF);

                // Anti-aliased inner edge: fade out inside the frame band.
                col.a *= saturate((roundedDist - (1.0 - _Thickness)) / roundedDistF);

                return col;
            }
            ENDCG
        }
    }
}

And I’m not including a screenshot of this one, because it looks exactly the same!

And if you do need camera or world rotation, that can be solved too, but that math is more complex and I honestly don’t want to deal with that math right now.

(Note: both of these shaders are designed as proof of concepts. If you use them in a UI, which is usually scaled to 1 pixel = 1 unit, the range of the radius and thickness properties won’t be useful. I’ll let you fix that if you want.)

However, I think this might be the wrong way to do it. I love shader based solutions, and have used this kind of thing in many projects. But if this is for a UI, I would probably suggest just using a 9-slice solution as it’ll have way fewer weird gotchas.

1 Like