Is there a way to get a neighboring pixel’s normal in a vertex or fragment shader? I want to make a shader that compares the current pixel’s normal with its surrounding pixels’ normals and sets the pixel color based on the result. So far, I’ve only found a way to get the current pixel’s normal.
ddx / ddy will give you the difference between a given value at the current pixel and at the adjacent pixel in the same 2x2 quad, vertically (via ddy) or horizontally (via ddx). That’s about as much as you can get.
float3 diffX = ddx(normal);
float3 diffY = ddy(normal);
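For example, here’s a rough sketch (not code from this thread) of using those derivatives to flag pixels where the interpolated normal changes sharply; the v2f struct with a normal member and the _EdgeStrength property are assumptions, not anything from the shader below.
half4 frag (v2f i) : COLOR
{
    // rate of change of the interpolated normal across the 2x2 pixel quad
    float3 diffX = ddx(i.normal);
    float3 diffY = ddy(i.normal);
    // treat a large change as an "edge" and output it as a greyscale factor
    float edge = saturate((length(diffX) + length(diffY)) * _EdgeStrength);
    return half4(edge.xxx, 1);
}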
Other than that, you’ll have to sample the texture multiple times, one pixel over in each direction, and compare the differences. Here’s a shader that does that to create a detail normal from a height map.
Shader "Debug/Normal Map From Height" {
    Properties {
        _Color ("Main Color", Color) = (1,1,1,1)
        _MainTex ("Diffuse (RGB) Alpha (A)", 2D) = "white" {}
        _BumpMap ("Normal (Normal)", 2D) = "bump" {}
        _HeightMap ("Heightmap (R)", 2D) = "grey" {}
        _HeightmapStrength ("Heightmap Strength", Float) = 1.0
        _HeightmapDimX ("Heightmap Width", Float) = 2048
        _HeightmapDimY ("Heightmap Height", Float) = 2048
    }
    SubShader {
        Tags { "RenderType" = "Opaque" }
        CGPROGRAM
        #pragma surface surf NormalsHeight
        #pragma target 3.0

        struct Input
        {
            float2 uv_MainTex;
        };

        sampler2D _MainTex, _BumpMap, _HeightMap;
        float _HeightmapStrength, _HeightmapDimX, _HeightmapDimY;
        void surf (Input IN, inout SurfaceOutput o)
        {
            o.Albedo = fixed3(0.5, 0.5, 0.5);

            float3 normal = UnpackNormal(tex2D(_BumpMap, IN.uv_MainTex));

            // sample the height map at this texel and one texel over in each direction
            float me = tex2D(_HeightMap, IN.uv_MainTex).x;
            float n = tex2D(_HeightMap, float2(IN.uv_MainTex.x, IN.uv_MainTex.y + 1.0/_HeightmapDimY)).x;
            float s = tex2D(_HeightMap, float2(IN.uv_MainTex.x, IN.uv_MainTex.y - 1.0/_HeightmapDimY)).x;
            float e = tex2D(_HeightMap, float2(IN.uv_MainTex.x - 1.0/_HeightmapDimX, IN.uv_MainTex.y)).x;
            float w = tex2D(_HeightMap, float2(IN.uv_MainTex.x + 1.0/_HeightmapDimX, IN.uv_MainTex.y)).x;

            float3 norm = normal;
            float3 temp = norm; // a temporary vector that is not parallel to norm
            if (norm.x == 1)
                temp.y += 0.5;
            else
                temp.x += 0.5;

            // form a basis with norm being one of the axes:
            float3 perp1 = normalize(cross(norm, temp));
            float3 perp2 = normalize(cross(norm, perp1));

            // use the basis to move the normal in its own space by the offset
            float3 normalOffset = -_HeightmapStrength * (((n - me) - (s - me)) * perp1 + ((e - me) - (w - me)) * perp2);
            norm += normalOffset;
            norm = normalize(norm);

            o.Normal = norm;
        }
        inline fixed4 LightingNormalsHeight (SurfaceOutput s, fixed3 lightDir, fixed3 viewDir, fixed atten)
        {
            viewDir = normalize(viewDir);
            lightDir = normalize(lightDir);
            s.Normal = normalize(s.Normal);

            float NdotL = dot(s.Normal, lightDir);

            fixed4 c;
            c.rgb = float3(0.5, 0.5, 0.5) * saturate(NdotL) * _LightColor0.rgb * atten;
            c.a = 1.0;
            return c;
        }
        ENDCG
    }
    FallBack "VertexLit"
}
Thanks. I noticed your shader doesn’t actually use ddx, but I’ve been trying to get ddx to work in mine and I keep getting the error “Program ‘frag’, function “ddx” not supported in this profile”
Pass {
    CGPROGRAM
    #pragma target 3.0
    #pragma vertex vert
    #pragma fragment frag
    #include "UnityCG.cginc"

    struct v2f {
        float4 pos : SV_POSITION;
        float3 color : COLOR0;
        float3 normal : TEXCOORD1;
    };

    struct V_IN
    {
        float4 vertex : POSITION;
        float3 normal : NORMAL;
        float4 texcoord : TEXCOORD0;
    };

    v2f vert (V_IN v)
    {
        v2f o;
        float4 nrml = float4(v.normal, 1);
        o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
        o.color = nrml.xyz * 0.7 + 0.5;
        o.normal = v.normal;
        return o;
    }

    half4 frag (v2f i) : COLOR
    {
        float3 res;
        res = ddx(i.normal); // this is the line that triggers the "not supported in this profile" error
        return half4(i.color, 1);
    }
    ENDCG
}
Aras mentioned:
Unity compiles Cg into ARB vertex/fragment programs. ARB programs do not support derivative instructions. The Cg used in Unity is not out of date; ARB and OpenGL in general are out of date.
If you don’t need OpenGL, tell ShaderLab to compile the shader into D3D9 only (#pragma only_renderers d3d9, see docs).
There is a GPU Gems article on how to “emulate” derivatives using texture lookups.
http://forum.unity3d.com/threads/26421-Problems-With-my-Parallax-Occlusion-Mapping
You can also tell Unity to compile it to GLSL on OpenGL platforms, which should support partial derivatives. Try adding this line:
#pragma glsl
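For placement, it just goes alongside the other pragmas at the top of the CGPROGRAM block in your Pass above; a sketch (pick one of the two lines depending on whether you still need OpenGL):
#pragma target 3.0
#pragma vertex vert
#pragma fragment frag
#pragma glsl                  // compile to GLSL on OpenGL platforms so ddx/ddy are available
// #pragma only_renderers d3d9  // alternative: skip OpenGL support entirely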
I see. I didn’t quite understand that article, but I thought of another way that I think should work. I’m brand new to shaders, though, and things aren’t working as I expect. I was thinking of making a two-pass shader where the 1st pass colors the model based on its normals and the 2nd pass compares the current pixel’s color with nearby pixels’ colors. Since the color is based on the normal, I should be able to compare the colors as I would normals.
To start, I figured I’d just do the 1st pass as described and make the 2nd pass basically a pass-through that does nothing. But the 2nd pass is causing my model to turn completely black. Either I’m misunderstanding the graphics pipeline and the 2nd pass doesn’t operate on anything the 1st pass produces, or I’m sampling from the wrong thing. This is what I have right now.
Shader "Resources/Shaders/myShader" {
    Properties {
        _Color ("Color", Color) = (0,0,0,1)
    }
    SubShader {
        // 1st pass: color the model based on its normals
        Pass {
            CGPROGRAM
            #pragma target 3.0
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct v2f {
                float4 pos : SV_POSITION;
                float3 color : COLOR0;
            };

            struct V_IN
            {
                float4 vertex : POSITION;
                float3 normal : NORMAL;
                float4 texcoord : TEXCOORD0;
            };

            v2f vert (V_IN v)
            {
                v2f o;
                float4 nrml = float4(v.normal, 1);
                o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
                o.color = nrml.xyz * 0.7 + 0.5;
                return o;
            }

            half4 frag (v2f i) : COLOR
            {
                return half4(i.color, 1);
            }
            ENDCG
        }

        // 2nd pass: intended as a pass-through
        Pass {
            CGPROGRAM
            #pragma target 3.0
            #pragma vertex vert
            #pragma fragment frag
            #include "UnityCG.cginc"

            struct v2f {
                float4 pos : SV_POSITION;
                float2 texcoord : TEXCOORD0;
                float4 color : COLOR;
            };

            struct V_IN
            {
                float4 vertex : POSITION;
                float2 texcoord : TEXCOORD0;
                float4 color : COLOR; // this is the mesh's per-vertex color attribute, not the 1st pass's output
            };

            v2f vert (V_IN v)
            {
                v2f o;
                o.texcoord = v.texcoord;
                o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
                o.color = v.color;
                return o;
            }

            half4 frag (v2f i) : COLOR
            {
                return half4(i.color);
            }
            ENDCG
        }
    }
    Fallback "VertexLit"
}
Yeah, my shader doesn’t use ddx/ddy because it looks like crap. It’s really noisy because it’s a harsh screen-space thing that only samples the 2x2 block of pixels for that fragment.
But depending on what you want to use it for, it can work OK.
For OpenGL shaders, you’ll need to use dFdx and dFdy, the GLSL equivalents. You can wrap those up in #if blocks and let the compiler sort it out.
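A minimal sketch of what that #if wrapping could look like, assuming Unity’s SHADER_API_OPENGL define is what you branch on (i.normal is just a placeholder for whatever value you’re differentiating):
#if defined(SHADER_API_OPENGL)
    // GLSL spells the derivative intrinsics dFdx / dFdy
    float3 diffX = dFdx(i.normal);
    float3 diffY = dFdy(i.normal);
#else
    float3 diffX = ddx(i.normal);
    float3 diffY = ddy(i.normal);
#endif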
Noisy? It should be constant for linearly interpolated values.
Well, here’s what I get with the multi-sample:
http://www.farfarer.com/temp/heightmapShader.png
Here’s what I get with ddx/ddy:
http://www.farfarer.com/temp/heightmapShaderDeriv.png
That looks weird… Did you use something similar to this implementation? Derivative Maps – CodeItNow (the first part, with height maps)
I don’t have the shader code to hand any more, but it wasn’t that complex. I basically used the same code as above but swapped the multi-sample for ddx/ddy instructions.
I did implement proper derivative maps a while back, though (based on Morten Mikkelsen’s code). It worked pretty well, but I didn’t find much of an advantage over regular normal maps.
Is anyone familiar with multiple-pass shaders and able to tell me why my 2nd pass’s input isn’t the 1st pass’s output?
Because that’s not how they work.
If you want to feed the first pass into the second pass, you’ll need a grab pass (requires Pro).
Or you can do some simple maths between the results of the first pass and the results of the second pass using blend modes.
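For reference, here’s a rough sketch of the grab-pass route (assuming you keep your first pass as-is and replace the second one); a bare GrabPass {} exposes everything rendered so far as _GrabTexture, and ComputeGrabScreenPos / UNITY_PROJ_COORD come from UnityCG.cginc:
GrabPass { } // captures the screen after the 1st pass into _GrabTexture

Pass {
    CGPROGRAM
    #pragma vertex vert
    #pragma fragment frag
    #include "UnityCG.cginc"

    sampler2D _GrabTexture;

    struct v2f {
        float4 pos : SV_POSITION;
        float4 grabUV : TEXCOORD0;
    };

    v2f vert (appdata_base v)
    {
        v2f o;
        o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
        o.grabUV = ComputeGrabScreenPos(o.pos); // screen-space UV into the grabbed texture
        return o;
    }

    half4 frag (v2f i) : COLOR
    {
        // reads back what the 1st pass wrote at this pixel;
        // neighbouring pixels can be read by offsetting this UV
        return tex2Dproj(_GrabTexture, UNITY_PROJ_COORD(i.grabUV));
    }
    ENDCG
}
From there you could offset grabUV by a pixel in each direction and compare the colors, which is what your two-pass idea was after.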