I am trying to produce a point cloud with the same dimensions as the 3D objects rendered by my Unity cameras.
What I have been working on is building the point cloud from the target textures of an RGB camera and a depth camera.
※ The two 2D RenderTextures look like this:
- Depth RenderTexture
Color Format: R8G8B8A8_UNORM, Depth Buffer: At least 24 bits depth (with stencil)
- RGB RenderTexture
Color Format: R16G16B16A16_SFLOAT, Depth Buffer: At least 16 bits depth (with stencil)
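One thing I am not sure about here: the depth camera's target is R8G8B8A8_UNORM, so a single channel only gives 8 bits of depth precision. If that turns out to matter, UnityCG.cginc has pack/unpack helpers to spread a [0,1) value across all four channels. A minimal sketch, assuming the depth value is first normalized with Linear01Depth:

// Pack a [0,1) depth value across the four 8-bit channels of the
// R8G8B8A8_UNORM target, then unpack it on the point-cloud side.
float depth01 = Linear01Depth(rawDepth);          // rawDepth: device depth sample
float4 packedDepth = EncodeFloatRGBA(depth01);    // written as the camera's output color
// ...later, when reading the texture back:
float restored = DecodeFloatRGBA(tex2D(_Pos, uv));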
I fed the data from the two 2D RenderTextures into a geometry shader, and the point cloud seemed to render successfully.
Viewed straight on, the point cloud is a 3D representation with depth completion that is almost identical to the source object.
※ The source object is an animating square CubeField consisting of 100 (10x10) 3D cubes.
But when I rotate it so that its X-axis is perpendicular to the screen, the point cloud's depth is gone: it looks thin, as if it were rendered on a sheet of paper.
So my question is: is it possible to render a 3D point cloud with depth completion using 2D RenderTextures? (A sketch of the vertex-stage sampling I have in mind follows the shader below.)
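To be explicit about what I mean by "depth completion": for every texel I want to recover a full 3D position from the depth sample, roughly like the sketch below. This assumes the texture holds raw device depth, the frustum is symmetric, and the UnityCG helpers (LinearEyeDepth, unity_CameraProjection) refer to the camera that rendered it:

// Minimal sketch: turn a depth-texture sample back into a view-space point.
float3 ViewPosFromDepth(float2 uv, float rawDepth)
{
    float eyeDepth = LinearEyeDepth(rawDepth);  // distance along the view axis
    float2 ndc = uv * 2.0 - 1.0;                // [0,1] UV -> [-1,1] NDC
    // Half-FOV tangents from the projection matrix diagonal (symmetric frustum).
    float2 tanHalfFov = float2(1.0 / unity_CameraProjection._m00,
                               1.0 / unity_CameraProjection._m11);
    return float3(ndc * tanHalfFov * eyeDepth, -eyeDepth); // view space looks down -Z
}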
Shader:
Shader "Custom/PointCloud" // name assumed; the Shader declaration was missing from my paste
{
    Properties
    {
        [NoScaleOffset] _Pos("Pos", 2D) = "white" {}
        [NoScaleOffset] _Col("Col", 2D) = "white" {}
        _PointSize("Point Size", Float) = 1.0
        _Color("PointCloud Color", Color) = (0, 0, 0, 0)
        [Toggle(USE_DISTANCE)] _UseDistance("Scale by distance?", Float) = 0
    }
    SubShader
    {
        Cull Off
        Pass
        {
            CGPROGRAM
            #pragma vertex vert
            #pragma geometry geom
            #pragma fragment frag
            #pragma target 4.0 // geometry shaders require shader model 4.0+
            #pragma shader_feature USE_DISTANCE
            #include "UnityCG.cginc"
            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 vertex : SV_POSITION;
                float2 uv : TEXCOORD0;
            };

            struct g2f
            {
                float4 vertex : SV_POSITION;
                float2 uv : TEXCOORD0;
            };

            float _PointSize;
            fixed4 _Color;
            sampler2D _Col;
            float4 _Col_TexelSize;
            sampler2D _Pos;
            float4 _Pos_TexelSize;
            [maxvertexcount(4)]
            void geom(point v2f i[1], inout TriangleStream<g2f> triStream)
            {
                // TODO: interpolate uvs on quad
                float2 uv = i[0].uv;
                float4 clip = UnityObjectToClipPos(i[0].vertex);

                float2 p = _PointSize * 0.001;
                p.y *= _ScreenParams.x / _ScreenParams.y; // compensate for aspect ratio

                // Emit the four corners of a screen-aligned quad as one triangle strip.
                const float2 corners[4] =
                {
                    float2(-1,  1),
                    float2(-1, -1),
                    float2( 1,  1),
                    float2( 1, -1)
                };

                [unroll]
                for (int c = 0; c < 4; c++)
                {
                    g2f o;
                #ifdef USE_DISTANCE
                    // Constant clip-space offset: the quad shrinks with distance
                    // after the perspective divide.
                    o.vertex = clip + float4(corners[c] * p, 0, 0);
                #else
                    // Scale by w so the quad keeps a constant on-screen size.
                    o.vertex = clip + float4(corners[c] * p, 0, 0) * clip.w;
                #endif
                    o.uv = uv;
                    triStream.Append(o);
                }
            }
            v2f vert(appdata v)
            {
                v2f o;
                // Distance from the camera to the vertex (currently unused).
                float dist = length(_WorldSpaceCameraPos - v.vertex.xyz);
                o.vertex = v.vertex;
                // Hard-coded offset to place the cloud in front of the camera.
                o.vertex.xyz += float3(0, 2.8, -4.2);
                o.uv = v.uv;
                return o;
            }
            fixed4 frag(g2f i) : SV_Target
            {
                // Reject texels whose _Pos sample falls outside the valid (0, 1) range.
                float2 pos = tex2D(_Pos, i.uv).xy;
                if (any(pos <= 0) || any(pos >= 1))
                    discard;
                return tex2D(_Col, i.uv) * _Color;
            }
            ENDCG
        }
    }
}
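For what it's worth, this is the direction I was considering for actually driving point positions from the textures: sample _Pos in the vertex stage with tex2Dlod (tex2D is unavailable there since there are no derivatives) and displace each point by the reconstructed depth. This is only a sketch under the same assumptions as above (raw device depth in _Pos.r, symmetric frustum), not working code:

// Sketch only: displace each point by the depth sampled from _Pos.
v2f vert(appdata v)
{
    v2f o;
    float rawDepth = tex2Dlod(_Pos, float4(v.uv, 0, 0)).r; // explicit LOD in the vertex stage
    float eyeDepth = LinearEyeDepth(rawDepth);
    float2 ndc = v.uv * 2.0 - 1.0;
    float2 tanHalfFov = float2(1.0 / unity_CameraProjection._m00,
                               1.0 / unity_CameraProjection._m11);
    // View-space position; Unity view space looks down -Z.
    o.vertex = float4(ndc * tanHalfFov * eyeDepth, -eyeDepth, 1.0);
    // NOTE: geom would then need mul(UNITY_MATRIX_P, v) instead of
    // UnityObjectToClipPos(v), since the position is already in view space.
    o.uv = v.uv;
    return o;
}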