Hello, I tried another function HDShadowUtils.ExtractPointLightData() and made some modifications to it. I used it to retrieve my RSMBuffer for the point light.
/// <summary>
/// Builds the view/projection matrices for one face of a point light's cubemap,
/// modelled on HDShadowUtils.ExtractPointLightData. Returns the device
/// view-projection matrix (platform projection convention, potentially reversed-Z).
/// </summary>
/// <param name="cubemapFaces">The six canonical cubemap-face view matrices (HDShadowUtils.kCubemapFaces).</param>
/// <param name="vl">The visible light to extract matrices for.</param>
/// <param name="faceIdx">Cubemap face index, 0..5 (CubemapFace order).</param>
/// <param name="nearPlane">Requested near plane; clamped to a minimum to avoid a degenerate projection.</param>
/// <param name="reverseZ">True when the platform uses a reversed depth buffer.</param>
/// <param name="view">Out: world-to-light-face view matrix.</param>
/// <param name="proj">Out: GL-convention perspective projection (90° FOV, aspect 1).</param>
/// <param name="deviceProjection">Out: (m00, m11, m22, m23) of the device projection, for shader-side depth reconstruction.</param>
/// <param name="deviceProjYFlip">Out: device projection with Y flipped (for render-into-texture).</param>
/// <param name="vpinverse">Out: inverse of the device view-projection.</param>
/// <param name="lightDir">Out: light forward direction.</param>
Matrix4x4 MyExtractPointLightVP(NativeArray<Matrix4x4> cubemapFaces, VisibleLight vl, uint faceIdx, float nearPlane, bool reverseZ,
out Matrix4x4 view, out Matrix4x4 proj, out Vector4 deviceProjection, out Matrix4x4 deviceProjYFlip, out Matrix4x4 vpinverse, out Vector4 lightDir)
{
const float k_MinShadowNearPlane = 0.01f;
// NOTE(review): this only logs; execution continues and would index out of range below.
if (faceIdx > (uint)CubemapFace.NegativeZ)
    Debug.LogError($"Tried to extract cubemap face {faceIdx}.");
lightDir = vl.GetForward();
// The canonical face matrix is rotation-only; inject the translation column so the
// matrix maps world space into this face's light space.
Vector3 lpos = vl.GetPosition();
view = cubemapFaces[(int)faceIdx];
Vector3 invertedViewPos = cubemapFaces[(int)faceIdx].MultiplyPoint(-lpos);
view.SetColumn(3, new Vector4(invertedViewPos.x, invertedViewPos.y, invertedViewPos.z, 1.0f));
float nearZ = Mathf.Max(nearPlane, k_MinShadowNearPlane);
// BUG FIX: the near plane was hardcoded to 0.01f, silently discarding the clamped
// nearZ computed above — the light's shadowNearPlane setting had no effect.
proj = HDShadowUtils.SetPerspective(90.0f, 1.0f, nearZ, vl.range);
// Device projection accounts for platform conventions (and reversed-Z when requested).
Matrix4x4 deviceProj = HDShadowUtils.GetGPUProjectionMatrix(proj, false, reverseZ);
deviceProjection = new Vector4(deviceProj.m00, deviceProj.m11, deviceProj.m22, deviceProj.m23);
deviceProjYFlip = HDShadowUtils.GetGPUProjectionMatrix(proj, true, reverseZ);
HDShadowUtils.InvertPerspective(ref deviceProj, ref view, out vpinverse);
// (Removed the unused GL-convention viewProj local; it only fed the commented-out
// SetSplitDataCullingPlanesFromViewProjMatrix call.)
Matrix4x4 deviceViewProj = CoreMatrixUtils.MultiplyPerspectiveMatrix(deviceProj, view);
return deviceViewProj;
}
I then use the returned view-projection matrix in the render pass to transform vertices into light space.
/// <summary>
/// Records the RSM (Reflective Shadow Map) GI passes: for each visible light and
/// each cubemap face, renders the scene from the light's point of view into the
/// RSM normal/flux/position UAV buffers.
/// </summary>
void RenderRSMGIBuffer(RenderGraph renderGraph,
HDCamera hdCamera,
TextureHandle colorBuffer,
in LightingBuffers lightingBuffers,
in BuildGPULightListOutput lightLists,
RSMGIBuffers rendergraph_rsmgiBuffers,
ref PrepassOutput prepassOutput,
TextureHandle vtFeedbackBuffer,
ShadowResult shadowResult,
CullingResults cullResults)
{
bool debugDisplay = m_CurrentDebugDisplaySettings.IsDebugDisplayEnabled();
// The face matrices are only read at record time (the render lambdas below never
// touch this array), so disposing after the loops is safe even though the passes
// execute later.
NativeArray<Matrix4x4> cubemapFaces = new NativeArray<Matrix4x4>(HDShadowUtils.kCubemapFaces, Allocator.TempJob);
var usesReversedZBuffer = SystemInfo.usesReversedZBuffer;

for (int lightID = 0; lightID < cullResults.visibleLights.Length; lightID++)
{
    VisibleLight visibleLight = cullResults.visibleLights.ElementAt(lightID);
    var light = visibleLight.light;
    // const int faceCount = 6; -- restore `faceID < faceCount` once a single face renders correctly
    for (int faceID = 0; faceID < 1; faceID++)
    {
        // BUG FIX: SetRenderFunc must be called exactly once per render pass. The
        // original registered it repeatedly inside one pass, so only the LAST
        // registration survived; worse, the lambda captured the shared `for` loop
        // variables, which had already reached their final values by the time
        // RenderGraph executed the deferred function. Build one pass per
        // (light, face) and capture per-iteration copies instead.
        int passFaceID = faceID;
        int passLightID = lightID;
        Vector4 lightPos = new Vector4(light.transform.position.x, light.transform.position.y, light.transform.position.z, 1);

        Matrix4x4 lightVP = MyExtractPointLightVP(cubemapFaces, visibleLight, (uint)faceID, light.shadowNearPlane,
            usesReversedZBuffer,
            out Matrix4x4 view, out Matrix4x4 projection, out Vector4 deviceProjection,
            out Matrix4x4 deviceProjectionYFlip, out Matrix4x4 invViewProjection, out Vector4 lightDir);

        using (var builder = renderGraph.AddRenderPass<RSMGIPassData>(debugDisplay ? "RSM Debug" : "RSM",
            out var passData,
            debugDisplay ? ProfilingSampler.Get(HDProfileId.RSMDebug) : ProfilingSampler.Get(HDProfileId.RSM)))
        {
            builder.EnableAsyncCompute(false);
            var rendererList = renderGraph.CreateRendererList(RSMBufferRendererList(cullResults, hdCamera));
            builder.UseRendererList(rendererList);
            passData.noUseDepthBuffer = builder.UseDepthBuffer(prepassOutput.depthBuffer, DepthAccess.ReadWrite);
            passData.rsmGIPassData_rsmgiBuffers.rsmNormalBuffer = builder.ReadWriteTexture(rendergraph_rsmgiBuffers.rsmNormalBuffer);
            passData.rsmGIPassData_rsmgiBuffers.rsmFluxBuffer = builder.ReadWriteTexture(rendergraph_rsmgiBuffers.rsmFluxBuffer);
            passData.rsmGIPassData_rsmgiBuffers.rsmPositionBuffer = builder.ReadWriteTexture(rendergraph_rsmgiBuffers.rsmPositionBuffer);
            builder.AllowRendererListCulling(false);

            builder.SetRenderFunc(
                (RSMGIPassData data, RenderGraphContext context) =>
                {
                    BindGlobalRSMPassBuffers(data, context.cmd);
                    context.cmd.SetGlobalMatrix("LightVP", lightVP);
                    context.cmd.SetGlobalInt("gCubeFaceN", passFaceID);
                    context.cmd.SetGlobalInt("gLightN", passLightID);
                    context.cmd.SetGlobalVector("gPointLightPos", lightPos);
                    context.cmd.SetRandomWriteTarget(1, data.rsmGIPassData_rsmgiBuffers.rsmNormalBuffer);
                    context.cmd.SetRandomWriteTarget(2, data.rsmGIPassData_rsmgiBuffers.rsmFluxBuffer);
                    context.cmd.SetRandomWriteTarget(3, data.rsmGIPassData_rsmgiBuffers.rsmPositionBuffer);
                    CoreUtils.DrawRendererList(context.renderContext, context.cmd, rendererList);
                    // BUG FIX: release the UAV bindings, otherwise slots 1-3 stay
                    // bound and later passes render with corrupted output targets.
                    context.cmd.ClearRandomWriteTargets();
                });
        }
    }
}
cubemapFaces.Dispose();
}
But I only got strange output textures.
If I comment out `varyingsType.vmesh.positionCS = mul(LightVP, float4(varyingsType.vmesh.positionRWS, 1.f));`
and just render with the GameView camera instead, it looks like this:
So, it seems that the LightVP above didn't transform vertices into light space, but instead applied some transformation in the current camera's screen space.