Skip to content

Hdrp/multi view pt #518

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 11 commits into from
May 27, 2020
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions com.unity.render-pipelines.high-definition/CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -130,6 +130,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
- Added CustomPassUtils API to simplify Blur, Copy and DrawRenderers custom passes.
- Added Histogram guided automatic exposure.
- Added few exposure debug modes.
- Added support for multiple path-traced views at once (e.g., scene and game views).

### Fixed
- Fix when rescale probe all direction below zero (1219246)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -536,7 +536,7 @@ static class UnlitKeywords
static class UnlitIncludes
{
const string kPassForwardUnlit = "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/ShaderPass/ShaderPassForwardUnlit.hlsl";

public static IncludeCollection Meta = new IncludeCollection
{
{ CoreIncludes.CorePregraph },
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
using System;
using System.Linq;
using System.Collections.Generic;
using UnityEngine.Experimental.Rendering;

#if UNITY_EDITOR
Expand All @@ -7,12 +9,30 @@

namespace UnityEngine.Rendering.HighDefinition
{
// Struct storing per-camera data, to handle accumulation and dirtiness
internal struct CameraData
{
// Restarts the accumulation sequence for this camera: clears the weight sum and the sub-frame counter.
public void ResetIteration()
{
accumulatedWeight = 0.0f;
currentIteration = 0;
}

// Last seen render resolution; compared against hdCamera.actualWidth/Height to detect resolution dirtiness.
public uint width;
public uint height;
// Last seen sky/fog state; compared against the camera's current state to detect per-camera dirtiness.
public bool skyEnabled;
public bool fogEnabled;

// Sum of sub-frame weights accumulated so far for this camera.
public float accumulatedWeight;
// Index of the current sub-frame within the accumulation sequence.
public uint currentIteration;
}

// Helper class to manage time-scale in Unity when recording multi-frame sequences where one final frame is an accumulation of multiple sub-frames
internal class SubFrameManager
{
// Shutter settings
float m_ShutterInterval = 0.0f;
bool m_Centered = true;
bool m_Centered = true;
float m_ShutterFullyOpen = 0.0f;
float m_ShutterBeginsClosing = 1.0f;

Expand All @@ -21,8 +41,26 @@ internal class SubFrameManager
// Internal state
float m_OriginalTimeScale = 0;
float m_OriginalFixedDeltaTime = 0;
bool m_IsRenderingTheFirstFrame = true;
float m_AccumulatedWeight = 0;
bool m_IsRenderingTheFirstFrame = true;

// Per-camera data cache
Dictionary<int, CameraData> m_CameraCache = new Dictionary<int, CameraData>();

// Returns the cached data for the given camera instance ID,
// creating and caching a zero-initialized entry on first access.
internal CameraData GetCameraData(int camID)
{
if (m_CameraCache.TryGetValue(camID, out CameraData cachedData))
return cachedData;

CameraData newData = default(CameraData);
newData.ResetIteration();
m_CameraCache.Add(camID, newData);
return newData;
}

// Stores (inserts or overwrites) the cached data for the given camera instance ID.
// CameraData is a value type, so callers must write back any mutation via this method.
internal void SetCameraData(int camID, CameraData camData)
{
m_CameraCache[camID] = camData;
}

// The number of sub-frames that will be used to reconstruct a converged frame
public uint subFrameCount
Expand All @@ -32,13 +70,6 @@ public uint subFrameCount
}
uint m_AccumulationSamples = 0;

// The sequence number of the current sub-frame
public uint iteration
{
get { return m_CurrentIteration; }
}
uint m_CurrentIteration = 0;

// True when a recording session is in progress
public bool isRecording
{
Expand All @@ -47,18 +78,32 @@ public bool isRecording
bool m_IsRecording = false;

// Resets the sub-frame sequence
// Resets the sub-frame sequence of a single camera, identified by its instance ID.
internal void Reset(int camID)
{
CameraData data = GetCameraData(camID);
data.ResetIteration();
SetCameraData(camID, data);
}
// Resets the sub-frame sequence of every cached camera.
// NOTE(review): the rendered diff left the removed-side lines
// "m_CurrentIteration = 0; m_AccumulatedWeight = 0;" interleaved here; those
// fields are deleted elsewhere in this change, so only the new-side loop remains.
internal void Reset()
{
// Snapshot the keys: Reset(camID) writes back into the dictionary while we iterate.
foreach (int camID in m_CameraCache.Keys.ToList())
Reset(camID);
}

// Clears all cached per-camera accumulation data.
// NOTE(review): the rendered diff interleaved the removed Advance() method
// (which incremented the deleted m_CurrentIteration field) with the new Clear();
// only the new-side body is kept here.
internal void Clear()
{
m_CameraCache.Clear();
}
// Resets only the cameras that have already reached maxSamples iterations,
// leaving still-converging cameras untouched.
internal void SelectiveReset(uint maxSamples)
{
// Snapshot the keys: SetCameraData mutates the dictionary while we iterate.
foreach (int camID in m_CameraCache.Keys.ToList())
{
CameraData data = GetCameraData(camID);
if (data.currentIteration < maxSamples)
continue;

data.ResetIteration();
SetCameraData(camID, data);
}
}

void Init(int samples, float shutterInterval)
Expand All @@ -67,13 +112,14 @@ void Init(int samples, float shutterInterval)
m_ShutterInterval = samples > 1 ? shutterInterval : 0;
m_IsRecording = true;
m_IsRenderingTheFirstFrame = true;
Reset();
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We should keep the Reset here, because it ensures that:

  • the first frame of motion blur is correct: we have to discard any previous progress, since it might have happened without motion blur.
  • all cameras are in-sync: this is necessary because we cannot have different motion blur progress per-camera (because we manipulate Time.*).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done: I even clear the list here.


Clear();

m_OriginalTimeScale = Time.timeScale;

Time.timeScale = m_OriginalTimeScale * m_ShutterInterval / m_AccumulationSamples;

if (m_Centered && m_IsRenderingTheFirstFrame)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Perhaps I should have put a comment in the code, but we do need the check for m_IsRenderingTheFirstFrame.

The idea is that for centered motion blur, we "squeeze" the duration of the first frame to half (with timescale *=0.5), so the rest of the frames are correctly centered. But this extra scaling needs to be done only for the first frame.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

But we set it to true a few lines above (l114 in the new file).

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

(I only removed it in the Init(), not in the PrepareNewSubFrame())

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh yeah, you are right — I thought this was PrepareNewSubFrame, which has the same logic.

if (m_Centered)
{
Time.timeScale *= 0.5f;
}
Expand Down Expand Up @@ -108,12 +154,15 @@ internal void EndRecording()
// Should be called before rendering a new frame in a sequence (when accumulation is desired)
internal void PrepareNewSubFrame()
{
if (m_CurrentIteration == m_AccumulationSamples)
uint maxIteration = 0;
foreach (int camID in m_CameraCache.Keys.ToList())
maxIteration = Math.Max(maxIteration, GetCameraData(camID).currentIteration);

if (maxIteration == m_AccumulationSamples)
{
Reset();
}

if (m_CurrentIteration == m_AccumulationSamples - 1)
else if (maxIteration == m_AccumulationSamples - 1)
{
Time.timeScale = m_OriginalTimeScale * (1.0f - m_ShutterInterval);
m_IsRenderingTheFirstFrame = false;
Expand Down Expand Up @@ -167,24 +216,23 @@ float ShutterProfile(float time)
// y: sum of weights until now, without the current frame
// z: one over the sum of weights until now, including the current frame
// w: unused
// Computes and returns the accumulation weights for the current sub-frame of the
// given camera, advancing that camera's accumulated weight as a side effect.
// Components (per the comment above):
//   x: weight of the current frame
//   y: sum of weights before the current frame
//   z: 1 / (sum of weights including the current frame), or 0 if the sum is not positive
//   w: unused
// NOTE(review): the rendered diff interleaved the removed GetFrameWeights() body
// (using the deleted m_AccumulatedWeight / m_CurrentIteration fields); only the
// new-side, per-camera implementation is kept here.
internal Vector4 ComputeFrameWeights(int camID)
{
CameraData camData = GetCameraData(camID);

float totalWeight = camData.accumulatedWeight;
// Normalized position of this sub-frame in [0, 1); guards against division by zero.
float time = m_AccumulationSamples > 0 ? (float) camData.currentIteration / m_AccumulationSamples : 0.0f;

// Outside a recording session every sub-frame has unit weight.
float weight = isRecording ? ShutterProfile(time) : 1.0f;

// Only accumulate while the sequence has not yet converged.
if (camData.currentIteration < m_AccumulationSamples)
camData.accumulatedWeight += weight;

// CameraData is a value type: persist the mutation back into the cache.
SetCameraData(camID, camData);

return (camData.accumulatedWeight > 0) ?
new Vector4(weight, totalWeight, 1.0f / camData.accumulatedWeight, 0.0f) :
new Vector4(weight, totalWeight, 0.0f, 0.0f);
}
}

Expand Down Expand Up @@ -246,20 +294,28 @@ void RenderAccumulation(HDCamera hdCamera, CommandBuffer cmd, RTHandle inputText
if (!accumulationShader)
return;

uint currentIteration = m_SubFrameManager.iteration;
// Get the per-camera data
int camID = hdCamera.camera.GetInstanceID();
Vector4 frameWeights = m_SubFrameManager.ComputeFrameWeights(camID);
CameraData camData = m_SubFrameManager.GetCameraData(camID);

// Accumulate the path tracing results
int kernel = accumulationShader.FindKernel("KMain");
cmd.SetGlobalInt(HDShaderIDs._AccumulationFrameIndex, (int)currentIteration);
cmd.SetGlobalInt(HDShaderIDs._AccumulationFrameIndex, (int)camData.currentIteration);
cmd.SetGlobalInt(HDShaderIDs._AccumulationNumSamples, (int)m_SubFrameManager.subFrameCount);
cmd.SetComputeTextureParam(accumulationShader, kernel, HDShaderIDs._AccumulatedFrameTexture, history);
cmd.SetComputeTextureParam(accumulationShader, kernel, HDShaderIDs._CameraColorTextureRW, outputTexture);
cmd.SetComputeTextureParam(accumulationShader, kernel, HDShaderIDs._RadianceTexture, inputTexture);
cmd.SetComputeVectorParam(accumulationShader, HDShaderIDs._AccumulationWeights, m_SubFrameManager.GetFrameWeights());
cmd.SetComputeVectorParam(accumulationShader, HDShaderIDs._AccumulationWeights, frameWeights);
cmd.SetComputeIntParam(accumulationShader, HDShaderIDs._AccumulationNeedsExposure, needsExposure ? 1 : 0);
cmd.DispatchCompute(accumulationShader, kernel, (hdCamera.actualWidth + 7) / 8, (hdCamera.actualHeight + 7) / 8, hdCamera.viewCount);

m_SubFrameManager.Advance();
// Increment the iteration counter, if we haven't converged yet
if (camData.currentIteration < m_SubFrameManager.subFrameCount)
{
camData.currentIteration++;
m_SubFrameManager.SetCameraData(camID, camData);
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -59,11 +59,6 @@ public partial class HDRenderPipeline
#endif // UNITY_EDITOR
ulong m_CacheAccelSize = 0;
uint m_CacheLightCount = 0;
uint m_CacheCameraWidth = 0;
uint m_CacheCameraHeight = 0;

bool m_CameraSkyEnabled;
bool m_FogEnabled;

RTHandle m_RadianceTexture; // stores the per-pixel results of path tracing for this frame

Expand Down Expand Up @@ -111,12 +106,11 @@ private Vector4 ComputeDoFConstants(HDCamera hdCamera, PathTracing settings)

private void OnSceneEdit()
{
// If we just change the sample count, we don't want to reset iteration
// If we just change the sample count, we don't necessarily want to reset iteration
if (m_PathTracingSettings && m_CacheMaxIteration != m_PathTracingSettings.maximumSamples.value)
{
m_CacheMaxIteration = (uint) m_PathTracingSettings.maximumSamples.value;
if (m_SubFrameManager.iteration >= m_CacheMaxIteration)
ResetPathTracing();
m_SubFrameManager.SelectiveReset(m_CacheMaxIteration);
}
else
ResetPathTracing();
Expand All @@ -132,7 +126,7 @@ private UndoPropertyModification[] OnUndoRecorded(UndoPropertyModification[] mod
// While the user drags the mouse in a scene view, restart accumulation for that
// view's camera only (instead of resetting path tracing globally).
// NOTE(review): the rendered diff left the removed-side "ResetPathTracing();"
// line interleaved under the if, which read as an unconditional Reset; only the
// new-side call is kept here.
private void OnSceneGui(SceneView sv)
{
if (Event.current.type == EventType.MouseDrag)
m_SubFrameManager.Reset(sv.camera.GetInstanceID());
}

#endif // UNITY_EDITOR
Expand All @@ -144,45 +138,53 @@ private void CheckDirtiness(HDCamera hdCamera)
return;
}

// Check camera clear mode dirtiness
bool enabled = (hdCamera.clearColorMode == HDAdditionalCameraData.ClearColorMode.Sky);
if (enabled != m_CameraSkyEnabled)
// Grab the cached data for the current camera
int camID = hdCamera.camera.GetInstanceID();
CameraData camData = m_SubFrameManager.GetCameraData(camID);

// Check camera resolution dirtiness
if (hdCamera.actualWidth != camData.width || hdCamera.actualHeight != camData.height)
{
m_CameraSkyEnabled = enabled;
ResetPathTracing();
camData.width = (uint) hdCamera.actualWidth;
camData.height = (uint) hdCamera.actualHeight;
camData.ResetIteration();
m_SubFrameManager.SetCameraData(camID, camData);
return;
}

// Check camera resolution dirtiness
if (hdCamera.actualWidth != m_CacheCameraWidth || hdCamera.actualHeight != m_CacheCameraHeight)
// Check camera sky dirtiness
bool enabled = (hdCamera.clearColorMode == HDAdditionalCameraData.ClearColorMode.Sky);
if (enabled != camData.skyEnabled)
{
m_CacheCameraWidth = (uint) hdCamera.actualWidth;
m_CacheCameraHeight = (uint) hdCamera.actualHeight;
ResetPathTracing();
camData.skyEnabled = enabled;
camData.ResetIteration();
m_SubFrameManager.SetCameraData(camID, camData);
return;
}

// Check camera matrix dirtiness
if (hdCamera.mainViewConstants.nonJitteredViewProjMatrix != (hdCamera.mainViewConstants.prevViewProjMatrix))
// Check camera fog dirtiness
enabled = Fog.IsFogEnabled(hdCamera);
if (enabled != camData.fogEnabled)
{
ResetPathTracing();
camData.fogEnabled = enabled;
camData.ResetIteration();
m_SubFrameManager.SetCameraData(camID, camData);
return;
}

// Check fog dirtiness
enabled = Fog.IsFogEnabled(hdCamera);
if (enabled != m_FogEnabled)
// Check camera matrix dirtiness
if (hdCamera.mainViewConstants.nonJitteredViewProjMatrix != (hdCamera.mainViewConstants.prevViewProjMatrix))
{
m_FogEnabled = enabled;
ResetPathTracing();
camData.ResetIteration();
m_SubFrameManager.SetCameraData(camID, camData);
return;
}

// Check materials dirtiness
if (m_MaterialsDirty)
{
ResetPathTracing();
m_MaterialsDirty = false;
ResetPathTracing();
return;
}

Expand Down Expand Up @@ -241,8 +243,8 @@ void RenderPathTracing(HDCamera hdCamera, CommandBuffer cmd, RTHandle outputText
m_SubFrameManager.subFrameCount = 1;
#endif

uint currentIteration = m_SubFrameManager.iteration;
if (currentIteration < m_SubFrameManager.subFrameCount)
CameraData camData = m_SubFrameManager.GetCameraData(hdCamera.camera.GetInstanceID());
if (camData.currentIteration < m_SubFrameManager.subFrameCount)
{
// Define the shader pass to use for the path tracing pass
cmd.SetRayTracingShaderPass(pathTracingShader, "PathTracingDXR");
Expand All @@ -259,15 +261,15 @@ void RenderPathTracing(HDCamera hdCamera, CommandBuffer cmd, RTHandle outputText
m_ShaderVariablesRayTracingCB._RaytracingMinRecursion = m_PathTracingSettings.minimumDepth.value;
m_ShaderVariablesRayTracingCB._RaytracingMaxRecursion = m_PathTracingSettings.maximumDepth.value;
m_ShaderVariablesRayTracingCB._RaytracingIntensityClamp = m_PathTracingSettings.maximumIntensity.value;
m_ShaderVariablesRayTracingCB._RaytracingSampleIndex = (int)m_SubFrameManager.iteration;
m_ShaderVariablesRayTracingCB._RaytracingSampleIndex = (int)camData.currentIteration;
ConstantBuffer.PushGlobal(cmd, m_ShaderVariablesRayTracingCB, HDShaderIDs._ShaderVariablesRaytracing);

// LightLoop data
cmd.SetGlobalBuffer(HDShaderIDs._RaytracingLightCluster, lightCluster.GetCluster());
cmd.SetGlobalBuffer(HDShaderIDs._LightDatasRT, lightCluster.GetLightDatas());

// Set the data for the ray miss
cmd.SetRayTracingIntParam(pathTracingShader, HDShaderIDs._RaytracingCameraSkyEnabled, m_CameraSkyEnabled ? 1 : 0);
cmd.SetRayTracingIntParam(pathTracingShader, HDShaderIDs._RaytracingCameraSkyEnabled, camData.skyEnabled ? 1 : 0);
cmd.SetRayTracingVectorParam(pathTracingShader, HDShaderIDs._RaytracingCameraClearColor, hdCamera.backgroundColorHDR);
cmd.SetRayTracingTextureParam(pathTracingShader, HDShaderIDs._SkyTexture, m_SkyManager.GetSkyReflection(hdCamera));

Expand Down