
Simplified gathering of color, depth and motion vector RTs by using the game camera texture directly, and blitting just before dispatch.

This gets rid of all the OnRenderImage song and dance in the game camera component and leaves fewer instance variables to keep track of.
Branch: mac-autoexp
Author: Nico de Poel, 3 years ago
Commit: c0a3b81d84

Changed files:
  Assets/Scripts/Fsr2Controller.cs (40 lines changed)
  Assets/Scripts/SubsampleTest.cs (24 lines changed)

Assets/Scripts/Fsr2Controller.cs

@@ -37,7 +37,6 @@ public class Fsr2Controller : MonoBehaviour
     private Fsr2Context _context;
     private readonly Fsr2.DispatchDescription _dispatchDescription = new Fsr2.DispatchDescription();
-    private RenderTexture _colorRT, _depthRT, _motionVectorsRT;
     private RenderTexture _outputRT;
     private Material _copyDepthMat;
@@ -89,6 +88,7 @@ public class Fsr2Controller : MonoBehaviour
         _context = Fsr2.CreateContext(DisplaySize, RenderSize);
         // TODO: do we need a depth buffer for the output? We will need depth & motion vectors for subsequent post-FX. How should FSR2 output these?
+        // TODO: can probably be a temporary RT
         _outputRT = new RenderTexture(DisplaySize.x, DisplaySize.y, 24, RenderTextureFormat.ARGBHalf) { enableRandomWrite = true };
         _outputRT.Create();
     }
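Aside on the new TODO above (not part of the diff): if the upscaled output only needs to survive until the end of the frame, it could come from the temporary pool instead of being a persistent RenderTexture. A minimal sketch of what that might look like; the descriptor mirrors the persistent _outputRT allocation, and enableRandomWrite is kept because FSR2 appears to write its output from a compute shader:

    // Hypothetical per-frame output allocation (not in this commit).
    var outputDesc = new RenderTextureDescriptor(DisplaySize.x, DisplaySize.y, RenderTextureFormat.ARGBHalf, 24)
    {
        enableRandomWrite = true    // so the compute-based upscaler can write to it as a UAV
    };
    var output = RenderTexture.GetTemporary(outputDesc);
    // ... assign to _dispatchDescription.Output, dispatch, blit to the destination ...
    RenderTexture.ReleaseTemporary(output);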
@@ -110,22 +110,8 @@ public class Fsr2Controller : MonoBehaviour
         RenderPipelineManager.endContextRendering -= OnEndContextRendering;
     }
-    public void OnRenderGameCamera(RenderTexture renderTarget, Vector2 jitterOffset)
+    public void SetJitterOffset(Vector2 jitterOffset)
     {
-        // I hate having to allocate extra RTs just to duplicate already existing Unity render buffers, but AFAIK there is no way to directly address these buffers individually from code
-        _colorRT = RenderTexture.GetTemporary(renderTarget.width, renderTarget.height, 0, RenderTextureFormat.ARGBHalf);
-        _depthRT = RenderTexture.GetTemporary(renderTarget.width, renderTarget.height, 0, RenderTextureFormat.RFloat);
-        _motionVectorsRT = RenderTexture.GetTemporary(renderTarget.width, renderTarget.height, 0, RenderTextureFormat.RGHalf);
-        // TODO: might be able to combine color + depth into a single RT and separate them out using RenderTextureSubElement
-        // TODO: we can copy to all RTs at the same time using a multi-target blit, requiring only a single shader + material
-        Graphics.Blit(renderTarget, _colorRT);
-        Graphics.Blit(renderTarget, _depthRT, CopyDepthMaterial);
-        Graphics.Blit(renderTarget, _motionVectorsRT, CopyMotionVectorsMaterial);
-        _dispatchDescription.Color = _colorRT;
-        _dispatchDescription.Depth = _depthRT;
-        _dispatchDescription.MotionVectors = _motionVectorsRT;
         _dispatchDescription.JitterOffset = jitterOffset;
     }
@@ -138,10 +124,20 @@ public class Fsr2Controller : MonoBehaviour
     // For legacy built-in render pipeline
    private void OnRenderImage(RenderTexture src, RenderTexture dest)
     {
-        // Ensure the input resources were updated correctly before upscaling
-        if (_colorRT == null || _depthRT == null || _motionVectorsRT == null)
-            return;
+        var color = gameCamera.targetTexture;
+        // I hate having to allocate extra RTs just to duplicate already existing Unity render buffers, but AFAIK there is no way to directly address these buffers individually from code
+        var depth = RenderTexture.GetTemporary(color.width, color.height, 0, RenderTextureFormat.RFloat);
+        var motionVectors = RenderTexture.GetTemporary(color.width, color.height, 0, RenderTextureFormat.RGHalf);
+        // TODO: might be able to combine color + depth into a single RT and separate them out using RenderTextureSubElement
+        // TODO: we can copy to all RTs at the same time using a multi-target blit, requiring only a single shader + material
+        Graphics.Blit(color, depth, CopyDepthMaterial);
+        Graphics.Blit(color, motionVectors, CopyMotionVectorsMaterial);
+        _dispatchDescription.Color = color;
+        _dispatchDescription.Depth = depth;
+        _dispatchDescription.MotionVectors = motionVectors;
         _dispatchDescription.Output = _outputRT;
         _dispatchDescription.Exposure = null;
         _dispatchDescription.Reactive = null;
@@ -158,10 +154,8 @@ public class Fsr2Controller : MonoBehaviour
         _context.Dispatch(_dispatchDescription);
-        RenderTexture.ReleaseTemporary(_colorRT);
-        RenderTexture.ReleaseTemporary(_depthRT);
-        RenderTexture.ReleaseTemporary(_motionVectorsRT);
-        _colorRT = _depthRT = _motionVectorsRT = null;
+        RenderTexture.ReleaseTemporary(depth);
+        RenderTexture.ReleaseTemporary(motionVectors);
         // Output upscaled image to screen
         Graphics.Blit(_outputRT, dest);
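Aside on the multi-target blit TODO carried over into OnRenderImage (not part of the diff): in the built-in pipeline, the two copy blits could in principle collapse into a single fullscreen pass that writes depth and motion vectors at the same time. A rough sketch, assuming a hypothetical copyDepthAndMotionMaterial whose shader samples _CameraDepthTexture and _CameraMotionVectorsTexture and writes them to SV_Target0 and SV_Target1:

    // Hypothetical single-pass copy into two render targets at once (MRT).
    var targets = new RenderBuffer[] { depth.colorBuffer, motionVectors.colorBuffer };
    Graphics.SetRenderTarget(targets, depth.depthBuffer);   // no depth testing is needed for a fullscreen copy

    GL.PushMatrix();
    GL.LoadOrtho();
    copyDepthAndMotionMaterial.SetPass(0);   // assumed combined copy shader, not in the project
    GL.Begin(GL.QUADS);                      // fullscreen quad in normalized [0,1] coordinates
    GL.TexCoord2(0, 0); GL.Vertex3(0, 0, 0);
    GL.TexCoord2(1, 0); GL.Vertex3(1, 0, 0);
    GL.TexCoord2(1, 1); GL.Vertex3(1, 1, 0);
    GL.TexCoord2(0, 1); GL.Vertex3(0, 1, 0);
    GL.End();
    GL.PopMatrix();

Whether that is worth maintaining a combined shader over the two existing Graphics.Blit calls is exactly the question the TODO leaves open.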

Assets/Scripts/SubsampleTest.cs

@@ -19,7 +19,6 @@ public class SubsampleTest : MonoBehaviour
     private bool enableJitter;
     private Fsr2Controller _fsr2Controller;
-    private Vector2 _jitterOffset;
     private void OnEnable()
     {
@@ -86,13 +85,13 @@ public class SubsampleTest : MonoBehaviour
         outputCamera.clearFlags = CameraClearFlags.Color;
     }
-    private Rect tempRect;
+    private Rect _tempRect;
     private void OnPreRender()
     {
         gameCamera.aspect = (Screen.width * gameCamera.rect.width) / (Screen.height * gameCamera.rect.height);
-        tempRect = gameCamera.rect;
+        _tempRect = gameCamera.rect;
         gameCamera.rect = new Rect(0, 0, 1, 1);
         if (enableJitter)
@@ -100,28 +99,21 @@ public class SubsampleTest : MonoBehaviour
             // Perform custom jittering of the camera's projection matrix according to FSR2's instructions
             // Unity already does jittering behind the scenes for certain post-effects, so can we perhaps integrate with that?
             int jitterPhaseCount = Fsr2.GetJitterPhaseCount(gameCamera.targetTexture.width, Screen.width);
-            Fsr2.GetJitterOffset(out _jitterOffset.x, out _jitterOffset.y, Time.frameCount, jitterPhaseCount);
+            Fsr2.GetJitterOffset(out float jitterX, out float jitterY, Time.frameCount, jitterPhaseCount);
-            float jitterX = 2.0f * _jitterOffset.x / gameCamera.targetTexture.width;
-            float jitterY = -2.0f * _jitterOffset.y / gameCamera.targetTexture.height;
+            jitterX = 2.0f * jitterX / gameCamera.targetTexture.width;
+            jitterY = -2.0f * jitterY / gameCamera.targetTexture.height;
             var jitterTranslationMatrix = Matrix4x4.Translate(new Vector3(jitterX, jitterY, 0));
             gameCamera.projectionMatrix = jitterTranslationMatrix * gameCamera.nonJitteredProjectionMatrix;
-        }
-    }
-    private void OnRenderImage(RenderTexture src, RenderTexture dest)
-    {
-        // We don't actually output any image from this camera; instead we pass its render buffers on to the FSR2 system
-        _fsr2Controller.OnRenderGameCamera(src, _jitterOffset);
-        // Shut up Unity warning about not writing anything to the destination texture
-        Graphics.SetRenderTarget(dest);
+            _fsr2Controller.SetJitterOffset(new Vector2(jitterX, jitterY));
+        }
     }
     private void OnPostRender()
     {
-        gameCamera.rect = tempRect;
+        gameCamera.rect = _tempRect;
         gameCamera.ResetProjectionMatrix();
     }
 }
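For reference on the jitter math in OnPreRender above: Fsr2.GetJitterOffset returns an offset in pixels, which the code converts into a clip-space translation (one pixel spans 2/width of NDC horizontally, and the Y sign is flipped for Unity's projection convention) before multiplying it onto nonJitteredProjectionMatrix. The same conversion written as a standalone helper, using the Fsr2 calls exactly as they appear in the diff; the wrapper itself is illustrative and not part of the commit:

    // Illustrative helper: builds a jittered projection matrix for a camera rendering at
    // renderWidth x renderHeight and upscaling to displayWidth.
    private static Matrix4x4 GetJitteredProjection(Camera cam, int renderWidth, int renderHeight, int displayWidth)
    {
        int phaseCount = Fsr2.GetJitterPhaseCount(renderWidth, displayWidth);
        Fsr2.GetJitterOffset(out float jitterX, out float jitterY, Time.frameCount, phaseCount);

        // Convert the pixel-space offset into a clip-space translation.
        float ndcX = 2.0f * jitterX / renderWidth;
        float ndcY = -2.0f * jitterY / renderHeight;

        return Matrix4x4.Translate(new Vector3(ndcX, ndcY, 0)) * cam.nonJitteredProjectionMatrix;
    }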