You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.
 
 
 
 

177 lines
8.2 KiB

#pragma kernel KMain
#pragma kernel KUpscaleFromDepth
#pragma kernel KChromaKeyToAlpha
#pragma only_renderers d3d11 playstation xboxone xboxseries vulkan metal switch
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariables.hlsl"
TEXTURE2D_X(_InputTexture); // Input alpha source at (lower) render resolution
RW_TEXTURE2D_X(float4, _OutputTexture); // Output texture at full display resolution
RW_TEXTURE2D(float4, _OutputTexture2D); // Output texture at target-rendertexture resolution, Texture2D instead of array
// Accumulated alpha history: read from the previous frame's buffer, written for the next frame.
TEXTURE2D_X_FLOAT(_InputHistoryTexture);
RW_TEXTURE2D_X(float2, _OutputHistoryTexture); // NOTE(review): declared float2 but only a scalar alpha is ever written/read (.r)
// Result of a 3x3 depth-neighbourhood search: the nearest depth value and the
// pixel coordinate it came from (used below to fetch a dilated motion vector).
struct DepthExtents
{
float fNearest; // Nearest depth in the neighbourhood (the search keeps the LARGEST raw value, i.e. reversed-Z nearest)
int2 fNearestCoord; // Pixel coordinate the nearest depth was loaded from
};
// Searches the 3x3 neighbourhood around uv (converted to render-resolution pixels)
// for the nearest depth sample and returns both the value and its pixel coordinate.
// "Nearest" is the largest raw depth value — the comparison assumes a reversed-Z
// depth buffer (HDRP convention). The dilated coordinate lets the kernels below
// fetch a foreground motion vector instead of a background one at silhouettes.
DepthExtents FindDepthExtents(in float2 uv, in int2 renderSize)
{
    DepthExtents extents;

    const int iSampleCount = 9;
    const int2 iSampleOffsets[iSampleCount] = {
        int2(+0, +0),
        int2(+1, +0),
        int2(+0, +1),
        int2(+0, -1),
        int2(-1, +0),
        int2(-1, +1),
        int2(+1, +1),
        int2(-1, -1),
        int2(+1, -1),
    };

    const int2 iPxPos = uv * renderSize;

    // Pull out the depth loads to allow the shader compiler to batch them.
    float depth[9];
    int iSampleIndex;
    UNITY_UNROLL
    for (iSampleIndex = 0; iSampleIndex < iSampleCount; ++iSampleIndex)
    {
        int2 iPos = iPxPos + iSampleOffsets[iSampleIndex];
        depth[iSampleIndex] = LOAD_TEXTURE2D_X(_CameraDepthTexture, iPos).r;
    }

    // Find the closest depth. Sample 0 (the centre tap) is always valid.
    extents.fNearestCoord = iPxPos;
    extents.fNearest = depth[0];
    UNITY_UNROLL
    for (iSampleIndex = 1; iSampleIndex < iSampleCount; ++iSampleIndex)
    {
        const int2 iPos = iPxPos + iSampleOffsets[iSampleIndex];
        // Fix: the original tested only the upper bound (iPos < renderSize), but the
        // offsets also reach -1, so border pixels could accept out-of-bounds
        // neighbours. Reject anything outside the valid [0, renderSize) range.
        if (all(iPos >= 0) && all(iPos < renderSize))
        {
            const float fNdDepth = depth[iSampleIndex];
            if (fNdDepth > extents.fNearest) // reversed-Z: greater raw depth == nearer
            {
                extents.fNearestCoord = iPos;
                extents.fNearest = fNdDepth;
            }
        }
    }
    return extents;
}
// Upscales the render-resolution alpha channel to display resolution, temporally
// accumulates it against last frame's reprojected history, and writes the result
// into the alpha channel of _OutputTexture (colour is left untouched).
[numthreads(8, 8, 1)]
void KMain(uint3 dispatchThreadId : SV_DispatchThreadID)
{
    UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);

    // One thread per display-resolution pixel.
    const uint2 pixelPos = dispatchThreadId.xy;

    // Resolution terms: the post-process size is the (lower) render resolution;
    // dividing by the dynamic-resolution scale gives the full display resolution.
    const float2 renderSize = _PostProcessScreenSize.xy;
    const float2 displaySize = _PostProcessScreenSize.xy / _DynamicResolutionFullscreenScale.xy;
    const float2 rcpDisplaySize = _PostProcessScreenSize.zw * _DynamicResolutionFullscreenScale.xy;
    const float2 rcpInputResourceSize = _PostProcessScreenSize.zw * _RTHandlePostProcessScale.xy;

    // Pixel centre as a display UV, then undo the TAA jitter applied at render time.
    const float2 displayUv = (pixelPos + 0.5f) * rcpDisplaySize;
    const float2 dejitteredUv = displayUv - _TaaJitterStrength.zw;

    // Clamp the sample position to the valid interior of the input, then rescale
    // to the physical RTHandle resource size for the hardware sampler.
    const float2 samplePos = dejitteredUv * renderSize;
    const float2 clampedPos = max(0.5f, min(samplePos, renderSize - 0.5f));
    const float2 samplerUv = clampedPos * rcpInputResourceSize;

    // De-jittered, bilinearly upscaled alpha for the current frame.
    const float alphaCurrent = SAMPLE_TEXTURE2D_X_LOD(_InputTexture, s_linear_clamp_sampler, samplerUv, 0).a;

    // Dilate depth so we don't grab motion vectors from the background, then
    // reproject last frame's accumulated alpha along the nearest motion vector.
    const DepthExtents depthExtents = FindDepthExtents(displayUv, renderSize);
    const float2 motionVector = LOAD_TEXTURE2D_X(_CameraMotionVectorsTexture, depthExtents.fNearestCoord).xy;
    const float alphaHistory = SAMPLE_TEXTURE2D_X_LOD(_InputHistoryTexture, s_linear_clamp_sampler, displayUv - motionVector, 0).r;

    // Lean more on the current frame when motion is large; when there is no depth
    // or TAA is disabled fall back to the current frame entirely (blend = 1),
    // which acts as a depth clip and eliminates after-images.
    const float velocityFactor = saturate(length(motionVector * displaySize) / 2.0f);
    const float blendWeight = depthExtents.fNearest > FLT_EPS && _TaaFrameInfo.z > 0 ? velocityFactor * 0.5f + 0.2f : 1.0f;
    const float alphaAccumulated = lerp(alphaHistory, alphaCurrent, blendWeight);

    _OutputHistoryTexture[COORD_TEXTURE2D_X(pixelPos)] = alphaAccumulated;

    // Only the alpha channel changes; keep the colour already in the target.
    const float3 outputColor = _OutputTexture[COORD_TEXTURE2D_X(pixelPos)].rgb;
    _OutputTexture[COORD_TEXTURE2D_X(pixelPos)] = float4(outputColor, alphaAccumulated);
}
// Builds an alpha channel directly from scene depth (1 where geometry was rendered,
// 0 at the far plane), upscaled to the output resolution and temporally accumulated
// with the same blend scheme as KMain. Writes colour + alpha to the non-array
// _OutputTexture2D target.
[numthreads(8, 8, 1)]
void KUpscaleFromDepth(uint3 dispatchThreadId : SV_DispatchThreadID)
{
UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);
const uint2 InputPos = dispatchThreadId.xy;
const float2 RenderSize = _PostProcessScreenSize.xy;
const float2 DisplaySize = _PostProcessScreenSize.xy / _DynamicResolutionFullscreenScale.xy;
const float2 InvDisplaySize = _PostProcessScreenSize.zw * _DynamicResolutionFullscreenScale.xy;
const float2 InvInputResourceSize = _PostProcessScreenSize.zw * _RTHandlePostProcessScale.xy;
const float2 fHrUv = (InputPos + 0.5f) * InvDisplaySize; // Convert the output pixel position to a UV
const float2 fLrUvJittered = fHrUv - _TaaJitterStrength.zw; // Compensate for jitter in the original render
// NOTE(review): fLrUvJittered is computed but never used below — the depth sample
// at fLrUv_HwSampler is derived from the un-jittered fHrUv. Possibly intentional,
// but worth confirming against KMain, which does sample with the de-jittered UV.
const float2 fHrUvhistory = ((InputPos + 0.5f) * _PostProcessScreenSize.zw)/* * _DynamicResolutionFullscreenScale.xy*/; //history is the size of the output
// Scale and clamp the UV for its size in the input color texture
const float2 fSampleLocation = fHrUv * RenderSize;
const float2 fClampedLocation = max(0.5f, min(fSampleLocation, RenderSize - 0.5f));
const float2 fLrUv_HwSampler = fClampedLocation * InvInputResourceSize;
// De-jittered and bilinear upscaled alpha from depth
// (Linear01Depth < 1 means the depth buffer holds real geometry, not the far plane)
const float fAlpha = Linear01Depth(SAMPLE_TEXTURE2D_X_LOD(_CameraDepthTexture, s_linear_clamp_sampler, fLrUv_HwSampler, 0).r, _ZBufferParams) < 1 ? 1.0 : 0.0;
// Temporally reproject alpha from the previous frame and blend it with the current frame's upscaled alpha
// NOTE(review): unlike KMain (which passes fHrUv), the dilation UV here is
// fHrUvhistory, and the author flagged the history UV below as wrong — this
// whole reprojection path looks unfinished; confirm before relying on it.
const DepthExtents depthExtents = FindDepthExtents(fHrUvhistory, RenderSize); // Dilate depth so we don't end up grabbing motion vectors from the background
const float2 fMotionVector = LOAD_TEXTURE2D_X(_CameraMotionVectorsTexture, depthExtents.fNearestCoord).xy;
const float fReprojectedAlphaHistory = SAMPLE_TEXTURE2D_X_LOD(_InputHistoryTexture, s_linear_clamp_sampler, fHrUvhistory - fMotionVector, 0).r; // Sample reprojected history //TODO WRONG UV
const float fVelocityFactor = saturate(length(fMotionVector * DisplaySize) / 2.0f); // Adjust the amount of temporal blending based on the amount of motion
const float fBlend = depthExtents.fNearest > FLT_EPS && _TaaFrameInfo.z > 0 ? fVelocityFactor * 0.5f + 0.2f : 1.0f; // Depth clip to eliminate after-images
const float fAlphaAccumulate = lerp(fReprojectedAlphaHistory, fAlpha, fBlend);
_OutputHistoryTexture[COORD_TEXTURE2D_X(InputPos)] = fAlphaAccumulate;
const float3 fColor = _InputTexture[COORD_TEXTURE2D_X(InputPos)].rgb;
_OutputTexture2D[(InputPos)] = float4(fColor, fAlphaAccumulate);
}
uniform float3 fKeyColor = float3(0, 1, 0); // Green screen chroma key, this needs to match the camera's background color
uniform float fToleranceA = 0.4f; // Chroma distance below which a pixel is fully keyed out (mask 0)
uniform float fToleranceB = 0.49f; // Chroma distance above which a pixel is fully opaque (mask 1); linear ramp between A and B
// Derived from http://gc-films.com/chromakey.html, using the faster YCoCg color space instead of YCbCr
// Returns a key mask: 0 when the pixel's chroma matches the key color (within
// fToleranceA), 1 when it is clearly different (beyond fToleranceB), with a
// linear ramp in between. Luma (Y) is ignored so shading on the green screen
// does not break the key.
float ChromaKey(float3 fColor)
{
    const float3 pixelYCoCg = RGBToYCoCg(fColor);
    const float3 keyYCoCg = RGBToYCoCg(fKeyColor);

    // Distance in the chroma plane only (Co = .y, Cg = .z).
    const float2 chromaDelta = keyYCoCg.yz - pixelYCoCg.yz;
    const float chromaDist = length(chromaDelta);

    // Equivalent to the original three-way select: values below fToleranceA
    // saturate to 0, values above fToleranceB saturate to 1.
    return saturate((chromaDist - fToleranceA) / (fToleranceB - fToleranceA));
}
// Converts a green-screen image into premultiplied-style colour + alpha: alpha
// comes from the chroma-key mask, and the key colour is subtracted from keyed
// edge pixels to suppress green spill.
[numthreads(8, 8, 1)]
void KChromaKeyToAlpha(uint3 dispatchThreadId : SV_DispatchThreadID)
{
    UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);
    const uint2 InputPos = dispatchThreadId.xy;

    const float3 fColor = _InputTexture[COORD_TEXTURE2D_X(InputPos)].rgb;
    const float fAlpha = ChromaKey(fColor);

    // Remove green spill from the edges: subtract the key colour in proportion to
    // how strongly the pixel is keyed. Fix: clamp at 0 — the original subtraction
    // could drive channels negative (e.g. dark pixels with a partial mask), which
    // corrupts any later blending that assumes non-negative colour.
    const float3 fDespilled = max(fColor - (1.0f - fAlpha) * fKeyColor, 0.0f);
    _OutputTexture2D[(InputPos)] = float4(fDespilled, fAlpha);
}