#include "Packages/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/Shaders/MotionBlurCommon.hlsl" #include "Packages/com.unity.render-pipelines.high-definition/Runtime/PostProcessing/Shaders/PostProcessDefines.hlsl" #pragma only_renderers d3d11 playstation xboxone xboxseries vulkan metal switch switch2 #pragma kernel MotionBlurCS //#pragma enable_d3d11_debug_symbols #pragma multi_compile _ ENABLE_ALPHA RW_TEXTURE2D_X(CTYPE, _OutputTexture); TEXTURE2D_X(_InputTexture); TEXTURE2D_X(_TileMaxNeighbourhood); // --------------------------------------------- // Debug // --------------------------------------------- // ------------Debug Visualization------------------ #define NO_DEBUG 0 #define TILE_VIS 1 #define WEIGHT_VIS 2 #define MAX_MOTION_VEC 3 #define CUSTOM 4 #define DEBUG_SAMPLE_COUNT 5 #define DEBUG_VIEW NO_DEBUG #define DEBUG_VISUALIZE_TILE 0 // ------------Debug Weight------------------ #define NO_DEBUG 0 #define CONSTANT 1 #define ONLY_SPREAD 2 #define ONLY_DEPTH_COMPARE 3 #define CUSTOM 4 #define ONLY_DOT_WEIGHT 5 #define DEBUG_WEIGHT NO_DEBUG // ------------Debug Execution ------------------ #define NORMAL 0 #define ONLY_SLOW_PATH 1 #define ONLY_FAST_PATH 2 #define DEBUG_EXECUTION NORMAL // ------------Options------------------ #define MIRROR_WEIGHTS 0 // disabling this: seems to be causing a lot of haloing artifacts on motion blur #define TILE_JITTER 1 // When this is set to 1, samples are taken from both the central direction and the maximum direction in the tile. This reduces bugs when very different velocities are available in a tile. // This however also can cause the blur to look slightly different. #define GUERTIN2014_DOUBLE_DIR 0 #define DOT_WEIGHTING 1 #define DOT_MULTIPLIER 1 * DOT_WEIGHTING // --------------------------------------------- // Misc utils // --------------------------------------------- #define INTERLEAVED_GRADIENT_NOISE 0 #define CHECKERBOARD 1 #define DITHER_SCHEME INTERLEAVED_GRADIENT_NOISE float RandomValue(uint2 pixCoord, uint frameCount) { #if DITHER_SCHEME == INTERLEAVED_GRADIENT_NOISE return InterleavedGradientNoise(pixCoord.xy, 0); #elif DITHER_SCHEME == CHECKERBOARD float scale = 0.25; float2 posMod = float2 (pixCoord & 1); return (-scale + 2.0 * scale * posMod.x) * (-1.0 + 2.0 * posMod.y); #endif return 0.0; } float2 GetTileUV(uint2 pixCoord, float2 randomVals) { randomVals -= 0.5; randomVals *= 0.5; #if !TILE_JITTER randomVals = 0; #endif float2 tileLocation = (0.5f + pixCoord) / _TileSize; float2 UV = (tileLocation + randomVals) * _TileTargetSize.zw; return ClampAndScaleUVForBilinearPostProcessTexture(UV, _TileTargetSize.zw); } // --------------------------------------------- // Weight functions // --------------------------------------------- // Return foreground and background weights float2 DepthCompare(float centralDepth, float sampleDepth, float depthScale) { return saturate(0.5 + float2(depthScale, -depthScale) * (sampleDepth - centralDepth)); } float2 SpreadCompare(float offsetLength, float2 spreadLength, float pixelToSampleScale) { return saturate(pixelToSampleScale * spreadLength - max(offsetLength - 1.0, 0.0)); } float SampleWeight(float centralDepth, float sampleDepth, float offsetLength, float centralSpreadLength, float sampleSpreadLength, float pixelToSampleScale, float depthScale, float motionVecDiffWeight) { float2 depthWeights = DepthCompare(centralDepth, sampleDepth, depthScale); float2 spread = SpreadCompare(offsetLength, float2(centralSpreadLength, sampleSpreadLength), pixelToSampleScale); #if DOT_WEIGHTING 
    spread.y *= motionVecDiffWeight;
#endif

#if DEBUG_WEIGHT == CONSTANT
    return 1;
#elif DEBUG_WEIGHT == ONLY_SPREAD
    return spread.y;
#elif DEBUG_WEIGHT == ONLY_DEPTH_COMPARE
    return depthWeights.y;
#elif DEBUG_WEIGHT == CUSTOM
    return abs(depthWeights.x * spread.x - depthWeights.y * spread.y);
#else
    return dot(depthWeights, spread);
#endif
}

void MirrorWeights(float depth1, float depth2, float motionVec1, float motionVec2, inout float weight1, inout float weight2)
{
    bool2 mirror = bool2(depth1 > depth2, motionVec1 > motionVec2);
    weight1 = all(mirror) ? weight2 : weight1;
    weight2 = any(mirror) ? weight2 : weight1;
}

// ---------------------------------------------
// Sample processing
// ---------------------------------------------

CTYPE ProcessSampleFastPath(uint sampleNumber, float2 dir, float invSampleCount, float2 centralUV, float randomVal, float dirSign)
{
    float2 currDir = dirSign * dir;
    float offsetLength = (sampleNumber + 0.5) + (dirSign * (randomVal - 0.5));
    float2 sampleUV = ClampAndScaleUVForBilinearPostProcessTexture(centralUV + (offsetLength * invSampleCount) * currDir);

    CTYPE sampleColor = SAMPLE_TEXTURE2D_X_LOD(_InputTexture, s_point_clamp_sampler, sampleUV, 0).CTYPE_SWIZZLE;

    return sampleColor;
}

float4 ProcessSample(uint sampleNumber, float2 dir, float invSampleCount, float2 centralUV, float centralDepth, float centralMotionVecLen, float pixelToSampleScale, float randomVal, float dirSign, out float outDepth, out float outMotionVecLength, out float sampleAlpha)
{
    float2 currDir = dirSign * dir;
    float offsetLength = (sampleNumber + 0.5) + (dirSign * (randomVal - 0.5));
    float2 sampleUV = ClampAndScaleUVForBilinearPostProcessTexture(centralUV + (offsetLength * invSampleCount) * currDir);

    float3 sampleMotionVecDepth = SAMPLE_TEXTURE2D_X_LOD(_MotionVecAndDepth, s_point_clamp_sampler, sampleUV, 0).xyz;
    CTYPE sampleColor = SAMPLE_TEXTURE2D_X_LOD(_InputTexture, s_point_clamp_sampler, sampleUV, 0).CTYPE_SWIZZLE;

    outMotionVecLength = MotionVecLengthInPixelsFromEncoded(sampleMotionVecDepth.xy);
    outDepth = sampleMotionVecDepth.z;

    float motionVecDiffWeight = 1;
#if DOT_WEIGHTING
    float2 sampleMotionVec = DecodeMotionVectorFromPacked(sampleMotionVecDepth.xy);
    motionVecDiffWeight = (centralMotionVecLen > 0 && outMotionVecLength > 0.3) ? saturate(abs(dot(sampleMotionVec, currDir) / dot(currDir, currDir)) * DOT_MULTIPLIER) : 1;
#endif

    float sampleWeight = SampleWeight(centralDepth, outDepth, offsetLength, centralMotionVecLen, outMotionVecLength, pixelToSampleScale, _DepthScale, motionVecDiffWeight);

#if ENABLE_ALPHA
    sampleAlpha = sampleColor.w;
#else
    sampleAlpha = 1.0;
#endif

    return float4(sampleColor.xyz, sampleWeight);
}

float3 BlendMotionBlurLayers(float3 centralColor, float4 accumulation, float scatterNeighborhoodIntensity, float invSampleCount)
{
    // This function tries to mimic a multi-layer blending approach for motion blur.
    // Inputs:
    //   centralColor - the color of the unblurred pixel.
    //   accumulation - rgb holds the unnormalized weighted sum of the blur kernel, i.e. Sum(c[i] * w[i]). The w component holds the sum of all the weights, Sum(w[i]).
    //   scatterNeighborhoodIntensity - how blurry this pixel will look within the tile (neighborhood). A high value means this pixel's motion vector has a high intensity relative to the neighborhood, and vice versa.
    //   invSampleCount - reciprocal of the number of samples added into accumulation (1.0 / N).
    //
    // Explanation of the blur algorithm:
    // We split this blend into a top layer and a back layer.
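    // In short, the result computed below is:
    //   topLayer  = accumulation.rgb / accumulation.w
    //   backLayer = lerp(centralColor, topLayer, accumulation.w * invSampleCount)
    //   result    = lerp(backLayer, topLayer, saturate(scatterNeighborhoodIntensity))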
    // Top layer:
    //   The fully blurred pixel color, i.e. the normalized weighted blur held in accumulation. Because the top layer
    //   color is normalized, this avoids any ringing when the background is very blurry and the foreground is static
    //   within the neighborhood. It also allows us to recover the energy lost to pixels that are occluded.
    // Back layer:
    //   The unnormalized, low intensity color bleeding into background pixels. This looks good when a foreground
    //   object is moving fast and the background is static. However, it adds a ringing artifact when the background
    //   is fast moving and the foreground is static.
    // To get the best of both results, we blend them depending on how much the current pixel has to scatter.
    // If the current pixel scatters a lot (high motion vector intensity relative to the tile) we opt for the top layer.
    // On the other hand, if the current pixel doesn't scatter as much (low intensity motion vector) we are OK with
    // showing more of the back layer.
    float3 topLayerColor = accumulation.w < 1e-4 ? centralColor.rgb : accumulation.rgb / accumulation.w;
    float topLayerAlpha = accumulation.w * invSampleCount;
    float3 backLayerColor = lerp(centralColor.rgb, topLayerColor, topLayerAlpha);

    float layerBlendWeight = saturate(scatterNeighborhoodIntensity);
    return lerp(backLayerColor, topLayerColor, layerBlendWeight);
}

// ---------------------------------------------
// Kernel
// ---------------------------------------------

[numthreads(16, 16, 1)]
void MotionBlurCS(uint3 dispatchThreadId : SV_DispatchThreadID)
{
    UNITY_XR_ASSIGN_VIEW_INDEX(dispatchThreadId.z);

    CTYPE debugColor = 1.0f;

    // Get the central sample early.
    CTYPE centralColor = _InputTexture[COORD_TEXTURE2D_X(dispatchThreadId.xy)].CTYPE_SWIZZLE;

    float2 randomVals = float2(RandomValue(dispatchThreadId.xy, 0), RandomValue(dispatchThreadId.xy, 1));
    float2 tileUV = GetTileUV(dispatchThreadId.xy, randomVals);
    float3 tileMaxNeighbourhood = SAMPLE_TEXTURE2D_X_LOD(_TileMaxNeighbourhood, s_point_clamp_sampler, tileUV, 0.0).xyz;

    // Get the motion vectors in pixel extents.
    float2 maxMotionVec = DecodeMotionVectorFromPacked(tileMaxNeighbourhood.xy);
    float maxMotionVecLen = MotionVecLengthInPixelsFromEncoded(tileMaxNeighbourhood.xy);
    float minMotionVecLenInPixels = tileMaxNeighbourhood.z * _ScreenMagnitude;

    bool earlyOut = maxMotionVecLen <= _MinMotionVecThreshold;
    // With very wide blurs the mismatch between the gathered min and the scattered max is more likely to give wrong results, hence we fall back on the slow path.
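    // The fast path blurs along a single tile-wide direction with no per-sample depth/velocity weighting,
    // so it is only taken when the tile's max velocity is moderate and its min velocity is close to the max.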
    bool fastPath = maxMotionVecLen < (_ScreenMagnitude * 0.333f) && minMotionVecLenInPixels > (_MinMaxMotionVecRatioForSlowPath * maxMotionVecLen);

#if defined(PLATFORM_SUPPORTS_WAVE_INTRINSICS)
    earlyOut = WaveActiveAllTrue(earlyOut);
    fastPath = WaveActiveAllTrue(fastPath);
#endif

#if DEBUG_EXECUTION == ONLY_SLOW_PATH
    earlyOut = false;
    fastPath = false;
#elif DEBUG_EXECUTION == ONLY_FAST_PATH
    earlyOut = false;
    fastPath = true;
#endif

#if DEBUG_VIEW == TILE_VIS
    if (earlyOut)
    {
        debugColor.xz = 0.0;
    }
    else if (fastPath)
    {
        debugColor.z = 0.0;
    }
    else
    {
        debugColor.yz = 0.0;
    }
    _OutputTexture[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = 0.25 * centralColor + 0.75 * debugColor;
#else

    float3 outVal = 1.0f;

    if (earlyOut)
    {
        _OutputTexture[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = centralColor;
    }
    else
    {
        uint actualSampleCount = min(uint(maxMotionVecLen) + (uint(maxMotionVecLen) & 1u), _SampleCount);
        actualSampleCount = max(actualSampleCount, 2u);

        const float invSampleCount = rcp(actualSampleCount);
        uint stepCount = actualSampleCount / 2u;

        float2 centerSampleUV = (0.5 + dispatchThreadId.xy) * _PostProcessScreenSize.zw;
        float randomVal = randomVals.x;
        float2 dir = 0.5 * maxMotionVec;

        CTYPE colorAccumulation = 0.0;

        if (fastPath)
        {
            for (uint i = 0; i < stepCount; ++i)
            {
                colorAccumulation += ProcessSampleFastPath(i, dir, invSampleCount, centerSampleUV, randomVal, -1);
                colorAccumulation += ProcessSampleFastPath(i, dir, invSampleCount, centerSampleUV, randomVal, 1);
            }
            colorAccumulation *= invSampleCount;
#if DEBUG_VIEW == WEIGHT_VIS
            colorAccumulation = 0;
#endif
        }
        else
        {
            // Slow path.
            float3 motionVecAndDepth = SAMPLE_TEXTURE2D_X_LOD(_MotionVecAndDepth, s_point_clamp_sampler, ClampAndScaleUVPostProcessTextureForPoint(centerSampleUV), 0).xyz;
            float pixelToSampleScale = (float)stepCount * rcp(maxMotionVecLen);
            float2 centralTapMotionVec = DecodeMotionVectorFromPacked(motionVecAndDepth.xy);
            float centralMotionVecLen = MotionVecLengthInPixelsFromEncoded(motionVecAndDepth.xy);
#if GUERTIN2014_DOUBLE_DIR
            float2 centralDir = centralMotionVecLen < 0.5f ? dir : centralTapMotionVec * 0.5;
#endif
            float centralDepth = motionVecAndDepth.z;

            float4 accumulation = 0;
            float alphaAccumulation = 0;

            for (uint i = 0; i < stepCount; ++i)
            {
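                // Each step takes one tap forward and one tap backward along the blur direction; every tap is
                // weighted by depth ordering, velocity spread and (with DOT_WEIGHTING) motion vector alignment.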
#if GUERTIN2014_DOUBLE_DIR
                dir = ((i & 1u) == 0) ? centralDir : dir;
#endif
                float fw_Depth, bw_Depth;
                float fw_MotionVecLen, bw_MotionVecLen;
                float fw_Alpha, bw_Alpha;

                float4 fw_Sample = ProcessSample(i, dir, invSampleCount, centerSampleUV, centralDepth, centralMotionVecLen, pixelToSampleScale, randomVal, -1, fw_Depth, fw_MotionVecLen, fw_Alpha);
                float4 bw_Sample = ProcessSample(i, dir, invSampleCount, centerSampleUV, centralDepth, centralMotionVecLen, pixelToSampleScale, randomVal, 1, bw_Depth, bw_MotionVecLen, bw_Alpha);

                // Mirror weights.
#if MIRROR_WEIGHTS
                MirrorWeights(fw_Depth, bw_Depth, fw_MotionVecLen, bw_MotionVecLen, fw_Sample.w, bw_Sample.w);
#endif

                accumulation += float4(fw_Sample.w * fw_Sample.rgb, fw_Sample.w) + float4(bw_Sample.w * bw_Sample.rgb, bw_Sample.w);
#if ENABLE_ALPHA
                alphaAccumulation += fw_Sample.w * fw_Alpha + bw_Sample.w * bw_Alpha;
#endif
            }

            float scatterNeighborhoodIntensity = centralMotionVecLen / maxMotionVecLen;
            colorAccumulation.rgb = BlendMotionBlurLayers(centralColor.rgb, accumulation, scatterNeighborhoodIntensity, invSampleCount);
#if ENABLE_ALPHA
            colorAccumulation.a = BlendMotionBlurLayers(centralColor.aaa, float4(alphaAccumulation.xxx, accumulation.w), scatterNeighborhoodIntensity, invSampleCount).x;
#endif

#if DEBUG_VIEW == WEIGHT_VIS
            colorAccumulation = accumulation.w;
#endif
        }

#if DEBUG_VIEW == MAX_MOTION_VEC
        colorAccumulation.xyz = float3(maxMotionVec, 0.0);
        float maxMotionVecLenPix = MotionVecLengthInPixelsFromEncoded(tileMaxNeighbourhood.xy);
        colorAccumulation = maxMotionVecLenPix;
#endif

#if DEBUG_VIEW == DEBUG_SAMPLE_COUNT
        colorAccumulation.xyz = colorAccumulation.xyz * 0.25 + 0.75 * lerp(float3(0.0, 1.0, 0.0), float3(1.0, 0.0, 0.0), float(actualSampleCount) / _SampleCount);
#endif

#if DEBUG_VISUALIZE_TILE
        {
            uint2 tileCoords = (uint2)floor(dispatchThreadId.xy / _TileSize);
            colorAccumulation = lerp(colorAccumulation, lerp(((tileCoords.y & 0x1) ? float3(1, 0, 0) : float3(0, 0, 1)), ((tileCoords.x & 0x1) ? float3(1, 0, 0) : float3(0, 0, 1)), 0.5), 0.3);
        }
#endif

        _OutputTexture[COORD_TEXTURE2D_X(dispatchThreadId.xy)] = colorAccumulation;
    }
#endif
}