// We do not rely on recursion beyond subsurface scattering
#pragma max_recursion_depth 2
// On some consoles we get a timeout when compiling this, so we only target d3d platforms
#pragma only_renderers d3d11
// SRP/HDRP includes
#define SHADER_TARGET 50
// Include and define the shader pass (Required for light cluster code)
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/ShaderPass/ShaderPass.cs.hlsl"
#define SHADERPASS SHADERPASS_PATH_TRACING
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Sampling/Sampling.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/ShaderLibrary/ShaderVariables.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/Material/Builtin/BuiltinData.hlsl"
// We need this for the potential volumetric integration on camera misses
#define HAS_LIGHTLOOP
// Ray tracing includes
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Raytracing/Shaders/ShaderVariablesRaytracing.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/Raytracing/Shaders/Common/AtmosphericScatteringRayTracing.hlsl"
// Path tracing includes
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/PathTracing/Shaders/PathTracingPayload.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/PathTracing/Shaders/PathTracingSampling.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/PathTracing/Shaders/PathTracingSkySampling.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/PathTracing/Shaders/PathTracingVolume.hlsl"
#include "Packages/com.unity.render-pipelines.high-definition/Runtime/RenderPipeline/PathTracing/Shaders/PathTracingAOV.hlsl"
// Input(s)
float4x4 _PixelCoordToViewDirWS;
int _PathTracingCameraSkyEnabled;
float4 _PathTracingCameraClearColor;
float4 _PathTracingDoFParameters; // x: aperture radius, y: focus distance, zw: unused
float4 _PathTracingTilingParameters; // xy: tile count, zw: current tile index
// Output(s)
RW_TEXTURE2D_X(float4, _FrameTexture);
// AOVs
RW_TEXTURE2D_X(float4, _AlbedoAOV);
RW_TEXTURE2D_X(float4, _NormalAOV);
RW_TEXTURE2D_X(float4, _MotionVectorAOV);
RW_TEXTURE2D_X(float4, _VolumetricScatteringAOV);
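// Miss shader for camera rays: shades the sky or the camera clear color, applies fog, and
// optionally adds one volumetric scattering sample when volumetric fog is enabled.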
[shader("miss")]
void CameraMiss(inout PathPayload payload : SV_RayPayload)
{
// Set initial "hit" to infinity (before potential volumetric scattering) and alpha to the proper value
payload.rayTHit = FLT_INF;
payload.alpha = IsSkyEnabled() && _PathTracingCameraSkyEnabled ? 1.0 : min(_PathTracingCameraClearColor.a, 1.0);
// In indirect-only mode, it makes more sense to return a null value
if (_RaytracingMinRecursion > 1)
{
payload.value = 0.0;
return;
}
// We use the pixel coordinates to access the sky texture, so we first have to remove the tiling
uint2 tileCount = uint2(_PathTracingTilingParameters.xy);
uint2 pixelCoord = payload.pixelCoord / tileCount;
payload.value = IsSkyEnabled() && _PathTracingCameraSkyEnabled ?
GetSkyBackground(pixelCoord).rgb : _PathTracingCameraClearColor.rgb * GetInverseCurrentExposureMultiplier();
ApplyFogAttenuation(WorldRayOrigin(), WorldRayDirection(), payload.value, payload.alpha);
if (_EnableVolumetricFog)
{
float3 lightPosition;
float3 envValue = payload.value;
// Generate a 4D unit-square sample for this depth, from our QMC sequence
float4 inputSample = GetSample4D(payload.pixelCoord, _RaytracingSampleIndex, 0);
// Compute volumetric scattering
payload.value = 0.0;
float volPdf = 1.0;
bool sampleLocalLights;
if (SampleVolumeScatteringPosition(payload.pixelCoord, inputSample.w, payload.rayTHit, volPdf, sampleLocalLights, lightPosition))
{
ComputeVolumeScattering(payload, inputSample.xyz, sampleLocalLights, lightPosition);
// Apply volumetric attenuation (beware of passing the right distance to the shading point)
ApplyFogAttenuation(WorldRayOrigin(), WorldRayDirection(), payload.rayTHit, payload.value, payload.lightSampleShadowColor, payload.alpha,
payload.lightSampleShadowOpacityAndShadowTint.y, payload.throughput, payload.segmentThroughput, payload.lightSampleValue, false);
// Apply the volume PDF, also to the throughput, as we might continue the path after sampling a volume event.
payload.value /= volPdf;
payload.alpha /= volPdf;
payload.throughput /= volPdf;
payload.segmentThroughput /= volPdf;
payload.lightSampleValue /= volPdf;
}
// Reinject the environment value
payload.value += envValue;
}
// Override AOV motion vector information
payload.aovMotionVector = 0.0;
}
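// Intentionally empty: rays traced with this miss shader (visibility rays and non-camera path
// segments) simply keep the payload values they were initialized with when nothing is hit.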
[shader("miss")]
void EmptyMiss(inout PathPayload payload : SV_RayPayload)
{
}
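// Thin-lens depth of field: offset the ray origin over the aperture disk, then re-aim the ray
// through the point where the original pinhole ray crosses the focus plane. dotDirection is the
// cosine between the camera forward axis and the ray, turning the planar focus distance into a
// distance along the ray.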
void ApplyDepthOfField(uint2 pixelCoord, float dotDirection, inout float3 origin, inout float3 direction)
{
// Check aperture radius
if (_PathTracingDoFParameters.x <= 0.0)
return;
// Sample the lens aperture using the next available dimensions of our QMC sequence
// (40 are used for path tracing, 2 for sub-pixel jittering, 64 for SSS, so the next available are 106 and 107)
float2 uv = _PathTracingDoFParameters.x * SampleDiskUniform(GetSample(pixelCoord, _RaytracingSampleIndex, 106),
GetSample(pixelCoord, _RaytracingSampleIndex, 107));
// Compute the focus point by intersecting the pinhole ray with the focus plane
float t = _PathTracingDoFParameters.y / dotDirection;
float3 focusPoint = origin + t * direction;
// Compute the new ray origin (_ViewMatrix[0] = right, _ViewMatrix[1] = up)
origin += _ViewMatrix[0].xyz * uv.x + _ViewMatrix[1].xyz * uv.y;
// The new ray direction should pass through the focus point
direction = normalize(focusPoint - origin);
}
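// Builds the primary camera ray and initializes the path payload for one pixel. With interleaved
// tiling, each dispatch renders one of tileCount.x * tileCount.y interleaved subsets of the full
// frame, so the dispatch-local pixelCoord is remapped to the full-resolution tiledPixelCoord
// before sampling.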
void GenerateCameraRay(uint2 pixelCoord, out PathPayload payload, out RayDesc ray, bool withAOV = false, bool withVolumetricScatteringAOV = false)
{
// Get the current tile coordinates (for interleaved tiling) and update pixel coordinates accordingly
uint2 tileCount = uint2(_PathTracingTilingParameters.xy);
uint2 tileIndex = uint2(_PathTracingTilingParameters.zw);
uint2 tiledPixelCoord = pixelCoord * tileCount + tileIndex;
// Jitter them (we currently use 4x10 dimensions of our sequence during path tracing, so pick the next available ones)
float4 jitteredPixelCoord = float4(pixelCoord, 1.0, 1.0);
jitteredPixelCoord.x += GetSample(tiledPixelCoord, _RaytracingSampleIndex, 40) / tileCount.x;
jitteredPixelCoord.y += GetSample(tiledPixelCoord, _RaytracingSampleIndex, 41) / tileCount.y;
// Initialize the payload for this camera ray
payload.throughput = 1.0;
payload.interactionThroughput = 1.0;
payload.segmentThroughput = 1.0;
payload.maxRoughness = 0.0;
payload.pixelCoord = tiledPixelCoord;
payload.segmentID = 0;
ClearPathTracingFlags(payload);
payload.materialSamplePdf = -1.f; // negative pdf value means we didn't sample, so don't apply MIS
ClearOutputFlags(payload);
if (withAOV)
SetOutputFlag(payload, OUTPUT_FLAG_AOV);
if (withVolumetricScatteringAOV)
SetOutputFlag(payload, OUTPUT_FLAG_SEPARATE_VOLUMETRICS);
// In order to achieve texture filtering, we need to compute the spread angle of the subpixel
payload.cone.spreadAngle = _RaytracingPixelSpreadAngle / min(tileCount.x, tileCount.y);
payload.cone.width = 0.0;
payload.aovMotionVector = jitteredPixelCoord.xy;
payload.aovAlbedo = 0.0;
payload.aovNormal = 0.0;
// Generate the ray descriptor for this pixel
ray.TMin = _RaytracingCameraNearPlane;
ray.TMax = FLT_INF;
// We need the camera forward direction in both types of projection
float3 cameraDirection = GetViewForwardDir();
// Compute the ray's origin and direction, for either perspective or orthographic projection
if (IsPerspectiveProjection())
{
ray.Origin = GetPrimaryCameraPosition();
ray.Direction = -normalize(mul(jitteredPixelCoord, _PixelCoordToViewDirWS).xyz);
// Use planar clipping, to match rasterization
float dotDirection = dot(cameraDirection, ray.Direction);
ray.TMin /= dotDirection;
ApplyDepthOfField(tiledPixelCoord, dotDirection, ray.Origin, ray.Direction);
}
else // Orthographic projection
{
uint2 pixelResolution = DispatchRaysDimensions().xy;
float4 screenCoord = float4(2.0 * jitteredPixelCoord.x / pixelResolution.x - 1.0,
-2.0 * jitteredPixelCoord.y / pixelResolution.y + 1.0,
0.0, 0.0);
ray.Origin = mul(_InvViewProjMatrix, screenCoord).xyz;
ray.Direction = cameraDirection;
}
}
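// Firefly suppression: if the exposed luminance of a value exceeds maxIntensity, scale the
// whole RGB value down to sit exactly at the clamp, which preserves hue.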
float3 ClampValue(float3 value, float maxIntensity = _RaytracingIntensityClamp)
{
float intensity = Luminance(value) * GetCurrentExposureMultiplier();
return intensity > maxIntensity ? value * maxIntensity / intensity : value;
}
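// Traces the visibility (shadow) ray requested by the last light sample. Closest-hit shaders are
// skipped and everything is forced non-opaque, so only any-hit invocations can attenuate
// shadowPayload.value, which starts at full transmission (1.0).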
float3 ComputeLightSampleTransmission(in PathPayload basePayload)
{
if(!IsPathTracingFlagOn(basePayload, PATHTRACING_FLAG_SHADOW_RAY_NEEDED))
return 1.0;
RayDesc ray;
ray.Origin = basePayload.lightSampleRayOrigin;
ray.Direction = basePayload.lightSampleRayDirection;
ray.TMin = 0.0;
ray.TMax = basePayload.lightSampleRayDistance;
PathPayload shadowPayload;
shadowPayload.segmentID = IsPathTracingFlagOn(basePayload, PATHTRACING_FLAG_DUAL_SCATTERING_VIS) ? SEGMENT_ID_DUAL_SCATTERING_VIS : SEGMENT_ID_TRANSMISSION;
shadowPayload.value = 1.0;
TraceRay(_RaytracingAccelerationStructure, RAY_FLAG_ACCEPT_FIRST_HIT_AND_END_SEARCH | RAY_FLAG_FORCE_NON_OPAQUE | RAY_FLAG_SKIP_CLOSEST_HIT_SHADER,
RAYTRACINGRENDERERFLAG_CAST_SHADOW, 0, 1, 1, ray, shadowPayload);
return GetLightTransmission(shadowPayload.value, basePayload.lightSampleShadowOpacityAndShadowTint.x);
}
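// Resolves shading for the unlit material model: blend between the shadowed and unshadowed light
// sample values using the visibility ray, then, for transparent unlit surfaces, recompute the
// opacity from shadow visibility and rescale throughput, alpha and accumulated values accordingly.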
float3 HandleUnlitModel(inout PathPayload basePayload, bool computeDirect)
{
float3 lightValue = float3(0, 0, 0);
float transmission = 1.0;
if(IsPathTracingFlagOn(basePayload, PATHTRACING_FLAG_SHADOW_RAY_NEEDED))
{
transmission = Luminance(ComputeLightSampleTransmission(basePayload));
lightValue = lerp(basePayload.lightSampleShadowColor, basePayload.lightSampleValue, transmission);
}
// The changes below are only relevant for unlit, transparent surfaces, which carry a material sample
if(!IsPathTracingFlagOn(basePayload, PATHTRACING_FLAG_MATERIAL_SAMPLE))
return lightValue;
float builtinOpacity = basePayload.alpha;
// Compute the new opacity value using the shadow visibility
if(IsPathTracingFlagOn(basePayload, PATHTRACING_FLAG_INTERPOLATE_OPACITY))
builtinOpacity = lerp(basePayload.lightSampleShadowOpacityAndShadowTint.y, builtinOpacity, transmission);
// cancel material sample if opacity is 1 after recomputing it
if(!(builtinOpacity < 1.0))
{
ClearPathTracingFlag(basePayload, PATHTRACING_FLAG_MATERIAL_SAMPLE);
// Set alpha back to 1.0
basePayload.alpha = 1.0;
}
// otherwise, update throughput and alpha with (potentially) new opacity.
else
{
basePayload.throughput *= 1.0 - builtinOpacity;
basePayload.interactionThroughput *= 1.0 - builtinOpacity;
basePayload.alpha = builtinOpacity;
if(computeDirect)
{
basePayload.value *= builtinOpacity;
lightValue *= builtinOpacity;
if(IsPathTracingFlagOn(basePayload, PATHTRACING_FLAG_INTERPOLATE_OPACITY))
basePayload.lightSampleShadowColor *= builtinOpacity;
}
}
return lightValue;
}
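// After a volume scattering event, evaluate the emissive lights hit by the continuation ray,
// weighted with multiple importance sampling against the material (phase function) sample.
// PowerHeuristic is presumably the usual Veach power heuristic with beta = 2:
//   w(pdfA, pdfB) = pdfA^2 / (pdfA^2 + pdfB^2)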
float3 GetLightingAlongVolumetricSegment(PathPayload basePayload)
{
if(!IsPathTracingFlagOn(basePayload, PATHTRACING_FLAG_MATERIAL_SAMPLE))
return 0.0;
// segmentID + 1 because this function traces the next segment from basePayload
if(basePayload.segmentID + 1 < _RaytracingMinRecursion - 1)
return 0.0;
if(basePayload.segmentID + 1 > _RaytracingMaxRecursion - 1)
return 0.0;
// Evaluate any lights we might hit along this ray
LightList lightList = CreateLightList(basePayload.materialSampleRayOrigin, (bool)basePayload.lightListParams.w, (float3) basePayload.lightListParams.xyz);
RayDesc ray;
ray.Origin = basePayload.materialSampleRayOrigin;
ray.Direction = basePayload.materialSampleRayDirection;
ray.TMin = 0.0;
ray.TMax = FLT_INF;
// Shoot a ray returning nearest tHit
PathPayload lightPayload;
lightPayload.rayTHit = FLT_INF;
lightPayload.segmentID = SEGMENT_ID_NEAREST_HIT;
TraceRay(_RaytracingAccelerationStructure, RAY_FLAG_FORCE_NON_OPAQUE | RAY_FLAG_SKIP_CLOSEST_HIT_SHADER | RAY_FLAG_CULL_BACK_FACING_TRIANGLES,
RAYTRACINGRENDERERFLAG_PATH_TRACING, 0, 1, 1, ray, lightPayload);
float3 lightValue = 0.0;
float lightPdf;
EvaluateLights(lightList, ray.Origin, ray.Direction, lightPayload.rayTHit + _RayTracingRayBias, lightValue, lightPdf);
return basePayload.throughput * lightValue * PowerHeuristic(basePayload.materialSamplePdf, lightPdf);
}
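// Evaluates light (and, on a miss, sky) emission crossed along a freshly traced segment,
// MIS-weighted against light sampling whenever a valid material sample pdf is available
// (a negative pdf means the segment was not BSDF-sampled, so no MIS is applied).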
float3 GetLightingAlongSegment( uint segmentID, float3 scatteringPosition, float3 rayOrigin, float3 rayDirection, float rayHitDistance,
float materialSamplePdf, float4 lightListParams)
{
// only evaluate lights and sky if min depth allows it
if(segmentID < _RaytracingMinRecursion - 1)
return 0.0;
bool hit = rayHitDistance < FLT_INF;
bool computeMis = materialSamplePdf >= 0;
// Evaluate any lights we might have hit between the last position and this miss.
LightList lightList;
lightList = CreateLightList(scatteringPosition, lightListParams.xyz, (uint) lightListParams.w);
float3 lightValue = 0.0;
float lightPdf = 0.0;
EvaluateLights(lightList, rayOrigin, rayDirection, rayHitDistance + _RayTracingRayBias, lightValue, lightPdf);
float lightMisWeight = computeMis ? PowerHeuristic(materialSamplePdf, lightPdf) : 1.0;
float3 result = lightValue * lightMisWeight;
if(!hit && !IsSkySamplingEnabled())
{
// We need to evaluate the sky separately since the ray is a miss and we don't sample it in the lightlist
float3 skyValue = GetSkyValue(rayDirection);
#ifndef LIGHT_EVALUATION_NO_HEIGHT_FOG
ApplyFogAttenuation(rayOrigin, rayDirection, skyValue);
#endif
result += skyValue;
}
return result;
}
float3 CombineContributions(float3 payloadValue, float3 lightValue, uint segmentID)
{
// Add any lighting contributions that were calculated for the segment, while clamping fireflies.
// don't clamp anything on the first segment (purely direct lighting)
if(segmentID == 0)
return (lightValue + payloadValue);
// don't clamp the light contributions we found on the second segment
if(segmentID == 1)
return ClampValue(payloadValue) + lightValue;
return ClampValue(payloadValue + lightValue);
}
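// Main path tracing loop: each iteration traces one segment, then accumulates (1) the pending
// NEE shadow ray of the interaction ending that segment, (2) emission found at the hit, and
// (3) MIS-weighted light emission crossed along the segment itself. Since some of these
// contributions are resolved one segment "late", the path throughput is tracked in two parts
// (before, and at, the previous interaction) so that each term is scaled by the right prefix.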
void TracePath(uint2 pixelCoord, bool withAOV = false, bool withVolumetricScatteringAOV = false)
{
// Generate the initial camera ray
PathPayload payload;
RayDesc ray;
GenerateCameraRay(pixelCoord, payload, ray, withAOV, withVolumetricScatteringAOV);
// Accumulation variables
float3 pathContribution = 0;
float3 pathVolumetricContribution = 0;
float pathAlpha = 0;
// Loop variables
bool continuePath = true;
float3 throughputBeforePreviousInteraction = 1.0;
float3 interactionThroughputPreviousInteraction = 1.0;
// These are the quantities we need to store for computing light evaluation along the segment we just traced
float3 scatteringPosition;
float materialSamplePdf = 0.0;
float4 lightListParams = 0.0;
bool evalLightsAlongSegment = true;
for (uint segmentID = 0; segmentID < _RaytracingMaxRecursion && continuePath; segmentID++)
{
// We send the current ray into the scene to discover the next interaction.
// The updated payload includes the brdf/bsdf-based sampling information to trace the next rays.
TraceRay(_RaytracingAccelerationStructure, RAY_FLAG_CULL_BACK_FACING_TRIANGLES, RAYTRACINGRENDERERFLAG_PATH_TRACING, 0, 1, segmentID == 0 ? 0 : 1, ray, payload);
//------ Evaluate NEE
// Note that this call sends another ray, so it is in fact tracing part of the next segment's contribution.
float3 lightSampleValue;
if (IsPathTracingFlagOn(payload, PATHTRACING_FLAG_UNLIT_MODEL))
lightSampleValue = HandleUnlitModel(payload, segmentID >= _RaytracingMinRecursion-1);
else
lightSampleValue = ComputeLightSampleTransmission(payload) * payload.lightSampleValue;
if(segmentID > 0) // clamp indirect lighting, NEE is a segment ahead of the current one
lightSampleValue = ClampValue(lightSampleValue);
lightSampleValue = interactionThroughputPreviousInteraction * lightSampleValue;
//------ Evaluate Emission found in the hit we just traced
float3 emission = interactionThroughputPreviousInteraction * payload.value;
if(segmentID > 1) // clamp indirect lighting
emission = ClampValue(emission);
//------ Evaluate lights along the segment we just traced
float3 lightValue = 0.0;
if(segmentID > 0 && evalLightsAlongSegment)
lightValue = interactionThroughputPreviousInteraction * GetLightingAlongSegment(segmentID, scatteringPosition, ray.Origin, ray.Direction, payload.rayTHit, materialSamplePdf, lightListParams);
if(segmentID > 1) // clamp indirect lighting
lightValue = ClampValue(lightValue);
//------ Add contributions to final result
float3 contribution = throughputBeforePreviousInteraction * (emission + lightSampleValue + lightValue);
if (withVolumetricScatteringAOV && IsPathTracingFlagOn(payload, PATHTRACING_FLAG_VOLUME_INTERACTION))
pathVolumetricContribution += contribution;
else
pathContribution += contribution;
// update path alpha
pathAlpha += (1.0 - pathAlpha) * payload.alpha;
//------ Set up the next iteration of the loop
// Check if we will continue tracing
continuePath = IsPathTracingFlagOn(payload, PATHTRACING_FLAG_MATERIAL_SAMPLE);
// Don't evaluate lights along the next segment if we've just hit Unlit
evalLightsAlongSegment = !IsPathTracingFlagOn(payload, PATHTRACING_FLAG_UNLIT_MODEL);
// store interaction parameters for computing light evaluation along the next segment
scatteringPosition = ray.Origin + payload.rayTHit * ray.Direction;
lightListParams = payload.lightListParams;
materialSamplePdf = payload.materialSamplePdf;
// Update throughput for the next interaction
throughputBeforePreviousInteraction *= interactionThroughputPreviousInteraction * payload.segmentThroughput;
interactionThroughputPreviousInteraction = payload.interactionThroughput;
// We don't do multiple scattering nor continuation rays after a volumetric interaction
if (IsPathTracingFlagOn(payload, PATHTRACING_FLAG_VOLUME_INTERACTION))
{
contribution = GetLightingAlongVolumetricSegment(payload);
if (withVolumetricScatteringAOV)
pathVolumetricContribution += contribution;
else
pathContribution += contribution;
break;
}
// Set the ray for tracing the next segment
ray.Origin = payload.materialSampleRayOrigin;
ray.Direction = payload.materialSampleRayDirection;
ray.TMin = 0.0;
ray.TMax = FLT_INF;
// Prepare Payload for tracing the next segment
SetPayloadForNextSegment(segmentID, payload);
}
_FrameTexture[COORD_TEXTURE2D_X(pixelCoord)] = float4(pathContribution, pathAlpha);
if (withAOV)
{
// If we computed AOVs, copy relevant values to our output buffers
_AlbedoAOV[COORD_TEXTURE2D_X(pixelCoord)] = float4(payload.aovAlbedo, 1.0);
_NormalAOV[COORD_TEXTURE2D_X(pixelCoord)] = float4(payload.aovNormal, 1.0);
_MotionVectorAOV[COORD_TEXTURE2D_X(pixelCoord)] = float4(payload.aovMotionVector, 0.0, 1.0);
}
if (withVolumetricScatteringAOV)
{
_VolumetricScatteringAOV[COORD_TEXTURE2D_X(pixelCoord)] = float4(pathVolumetricContribution, 1.0);
}
}
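// Ray generation entry points: one variant per combination of AOV output and separate
// volumetric scattering output (see TracePath's withAOV / withVolumetricScatteringAOV flags).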
[shader("raygeneration")]
void RayGen()
{
TracePath(DispatchRaysIndex().xy);
}
[shader("raygeneration")]
void RayGenVolScattering()
{
TracePath(DispatchRaysIndex().xy, false, true);
}
[shader("raygeneration")]
void RayGenAOV()
{
TracePath(DispatchRaysIndex().xy, true);
}
[shader("raygeneration")]
void RayGenVolScatteringAOV()
{
TracePath(DispatchRaysIndex().xy, true, true);
}