/****************************************************************************
**
** Copyright (C) 2014 NVIDIA Corporation.
** Copyright (C) 2019 The Qt Company Ltd.
** Contact: https://www.qt.io/licensing/
**
** This file is part of Qt 3D Studio.
**
** $QT_BEGIN_LICENSE:GPL$
** Commercial License Usage
** Licensees holding valid commercial Qt licenses may use this file in
** accordance with the commercial license agreement provided with the
** Software or, alternatively, in accordance with the terms contained in
** a written agreement between you and The Qt Company. For licensing terms
** and conditions see https://www.qt.io/terms-conditions. For further
** information use the contact form at https://www.qt.io/contact-us.
**
** GNU General Public License Usage
** Alternatively, this file may be used under the terms of the GNU
** General Public License version 3 or (at your option) any later version
** approved by the KDE Free Qt Foundation. The licenses are as published by
** the Free Software Foundation and appearing in the file LICENSE.GPL3
** included in the packaging of this file. Please review the following
** information to ensure the GNU General Public License requirements will
** be met: https://www.gnu.org/licenses/gpl-3.0.html.
**
** $QT_END_LICENSE$
**
****************************************************************************/
#ifndef SAMPLE_PROBE_GLSLLIB
#define SAMPLE_PROBE_GLSLLIB 1
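// When USE_RGBE is defined, probe textures are treated as RGBE-encoded: an
// RGB mantissa with a shared exponent in the alpha channel (see textureProbe()).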
#define USE_RGBE
uniform sampler2D lightProbe;
uniform vec4 lightProbeProperties; // x,y: UV offset for the second layer; z: horizon cutoff in [-1,1] (-1 disables); w: overall IBL intensity (as used in this file)
uniform vec4 lightProbeRotation;   // 2x2 UV rotation, packed as rows (x,y) and (z,w)
uniform vec4 lightProbeOffset;     // x,y: UV translation; w: number of mip levels in lightProbe
#if QSSG_ENABLE_LIGHT_PROBE_2
uniform sampler2D lightProbe2;
uniform vec4 lightProbe2Properties; // x: U scale; y: U offset; z: blend factor over the base probe; w: enable flag (active when >= 0.5), as used in this file
#endif
#if QSSG_ENABLE_IBL_FOV
uniform vec4 lightProbeOptions; // x: probe field of view, in radians
#endif
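// Classic one-liner hash: a pseudo-random value in [0.5, 1.0) from a 2D seed.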
float noise1d(vec2 n)
{
    return 0.5 + 0.5 * fract(sin(dot(n.xy, vec2(12.9898, 78.233))) * 43758.5453);
}
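// Re-orthonormalizes a tangent frame around its third axis (the normal),
// which is assumed to be normalized already.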
mat3 orthoNormalize(in mat3 tanFrame)
{
    mat3 outMat;
    outMat[0] = normalize(cross(tanFrame[1], tanFrame[2]));
    outMat[1] = normalize(cross(tanFrame[2], outMat[0]));
    outMat[2] = tanFrame[2];
    return outMat;
}
mat3 tangentFrame(vec3 N, vec3 p)
{
    // Get the edge vectors of the pixel triangle from screen-space derivatives.
    vec3 dp1 = dFdx(p);
    vec3 dp2 = dFdy(p);
    // Build a tangent and bitangent perpendicular to N from those edges.
    vec3 dp2perp = cross(dp2, N);
    vec3 dp1perp = cross(N, dp1);
    vec3 T = normalize(dp1perp);
    vec3 B = normalize(dp2perp);
    return mat3(T, B, N);
}
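// Applies the 2x2 rotation packed into probeRot (rows xy and zw) plus the
// probeOfs translation to a lat-long UV coordinate.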
vec2 transformSample(vec2 origUV, vec4 probeRot, vec2 probeOfs)
{
    vec2 retUV;
    retUV.x = dot(vec3(origUV, 1.0), vec3(probeRot.xy, probeOfs.x));
    retUV.y = dot(vec3(origUV, 1.0), vec3(probeRot.zw, probeOfs.y));
    return retUV;
}
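// Fetches the probe at an explicit LOD, decoding RGBE data when enabled.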
vec3 textureProbe(sampler2D lightProbe, vec2 coord, float lod)
{
#ifdef USE_RGBE
    vec4 ret = textureLod(lightProbe, coord, lod);
    // Decode RGBE: alpha stores a shared exponent, biased by 128.
    return ret.rgb * pow(2.0, ret.a * 255.0 - 128.0);
#else
    return textureLod(lightProbe, coord, lod).rgb;
#endif
}
// This is broken out into its own routine so that if we get some other
// format image than a lat-long, then we can account for that by changing
// the code here alone.
vec2 getProbeSampleUV(vec3 smpDir, vec4 probeRot, vec2 probeOfs)
{
    vec2 smpUV;
#if QSSG_ENABLE_IBL_FOV
    // Assume an equirectangular HDR spherical map (instead of a cube map)
    // and warp the sample UV coordinates accordingly; the net scale is
    // approximately (1/pi, 2/pi).
    smpUV = vec2(atan(smpDir.x, smpDir.z), asin(smpDir.y));
    smpUV *= 2.0 * vec2(0.1591596371160, 0.318319274232054);
    // The default FOV is 180 deg = pi rad. Narrow the FOV by scaling the
    // texture coordinates by the ratio of pi to the incoming FOV.
    smpUV *= 3.14159265359 / lightProbeOptions.x;
#else
    smpUV.x = atan(smpDir.x, smpDir.z) / 3.14159265359;
    smpUV.y = 1.0 - (acos(smpDir.y) / 1.57079632679);
#endif
    smpUV = transformSample(smpUV.xy * 0.5, probeRot, probeOfs) + vec2(0.5, 0.5);
    return smpUV;
}
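// Samples the optional second probe layer with the same four LOD-staggered
// taps as getProbeWeightedSample(); the returned alpha is used as a blend
// weight by the caller.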
vec4 getTopLayerSample(vec3 inDir, float lodShift, vec3 lodOffsets)
{
#if QSSG_ENABLE_LIGHT_PROBE_2
    if (lightProbe2Properties.w < 0.5)
        return vec4(0.0, 0.0, 0.0, 0.0);
    vec2 smpUV = getProbeSampleUV(inDir, vec4(1.0, 0.0, 0.0, 1.0), lightProbeProperties.xy);
    smpUV.x -= 0.5;
    smpUV.x *= lightProbe2Properties.x;
    smpUV.x += lightProbe2Properties.y;
    vec4 retVal = 0.4 * textureLod(lightProbe2, smpUV, lodShift);
    retVal += 0.2 * textureLod(lightProbe2, smpUV, lodShift + lodOffsets.x);
    retVal += 0.3 * textureLod(lightProbe2, smpUV, lodShift + lodOffsets.y);
    retVal += 0.1 * textureLod(lightProbe2, smpUV, lodShift + lodOffsets.z);
    return retVal;
#else
    return vec4(0.0, 0.0, 0.0, 0.0);
#endif
}
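// Single probe tap at the requested LOD (the normal parameter is unused here).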
vec3 getProbeSample(vec3 smpDir, float lodShift, vec3 normal)
{
    vec2 smpUV = getProbeSampleUV(smpDir, lightProbeRotation, lightProbeOffset.xy);
    return textureProbe(lightProbe, smpUV, lodShift);
}
vec3 getProbeWeightedSample(vec3 smpDir, float lodShift, float roughness, vec3 normal)
{
    // This gives us a weighted sum that approximates the total filter support
    // of the full-blown convolution.
    vec2 smpUV = getProbeSampleUV(smpDir, lightProbeRotation, lightProbeOffset.xy);
    float wt = 1.0;
#if QSSG_ENABLE_IBL_FOV
    // Fade samples out near the UV borders; the fade width grows with roughness.
    wt = min(wt, smoothstep(roughness * -0.25, roughness * 0.25, smpUV.x));
    wt = min(wt, smoothstep(roughness * -0.25, roughness * 0.25, smpUV.y));
    wt = min(wt, 1.0 - smoothstep(1.0 - roughness * 0.25, 1.0 + roughness * 0.25, smpUV.x));
    wt = min(wt, 1.0 - smoothstep(1.0 - roughness * 0.25, 1.0 + roughness * 0.25, smpUV.y));
#endif
    // LOD offsets for the staggered taps, spreading wider as roughness grows.
    vec3 lodOffsets;
    lodOffsets.x = mix(-2.0, -0.70710678, roughness);
    lodOffsets.y = min(2.0 * smoothstep(0.0, 0.1, roughness), 2.0 - 1.29289 * smoothstep(0.1, 1.0, roughness));
    lodOffsets.z = min(6.0 * smoothstep(0.0, 0.1, roughness), 6.0 - 4.585786 * smoothstep(0.1, 1.0, roughness));
    // Derive a minimum LOD from the screen-space derivatives of the sample
    // direction so minified regions do not alias.
    vec2 iSize = vec2(textureSize(lightProbe, 0));
    vec3 ddx = dFdx(smpDir) * iSize.x;
    vec3 ddy = dFdy(smpDir) * iSize.y;
    float deriv = max(dot(ddx, ddx), dot(ddy, ddy));
    deriv = clamp(deriv, 1.0, iSize.x * iSize.y);
    float lodBound = 0.5 * log2(deriv) - 1.0;
    float minLod = lodBound;
    float maxLod = log2(max(iSize.x, iSize.y));
    minLod = clamp(minLod / maxLod, 0.0, 1.0);
    minLod *= minLod * maxLod;
    lodShift = max(lodShift, minLod);
    // Four taps at staggered LODs approximate a wider filter kernel.
    vec3 retVal = 0.4 * textureProbe(lightProbe, smpUV, lodShift);
    retVal += 0.2 * textureProbe(lightProbe, smpUV, max(minLod, lodShift + lodOffsets.x));
    retVal += 0.3 * textureProbe(lightProbe, smpUV, lodShift + lodOffsets.y);
    retVal += 0.1 * textureProbe(lightProbe, smpUV, lodShift + lodOffsets.z);
#if QSSG_ENABLE_LIGHT_PROBE_2
    // Blend in the second probe layer, using its alpha as the blend weight.
    vec4 topSmp = getTopLayerSample(smpDir, lodShift, lodOffsets);
    vec3 tempVal = mix(retVal, topSmp.xyz, topSmp.w);
    retVal = mix(retVal, tempVal, lightProbe2Properties.z);
#endif
    if (lightProbeProperties.z > -1.0) {
        // Horizon cutoff: fade the probe contribution around the cutoff height.
        float ctr = 0.5 + 0.5 * lightProbeProperties.z;
        float vertWt = smoothstep(ctr - roughness * 0.25, ctr + roughness * 0.25, smpUV.y);
        float wtScaled = mix(1.0, vertWt, lightProbeProperties.z + 1.0);
        retVal *= wtScaled;
    }
    return retVal * wt;
}
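// Cheaper variant of getTopLayerSample(): a single three-tap mip pyramid
// instead of four LOD-staggered taps.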
vec3 getTopLayerSampleSimple(vec3 inDir)
{
#if QSSG_ENABLE_LIGHT_PROBE_2
    if (lightProbe2Properties.w < 0.5)
        return vec3(0.0, 0.0, 0.0);
    vec2 smpUV = getProbeSampleUV(inDir, vec4(1.0, 0.0, 0.0, 1.0), lightProbeProperties.xy);
    smpUV.x -= 0.5;
    smpUV.x *= lightProbe2Properties.x;
    smpUV.x += lightProbe2Properties.y;
    const float d = 0.76190476; // 1.0 / (0.0625 + 0.25 + 1.0) = 1.0 / 1.3125
    float baseLevel = lightProbeOffset.w - 1.0;
    // Tap the three coarsest mips of the second layer, sampled raw as in
    // getTopLayerSample(), with the same weights as sampleDiffuse() below.
    vec3 val = textureLod(lightProbe2, smpUV, max(baseLevel - 2.0, 0.0)).rgb * 0.0625;
    val += textureLod(lightProbe2, smpUV, max(baseLevel - 1.0, 0.0)).rgb * 0.25;
    val += textureLod(lightProbe2, smpUV, max(baseLevel, 0.0)).rgb;
    return d * val;
#else
    return vec3(0.0, 0.0, 0.0);
#endif
}
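// Diffuse IBL: approximate the diffuse convolution by blending the three
// coarsest mip levels of the probe in the surface-normal direction.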
vec4 sampleDiffuse(mat3 tanFrame)
{
    if (lightProbeProperties.w < 0.005)
        return vec4(0.0);
    vec2 smpUV = getProbeSampleUV(tanFrame[2], lightProbeRotation, lightProbeOffset.xy);
    const float d = 0.76190476; // 1.0 / (0.0625 + 0.25 + 1.0) = 1.0 / 1.3125
    float baseLevel = lightProbeOffset.w - 1.0;
    vec3 val = textureProbe(lightProbe, smpUV, max(baseLevel - 2.0, 0.0)) * 0.0625;
    val += textureProbe(lightProbe, smpUV, max(baseLevel - 1.0, 0.0)) * 0.25;
    val += textureProbe(lightProbe, smpUV, max(baseLevel, 0.0));
#if QSSG_ENABLE_LIGHT_PROBE_2
    vec3 topSmp = getTopLayerSampleSimple(tanFrame[2]);
    val = mix(val, topSmp, lightProbe2Properties.z);
#endif
    if (lightProbeProperties.z > -1.0) {
        // Horizon cutoff: fade the probe contribution around the cutoff height.
        float ctr = 0.5 + 0.5 * lightProbeProperties.z;
        float vertWt = smoothstep(ctr * 0.25, ctr + 0.25, smpUV.y);
        float wtScaled = mix(1.0, vertWt, lightProbeProperties.z + 1.0);
        val *= wtScaled;
    }
    return vec4(d * lightProbeProperties.w * val, 1.0);
}
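// Convenience entry point for custom materials: builds the tangent frame from
// screen-space derivatives. The aoFactor parameter is currently unused.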
vec4 sampleDiffuseCustomMaterial(vec3 normal, vec3 worldPos, float aoFactor)
{
    mat3 tanFrame = tangentFrame(normal, worldPos);
    return sampleDiffuse(tanFrame);
}
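// Glossy IBL: a single probe tap along the mirror reflection direction, with
// the mip level driven by roughness and attenuated by a Schlick-style
// geometric self-shadowing term.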
vec4 sampleGlossy(mat3 tanFrame, vec3 viewDir, float rough)
{
    if (lightProbeProperties.w < 0.005)
        return vec4(0.0);
    // Remap roughness before converting it to a mip level.
    float sigma = smoothstep(0.0, 1.0, clamp(rough, 0.0001, 1.0));
    vec3 smpDir = reflect(-viewDir, tanFrame[2]);
    // Compute the geometric occlusion / self-shadowing term.
    float NdotL = clamp(dot(smpDir, tanFrame[2]), 0.0, 0.999995);
    float k = sigma * 0.31830988618; // roughness / pi
    float Gl = clamp((NdotL / (NdotL * (1.0 - k) + k) + (1.0 - k * k)) * 0.5, 0.0, 1.0);
    // Map roughness onto the mip chain: sigma == 1 selects the coarsest level.
    vec2 iSize = vec2(textureSize(lightProbe, 0));
    float levels = log2(max(iSize.x, iSize.y));
    vec3 outColor = getProbeSample(smpDir, sigma * levels, tanFrame[2]);
    return vec4(lightProbeProperties.w * Gl * outColor, 1.0);
}
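// Illustrative usage from a material fragment shader. This is a sketch only:
// varWorldPos, varNormal, cameraPosition and materialRoughness are assumed
// inputs, not names defined by this file.
//
//     mat3 tanFrame = tangentFrame(normalize(varNormal), varWorldPos);
//     vec3 viewDir = normalize(cameraPosition - varWorldPos);
//     vec4 diffuseIBL = sampleDiffuse(tanFrame);
//     vec4 glossyIBL = sampleGlossy(tanFrame, viewDir, materialRoughness);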
#endif // SAMPLE_PROBE_GLSLLIB