#

Allegorithmic Metal/Rough PBR shader

Import from libraries

import lib-defines.glsl
import lib-env.glsl
import lib-normal.glsl
import lib-random.glsl
#

All the channels needed are bound here.

//: param auto channel_basecolor
uniform sampler2D diffuse_tex;
//: param auto channel_roughness
uniform sampler2D roughness_tex;
//: param auto channel_metallic
uniform sampler2D metallic_tex;
//: param auto channel_emissive
uniform sampler2D emissive_tex;
#

AO map.

//: param auto texture_ao
uniform sampler2D ao_tex;
#

Eye (camera) position in world space.

//: param auto world_eye_position
uniform vec3 camera_pos;
#

Number of miplevels in the envmap.

//: param auto environment_max_lod
uniform float maxLod;
#

An integer giving the number of samples used to compute the specular contribution. Higher values improve quality but increase the performance cost.

//: param custom {
//:   "default": 16,
//:   "label": "Quality",
//:   "widget": "combobox",
//:   "values": {
//:     "Low (4 spp)": 4,
//:     "Medium (16 spp)": 16,
//:     "High (64 spp)": 64
//:   }
//: }
uniform int nbSamples;
#

A value used to tweak the strength of the height channel.

//: param custom {
//:   "default": 1.0,
//:   "label": "Height force",
//:   "min": 0.01,
//:   "max": 10.0
//: }
uniform float height_force;
#

A value used to tweak the Ambient Occlusion intensity.

//: param custom {
//:   "default": 0.75,
//:   "label": "AO Intensity",
//:   "min": 0.00,
//:   "max": 1.0
//: }
uniform float ao_intensity;
#

A value used to tweak the emissive intensity.

//: param custom {
//:   "default": 1.0,
//:   "label": "Emissive Intensity",
//:   "min": 0.00,
//:   "max": 10.0
//: }
uniform float emissive_intensity;


const float EPSILON_COEF = 1e-4;


float normal_distrib(
  float ndh,
  float Roughness)
{
  // use GGX / Trowbridge-Reitz, same as Disney and Unreal 4
  // cf http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p3
  float alpha = Roughness * Roughness;
  float tmp = alpha / max(1e-8,(ndh*ndh*(alpha*alpha-1.0)+1.0));
  return tmp * tmp * M_INV_PI;
}
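// For reference, the expression above is the closed-form GGX NDF
//   D(h) = alpha^2 / (PI * ((n.h)^2 * (alpha^2 - 1) + 1)^2), with alpha = Roughness^2,
// written so the squared denominator is obtained by squaring tmp.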

vec3 fresnel(
  float vdh,
  vec3 F0)
{
  // Schlick with Spherical Gaussian approximation
  // cf http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p3
  float sphg = pow(2.0, (-5.55473*vdh - 6.98316) * vdh);
  return F0 + (vec3(1.0, 1.0, 1.0) - F0) * sphg;
}
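// The Spherical Gaussian exponent above is a fast approximation of pow(1.0 - vdh, 5.0).
// For comparison, a plain Schlick version might look like the following sketch
// (illustrative only, not called anywhere in this shader):
vec3 fresnel_schlick_reference(
  float vdh,
  vec3 F0)
{
  // Classic Schlick approximation: F = F0 + (1 - F0) * (1 - v.h)^5
  return F0 + (vec3(1.0) - F0) * pow(1.0 - vdh, 5.0);
}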

float G1(
  float ndw, // w is either Ln or Vn
  float k)
{
  // One generic factor of the geometry function divided by ndw
  // NB : We should have k > 0
  return 1.0 / ( ndw*(1.0-k) +  k );
}

float visibility(
  float ndl,
  float ndv,
  float Roughness)
{
  // Schlick with Smith-like choice of k
  // cf http://blog.selfshadow.com/publications/s2013-shading-course/karis/s2013_pbs_epic_notes_v2.pdf p3
  // visibility is a Cook-Torrance geometry function divided by (n.l)*(n.v)
  float k = max(Roughness * Roughness * 0.5, 1e-5);
  return G1(ndl,k)*G1(ndv,k);
}

vec3 cook_torrance_contrib(
  float vdh,
  float ndh,
  float ndl,
  float ndv,
  vec3 Ks,
  float Roughness)
{
  // This is the contribution when using importance sampling with the GGX based
  // sample distribution. This means ct_contrib = ct_brdf / ggx_probability
  return fresnel(vdh,Ks) * (visibility(ndl,ndv,Roughness) * vdh * ndl / ndh );
}
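// Derivation sketch: the Cook-Torrance BRDF is D*F*G / (4 (n.l)(n.v)) and the GGX
// sampling pdf is D*(n.h) / (4 (v.h)) (see probabilityGGX below), so the estimator
// weight brdf * (n.l) / pdf reduces to F * [G / ((n.l)(n.v))] * (v.h) * (n.l) / (n.h),
// i.e. exactly fresnel(vdh, Ks) * visibility(ndl, ndv, Roughness) * vdh * ndl / ndh.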

vec3 importanceSampleGGX(vec2 Xi, vec3 A, vec3 B, vec3 C, float roughness)
{
  float a = roughness*roughness;
  float cosT = sqrt((1.0-Xi.y)/(1.0+(a*a-1.0)*Xi.y));
  float sinT = sqrt(1.0-cosT*cosT);
  float phi = 2.0*M_PI*Xi.x;
  return (sinT*cos(phi)) * A + (sinT*sin(phi)) * B + cosT * C;
}
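// The half-vector is generated by inverting the GGX CDF: for uniform Xi in [0,1)^2,
// cosT = sqrt((1 - Xi.y) / (1 + (a^2 - 1) * Xi.y)) concentrates samples around the
// normal C (third basis vector) as roughness decreases, while phi spins uniformly
// around it in the (A, B) tangent plane.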

float probabilityGGX(float ndh, float vdh, float Roughness)
{
  return normal_distrib(ndh, Roughness) * ndh / (4.0*vdh);
}

float distortion(vec3 Wn)
{
  // Computes the inverse of the solid angle of the (differential) pixel in
  // the environment map pointed at by Wn
  float sinT = sqrt(1.0-Wn.y*Wn.y);
  return sinT;
}

float computeLOD(vec3 Ln, float p)
{
  return max(0.0, (maxLod-1.5) - 0.5*(log(float(nbSamples)) + log( p * distortion(Ln) ))
	* M_INV_LOG2);
}
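// This mip selection follows the filtered importance sampling idea (cf. "GPU-Based
// Importance Sampling", GPU Gems 3, chapter 20): low-probability samples cover a
// large solid angle and therefore read a blurrier mip, here
//   lod = maxLod - 1.5 - 0.5 * log2(nbSamples * p * distortion(Ln)), clamped to 0.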

vec3 shade(V2F inputs)
{
  vec3 eye_vec = normalize(camera_pos - inputs.position);
  vec4 out_color = texture2D(diffuse_tex, inputs.tex_coord);
#

All tangent space normals

  vec3 normalBN = normalFromBaseNormal(inputs.tex_coord);
  vec3 normalH = normalFromHeight(inputs.tex_coord, height_force);
  vec3 normalTGT = normalBlend(normalBN, normalH); // Blend both normals
  // Normal from the document's normal channel, treated as details
  vec3 normalDetails = normalFromNormal(inputs.tex_coord);
  // Use oriented blend operator to add details to the base plus
  // height derived normal.
  normalTGT = normalBlendOriented(normalTGT,normalDetails);
#

Compute a world space normal from the tangent space one

  vec3 normalWS = normalize(
	normalTGT.x * inputs.tangent
	+ normalTGT.y * inputs.bitangent
	+ normalTGT.z * inputs.normal
	);
#

Compute material model (diffuse, specular & roughness)

  vec3 dielectricColor = vec3(0.04);

  vec2 roughness_a = texture2D(roughness_tex, inputs.tex_coord).rg;
  vec2 metallic_a = texture2D(metallic_tex, inputs.tex_coord).rg;

  vec3 baseColor;
  float roughness;
  float metallic;

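  // Blend each painted value with the channel's background default (CHANNELS_BACKGROUND,
  // declared outside this section) using the channel's coverage: the alpha of the base
  // color sample and the second component of the roughness/metallic samples.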
  baseColor = out_color.rgb + vec3(CHANNELS_BACKGROUND.x) * (1.0 - out_color.a);
  roughness = roughness_a.r + CHANNELS_BACKGROUND.y * (1.0 - roughness_a.g);
  metallic = metallic_a.r + CHANNELS_BACKGROUND.z * (1.0 - metallic_a.g);
#

Modulate the AO map by ao_intensity and remove AO on metallic parts

  float ao = mix(1.0, texture2D(ao_tex, inputs.tex_coord).r, ao_intensity * (1.0 - metallic));

  vec3 diffColor = baseColor * (1.0 - metallic);
  vec3 specColor = mix(dielectricColor, baseColor, metallic);
#

Create a local basis for BRDF work (tangent and bitangent re-orthonormalized against the shading normal)

  vec3 Tp = normalize(
	inputs.tangent
	- normalWS*dot(inputs.tangent, normalWS)
  ); // local tangent
  vec3 Bp = normalize(
	inputs.bitangent
	- normalWS*dot(inputs.bitangent, normalWS)
	- Tp*dot(inputs.bitangent, Tp)
  ); // local bitangent

  float ndv = dot(eye_vec, normalWS);
#

Trick to remove black artefacts: if the surface is seen from the back side, mirror the eye vector about the normal (i.e. place the eye on the opposite side), which removes the black zones.

  if (ndv < 0.0) {
	eye_vec = reflect(eye_vec, normalWS);
	ndv = abs(ndv);
  }
#

Diffuse contribution

  vec3 contribE = envIrradiance(normalWS) * diffColor * ao;
#

Specular contribution

  vec3 contribS = vec3(0.0);
  for(int i=0; i<nbSamples; ++i)
  {
	vec2 Xi = hammersley2D(i, nbSamples);
	vec3 Hn = importanceSampleGGX(Xi,Tp,Bp,normalWS,roughness);
	vec3 Ln = -reflect(eye_vec,Hn);
	float ndl = dot(normalWS, Ln);

	// Horizon fading trick from http://marmosetco.tumblr.com/post/81245981087
	const float horizonFade = 1.3;
	float horiz = clamp( 1.0 + horizonFade * ndl, 0.0, 1.0 );
	horiz *= horiz;

	ndl = max( 1e-8, abs(ndl) );
	float vdh = max(1e-8, dot(eye_vec, Hn));
	float ndh = max(1e-8, dot(normalWS, Hn));
	float lodS = roughness < 0.01 ? 0.0 : computeLOD(Ln,
	  probabilityGGX(ndh, vdh, roughness));
	contribS +=
	envSampleLOD(Ln, lodS) *
	cook_torrance_contrib(
	  vdh, ndh, ndl, ndv,
	  specColor,
	  roughness) * horiz * ao;
  }
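  // Average the Monte Carlo estimator over the importance-sampled directions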
  contribS /= float(nbSamples);

  // Emissive
  vec3 contribEm =  emissive_intensity * texture2D(emissive_tex, inputs.tex_coord).rgb;

  // Sum diffuse + spec + emissive
  return contribS + contribE + contribEm;
}